fs/nfs/delegation.c (deliverable/linux.git)
/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "nfs4trace.h"

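/* Release the delegation's credential and free it after an RCU grace period. */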
static void nfs_free_delegation(struct nfs_delegation *delegation)
{
	if (delegation->cred) {
		put_rpccred(delegation->cred);
		delegation->cred = NULL;
	}
	kfree_rcu(delegation, rcu);
}

/**
 * nfs_mark_delegation_referenced - set delegation's REFERENCED flag
 * @delegation: delegation to process
 *
 */
void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
{
	set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
}

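/*
 * Return 1 if the inode holds a delegation covering all of the requested
 * open modes and it is not currently being returned; optionally mark the
 * delegation as referenced.
 */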
static int
nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark)
{
	struct nfs_delegation *delegation;
	int ret = 0;

	flags &= FMODE_READ|FMODE_WRITE;
	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation != NULL && (delegation->type & flags) == flags &&
	    !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
		if (mark)
			nfs_mark_delegation_referenced(delegation);
		ret = 1;
	}
	rcu_read_unlock();
	return ret;
}
/**
 * nfs4_have_delegation - check if inode has a delegation, mark it
 * NFS_DELEGATION_REFERENCED if there is one.
 * @inode: inode to check
 * @flags: delegation types to check for
 *
 * Returns one if inode has the indicated delegation, otherwise zero.
 */
int nfs4_have_delegation(struct inode *inode, fmode_t flags)
{
	return nfs4_do_check_delegation(inode, flags, true);
}

/*
 * nfs4_check_delegation - check if inode has a delegation, do not mark
 * NFS_DELEGATION_REFERENCED if it has one.
 */
int nfs4_check_delegation(struct inode *inode, fmode_t flags)
{
	return nfs4_do_check_delegation(inode, flags, false);
}

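/*
 * Reclaim the locks held by this open context against the new open stateid,
 * walking both the POSIX and flock lock lists for the inode.
 */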
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	struct file_lock_context *flctx = inode->i_flctx;
	struct list_head *list;
	int status = 0;

	if (flctx == NULL)
		goto out;

	list = &flctx->flc_posix;
	spin_lock(&flctx->flc_lock);
restart:
	list_for_each_entry(fl, list, fl_list) {
		if (nfs_file_open_context(fl->fl_file) != ctx)
			continue;
		spin_unlock(&flctx->flc_lock);
		status = nfs4_lock_delegation_recall(fl, state, stateid);
		if (status < 0)
			goto out;
		spin_lock(&flctx->flc_lock);
	}
	if (list == &flctx->flc_posix) {
		list = &flctx->flc_flock;
		goto restart;
	}
	spin_unlock(&flctx->flc_lock);
out:
	return status;
}

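/*
 * Reclaim open (and lock) state from the server for every open context whose
 * stateid matches the delegation that is being returned.
 */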
static int nfs_delegation_claim_opens(struct inode *inode,
		const nfs4_stateid *stateid, fmode_t type)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state_owner *sp;
	struct nfs4_state *state;
	unsigned int seq;
	int err;

again:
	spin_lock(&inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		if (!nfs4_valid_open_stateid(state))
			continue;
		if (!nfs4_stateid_match(&state->stateid, stateid))
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&inode->i_lock);
		sp = state->owner;
		/* Block nfs4_proc_unlck */
		mutex_lock(&sp->so_delegreturn_mutex);
		seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
		err = nfs4_open_delegation_recall(ctx, state, stateid, type);
		if (!err)
			err = nfs_delegation_claim_locks(ctx, state, stateid);
		if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
			err = -EAGAIN;
		mutex_unlock(&sp->so_delegreturn_mutex);
		put_nfs_open_context(ctx);
		if (err != 0)
			return err;
		goto again;
	}
	spin_unlock(&inode->i_lock);
	return 0;
}

/**
 * nfs_inode_reclaim_delegation - process a delegation reclaim request
 * @inode: inode to process
 * @cred: credential to use for request
 * @res: new delegation state from server
 *
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
				  struct nfs_openres *res)
{
	struct nfs_delegation *delegation;
	struct rpc_cred *oldcred = NULL;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation != NULL) {
		spin_lock(&delegation->lock);
		if (delegation->inode != NULL) {
			nfs4_stateid_copy(&delegation->stateid, &res->delegation);
			delegation->type = res->delegation_type;
			delegation->pagemod_limit = res->pagemod_limit;
			oldcred = delegation->cred;
			delegation->cred = get_rpccred(cred);
			clear_bit(NFS_DELEGATION_NEED_RECLAIM,
				  &delegation->flags);
			spin_unlock(&delegation->lock);
			rcu_read_unlock();
			put_rpccred(oldcred);
			trace_nfs4_reclaim_delegation(inode, res->delegation_type);
		} else {
			/* We appear to have raced with a delegation return. */
			spin_unlock(&delegation->lock);
			rcu_read_unlock();
			nfs_inode_set_delegation(inode, cred, res);
		}
	} else {
		rcu_read_unlock();
	}
}

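/*
 * Send DELEGRETURN for this delegation, unless the server has revoked it,
 * then free the delegation structure.
 */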
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
	int res = 0;

	if (!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
		res = nfs4_proc_delegreturn(inode,
				delegation->cred,
				&delegation->stateid,
				issync);
	nfs_free_delegation(delegation);
	return res;
}

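/*
 * Grab a reference to the delegation's inode, or return NULL if the
 * delegation has already been detached from it.
 */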
static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation)
{
	struct inode *inode = NULL;

	spin_lock(&delegation->lock);
	if (delegation->inode != NULL)
		inode = igrab(delegation->inode);
	spin_unlock(&delegation->lock);
	return inode;
}

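/*
 * Claim the inode's delegation for return. Returns the delegation if this
 * caller set NFS_DELEGATION_RETURNING, or NULL if there is no delegation or
 * a return is already in progress. Must be called under rcu_read_lock().
 */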
static struct nfs_delegation *
nfs_start_delegation_return_locked(struct nfs_inode *nfsi)
{
	struct nfs_delegation *ret = NULL;
	struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);

	if (delegation == NULL)
		goto out;
	spin_lock(&delegation->lock);
	if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
		ret = delegation;
	spin_unlock(&delegation->lock);
out:
	return ret;
}

static struct nfs_delegation *
nfs_start_delegation_return(struct nfs_inode *nfsi)
{
	struct nfs_delegation *delegation;

	rcu_read_lock();
	delegation = nfs_start_delegation_return_locked(nfsi);
	rcu_read_unlock();
	return delegation;
}

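/*
 * Back out of a delegation return: clear the RETURNING flag, re-flag the
 * delegation for return, and mark the client so the state manager retries.
 */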
static void
nfs_abort_delegation_return(struct nfs_delegation *delegation,
			    struct nfs_client *clp)
{

	spin_lock(&delegation->lock);
	clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
	set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
	spin_unlock(&delegation->lock);
	set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
}

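/*
 * Unlink the delegation from the inode and from the per-server list.
 * Returns the delegation, or NULL if it is no longer the inode's current
 * delegation. Caller must hold clp->cl_lock.
 */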
static struct nfs_delegation *
nfs_detach_delegation_locked(struct nfs_inode *nfsi,
		struct nfs_delegation *delegation,
		struct nfs_client *clp)
{
	struct nfs_delegation *deleg_cur =
		rcu_dereference_protected(nfsi->delegation,
				lockdep_is_held(&clp->cl_lock));

	if (deleg_cur == NULL || delegation != deleg_cur)
		return NULL;

	spin_lock(&delegation->lock);
	set_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
	list_del_rcu(&delegation->super_list);
	delegation->inode = NULL;
	rcu_assign_pointer(nfsi->delegation, NULL);
	spin_unlock(&delegation->lock);
	return delegation;
}

static struct nfs_delegation *nfs_detach_delegation(struct nfs_inode *nfsi,
		struct nfs_delegation *delegation,
		struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;

	spin_lock(&clp->cl_lock);
	delegation = nfs_detach_delegation_locked(nfsi, delegation, clp);
	spin_unlock(&clp->cl_lock);
	return delegation;
}

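/*
 * Atomically claim and detach the inode's delegation. Returns NULL if the
 * inode has no delegation or a return is already in progress.
 */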
static struct nfs_delegation *
nfs_inode_detach_delegation(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_delegation *delegation;

	delegation = nfs_start_delegation_return(nfsi);
	if (delegation == NULL)
		return NULL;
	return nfs_detach_delegation(nfsi, delegation, server);
}

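/*
 * Update an existing delegation in place when the server hands out a newer
 * stateid for the same delegation.
 */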
static void
nfs_update_inplace_delegation(struct nfs_delegation *delegation,
		const struct nfs_delegation *update)
{
	if (nfs4_stateid_is_newer(&update->stateid, &delegation->stateid)) {
		delegation->stateid.seqid = update->stateid.seqid;
		smp_wmb();
		delegation->type = update->type;
	}
}

/**
 * nfs_inode_set_delegation - set up a delegation on an inode
 * @inode: inode to which delegation applies
 * @cred: cred to use for subsequent delegation processing
 * @res: new delegation state from server
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_client *clp = server->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation, *old_delegation;
	struct nfs_delegation *freeme = NULL;
	int status = 0;

	delegation = kmalloc(sizeof(*delegation), GFP_NOFS);
	if (delegation == NULL)
		return -ENOMEM;
	nfs4_stateid_copy(&delegation->stateid, &res->delegation);
	delegation->type = res->delegation_type;
	delegation->pagemod_limit = res->pagemod_limit;
	delegation->change_attr = inode->i_version;
	delegation->cred = get_rpccred(cred);
	delegation->inode = inode;
	delegation->flags = 1<<NFS_DELEGATION_REFERENCED;
	spin_lock_init(&delegation->lock);

	spin_lock(&clp->cl_lock);
	old_delegation = rcu_dereference_protected(nfsi->delegation,
					lockdep_is_held(&clp->cl_lock));
	if (old_delegation != NULL) {
		/* Is this an update of the existing delegation? */
		if (nfs4_stateid_match_other(&old_delegation->stateid,
					&delegation->stateid)) {
			nfs_update_inplace_delegation(old_delegation,
					delegation);
			goto out;
		}
		/*
		 * Deal with broken servers that hand out two
		 * delegations for the same file.
		 * Allow for upgrades to a WRITE delegation, but
		 * nothing else.
		 */
		dfprintk(FILE, "%s: server %s handed out "
				"a duplicate delegation!\n",
				__func__, clp->cl_hostname);
		if (delegation->type == old_delegation->type ||
		    !(delegation->type & FMODE_WRITE)) {
			freeme = delegation;
			delegation = NULL;
			goto out;
		}
		if (test_and_set_bit(NFS_DELEGATION_RETURNING,
					&old_delegation->flags))
			goto out;
		freeme = nfs_detach_delegation_locked(nfsi,
				old_delegation, clp);
		if (freeme == NULL)
			goto out;
	}
	list_add_tail_rcu(&delegation->super_list, &server->delegations);
	rcu_assign_pointer(nfsi->delegation, delegation);
	delegation = NULL;

	/* Ensure we revalidate the attributes and page cache! */
	spin_lock(&inode->i_lock);
	nfsi->cache_validity |= NFS_INO_REVAL_FORCED;
	spin_unlock(&inode->i_lock);
	trace_nfs4_set_delegation(inode, res->delegation_type);

out:
	spin_unlock(&clp->cl_lock);
	if (delegation != NULL)
		nfs_free_delegation(delegation);
	if (freeme != NULL)
		nfs_do_return_delegation(inode, freeme, 0);
	return status;
}

/*
 * Basic procedure for returning a delegation to the server
 */
static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	int err = 0;

	if (delegation == NULL)
		return 0;
	do {
		if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
			break;
		err = nfs_delegation_claim_opens(inode, &delegation->stateid,
				delegation->type);
		if (!issync || err != -EAGAIN)
			break;
		/*
		 * Guard against state recovery
		 */
		err = nfs4_wait_clnt_recover(clp);
	} while (err == 0);

	if (err) {
		nfs_abort_delegation_return(delegation, clp);
		goto out;
	}
	if (!nfs_detach_delegation(nfsi, delegation, NFS_SERVER(inode)))
		goto out;

	err = nfs_do_return_delegation(inode, delegation, issync);
out:
	return err;
}

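/*
 * Test whether a delegation has been marked for return, either
 * unconditionally or because the file is no longer open.
 */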
static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
{
	bool ret = false;

	if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
		goto out;
	if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
		ret = true;
	if (test_and_clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) && !ret) {
		struct inode *inode;

		spin_lock(&delegation->lock);
		inode = delegation->inode;
		if (inode && list_empty(&NFS_I(inode)->open_files))
			ret = true;
		spin_unlock(&delegation->lock);
	}
out:
	return ret;
}

/**
 * nfs_client_return_marked_delegations - return previously marked delegations
 * @clp: nfs_client to process
 *
 * Note that this function is designed to be called by the state
 * manager thread. For this reason, it cannot flush the dirty data,
 * since that could deadlock in case of a state recovery error.
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs_client_return_marked_delegations(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	struct nfs_server *server;
	struct inode *inode;
	int err = 0;

restart:
	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry_rcu(delegation, &server->delegations,
								super_list) {
			if (!nfs_delegation_need_return(delegation))
				continue;
			if (!nfs_sb_active(server->super))
				continue;
			inode = nfs_delegation_grab_inode(delegation);
			if (inode == NULL) {
				rcu_read_unlock();
				nfs_sb_deactive(server->super);
				goto restart;
			}
			delegation = nfs_start_delegation_return_locked(NFS_I(inode));
			rcu_read_unlock();

			err = nfs_end_delegation_return(inode, delegation, 0);
			iput(inode);
			nfs_sb_deactive(server->super);
			if (!err)
				goto restart;
			set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
			return err;
		}
	}
	rcu_read_unlock();
	return 0;
}

/**
 * nfs_inode_return_delegation_noreclaim - return delegation, don't reclaim opens
 * @inode: inode to process
 *
 * Does not protect against delegation reclaims, therefore really only safe
 * to be called from nfs4_clear_inode().
 */
void nfs_inode_return_delegation_noreclaim(struct inode *inode)
{
	struct nfs_delegation *delegation;

	delegation = nfs_inode_detach_delegation(inode);
	if (delegation != NULL)
		nfs_do_return_delegation(inode, delegation, 1);
}

/**
 * nfs4_inode_return_delegation - synchronously return a delegation
 * @inode: inode to process
 *
 * This routine will always flush any dirty data to disk on the
 * assumption that if we need to return the delegation, then
 * we should stop caching.
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs4_inode_return_delegation(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int err = 0;

	nfs_wb_all(inode);
	delegation = nfs_start_delegation_return(nfsi);
	if (delegation != NULL)
		err = nfs_end_delegation_return(inode, delegation, 1);
	return err;
}

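/*
 * Mark a delegation to be returned once the file is no longer open, and
 * flag the client for delegreturn processing.
 */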
static void nfs_mark_return_if_closed_delegation(struct nfs_server *server,
		struct nfs_delegation *delegation)
{
	set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
	set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
}

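/*
 * Mark a delegation for immediate return and flag the client for
 * delegreturn processing.
 */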
static void nfs_mark_return_delegation(struct nfs_server *server,
		struct nfs_delegation *delegation)
{
	set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
	set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
}

static bool nfs_server_mark_return_all_delegations(struct nfs_server *server)
{
	struct nfs_delegation *delegation;
	bool ret = false;

	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
		nfs_mark_return_delegation(server, delegation);
		ret = true;
	}
	return ret;
}

static void nfs_client_mark_return_all_delegations(struct nfs_client *clp)
{
	struct nfs_server *server;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		nfs_server_mark_return_all_delegations(server);
	rcu_read_unlock();
}

static void nfs_delegation_run_state_manager(struct nfs_client *clp)
{
	if (test_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state))
		nfs4_schedule_state_manager(clp);
}

/**
 * nfs_expire_all_delegations - mark all of a client's delegations for return
 * @clp: client to process
 *
 */
void nfs_expire_all_delegations(struct nfs_client *clp)
{
	nfs_client_mark_return_all_delegations(clp);
	nfs_delegation_run_state_manager(clp);
}

/**
 * nfs_server_return_all_delegations - return delegations for one superblock
 * @server: nfs_server to process
 *
 */
void nfs_server_return_all_delegations(struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;
	bool need_wait;

	if (clp == NULL)
		return;

	rcu_read_lock();
	need_wait = nfs_server_mark_return_all_delegations(server);
	rcu_read_unlock();

	if (need_wait) {
		nfs4_schedule_state_manager(clp);
		nfs4_wait_clnt_recover(clp);
	}
}

static void nfs_mark_return_unused_delegation_types(struct nfs_server *server,
					fmode_t flags)
{
	struct nfs_delegation *delegation;

	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
		if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE))
			continue;
		if (delegation->type & flags)
			nfs_mark_return_if_closed_delegation(server, delegation);
	}
}

static void nfs_client_mark_return_unused_delegation_types(struct nfs_client *clp,
							fmode_t flags)
{
	struct nfs_server *server;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		nfs_mark_return_unused_delegation_types(server, flags);
	rcu_read_unlock();
}

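/*
 * Flag the inode's delegation as revoked by the server so that no
 * DELEGRETURN is sent for it, and mark it for cleanup by the state manager.
 */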
static void nfs_revoke_delegation(struct inode *inode)
{
	struct nfs_delegation *delegation;
	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation != NULL) {
		set_bit(NFS_DELEGATION_REVOKED, &delegation->flags);
		nfs_mark_return_delegation(NFS_SERVER(inode), delegation);
	}
	rcu_read_unlock();
}

void nfs_remove_bad_delegation(struct inode *inode)
{
	struct nfs_delegation *delegation;

	nfs_revoke_delegation(inode);
	delegation = nfs_inode_detach_delegation(inode);
	if (delegation) {
		nfs_inode_find_state_and_recover(inode, &delegation->stateid);
		nfs_free_delegation(delegation);
	}
}
EXPORT_SYMBOL_GPL(nfs_remove_bad_delegation);

/**
 * nfs_expire_unused_delegation_types - return unused delegations of the given types
 * @clp: client to process
 * @flags: delegation types to expire
 *
 */
void nfs_expire_unused_delegation_types(struct nfs_client *clp, fmode_t flags)
{
	nfs_client_mark_return_unused_delegation_types(clp, flags);
	nfs_delegation_run_state_manager(clp);
}

static void nfs_mark_return_unreferenced_delegations(struct nfs_server *server)
{
	struct nfs_delegation *delegation;

	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
		if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
			continue;
		nfs_mark_return_if_closed_delegation(server, delegation);
	}
}

/**
 * nfs_expire_unreferenced_delegations - Eliminate unused delegations
 * @clp: nfs_client to process
 *
 */
void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
{
	struct nfs_server *server;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		nfs_mark_return_unreferenced_delegations(server);
	rcu_read_unlock();

	nfs_delegation_run_state_manager(clp);
}

/**
 * nfs_async_inode_return_delegation - asynchronously return a delegation
 * @inode: inode to process
 * @stateid: state ID information
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs_async_inode_return_delegation(struct inode *inode,
				      const nfs4_stateid *stateid)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_client *clp = server->nfs_client;
	struct nfs_delegation *delegation;

	filemap_flush(inode->i_mapping);

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation == NULL)
		goto out_enoent;

	if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid))
		goto out_enoent;
	nfs_mark_return_delegation(server, delegation);
	rcu_read_unlock();

	nfs_delegation_run_state_manager(clp);
	return 0;
out_enoent:
	rcu_read_unlock();
	return -ENOENT;
}

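/*
 * Search one server's delegation list for an inode matching the given
 * filehandle, returning a referenced inode or NULL.
 */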
static struct inode *
nfs_delegation_find_inode_server(struct nfs_server *server,
				 const struct nfs_fh *fhandle)
{
	struct nfs_delegation *delegation;
	struct inode *res = NULL;

	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
		spin_lock(&delegation->lock);
		if (delegation->inode != NULL &&
		    nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
			res = igrab(delegation->inode);
		}
		spin_unlock(&delegation->lock);
		if (res != NULL)
			break;
	}
	return res;
}

/**
 * nfs_delegation_find_inode - retrieve the inode associated with a delegation
 * @clp: client state handle
 * @fhandle: filehandle from a delegation recall
 *
 * Returns pointer to inode matching "fhandle," or NULL if a matching inode
 * cannot be found.
 */
struct inode *nfs_delegation_find_inode(struct nfs_client *clp,
					const struct nfs_fh *fhandle)
{
	struct nfs_server *server;
	struct inode *res = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		res = nfs_delegation_find_inode_server(server, fhandle);
		if (res != NULL)
			break;
	}
	rcu_read_unlock();
	return res;
}

static void nfs_delegation_mark_reclaim_server(struct nfs_server *server)
{
	struct nfs_delegation *delegation;

	list_for_each_entry_rcu(delegation, &server->delegations, super_list)
		set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
}

/**
 * nfs_delegation_mark_reclaim - mark all delegations as needing to be reclaimed
 * @clp: nfs_client to process
 *
 */
void nfs_delegation_mark_reclaim(struct nfs_client *clp)
{
	struct nfs_server *server;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		nfs_delegation_mark_reclaim_server(server);
	rcu_read_unlock();
}

/**
 * nfs_delegation_reap_unclaimed - reap unclaimed delegations after reboot recovery is done
 * @clp: nfs_client to process
 *
 */
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	struct nfs_server *server;
	struct inode *inode;

restart:
	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry_rcu(delegation, &server->delegations,
								super_list) {
			if (test_bit(NFS_DELEGATION_RETURNING,
						&delegation->flags))
				continue;
			if (test_bit(NFS_DELEGATION_NEED_RECLAIM,
						&delegation->flags) == 0)
				continue;
			if (!nfs_sb_active(server->super))
				continue;
			inode = nfs_delegation_grab_inode(delegation);
			if (inode == NULL) {
				rcu_read_unlock();
				nfs_sb_deactive(server->super);
				goto restart;
			}
			delegation = nfs_start_delegation_return_locked(NFS_I(inode));
			rcu_read_unlock();
			if (delegation != NULL) {
				delegation = nfs_detach_delegation(NFS_I(inode),
					delegation, server);
				if (delegation != NULL)
					nfs_free_delegation(delegation);
			}
			iput(inode);
			nfs_sb_deactive(server->super);
			goto restart;
		}
	}
	rcu_read_unlock();
}

/**
 * nfs_delegations_present - check for existence of delegations
 * @clp: client state handle
 *
 * Returns one if there are any nfs_delegation structures attached
 * to this nfs_client.
 */
int nfs_delegations_present(struct nfs_client *clp)
{
	struct nfs_server *server;
	int ret = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		if (!list_empty(&server->delegations)) {
			ret = 1;
			break;
		}
	rcu_read_unlock();
	return ret;
}

/**
 * nfs4_copy_delegation_stateid - Copy inode's state ID information
 * @dst: stateid data structure to fill in
 * @inode: inode to check
 * @flags: delegation type requirement
 *
 * Returns "true" and fills in "dst" if inode had a delegation,
 * otherwise "false" is returned.
 */
bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode,
		fmode_t flags)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	bool ret;

	flags &= FMODE_READ|FMODE_WRITE;
	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	ret = (delegation != NULL && (delegation->type & flags) == flags);
	if (ret) {
		nfs4_stateid_copy(dst, &delegation->stateid);
		nfs_mark_delegation_referenced(delegation);
	}
	rcu_read_unlock();
	return ret;
}

/**
 * nfs4_delegation_flush_on_close - Check if we must flush file on close
 * @inode: inode to check
 *
 * This function checks the number of outstanding writes to the file
 * against the delegation 'space_limit' field to see if
 * the spec requires us to flush the file on close.
 */
bool nfs4_delegation_flush_on_close(const struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	bool ret = true;

	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation == NULL || !(delegation->type & FMODE_WRITE))
		goto out;
	if (nfsi->nrequests < delegation->pagemod_limit)
		ret = false;
out:
	rcu_read_unlock();
	return ret;
}