/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"

static void nfs_do_free_delegation(struct nfs_delegation *delegation)
{
	kfree(delegation);
}

static void nfs_free_delegation_callback(struct rcu_head *head)
{
	struct nfs_delegation *delegation = container_of(head, struct nfs_delegation, rcu);

	nfs_do_free_delegation(delegation);
}

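/*
 * Drop the delegation's credential and free the delegation itself only
 * after an RCU grace period, since lockless readers may still be
 * traversing the cl_delegations list.
 */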
static void nfs_free_delegation(struct nfs_delegation *delegation)
{
	struct rpc_cred *cred;

	cred = rcu_dereference(delegation->cred);
	rcu_assign_pointer(delegation->cred, NULL);
	call_rcu(&delegation->rcu, nfs_free_delegation_callback);
	if (cred)
		put_rpccred(cred);
}

void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
{
	set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
}

int nfs_have_delegation(struct inode *inode, fmode_t flags)
{
	struct nfs_delegation *delegation;
	int ret = 0;

	flags &= FMODE_READ|FMODE_WRITE;
	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation != NULL && (delegation->type & flags) == flags) {
		nfs_mark_delegation_referenced(delegation);
		ret = 1;
	}
	rcu_read_unlock();
	return ret;
}

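/*
 * Claim on the server any POSIX or flock locks that were taken locally
 * under the delegation for this open context.
 */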
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status = 0;

	if (inode->i_flock == NULL)
		goto out;

	/* Protect inode->i_flock using the BKL */
	lock_kernel();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if (nfs_file_open_context(fl->fl_file) != ctx)
			continue;
		unlock_kernel();
		status = nfs4_lock_delegation_recall(state, fl);
		if (status < 0)
			goto out;
		lock_kernel();
	}
	unlock_kernel();
out:
	return status;
}

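/*
 * Walk the inode's open contexts and reclaim any open (and lock) state
 * that is still delegated under the given stateid. The scan is restarted
 * whenever i_lock has to be dropped to issue an RPC.
 */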
static void nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	int err;

again:
	spin_lock(&inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		if (memcmp(state->stateid.data, stateid->data, sizeof(state->stateid.data)) != 0)
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&inode->i_lock);
		err = nfs4_open_delegation_recall(ctx, state, stateid);
		if (err >= 0)
			err = nfs_delegation_claim_locks(ctx, state);
		put_nfs_open_context(ctx);
		if (err != 0)
			return;
		goto again;
	}
	spin_unlock(&inode->i_lock);
}

/*
 * Update an existing delegation that has been reclaimed from the server
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_delegation *delegation = NFS_I(inode)->delegation;
	struct rpc_cred *oldcred;

	if (delegation == NULL)
		return;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	oldcred = delegation->cred;
	delegation->cred = get_rpccred(cred);
	clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
	NFS_I(inode)->delegation_state = delegation->type;
	smp_wmb();
	put_rpccred(oldcred);
}

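/*
 * Send a DELEGRETURN for this delegation and free the local structure.
 * 'issync' selects whether the return is waited for.
 */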
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
	int res = 0;

	res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid, issync);
	nfs_free_delegation(delegation);
	return res;
}

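/*
 * Grab a reference to the delegation's inode, or return NULL if the
 * delegation has already been detached from it.
 */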
static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation)
{
	struct inode *inode = NULL;

	spin_lock(&delegation->lock);
	if (delegation->inode != NULL)
		inode = igrab(delegation->inode);
	spin_unlock(&delegation->lock);
	return inode;
}

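/*
 * Detach the delegation from the inode and the cl_delegations list.
 * Caller must hold clp->cl_lock. If a stateid is given, the delegation
 * is only detached when the stateids match.
 * Returns the detached delegation, or NULL if nothing matched.
 */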
static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi, const nfs4_stateid *stateid)
{
	struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);

	if (delegation == NULL)
		goto nomatch;
	spin_lock(&delegation->lock);
	if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
				sizeof(delegation->stateid.data)) != 0)
		goto nomatch_unlock;
	list_del_rcu(&delegation->super_list);
	delegation->inode = NULL;
	nfsi->delegation_state = 0;
	rcu_assign_pointer(nfsi->delegation, NULL);
	spin_unlock(&delegation->lock);
	return delegation;
nomatch_unlock:
	spin_unlock(&delegation->lock);
nomatch:
	return NULL;
}

/*
 * Set up a delegation on an inode
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	struct nfs_delegation *freeme = NULL;
	int status = 0;

	delegation = kmalloc(sizeof(*delegation), GFP_KERNEL);
	if (delegation == NULL)
		return -ENOMEM;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	delegation->change_attr = nfsi->change_attr;
	delegation->cred = get_rpccred(cred);
	delegation->inode = inode;
	delegation->flags = 1<<NFS_DELEGATION_REFERENCED;
	spin_lock_init(&delegation->lock);

	spin_lock(&clp->cl_lock);
	if (rcu_dereference(nfsi->delegation) != NULL) {
		if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
					sizeof(delegation->stateid)) == 0 &&
				delegation->type == nfsi->delegation->type) {
			goto out;
		}
		/*
		 * Deal with broken servers that hand out two
		 * delegations for the same file.
		 */
		dfprintk(FILE, "%s: server %s handed out "
				"a duplicate delegation!\n",
				__func__, clp->cl_hostname);
		if (delegation->type <= nfsi->delegation->type) {
			freeme = delegation;
			delegation = NULL;
			goto out;
		}
		freeme = nfs_detach_delegation_locked(nfsi, NULL);
	}
	list_add_rcu(&delegation->super_list, &clp->cl_delegations);
	nfsi->delegation_state = delegation->type;
	rcu_assign_pointer(nfsi->delegation, delegation);
	delegation = NULL;

	/* Ensure we revalidate the attributes and page cache! */
	spin_lock(&inode->i_lock);
	nfsi->cache_validity |= NFS_INO_REVAL_FORCED;
	spin_unlock(&inode->i_lock);

out:
	spin_unlock(&clp->cl_lock);
	if (delegation != NULL)
		nfs_free_delegation(delegation);
	if (freeme != NULL)
		nfs_do_return_delegation(inode, freeme, 0);
	return status;
}

/* Sync all data to disk upon delegation return */
static void nfs_msync_inode(struct inode *inode)
{
	filemap_fdatawrite(inode->i_mapping);
	nfs_wb_all(inode);
	filemap_fdatawait(inode->i_mapping);
}

/*
 * Basic procedure for returning a delegation to the server
 */
static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	nfs_msync_inode(inode);
	/*
	 * Guard against new delegated open/lock/unlock calls and against
	 * state recovery
	 */
	down_write(&nfsi->rwsem);
	nfs_delegation_claim_opens(inode, &delegation->stateid);
	up_write(&nfsi->rwsem);
	nfs_msync_inode(inode);

	return nfs_do_return_delegation(inode, delegation, 1);
}

/*
 * Return all delegations that have been marked for return
 */
void nfs_client_return_marked_delegations(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	struct inode *inode;

restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if (!test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
			continue;
		inode = nfs_delegation_grab_inode(delegation);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL)
			__nfs_inode_return_delegation(inode, delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
}

/*
 * This function returns the delegation without reclaiming opens
 * or protecting against delegation reclaims.
 * It is therefore really only safe to be called from
 * nfs4_clear_inode()
 */
void nfs_inode_return_delegation_noreclaim(struct inode *inode)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;

	if (rcu_dereference(nfsi->delegation) != NULL) {
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(nfsi, NULL);
		spin_unlock(&clp->cl_lock);
		if (delegation != NULL)
			nfs_do_return_delegation(inode, delegation, 0);
	}
}

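/*
 * Return the delegation attached to this inode, if any, after first
 * reclaiming the open and lock state that depends on it.
 */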
int nfs_inode_return_delegation(struct inode *inode)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int err = 0;

	if (rcu_dereference(nfsi->delegation) != NULL) {
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(nfsi, NULL);
		spin_unlock(&clp->cl_lock);
		if (delegation != NULL)
			err = __nfs_inode_return_delegation(inode, delegation);
	}
	return err;
}

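/*
 * Mark a single delegation for return and flag the client so that the
 * state manager will process it.
 */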
static void nfs_mark_return_delegation(struct nfs_client *clp, struct nfs_delegation *delegation)
{
	set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
	set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
}

/*
 * Return all delegations associated to a super block
 */
void nfs_super_return_all_delegations(struct super_block *sb)
{
	struct nfs_client *clp = NFS_SB(sb)->nfs_client;
	struct nfs_delegation *delegation;

	if (clp == NULL)
		return;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		spin_lock(&delegation->lock);
		if (delegation->inode != NULL && delegation->inode->i_sb == sb)
			set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
		spin_unlock(&delegation->lock);
	}
	rcu_read_unlock();
	nfs_client_return_marked_delegations(clp);
}

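/*
 * Mark every delegation held from this server for return.
 */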
static void nfs_client_mark_return_all_delegations(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;

	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
		set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
	}
	rcu_read_unlock();
}

static void nfs_delegation_run_state_manager(struct nfs_client *clp)
{
	if (test_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state))
		nfs4_schedule_state_manager(clp);
}

void nfs_expire_all_delegations(struct nfs_client *clp)
{
	nfs_client_mark_return_all_delegations(clp);
	nfs_delegation_run_state_manager(clp);
}

/*
 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
 */
void nfs_handle_cb_pathdown(struct nfs_client *clp)
{
	if (clp == NULL)
		return;
	nfs_client_mark_return_all_delegations(clp);
}

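/*
 * Mark for return any delegation that has not been referenced since the
 * previous scan; referenced delegations merely have their flag cleared,
 * making them candidates on the next pass.
 */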
static void nfs_client_mark_return_unreferenced_delegations(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;

	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
			continue;
		set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
		set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
	}
	rcu_read_unlock();
}

void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
{
	nfs_client_mark_return_unreferenced_delegations(clp);
	nfs_delegation_run_state_manager(clp);
}

/*
 * Asynchronous delegation recall!
 */
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_delegation *delegation;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation == NULL || memcmp(delegation->stateid.data, stateid->data,
				sizeof(delegation->stateid.data)) != 0) {
		rcu_read_unlock();
		return -ENOENT;
	}
	nfs_mark_return_delegation(clp, delegation);
	rcu_read_unlock();
	nfs_delegation_run_state_manager(clp);
	return 0;
}

/*
 * Retrieve the inode associated with a delegation
 */
struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle)
{
	struct nfs_delegation *delegation;
	struct inode *res = NULL;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		spin_lock(&delegation->lock);
		if (delegation->inode != NULL &&
		    nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
			res = igrab(delegation->inode);
		}
		spin_unlock(&delegation->lock);
		if (res != NULL)
			break;
	}
	rcu_read_unlock();
	return res;
}

/*
 * Mark all delegations as needing to be reclaimed
 */
void nfs_delegation_mark_reclaim(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list)
		set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
	rcu_read_unlock();
}

/*
 * Reap all unclaimed delegations after reboot recovery is done
 */
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	struct inode *inode;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0)
			continue;
		inode = nfs_delegation_grab_inode(delegation);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL)
			nfs_free_delegation(delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
}

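/*
 * Copy the delegation stateid for this inode into 'dst'.
 * Returns 1 if a delegation was found, 0 otherwise.
 */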
int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int ret = 0;

	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation != NULL) {
		memcpy(dst->data, delegation->stateid.data, sizeof(dst->data));
		ret = 1;
	}
	rcu_read_unlock();
	return ret;
}