NFS: Use atomic bitops when changing struct nfs_delegation->flags
[deliverable/linux.git] / fs / nfs / delegation.c
1 /*
2 * linux/fs/nfs/delegation.c
3 *
4 * Copyright (C) 2004 Trond Myklebust
5 *
6 * NFS file delegation management
7 *
8 */
9 #include <linux/completion.h>
10 #include <linux/kthread.h>
11 #include <linux/module.h>
12 #include <linux/sched.h>
13 #include <linux/spinlock.h>
14
15 #include <linux/nfs4.h>
16 #include <linux/nfs_fs.h>
17 #include <linux/nfs_xdr.h>
18
19 #include "nfs4_fs.h"
20 #include "delegation.h"
21 #include "internal.h"
22
/* Release the memory backing a delegation. */
static void nfs_do_free_delegation(struct nfs_delegation *delegation)
{
	kfree(delegation);
}
27
/*
 * RCU callback: invoked after a grace period has elapsed, so no reader
 * obtained via rcu_dereference() can still hold this delegation.
 */
static void nfs_free_delegation_callback(struct rcu_head *head)
{
	struct nfs_delegation *delegation = container_of(head, struct nfs_delegation, rcu);

	nfs_do_free_delegation(delegation);
}
34
/*
 * Drop the delegation's credential and schedule the structure for
 * freeing after an RCU grace period.  The cred pointer is cleared
 * before queueing the RCU callback so concurrent readers observe
 * either the valid cred or NULL, never a pointer to a released cred.
 */
static void nfs_free_delegation(struct nfs_delegation *delegation)
{
	struct rpc_cred *cred;

	cred = rcu_dereference(delegation->cred);
	rcu_assign_pointer(delegation->cred, NULL);
	call_rcu(&delegation->rcu, nfs_free_delegation_callback);
	if (cred)
		put_rpccred(cred);
}
45
/*
 * Re-establish on the server every POSIX/flock lock held under @ctx on
 * @state, now that the delegation covering them is being recalled.
 *
 * Returns 0 on success, or the first negative NFSv4 error that forced
 * state recovery to be scheduled.
 */
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status;

	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		/* Only POSIX and flock-style locks need reclaiming */
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		/* Skip locks that belong to other open contexts */
		if (nfs_file_open_context(fl->fl_file) != ctx)
			continue;
		status = nfs4_lock_delegation_recall(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
		default:
			printk(KERN_ERR "%s: unhandled error %d.\n",
					__func__, status);
			/* deliberate fallthrough: treat like EXPIRED */
		case -NFS4ERR_EXPIRED:
			/* kill_proc(fl->fl_pid, SIGLOST, 1); */
			/* deliberate fallthrough */
		case -NFS4ERR_STALE_CLIENTID:
			/* Lease-level failure: kick off full state recovery */
			nfs4_schedule_state_recovery(NFS_SERVER(inode)->nfs_client);
			goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}
75
/*
 * Convert every open context on @inode whose state is covered by the
 * delegation identified by @stateid back into regular server opens.
 *
 * inode->i_lock protects the open_files list but cannot be held across
 * the recall RPCs, so each processed context pins itself with
 * get_nfs_open_context(), drops the lock, and the walk restarts from
 * the top ("again") afterwards.
 */
static void nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	int err;

again:
	spin_lock(&inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		/* Only recall state covered by this particular delegation */
		if (memcmp(state->stateid.data, stateid->data, sizeof(state->stateid.data)) != 0)
			continue;
		/* Pin the context before dropping the lock for the RPCs */
		get_nfs_open_context(ctx);
		spin_unlock(&inode->i_lock);
		err = nfs4_open_delegation_recall(ctx, state, stateid);
		if (err >= 0)
			err = nfs_delegation_claim_locks(ctx, state);
		put_nfs_open_context(ctx);
		/* A recall failure aborts the whole walk */
		if (err != 0)
			return;
		goto again;
	}
	spin_unlock(&inode->i_lock);
}
105
/*
 * Refresh an existing delegation from the server's OPEN reclaim reply.
 * Note this does NOT create a delegation: if the inode has none
 * attached, the new state is silently ignored.
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_delegation *delegation = NFS_I(inode)->delegation;
	struct rpc_cred *oldcred;

	if (delegation == NULL)
		return;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	oldcred = delegation->cred;
	delegation->cred = get_rpccred(cred);
	/* Delegation is valid again: it no longer needs reclaiming */
	clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
	NFS_I(inode)->delegation_state = delegation->type;
	/* Publish the updated fields before releasing the old credential */
	smp_wmb();
	put_rpccred(oldcred);
}
127
128 static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
129 {
130 int res = 0;
131
132 res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid, issync);
133 nfs_free_delegation(delegation);
134 return res;
135 }
136
/*
 * Safely pin the inode attached to @delegation.  delegation->lock
 * guards against nfs_detach_delegation_locked() clearing
 * delegation->inode concurrently.
 *
 * Returns a referenced inode, or NULL if the delegation was already
 * detached (or igrab() failed).
 */
static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation)
{
	struct inode *inode = NULL;

	spin_lock(&delegation->lock);
	if (delegation->inode != NULL)
		inode = igrab(delegation->inode);
	spin_unlock(&delegation->lock);
	return inode;
}
147
/*
 * Detach the delegation from @nfsi and from the client's delegation
 * list.  Callers hold clp->cl_lock (all call sites in this file take
 * it around this function).
 *
 * If @stateid is non-NULL, only detach when it matches the
 * delegation's stateid; NULL matches unconditionally.
 *
 * Returns the detached delegation for the caller to return/free, or
 * NULL when there is no delegation or the stateid does not match.
 */
static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi, const nfs4_stateid *stateid)
{
	struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);

	if (delegation == NULL)
		goto nomatch;
	spin_lock(&delegation->lock);
	if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
				sizeof(delegation->stateid.data)) != 0)
		goto nomatch_unlock;
	list_del_rcu(&delegation->super_list);
	/* Break the inode link so nfs_delegation_grab_inode() sees NULL */
	delegation->inode = NULL;
	nfsi->delegation_state = 0;
	rcu_assign_pointer(nfsi->delegation, NULL);
	spin_unlock(&delegation->lock);
	return delegation;
nomatch_unlock:
	spin_unlock(&delegation->lock);
nomatch:
	return NULL;
}
169
/*
 * Set up a delegation on an inode from a successful OPEN reply.
 *
 * Also copes with broken servers that hand out a second delegation for
 * a file that already holds one: the weaker delegation is dropped and
 * the stronger one kept (or installed).
 *
 * Returns 0 on success, or -ENOMEM if the delegation could not be
 * allocated.
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	struct nfs_delegation *freeme = NULL;
	int status = 0;

	delegation = kmalloc(sizeof(*delegation), GFP_KERNEL);
	if (delegation == NULL)
		return -ENOMEM;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	delegation->change_attr = nfsi->change_attr;
	delegation->cred = get_rpccred(cred);
	delegation->inode = inode;
	spin_lock_init(&delegation->lock);

	spin_lock(&clp->cl_lock);
	if (rcu_dereference(nfsi->delegation) != NULL) {
		/* Same stateid and type: nothing to do, free the new copy */
		if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
					sizeof(delegation->stateid)) == 0 &&
				delegation->type == nfsi->delegation->type) {
			goto out;
		}
		/*
		 * Deal with broken servers that hand out two
		 * delegations for the same file.
		 */
		dfprintk(FILE, "%s: server %s handed out "
				"a duplicate delegation!\n",
				__func__, clp->cl_hostname);
		if (delegation->type <= nfsi->delegation->type) {
			/* New delegation is no stronger: discard it */
			freeme = delegation;
			delegation = NULL;
			goto out;
		}
		/* New delegation is stronger: detach and return the old one */
		freeme = nfs_detach_delegation_locked(nfsi, NULL);
	}
	list_add_rcu(&delegation->super_list, &clp->cl_delegations);
	nfsi->delegation_state = delegation->type;
	rcu_assign_pointer(nfsi->delegation, delegation);
	/* Ownership transferred to nfsi; don't free it below */
	delegation = NULL;

	/* Ensure we revalidate the attributes and page cache! */
	spin_lock(&inode->i_lock);
	nfsi->cache_validity |= NFS_INO_REVAL_FORCED;
	spin_unlock(&inode->i_lock);

out:
	spin_unlock(&clp->cl_lock);
	if (delegation != NULL)
		nfs_free_delegation(delegation);
	if (freeme != NULL)
		nfs_do_return_delegation(inode, freeme, 0);
	return status;
}
232
/* Sync all data to disk upon delegation return */
static void nfs_msync_inode(struct inode *inode)
{
	/* Kick off writeback, flush all dirty pages, then wait for I/O */
	filemap_fdatawrite(inode->i_mapping);
	nfs_wb_all(inode);
	filemap_fdatawait(inode->i_mapping);
}
240
/*
 * Basic procedure for returning a delegation to the server.
 *
 * Flushes dirty data, reclaims the delegated opens and locks as
 * regular server state (nfsi->rwsem held for write to keep out new
 * delegated open calls), flushes again, then issues a synchronous
 * DELEGRETURN which also frees @delegation.
 */
static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	/* Guard against new delegated open calls */
	down_write(&nfsi->rwsem);
	nfs_delegation_claim_opens(inode, &delegation->stateid);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	return nfs_do_return_delegation(inode, delegation, 1);
}
260
/*
 * This function returns the delegation without reclaiming opens
 * or protecting against delegation reclaims.
 * It is therefore really only safe to be called from
 * nfs4_clear_inode()
 */
void nfs_inode_return_delegation_noreclaim(struct inode *inode)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;

	/* Unlocked check first; the detach re-checks under cl_lock */
	if (rcu_dereference(nfsi->delegation) != NULL) {
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(nfsi, NULL);
		spin_unlock(&clp->cl_lock);
		if (delegation != NULL)
			/* Asynchronous DELEGRETURN (issync == 0) */
			nfs_do_return_delegation(inode, delegation, 0);
	}
}
281
282 int nfs_inode_return_delegation(struct inode *inode)
283 {
284 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
285 struct nfs_inode *nfsi = NFS_I(inode);
286 struct nfs_delegation *delegation;
287 int err = 0;
288
289 if (rcu_dereference(nfsi->delegation) != NULL) {
290 spin_lock(&clp->cl_lock);
291 delegation = nfs_detach_delegation_locked(nfsi, NULL);
292 spin_unlock(&clp->cl_lock);
293 if (delegation != NULL)
294 err = __nfs_inode_return_delegation(inode, delegation);
295 }
296 return err;
297 }
298
/*
 * Return all delegations associated to a super block
 *
 * The RCU-protected list cannot be traversed while sleeping in the
 * return RPCs, so each matching entry is detached, the read lock is
 * dropped, the delegation returned, and the scan restarted.
 */
void nfs_return_all_delegations(struct super_block *sb)
{
	struct nfs_client *clp = NFS_SB(sb)->nfs_client;
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		inode = NULL;
		/* delegation->lock guards against concurrent detach */
		spin_lock(&delegation->lock);
		if (delegation->inode != NULL && delegation->inode->i_sb == sb)
			inode = igrab(delegation->inode);
		spin_unlock(&delegation->lock);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL)
			__nfs_inode_return_delegation(inode, delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
}
331
/*
 * Kernel-thread body started by nfs_expire_all_delegations(): drop and
 * return every delegation held from @ptr (a struct nfs_client), unless
 * state recovery is underway or the lease is no longer marked expired.
 *
 * NOTE(review): declared int but has no return statement; this relies
 * on module_put_and_exit() not returning — confirm against the
 * kernel's module exit semantics.
 */
static int nfs_do_expire_all_delegations(void *ptr)
{
	struct nfs_client *clp = ptr;
	struct nfs_delegation *delegation;
	struct inode *inode;

	allow_signal(SIGKILL);
restart:
	/* Bail out if recovery owns the state, or the lease recovered */
	if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0)
		goto out;
	if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0)
		goto out;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		inode = nfs_delegation_grab_inode(delegation);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		/* Drop RCU before the (sleeping) return RPC, then restart */
		rcu_read_unlock();
		if (delegation)
			__nfs_inode_return_delegation(inode, delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
out:
	/* Release the references taken by nfs_expire_all_delegations() */
	nfs_put_client(clp);
	module_put_and_exit(0);
}
363
364 void nfs_expire_all_delegations(struct nfs_client *clp)
365 {
366 struct task_struct *task;
367
368 __module_get(THIS_MODULE);
369 atomic_inc(&clp->cl_count);
370 task = kthread_run(nfs_do_expire_all_delegations, clp,
371 "%s-delegreturn",
372 rpc_peeraddr2str(clp->cl_rpcclient,
373 RPC_DISPLAY_ADDR));
374 if (!IS_ERR(task))
375 return;
376 nfs_put_client(clp);
377 module_put(THIS_MODULE);
378 }
379
/*
 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
 *
 * Same restart pattern as nfs_return_all_delegations(): detach under
 * cl_lock, drop the RCU read lock before the sleeping return RPC,
 * then rescan from the top.
 */
void nfs_handle_cb_pathdown(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		inode = nfs_delegation_grab_inode(delegation);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL)
			__nfs_inode_return_delegation(inode, delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
}
407
/*
 * Arguments handed to recall_thread().  The spawner waits on @started
 * until the thread has detached the delegation and filled in @result.
 */
struct recall_threadargs {
	struct inode *inode;		/* inode whose delegation is being recalled */
	struct nfs_client *clp;		/* NOTE(review): not set or read by the
					 * visible code here — possibly vestigial */
	const nfs4_stateid *stateid;	/* stateid identifying the delegation */

	struct completion started;	/* completed once @result is valid */
	int result;			/* 0, or -ENOENT if no matching delegation */
};
416
/*
 * Thread body for asynchronous delegation recall: detach the matching
 * delegation, signal the waiting spawner via args->started (args is on
 * the spawner's stack, so it must not be touched after complete()),
 * then reclaim the opens/locks and return the delegation.
 *
 * NOTE(review): the igrab() result is not checked for NULL before
 * NFS_SERVER(inode) is dereferenced — confirm the caller guarantees a
 * live inode.  Also declared int with no return statement; relies on
 * module_put_and_exit() not returning.
 */
static int recall_thread(void *data)
{
	struct recall_threadargs *args = (struct recall_threadargs *)data;
	struct inode *inode = igrab(args->inode);
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;

	daemonize("nfsv4-delegreturn");

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfs_detach_delegation_locked(nfsi, args->stateid);
	if (delegation != NULL)
		args->result = 0;
	else
		args->result = -ENOENT;
	spin_unlock(&clp->cl_lock);
	/* Wake the spawner; args may go out of scope after this point */
	complete(&args->started);
	nfs_delegation_claim_opens(inode, args->stateid);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	if (delegation != NULL)
		/* Synchronous DELEGRETURN; also frees the delegation */
		nfs_do_return_delegation(inode, delegation, 1);
	iput(inode);
	module_put_and_exit(0);
}
448
/*
 * Asynchronous delegation recall!
 *
 * Spawns recall_thread() and waits only until the thread has detached
 * the delegation (the DELEGRETURN itself proceeds in the background).
 *
 * Returns 0 on successful detach, -ENOENT if no delegation matched
 * @stateid, or a negative errno if the thread could not be started.
 */
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
{
	/* Lives on this stack; recall_thread() must not use it after
	 * signalling data.started */
	struct recall_threadargs data = {
		.inode = inode,
		.stateid = stateid,
	};
	int status;

	init_completion(&data.started);
	/* Keep the module pinned for the thread's lifetime */
	__module_get(THIS_MODULE);
	status = kernel_thread(recall_thread, &data, CLONE_KERNEL);
	if (status < 0)
		goto out_module_put;
	wait_for_completion(&data.started);
	return data.result;
out_module_put:
	module_put(THIS_MODULE);
	return status;
}
471
/*
 * Retrieve the inode associated with a delegation
 *
 * Scans @clp's delegation list for one whose inode matches @fhandle.
 * Returns a referenced inode (caller must iput()), or NULL if no
 * delegation matches.
 */
struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle)
{
	struct nfs_delegation *delegation;
	struct inode *res = NULL;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		/* delegation->lock keeps delegation->inode stable here */
		spin_lock(&delegation->lock);
		if (delegation->inode != NULL &&
				nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
			res = igrab(delegation->inode);
		}
		spin_unlock(&delegation->lock);
		if (res != NULL)
			break;
	}
	rcu_read_unlock();
	return res;
}
493
/*
 * Mark all delegations as needing to be reclaimed
 *
 * Called on reboot recovery; nfs_delegation_reap_unclaimed() later
 * frees any delegation still carrying this flag.
 */
void nfs_delegation_mark_reclaim(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list)
		set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
	rcu_read_unlock();
}
505
/*
 * Reap all unclaimed delegations after reboot recovery is done
 *
 * Any delegation still flagged NFS_DELEGATION_NEED_RECLAIM was not
 * re-established by the server, so it is detached and freed locally
 * (no DELEGRETURN — the server no longer knows about it).
 */
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	struct inode *inode;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0)
			continue;
		inode = nfs_delegation_grab_inode(delegation);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		/* Drop RCU before freeing/iput, then rescan from the top */
		rcu_read_unlock();
		if (delegation != NULL)
			nfs_free_delegation(delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
}
532
533 int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
534 {
535 struct nfs_inode *nfsi = NFS_I(inode);
536 struct nfs_delegation *delegation;
537 int ret = 0;
538
539 rcu_read_lock();
540 delegation = rcu_dereference(nfsi->delegation);
541 if (delegation != NULL) {
542 memcpy(dst->data, delegation->stateid.data, sizeof(dst->data));
543 ret = 1;
544 }
545 rcu_read_unlock();
546 return ret;
547 }
This page took 0.042735 seconds and 5 git commands to generate.