NFSv4: Use RCU to protect delegations
fs/nfs/delegation.c
/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"

static void nfs_free_delegation(struct nfs_delegation *delegation)
{
	if (delegation->cred)
		put_rpccred(delegation->cred);
	kfree(delegation);
}

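/*
 * RCU callback: frees the delegation only after a grace period has
 * elapsed, so that lockless readers walking clp->cl_delegations or
 * dereferencing nfsi->delegation never touch freed memory.
 */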
static void nfs_free_delegation_callback(struct rcu_head *head)
{
	struct nfs_delegation *delegation = container_of(head, struct nfs_delegation, rcu);

	nfs_free_delegation(delegation);
}

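/*
 * Re-establish on the server any POSIX or flock locks belonging to this
 * open context, via nfs4_lock_delegation_recall(), before the delegation
 * covering them is returned.
 */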
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status;

	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if ((struct nfs_open_context *)fl->fl_file->private_data != ctx)
			continue;
		status = nfs4_lock_delegation_recall(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
		default:
			printk(KERN_ERR "%s: unhandled error %d.\n",
					__FUNCTION__, status);
		case -NFS4ERR_EXPIRED:
			/* kill_proc(fl->fl_pid, SIGLOST, 1); */
		case -NFS4ERR_STALE_CLIENTID:
			nfs4_schedule_state_recovery(NFS_SERVER(inode)->nfs_client);
			goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

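/*
 * For every open context on the inode whose state is still marked as
 * delegated and still carries the delegation stateid, recover the open
 * state on the server and then reclaim any outstanding locks.  The
 * inode->i_lock is dropped around each recall RPC, so the scan restarts
 * from the top of the open_files list after every successful recovery.
 */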
static void nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	int err;

again:
	spin_lock(&inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		if (memcmp(state->stateid.data, stateid->data, sizeof(state->stateid.data)) != 0)
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&inode->i_lock);
		err = nfs4_open_delegation_recall(ctx, state, stateid);
		if (err >= 0)
			err = nfs_delegation_claim_locks(ctx, state);
		put_nfs_open_context(ctx);
		if (err != 0)
			return;
		goto again;
	}
	spin_unlock(&inode->i_lock);
}

/*
 * Update an existing delegation with the stateid handed back by the
 * server during state recovery, and clear the need-reclaim flag.
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_delegation *delegation = NFS_I(inode)->delegation;

	if (delegation == NULL)
		return;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	put_rpccred(cred);
	delegation->cred = get_rpccred(cred);
	delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM;
	NFS_I(inode)->delegation_state = delegation->type;
	smp_wmb();
}

/*
 * Set up a delegation on an inode
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int status = 0;

	/* Ensure we first revalidate the attributes and page cache! */
	if ((nfsi->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_ATTR)))
		__nfs_revalidate_inode(NFS_SERVER(inode), inode);

	delegation = kmalloc(sizeof(*delegation), GFP_KERNEL);
	if (delegation == NULL)
		return -ENOMEM;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	delegation->change_attr = nfsi->change_attr;
	delegation->cred = get_rpccred(cred);
	delegation->inode = inode;

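	/*
	 * clp->cl_lock serialises all updates to the delegation list and to
	 * nfsi->delegation; list_add_rcu() and rcu_assign_pointer() publish
	 * the fully initialised delegation, so RCU readers never see a
	 * half-constructed entry.
	 */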
	spin_lock(&clp->cl_lock);
	if (rcu_dereference(nfsi->delegation) == NULL) {
		list_add_rcu(&delegation->super_list, &clp->cl_delegations);
		nfsi->delegation_state = delegation->type;
		rcu_assign_pointer(nfsi->delegation, delegation);
		delegation = NULL;
	} else {
		if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
					sizeof(delegation->stateid)) != 0 ||
				delegation->type != nfsi->delegation->type) {
			printk("%s: server %u.%u.%u.%u, handed out a duplicate delegation!\n",
					__FUNCTION__, NIPQUAD(clp->cl_addr.sin_addr));
			status = -EIO;
		}
	}
	spin_unlock(&clp->cl_lock);
	kfree(delegation);
	return status;
}

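/*
 * Send DELEGRETURN for this delegation, then hand the structure to
 * call_rcu() so that it is freed only after every outstanding RCU
 * read-side critical section has completed.
 */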
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
	int res = 0;

	res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
	call_rcu(&delegation->rcu, nfs_free_delegation_callback);
	return res;
}

/* Sync all data to disk upon delegation return */
static void nfs_msync_inode(struct inode *inode)
{
	filemap_fdatawrite(inode->i_mapping);
	nfs_wb_all(inode);
	filemap_fdatawait(inode->i_mapping);
}

/*
 * Basic procedure for returning a delegation to the server
 */
static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	/* Guard against new delegated open calls */
	down_write(&nfsi->rwsem);
	nfs_delegation_claim_opens(inode, &delegation->stateid);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	return nfs_do_return_delegation(inode, delegation);
}

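/*
 * Unlink the delegation from the inode and from the per-client list.
 * The caller must hold clp->cl_lock.  list_del_rcu() and
 * rcu_assign_pointer() let concurrent RCU readers finish safely; the
 * delegation itself must subsequently be freed via call_rcu().
 */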
static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi, const nfs4_stateid *stateid)
{
	struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);

	if (delegation == NULL)
		goto nomatch;
	if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
				sizeof(delegation->stateid.data)) != 0)
		goto nomatch;
	list_del_rcu(&delegation->super_list);
	nfsi->delegation_state = 0;
	rcu_assign_pointer(nfsi->delegation, NULL);
	return delegation;
nomatch:
	return NULL;
}

int nfs_inode_return_delegation(struct inode *inode)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int err = 0;

	if (rcu_dereference(nfsi->delegation) != NULL) {
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(nfsi, NULL);
		spin_unlock(&clp->cl_lock);
		if (delegation != NULL)
			err = __nfs_inode_return_delegation(inode, delegation);
	}
	return err;
}

/*
 * Return all delegations associated to a super block
 */
void nfs_return_all_delegations(struct super_block *sb)
{
	struct nfs_client *clp = NFS_SB(sb)->nfs_client;
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
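	/*
	 * The RCU read lock must be dropped before blocking in
	 * __nfs_inode_return_delegation(), so each time a delegation is
	 * detached the walk restarts from the head of clp->cl_delegations.
	 */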
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if (delegation->inode->i_sb != sb)
			continue;
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL)
			__nfs_inode_return_delegation(inode, delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
}

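/*
 * Thread body: returns every delegation held from this server.  Bails
 * out early if state recovery is in progress or the lease is no longer
 * marked as expired.
 */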
static int nfs_do_expire_all_delegations(void *ptr)
{
	struct nfs_client *clp = ptr;
	struct nfs_delegation *delegation;
	struct inode *inode;

	allow_signal(SIGKILL);
restart:
	if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0)
		goto out;
	if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0)
		goto out;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation)
			__nfs_inode_return_delegation(inode, delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
out:
	nfs_put_client(clp);
	module_put_and_exit(0);
}

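/*
 * Spawn a kthread to return all delegations once the lease has expired.
 * A module reference and an nfs_client reference are taken on behalf of
 * the thread; they are released here only if the thread could not be
 * started.
 */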
void nfs_expire_all_delegations(struct nfs_client *clp)
{
	struct task_struct *task;

	__module_get(THIS_MODULE);
	atomic_inc(&clp->cl_count);
	task = kthread_run(nfs_do_expire_all_delegations, clp,
			"%u.%u.%u.%u-delegreturn",
			NIPQUAD(clp->cl_addr.sin_addr));
	if (!IS_ERR(task))
		return;
	nfs_put_client(clp);
	module_put(THIS_MODULE);
}

/*
 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
 */
void nfs_handle_cb_pathdown(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL)
			__nfs_inode_return_delegation(inode, delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
}

struct recall_threadargs {
	struct inode *inode;
	struct nfs_client *clp;
	const nfs4_stateid *stateid;

	struct completion started;
	int result;
};

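/*
 * Thread body for an asynchronous delegation recall: detach the
 * delegation under clp->cl_lock, report the result to the caller via
 * the completion, then reclaim open and lock state and send
 * DELEGRETURN outside the caller's context.
 */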
static int recall_thread(void *data)
{
	struct recall_threadargs *args = (struct recall_threadargs *)data;
	struct inode *inode = igrab(args->inode);
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;

	daemonize("nfsv4-delegreturn");

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfs_detach_delegation_locked(nfsi, args->stateid);
	if (delegation != NULL)
		args->result = 0;
	else
		args->result = -ENOENT;
	spin_unlock(&clp->cl_lock);
	complete(&args->started);
	nfs_delegation_claim_opens(inode, args->stateid);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	if (delegation != NULL)
		nfs_do_return_delegation(inode, delegation);
	iput(inode);
	module_put_and_exit(0);
}

/*
 * Asynchronous delegation recall!
 */
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
{
	struct recall_threadargs data = {
		.inode = inode,
		.stateid = stateid,
	};
	int status;

	init_completion(&data.started);
	__module_get(THIS_MODULE);
	status = kernel_thread(recall_thread, &data, CLONE_KERNEL);
	if (status < 0)
		goto out_module_put;
	wait_for_completion(&data.started);
	return data.result;
out_module_put:
	module_put(THIS_MODULE);
	return status;
}

/*
 * Retrieve the inode associated with a delegation
 */
struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle)
{
	struct nfs_delegation *delegation;
	struct inode *res = NULL;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
			res = igrab(delegation->inode);
			break;
		}
	}
	rcu_read_unlock();
	return res;
}

/*
 * Mark all delegations as needing to be reclaimed
 */
void nfs_delegation_mark_reclaim(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list)
		delegation->flags |= NFS_DELEGATION_NEED_RECLAIM;
	rcu_read_unlock();
}

/*
 * Reap all unclaimed delegations after reboot recovery is done
 */
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(delegation->inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
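		/*
		 * The delegation was never reclaimed after the server
		 * reboot, so there is nothing to return: just unlink it
		 * and free it once the RCU grace period has elapsed.
		 */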
		if (delegation != NULL)
			call_rcu(&delegation->rcu, nfs_free_delegation_callback);
		goto restart;
	}
	rcu_read_unlock();
}

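/*
 * Copy the delegation stateid, if one is held, for use in subsequent
 * stateful operations.  Returns 1 if a stateid was copied, 0 otherwise.
 * rcu_read_lock() keeps the delegation from being freed while its
 * stateid is being read.
 */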
int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int ret = 0;

	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation != NULL) {
		memcpy(dst->data, delegation->stateid.data, sizeof(dst->data));
		ret = 1;
	}
	rcu_read_unlock();
	return ret;
}