NFSv4: Ensure change attribute returned by GETATTR callback conforms to spec
[deliverable/linux.git] / fs / nfs / delegation.c
1 /*
2 * linux/fs/nfs/delegation.c
3 *
4 * Copyright (C) 2004 Trond Myklebust
5 *
6 * NFS file delegation management
7 *
8 */
9 #include <linux/config.h>
10 #include <linux/completion.h>
11 #include <linux/kthread.h>
12 #include <linux/module.h>
13 #include <linux/sched.h>
14 #include <linux/spinlock.h>
15
16 #include <linux/nfs4.h>
17 #include <linux/nfs_fs.h>
18 #include <linux/nfs_xdr.h>
19
20 #include "nfs4_fs.h"
21 #include "delegation.h"
22
23 static struct nfs_delegation *nfs_alloc_delegation(void)
24 {
25 return (struct nfs_delegation *)kmalloc(sizeof(struct nfs_delegation), GFP_KERNEL);
26 }
27
28 static void nfs_free_delegation(struct nfs_delegation *delegation)
29 {
30 if (delegation->cred)
31 put_rpccred(delegation->cred);
32 kfree(delegation);
33 }
34
35 static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
36 {
37 struct inode *inode = state->inode;
38 struct file_lock *fl;
39 int status;
40
41 for (fl = inode->i_flock; fl != 0; fl = fl->fl_next) {
42 if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
43 continue;
44 if ((struct nfs_open_context *)fl->fl_file->private_data != ctx)
45 continue;
46 status = nfs4_lock_delegation_recall(state, fl);
47 if (status >= 0)
48 continue;
49 switch (status) {
50 default:
51 printk(KERN_ERR "%s: unhandled error %d.\n",
52 __FUNCTION__, status);
53 case -NFS4ERR_EXPIRED:
54 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
55 case -NFS4ERR_STALE_CLIENTID:
56 nfs4_schedule_state_recovery(NFS_SERVER(inode)->nfs4_state);
57 goto out_err;
58 }
59 }
60 return 0;
61 out_err:
62 return status;
63 }
64
/*
 * Walk every open context on the inode and convert any open/lock state
 * that was held under the (now recalled) delegation back into regular
 * server-side state.
 *
 * Locking: nfsi->open_files is protected by inode->i_lock.  The recall
 * operations sleep, so each iteration pins the context, drops the
 * spinlock for the RPC calls, and then restarts the list walk from
 * scratch (the list may have changed while the lock was dropped).
 */
static void nfs_delegation_claim_opens(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	int err;

again:
	spin_lock(&inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		/* Skip state that was never marked as delegated */
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		/* Pin the context so it survives dropping i_lock */
		get_nfs_open_context(ctx);
		spin_unlock(&inode->i_lock);
		err = nfs4_open_delegation_recall(ctx->dentry, state);
		if (err >= 0)
			err = nfs_delegation_claim_locks(ctx, state);
		put_nfs_open_context(ctx);
		/* Give up on error; state recovery is scheduled elsewhere */
		if (err != 0)
			return;
		/* List may have changed while unlocked: rescan from the top */
		goto again;
	}
	spin_unlock(&inode->i_lock);
}
92
93 /*
94 * Set up a delegation on an inode
95 */
96 void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
97 {
98 struct nfs_delegation *delegation = NFS_I(inode)->delegation;
99
100 if (delegation == NULL)
101 return;
102 memcpy(delegation->stateid.data, res->delegation.data,
103 sizeof(delegation->stateid.data));
104 delegation->type = res->delegation_type;
105 delegation->maxsize = res->maxsize;
106 put_rpccred(cred);
107 delegation->cred = get_rpccred(cred);
108 delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM;
109 NFS_I(inode)->delegation_state = delegation->type;
110 smp_wmb();
111 }
112
113 /*
114 * Set up a delegation on an inode
115 */
116 int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
117 {
118 struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
119 struct nfs_inode *nfsi = NFS_I(inode);
120 struct nfs_delegation *delegation;
121 int status = 0;
122
123 /* Ensure we first revalidate the attributes and page cache! */
124 if ((nfsi->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_ATTR)))
125 __nfs_revalidate_inode(NFS_SERVER(inode), inode);
126
127 delegation = nfs_alloc_delegation();
128 if (delegation == NULL)
129 return -ENOMEM;
130 memcpy(delegation->stateid.data, res->delegation.data,
131 sizeof(delegation->stateid.data));
132 delegation->type = res->delegation_type;
133 delegation->maxsize = res->maxsize;
134 delegation->change_attr = nfsi->change_attr;
135 delegation->cred = get_rpccred(cred);
136 delegation->inode = inode;
137
138 spin_lock(&clp->cl_lock);
139 if (nfsi->delegation == NULL) {
140 list_add(&delegation->super_list, &clp->cl_delegations);
141 nfsi->delegation = delegation;
142 nfsi->delegation_state = delegation->type;
143 delegation = NULL;
144 } else {
145 if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
146 sizeof(delegation->stateid)) != 0 ||
147 delegation->type != nfsi->delegation->type) {
148 printk("%s: server %u.%u.%u.%u, handed out a duplicate delegation!\n",
149 __FUNCTION__, NIPQUAD(clp->cl_addr));
150 status = -EIO;
151 }
152 }
153 spin_unlock(&clp->cl_lock);
154 kfree(delegation);
155 return status;
156 }
157
158 static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
159 {
160 int res = 0;
161
162 __nfs_revalidate_inode(NFS_SERVER(inode), inode);
163
164 res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
165 nfs_free_delegation(delegation);
166 return res;
167 }
168
/* Sync all data to disk upon delegation return */
static void nfs_msync_inode(struct inode *inode)
{
	/* Kick off writeback, flush all dirty NFS pages, then wait for
	 * the outstanding I/O to complete -- the order matters. */
	filemap_fdatawrite(inode->i_mapping);
	nfs_wb_all(inode);
	filemap_fdatawait(inode->i_mapping);
}
176
/*
 * Basic procedure for returning a delegation to the server
 *
 * Detaches the delegation from the inode and the per-client list,
 * reclaims any open/lock state held under it, and then performs the
 * DELEGRETURN.  Returns 0 if the inode held no delegation.
 */
int __nfs_inode_return_delegation(struct inode *inode)
{
	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int res = 0;

	/* Flush dirty data before we start detaching the delegation */
	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	/* Guard against new delegated open calls */
	down_write(&nfsi->rwsem);
	/* Unlink the delegation under cl_lock; the sleeping DELEGRETURN
	 * happens after all locks are dropped */
	spin_lock(&clp->cl_lock);
	delegation = nfsi->delegation;
	if (delegation != NULL) {
		list_del_init(&delegation->super_list);
		nfsi->delegation = NULL;
		nfsi->delegation_state = 0;
	}
	spin_unlock(&clp->cl_lock);
	/* Convert delegated opens/locks back into regular server state */
	nfs_delegation_claim_opens(inode);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	/* Write back anything dirtied while reclaiming state */
	nfs_msync_inode(inode);

	if (delegation != NULL)
		res = nfs_do_return_delegation(inode, delegation);
	return res;
}
208
/*
 * Return all delegations associated to a super block
 */
void nfs_return_all_delegations(struct super_block *sb)
{
	struct nfs4_client *clp = NFS_SB(sb)->nfs4_state;
	struct nfs_delegation *delegation;
	struct inode *inode;

	/* No NFSv4 client state: nothing to return */
	if (clp == NULL)
		return;
restart:
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		if (delegation->inode->i_sb != sb)
			continue;
		/* Pin the inode; skip it if it is already being torn down */
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		/* nfs_inode_return_delegation() sleeps, so drop the lock
		 * and rescan the (possibly changed) list from the top */
		spin_unlock(&clp->cl_lock);
		nfs_inode_return_delegation(inode);
		iput(inode);
		goto restart;
	}
	spin_unlock(&clp->cl_lock);
}
235
236 int nfs_do_expire_all_delegations(void *ptr)
237 {
238 struct nfs4_client *clp = ptr;
239 struct nfs_delegation *delegation;
240 struct inode *inode;
241 int err = 0;
242
243 allow_signal(SIGKILL);
244 restart:
245 spin_lock(&clp->cl_lock);
246 if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0)
247 goto out;
248 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0)
249 goto out;
250 list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
251 inode = igrab(delegation->inode);
252 if (inode == NULL)
253 continue;
254 spin_unlock(&clp->cl_lock);
255 err = nfs_inode_return_delegation(inode);
256 iput(inode);
257 if (!err)
258 goto restart;
259 }
260 out:
261 spin_unlock(&clp->cl_lock);
262 nfs4_put_client(clp);
263 module_put_and_exit(0);
264 }
265
266 void nfs_expire_all_delegations(struct nfs4_client *clp)
267 {
268 struct task_struct *task;
269
270 __module_get(THIS_MODULE);
271 atomic_inc(&clp->cl_count);
272 task = kthread_run(nfs_do_expire_all_delegations, clp,
273 "%u.%u.%u.%u-delegreturn",
274 NIPQUAD(clp->cl_addr));
275 if (!IS_ERR(task))
276 return;
277 nfs4_put_client(clp);
278 module_put(THIS_MODULE);
279 }
280
/*
 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
 *
 * With the callback channel down the server can no longer recall
 * delegations, so we voluntarily return every one we hold.
 */
void nfs_handle_cb_pathdown(struct nfs4_client *clp)
{
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		/* Pin the inode; skip it if it is already being freed */
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		/* The return path sleeps: drop the lock and restart the
		 * scan afterwards, since the list may have changed */
		spin_unlock(&clp->cl_lock);
		nfs_inode_return_delegation(inode);
		iput(inode);
		goto restart;
	}
	spin_unlock(&clp->cl_lock);
}
304
/*
 * Arguments passed to recall_thread().  The issuer blocks on @started
 * until the thread has claimed (or failed to claim) the delegation;
 * @result is only valid once @started has completed, and the whole
 * structure (it lives on the issuer's stack) is dead after that.
 */
struct recall_threadargs {
	struct inode *inode;		/* inode whose delegation is being recalled */
	struct nfs4_client *clp;	/* NOTE(review): not set by nfs_async_inode_return_delegation() -- confirm any other users */
	const nfs4_stateid *stateid;	/* stateid quoted in the CB_RECALL */

	struct completion started;	/* signalled once @result is set */
	int result;			/* 0, or -ENOENT if the stateid did not match */
};
313
/*
 * Thread body for an asynchronous delegation recall.
 *
 * Detaches the delegation matching args->stateid from the inode,
 * signals the waiting issuer through args->started (with args->result
 * set to 0 or -ENOENT), then reclaims open/lock state and performs the
 * actual DELEGRETURN.
 */
static int recall_thread(void *data)
{
	struct recall_threadargs *args = (struct recall_threadargs *)data;
	struct inode *inode = igrab(args->inode);
	/* NOTE(review): igrab() can return NULL for an inode that is
	 * being freed, in which case NFS_SERVER(inode) below would oops.
	 * Presumably the issuer's blocking on args->started keeps the
	 * inode pinned -- confirm. */
	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;

	daemonize("nfsv4-delegreturn");

	/* Flush dirty data before handing the delegation back */
	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	/* Block new delegated opens while we reclaim state */
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfsi->delegation;
	/* Only return the delegation if the recall quoted its stateid */
	if (delegation != NULL && memcmp(delegation->stateid.data,
				args->stateid->data,
				sizeof(delegation->stateid.data)) == 0) {
		list_del_init(&delegation->super_list);
		nfsi->delegation = NULL;
		nfsi->delegation_state = 0;
		args->result = 0;
	} else {
		/* Mismatch or no delegation: don't return anything */
		delegation = NULL;
		args->result = -ENOENT;
	}
	spin_unlock(&clp->cl_lock);
	/* The issuer may return (and free its stack frame holding *args)
	 * as soon as this completes; args must not be touched below. */
	complete(&args->started);
	nfs_delegation_claim_opens(inode);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	if (delegation != NULL)
		nfs_do_return_delegation(inode, delegation);
	iput(inode);
	/* Does not return; hence no return statement despite the int
	 * return type */
	module_put_and_exit(0);
}
352
353 /*
354 * Asynchronous delegation recall!
355 */
356 int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
357 {
358 struct recall_threadargs data = {
359 .inode = inode,
360 .stateid = stateid,
361 };
362 int status;
363
364 init_completion(&data.started);
365 __module_get(THIS_MODULE);
366 status = kernel_thread(recall_thread, &data, CLONE_KERNEL);
367 if (status < 0)
368 goto out_module_put;
369 wait_for_completion(&data.started);
370 return data.result;
371 out_module_put:
372 module_put(THIS_MODULE);
373 return status;
374 }
375
376 /*
377 * Retrieve the inode associated with a delegation
378 */
379 struct inode *nfs_delegation_find_inode(struct nfs4_client *clp, const struct nfs_fh *fhandle)
380 {
381 struct nfs_delegation *delegation;
382 struct inode *res = NULL;
383 spin_lock(&clp->cl_lock);
384 list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
385 if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
386 res = igrab(delegation->inode);
387 break;
388 }
389 }
390 spin_unlock(&clp->cl_lock);
391 return res;
392 }
393
394 /*
395 * Mark all delegations as needing to be reclaimed
396 */
397 void nfs_delegation_mark_reclaim(struct nfs4_client *clp)
398 {
399 struct nfs_delegation *delegation;
400 spin_lock(&clp->cl_lock);
401 list_for_each_entry(delegation, &clp->cl_delegations, super_list)
402 delegation->flags |= NFS_DELEGATION_NEED_RECLAIM;
403 spin_unlock(&clp->cl_lock);
404 }
405
406 /*
407 * Reap all unclaimed delegations after reboot recovery is done
408 */
409 void nfs_delegation_reap_unclaimed(struct nfs4_client *clp)
410 {
411 struct nfs_delegation *delegation, *n;
412 LIST_HEAD(head);
413 spin_lock(&clp->cl_lock);
414 list_for_each_entry_safe(delegation, n, &clp->cl_delegations, super_list) {
415 if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0)
416 continue;
417 list_move(&delegation->super_list, &head);
418 NFS_I(delegation->inode)->delegation = NULL;
419 NFS_I(delegation->inode)->delegation_state = 0;
420 }
421 spin_unlock(&clp->cl_lock);
422 while(!list_empty(&head)) {
423 delegation = list_entry(head.next, struct nfs_delegation, super_list);
424 list_del(&delegation->super_list);
425 nfs_free_delegation(delegation);
426 }
427 }
This page took 0.040049 seconds and 5 git commands to generate.