nfsd: move some blocklayout code
[deliverable/linux.git] / fs / nfsd / nfs4layouts.c
/*
 * Copyright (c) 2014 Christoph Hellwig.
 */
#include <linux/kmod.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/sched.h>
#include <linux/sunrpc/addr.h>

#include "pnfs.h"
#include "netns.h"
#include "trace.h"

#define NFSDDBG_FACILITY		NFSDDBG_PNFS

struct nfs4_layout {
	struct list_head		lo_perstate;
	struct nfs4_layout_stateid	*lo_state;
	struct nfsd4_layout_seg		lo_seg;
};

static struct kmem_cache *nfs4_layout_cache;
static struct kmem_cache *nfs4_layout_stateid_cache;

static const struct nfsd4_callback_ops nfsd4_cb_layout_ops;
static const struct lock_manager_operations nfsd4_layouts_lm_ops;

const struct nfsd4_layout_ops *nfsd4_layout_ops[LAYOUT_TYPE_MAX] = {
#ifdef CONFIG_NFSD_BLOCKLAYOUT
	[LAYOUT_BLOCK_VOLUME]	= &bl_layout_ops,
#endif
};

/* pNFS device ID to export fsid mapping */
#define DEVID_HASH_BITS	8
#define DEVID_HASH_SIZE	(1 << DEVID_HASH_BITS)
#define DEVID_HASH_MASK	(DEVID_HASH_SIZE - 1)
static u64 nfsd_devid_seq = 1;
static struct list_head nfsd_devid_hash[DEVID_HASH_SIZE];
static DEFINE_SPINLOCK(nfsd_devid_lock);

static inline u32 devid_hashfn(u64 idx)
{
	return jhash_2words(idx, idx >> 32, 0) & DEVID_HASH_MASK;
}

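/*
 * Allocate a device ID mapping for the export backing @fhp.  The map is
 * allocated outside of nfsd_devid_lock, then the hash is re-checked under
 * the lock so that a concurrent allocation for the same fsid reuses the
 * existing entry; the unused allocation is freed on the way out.
 */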
static void
nfsd4_alloc_devid_map(const struct svc_fh *fhp)
{
	const struct knfsd_fh *fh = &fhp->fh_handle;
	size_t fsid_len = key_len(fh->fh_fsid_type);
	struct nfsd4_deviceid_map *map, *old;
	int i;

	map = kzalloc(sizeof(*map) + fsid_len, GFP_KERNEL);
	if (!map)
		return;

	map->fsid_type = fh->fh_fsid_type;
	memcpy(&map->fsid, fh->fh_fsid, fsid_len);

	spin_lock(&nfsd_devid_lock);
	if (fhp->fh_export->ex_devid_map)
		goto out_unlock;

	for (i = 0; i < DEVID_HASH_SIZE; i++) {
		list_for_each_entry(old, &nfsd_devid_hash[i], hash) {
			if (old->fsid_type != fh->fh_fsid_type)
				continue;
			if (memcmp(old->fsid, fh->fh_fsid,
					key_len(old->fsid_type)))
				continue;

			fhp->fh_export->ex_devid_map = old;
			goto out_unlock;
		}
	}

	map->idx = nfsd_devid_seq++;
	list_add_tail_rcu(&map->hash, &nfsd_devid_hash[devid_hashfn(map->idx)]);
	fhp->fh_export->ex_devid_map = map;
	map = NULL;

out_unlock:
	spin_unlock(&nfsd_devid_lock);
	kfree(map);
}

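/* Look up a device ID mapping by index under rcu_read_lock(). */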
struct nfsd4_deviceid_map *
nfsd4_find_devid_map(int idx)
{
	struct nfsd4_deviceid_map *map, *ret = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(map, &nfsd_devid_hash[devid_hashfn(idx)], hash)
		if (map->idx == idx)
			ret = map;
	rcu_read_unlock();

	return ret;
}

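/*
 * Fill in a device ID for the client: the export's fsid index plus the
 * device generation, with the padding cleared.
 */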
int
nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp,
		u32 device_generation)
{
	if (!fhp->fh_export->ex_devid_map) {
		nfsd4_alloc_devid_map(fhp);
		if (!fhp->fh_export->ex_devid_map)
			return -ENOMEM;
	}

	id->fsid_idx = fhp->fh_export->ex_devid_map->idx;
	id->generation = device_generation;
	id->pad = 0;
	return 0;
}

void nfsd4_setup_layout_type(struct svc_export *exp)
{
	struct super_block *sb = exp->ex_path.mnt->mnt_sb;

	if (!(exp->ex_flags & NFSEXP_PNFS))
		return;

#ifdef CONFIG_NFSD_BLOCKLAYOUT
	if (sb->s_export_op->get_uuid &&
	    sb->s_export_op->map_blocks &&
	    sb->s_export_op->commit_blocks)
		exp->ex_layout_type = LAYOUT_BLOCK_VOLUME;
#endif
}

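/*
 * Called when the last reference to a layout stateid is put: unlink it
 * from the per-client and per-file lists, drop the layout lease, and
 * release the backing file and the stateid itself.
 */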
static void
nfsd4_free_layout_stateid(struct nfs4_stid *stid)
{
	struct nfs4_layout_stateid *ls = layoutstateid(stid);
	struct nfs4_client *clp = ls->ls_stid.sc_client;
	struct nfs4_file *fp = ls->ls_stid.sc_file;

	trace_layoutstate_free(&ls->ls_stid.sc_stateid);

	spin_lock(&clp->cl_lock);
	list_del_init(&ls->ls_perclnt);
	spin_unlock(&clp->cl_lock);

	spin_lock(&fp->fi_lock);
	list_del_init(&ls->ls_perfile);
	spin_unlock(&fp->fi_lock);

	vfs_setlease(ls->ls_file, F_UNLCK, NULL, (void **)&ls);
	fput(ls->ls_file);

	if (ls->ls_recalled)
		atomic_dec(&ls->ls_stid.sc_file->fi_lo_recalls);

	kmem_cache_free(nfs4_layout_stateid_cache, ls);
}

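/*
 * Install a read lease with FL_LAYOUT set on the file backing the layout
 * stateid.  Conflicting access then triggers nfsd4_layout_lm_break(),
 * which is how layout recalls are driven.  vfs_setlease() consumes the
 * lock on success, hence the BUG_ON() if it is still set afterwards.
 */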
static int
nfsd4_layout_setlease(struct nfs4_layout_stateid *ls)
{
	struct file_lock *fl;
	int status;

	fl = locks_alloc_lock();
	if (!fl)
		return -ENOMEM;
	locks_init_lock(fl);
	fl->fl_lmops = &nfsd4_layouts_lm_ops;
	fl->fl_flags = FL_LAYOUT;
	fl->fl_type = F_RDLCK;
	fl->fl_end = OFFSET_MAX;
	fl->fl_owner = ls;
	fl->fl_pid = current->tgid;
	fl->fl_file = ls->ls_file;

	status = vfs_setlease(fl->fl_file, fl->fl_type, &fl, NULL);
	if (status) {
		locks_free_lock(fl);
		return status;
	}
	BUG_ON(fl != NULL);
	return 0;
}

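/*
 * Allocate a layout stateid derived from @parent (an open, lock or
 * delegation stateid).  The new stateid takes a reference on the file,
 * installs the layout lease, and is hashed into the per-client and
 * per-file lists.
 */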
static struct nfs4_layout_stateid *
nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
		struct nfs4_stid *parent, u32 layout_type)
{
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_file *fp = parent->sc_file;
	struct nfs4_layout_stateid *ls;
	struct nfs4_stid *stp;

	stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache);
	if (!stp)
		return NULL;
	stp->sc_free = nfsd4_free_layout_stateid;
	get_nfs4_file(fp);
	stp->sc_file = fp;

	ls = layoutstateid(stp);
	INIT_LIST_HEAD(&ls->ls_perclnt);
	INIT_LIST_HEAD(&ls->ls_perfile);
	spin_lock_init(&ls->ls_lock);
	INIT_LIST_HEAD(&ls->ls_layouts);
	mutex_init(&ls->ls_mutex);
	ls->ls_layout_type = layout_type;
	nfsd4_init_cb(&ls->ls_recall, clp, &nfsd4_cb_layout_ops,
			NFSPROC4_CLNT_CB_LAYOUT);

	if (parent->sc_type == NFS4_DELEG_STID)
		ls->ls_file = get_file(fp->fi_deleg_file);
	else
		ls->ls_file = find_any_file(fp);
	BUG_ON(!ls->ls_file);

	if (nfsd4_layout_setlease(ls)) {
		fput(ls->ls_file);
		put_nfs4_file(fp);
		kmem_cache_free(nfs4_layout_stateid_cache, ls);
		return NULL;
	}

	spin_lock(&clp->cl_lock);
	stp->sc_type = NFS4_LAYOUT_STID;
	list_add(&ls->ls_perclnt, &clp->cl_lo_states);
	spin_unlock(&clp->cl_lock);

	spin_lock(&fp->fi_lock);
	list_add(&ls->ls_perfile, &fp->fi_lo_states);
	spin_unlock(&fp->fi_lock);

	trace_layoutstate_alloc(&ls->ls_stid.sc_stateid);
	return ls;
}

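/*
 * Look up the layout stateid for a layout operation.  If @create is set,
 * an open, lock or delegation stateid is also accepted and promoted to a
 * fresh layout stateid.  On success *lsp is returned with ls_mutex held;
 * the caller is responsible for unlocking it.
 */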
__be32
nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, stateid_t *stateid,
		bool create, u32 layout_type, struct nfs4_layout_stateid **lsp)
{
	struct nfs4_layout_stateid *ls;
	struct nfs4_stid *stid;
	unsigned char typemask = NFS4_LAYOUT_STID;
	__be32 status;

	if (create)
		typemask |= (NFS4_OPEN_STID | NFS4_LOCK_STID | NFS4_DELEG_STID);

	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &stid,
			net_generic(SVC_NET(rqstp), nfsd_net_id));
	if (status)
		goto out;

	if (!fh_match(&cstate->current_fh.fh_handle,
		      &stid->sc_file->fi_fhandle)) {
		status = nfserr_bad_stateid;
		goto out_put_stid;
	}

	if (stid->sc_type != NFS4_LAYOUT_STID) {
		ls = nfsd4_alloc_layout_stateid(cstate, stid, layout_type);
		nfs4_put_stid(stid);

		status = nfserr_jukebox;
		if (!ls)
			goto out;
		mutex_lock(&ls->ls_mutex);
	} else {
		ls = container_of(stid, struct nfs4_layout_stateid, ls_stid);

		status = nfserr_bad_stateid;
		mutex_lock(&ls->ls_mutex);
		if (stateid->si_generation > stid->sc_stateid.si_generation)
			goto out_unlock_stid;
		if (layout_type != ls->ls_layout_type)
			goto out_unlock_stid;
	}

	*lsp = ls;
	return 0;

out_unlock_stid:
	mutex_unlock(&ls->ls_mutex);
out_put_stid:
	nfs4_put_stid(stid);
out:
	return status;
}

static void
nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
{
	spin_lock(&ls->ls_lock);
	if (ls->ls_recalled)
		goto out_unlock;

	ls->ls_recalled = true;
	atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
	if (list_empty(&ls->ls_layouts))
		goto out_unlock;

	trace_layout_recall(&ls->ls_stid.sc_stateid);

	atomic_inc(&ls->ls_stid.sc_count);
	nfsd4_run_cb(&ls->ls_recall);

out_unlock:
	spin_unlock(&ls->ls_lock);
}

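/*
 * Layout segment helpers.  layout_end() saturates to NFS4_MAX_UINT64 if
 * offset + length overflows, which also covers the "to end of file" case.
 */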
static inline u64
layout_end(struct nfsd4_layout_seg *seg)
{
	u64 end = seg->offset + seg->length;
	return end >= seg->offset ? end : NFS4_MAX_UINT64;
}

static void
layout_update_len(struct nfsd4_layout_seg *lo, u64 end)
{
	if (end == NFS4_MAX_UINT64)
		lo->length = NFS4_MAX_UINT64;
	else
		lo->length = end - lo->offset;
}

static bool
layouts_overlapping(struct nfs4_layout *lo, struct nfsd4_layout_seg *s)
{
	if (s->iomode != IOMODE_ANY && s->iomode != lo->lo_seg.iomode)
		return false;
	if (layout_end(&lo->lo_seg) <= s->offset)
		return false;
	if (layout_end(s) <= lo->lo_seg.offset)
		return false;
	return true;
}

static bool
layouts_try_merge(struct nfsd4_layout_seg *lo, struct nfsd4_layout_seg *new)
{
	if (lo->iomode != new->iomode)
		return false;
	if (layout_end(new) < lo->offset)
		return false;
	if (layout_end(lo) < new->offset)
		return false;

	lo->offset = min(lo->offset, new->offset);
	layout_update_len(lo, max(layout_end(lo), layout_end(new)));
	return true;
}

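/*
 * Recall any layout on this file held by another stateid, and tell the
 * caller to return NFS4ERR_RECALLCONFLICT so that the client retries
 * later.  Must be called with fi_lock held.
 */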
static __be32
nfsd4_recall_conflict(struct nfs4_layout_stateid *ls)
{
	struct nfs4_file *fp = ls->ls_stid.sc_file;
	struct nfs4_layout_stateid *l, *n;
	__be32 nfserr = nfs_ok;

	assert_spin_locked(&fp->fi_lock);

	list_for_each_entry_safe(l, n, &fp->fi_lo_states, ls_perfile) {
		if (l != ls) {
			nfsd4_recall_file_layout(l);
			nfserr = nfserr_recallconflict;
		}
	}

	return nfserr;
}

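/*
 * Hash a granted layout segment into the stateid, merging it with an
 * existing segment where possible.  If a new nfs4_layout has to be
 * allocated, both locks are dropped for the GFP_KERNEL allocation and the
 * conflict and merge checks are redone afterwards.
 */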
__be32
nfsd4_insert_layout(struct nfsd4_layoutget *lgp, struct nfs4_layout_stateid *ls)
{
	struct nfsd4_layout_seg *seg = &lgp->lg_seg;
	struct nfs4_file *fp = ls->ls_stid.sc_file;
	struct nfs4_layout *lp, *new = NULL;
	__be32 nfserr;

	spin_lock(&fp->fi_lock);
	nfserr = nfsd4_recall_conflict(ls);
	if (nfserr)
		goto out;
	spin_lock(&ls->ls_lock);
	list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
		if (layouts_try_merge(&lp->lo_seg, seg))
			goto done;
	}
	spin_unlock(&ls->ls_lock);
	spin_unlock(&fp->fi_lock);

	new = kmem_cache_alloc(nfs4_layout_cache, GFP_KERNEL);
	if (!new)
		return nfserr_jukebox;
	memcpy(&new->lo_seg, seg, sizeof(lp->lo_seg));
	new->lo_state = ls;

	spin_lock(&fp->fi_lock);
	nfserr = nfsd4_recall_conflict(ls);
	if (nfserr)
		goto out;
	spin_lock(&ls->ls_lock);
	list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
		if (layouts_try_merge(&lp->lo_seg, seg))
			goto done;
	}

	atomic_inc(&ls->ls_stid.sc_count);
	list_add_tail(&new->lo_perstate, &ls->ls_layouts);
	new = NULL;
done:
	nfs4_inc_and_copy_stateid(&lgp->lg_sid, &ls->ls_stid);
	spin_unlock(&ls->ls_lock);
out:
	spin_unlock(&fp->fi_lock);
	if (new)
		kmem_cache_free(nfs4_layout_cache, new);
	return nfserr;
}

static void
nfsd4_free_layouts(struct list_head *reaplist)
{
	while (!list_empty(reaplist)) {
		struct nfs4_layout *lp = list_first_entry(reaplist,
				struct nfs4_layout, lo_perstate);

		list_del(&lp->lo_perstate);
		nfs4_put_stid(&lp->lo_state->ls_stid);
		kmem_cache_free(nfs4_layout_cache, lp);
	}
}

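/*
 * Trim a cached layout segment against a returned range.  A return that
 * covers the whole segment moves it to the reaplist; a return that would
 * split the segment in the middle is not supported, and the segment is
 * retained whole in that case.
 */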
static void
nfsd4_return_file_layout(struct nfs4_layout *lp, struct nfsd4_layout_seg *seg,
		struct list_head *reaplist)
{
	struct nfsd4_layout_seg *lo = &lp->lo_seg;
	u64 end = layout_end(lo);

	if (seg->offset <= lo->offset) {
		if (layout_end(seg) >= end) {
			list_move_tail(&lp->lo_perstate, reaplist);
			return;
		}
		lo->offset = layout_end(seg);
	} else {
		/* retain the whole layout segment on a split. */
		if (layout_end(seg) < end) {
			dprintk("%s: split not supported\n", __func__);
			return;
		}
		end = seg->offset;
	}

	layout_update_len(lo, end);
}

__be32
nfsd4_return_file_layouts(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		struct nfsd4_layoutreturn *lrp)
{
	struct nfs4_layout_stateid *ls;
	struct nfs4_layout *lp, *n;
	LIST_HEAD(reaplist);
	__be32 nfserr;
	int found = 0;

	nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lrp->lr_sid,
						false, lrp->lr_layout_type,
						&ls);
	if (nfserr) {
		trace_layout_return_lookup_fail(&lrp->lr_sid);
		return nfserr;
	}

	spin_lock(&ls->ls_lock);
	list_for_each_entry_safe(lp, n, &ls->ls_layouts, lo_perstate) {
		if (layouts_overlapping(lp, &lrp->lr_seg)) {
			nfsd4_return_file_layout(lp, &lrp->lr_seg, &reaplist);
			found++;
		}
	}
	if (!list_empty(&ls->ls_layouts)) {
		if (found)
			nfs4_inc_and_copy_stateid(&lrp->lr_sid, &ls->ls_stid);
		lrp->lrs_present = 1;
	} else {
		trace_layoutstate_unhash(&ls->ls_stid.sc_stateid);
		nfs4_unhash_stid(&ls->ls_stid);
		lrp->lrs_present = 0;
	}
	spin_unlock(&ls->ls_lock);

	mutex_unlock(&ls->ls_mutex);
	nfs4_put_stid(&ls->ls_stid);
	nfsd4_free_layouts(&reaplist);
	return nfs_ok;
}

__be32
nfsd4_return_client_layouts(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		struct nfsd4_layoutreturn *lrp)
{
	struct nfs4_layout_stateid *ls, *n;
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_layout *lp, *t;
	LIST_HEAD(reaplist);

	lrp->lrs_present = 0;

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) {
		if (ls->ls_layout_type != lrp->lr_layout_type)
			continue;

		if (lrp->lr_return_type == RETURN_FSID &&
		    !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle,
				   &cstate->current_fh.fh_handle))
			continue;

		spin_lock(&ls->ls_lock);
		list_for_each_entry_safe(lp, t, &ls->ls_layouts, lo_perstate) {
			if (lrp->lr_seg.iomode == IOMODE_ANY ||
			    lrp->lr_seg.iomode == lp->lo_seg.iomode)
				list_move_tail(&lp->lo_perstate, &reaplist);
		}
		spin_unlock(&ls->ls_lock);
	}
	spin_unlock(&clp->cl_lock);

	nfsd4_free_layouts(&reaplist);
	return 0;
}

static void
nfsd4_return_all_layouts(struct nfs4_layout_stateid *ls,
		struct list_head *reaplist)
{
	spin_lock(&ls->ls_lock);
	list_splice_init(&ls->ls_layouts, reaplist);
	spin_unlock(&ls->ls_lock);
}

void
nfsd4_return_all_client_layouts(struct nfs4_client *clp)
{
	struct nfs4_layout_stateid *ls, *n;
	LIST_HEAD(reaplist);

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt)
		nfsd4_return_all_layouts(ls, &reaplist);
	spin_unlock(&clp->cl_lock);

	nfsd4_free_layouts(&reaplist);
}

void
nfsd4_return_all_file_layouts(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_layout_stateid *ls, *n;
	LIST_HEAD(reaplist);

	spin_lock(&fp->fi_lock);
	list_for_each_entry_safe(ls, n, &fp->fi_lo_states, ls_perfile) {
		if (ls->ls_stid.sc_client == clp)
			nfsd4_return_all_layouts(ls, &reaplist);
	}
	spin_unlock(&fp->fi_lock);

	nfsd4_free_layouts(&reaplist);
}

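/*
 * The client failed to respond to a layout recall, so fence it off by
 * running the /sbin/nfsd-recall-failed usermode helper with the client
 * address and the filesystem identifier as arguments.
 */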
static void
nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls)
{
	struct nfs4_client *clp = ls->ls_stid.sc_client;
	char addr_str[INET6_ADDRSTRLEN];
	static char *envp[] = {
		"HOME=/",
		"TERM=linux",
		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
		NULL
	};
	char *argv[8];
	int error;

	rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str));

	trace_layout_recall_fail(&ls->ls_stid.sc_stateid);

	printk(KERN_WARNING
		"nfsd: client %s failed to respond to layout recall. "
		"Fencing..\n", addr_str);

	argv[0] = "/sbin/nfsd-recall-failed";
	argv[1] = addr_str;
	argv[2] = ls->ls_file->f_path.mnt->mnt_sb->s_id;
	argv[3] = NULL;

	error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
	if (error) {
		printk(KERN_ERR "nfsd: fence failed for client %s: %d!\n",
			addr_str, error);
	}
}

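/*
 * Bump and snapshot the stateid under ls_mutex before the recall is
 * transmitted, so the CB_LAYOUTRECALL carries the current generation.
 */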
static void
nfsd4_cb_layout_prepare(struct nfsd4_callback *cb)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);

	mutex_lock(&ls->ls_mutex);
	nfs4_inc_and_copy_stateid(&ls->ls_recall_sid, &ls->ls_stid);
	mutex_unlock(&ls->ls_mutex);
}

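/*
 * Completion handler for CB_LAYOUTRECALL: on success or NFS4ERR_DELAY the
 * client is polled (with a small rpc_delay) for up to two lease periods;
 * after that, or on any unexpected error, the client is fenced.
 */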
static int
nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);
	struct nfsd_net *nn;
	ktime_t now, cutoff;
	LIST_HEAD(reaplist);

	switch (task->tk_status) {
	case 0:
	case -NFS4ERR_DELAY:
		/*
		 * Anything left? If not, then call it done. Note that we don't
		 * take the spinlock since this is an optimization and nothing
		 * should get added until the cb counter goes to zero.
		 */
		if (list_empty(&ls->ls_layouts))
			return 1;

		/* Poll the client until it's done with the layout */
		now = ktime_get();
		nn = net_generic(ls->ls_stid.sc_client->net, nfsd_net_id);

		/* Client gets 2 lease periods to return it */
		cutoff = ktime_add_ns(task->tk_start,
				nn->nfsd4_lease * NSEC_PER_SEC * 2);

		if (ktime_before(now, cutoff)) {
			rpc_delay(task, HZ/100); /* 10 milliseconds */
			return 0;
		}
		/* Fallthrough */
	case -NFS4ERR_NOMATCHING_LAYOUT:
		trace_layout_recall_done(&ls->ls_stid.sc_stateid);
		task->tk_status = 0;
		return 1;
	default:
		/*
		 * Unknown error or non-responding client, we'll need to fence.
		 */
		nfsd4_cb_layout_fail(ls);
		return -1;
	}
}

static void
nfsd4_cb_layout_release(struct nfsd4_callback *cb)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);
	LIST_HEAD(reaplist);

	trace_layout_recall_release(&ls->ls_stid.sc_stateid);

	nfsd4_return_all_layouts(ls, &reaplist);
	nfsd4_free_layouts(&reaplist);
	nfs4_put_stid(&ls->ls_stid);
}

static const struct nfsd4_callback_ops nfsd4_cb_layout_ops = {
	.prepare	= nfsd4_cb_layout_prepare,
	.done		= nfsd4_cb_layout_done,
	.release	= nfsd4_cb_layout_release,
};

static bool
nfsd4_layout_lm_break(struct file_lock *fl)
{
	/*
	 * We don't want the locks code to time out the lease for us;
	 * we'll remove it ourselves if a layout isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;
	nfsd4_recall_file_layout(fl->fl_owner);
	return false;
}

static int
nfsd4_layout_lm_change(struct file_lock *onlist, int arg,
		struct list_head *dispose)
{
	BUG_ON(!(arg & F_UNLCK));
	return lease_modify(onlist, arg, dispose);
}

static const struct lock_manager_operations nfsd4_layouts_lm_ops = {
	.lm_break	= nfsd4_layout_lm_break,
	.lm_change	= nfsd4_layout_lm_change,
};

int
nfsd4_init_pnfs(void)
{
	int i;

	for (i = 0; i < DEVID_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nfsd_devid_hash[i]);

	nfs4_layout_cache = kmem_cache_create("nfs4_layout",
			sizeof(struct nfs4_layout), 0, 0, NULL);
	if (!nfs4_layout_cache)
		return -ENOMEM;

	nfs4_layout_stateid_cache = kmem_cache_create("nfs4_layout_stateid",
			sizeof(struct nfs4_layout_stateid), 0, 0, NULL);
	if (!nfs4_layout_stateid_cache) {
		kmem_cache_destroy(nfs4_layout_cache);
		return -ENOMEM;
	}
	return 0;
}

void
nfsd4_exit_pnfs(void)
{
	int i;

	kmem_cache_destroy(nfs4_layout_cache);
	kmem_cache_destroy(nfs4_layout_stateid_cache);

	for (i = 0; i < DEVID_HASH_SIZE; i++) {
		struct nfsd4_deviceid_map *map, *n;

		list_for_each_entry_safe(map, n, &nfsd_devid_hash[i], hash)
			kfree(map);
	}
}