/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_CALLBACK

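/*
 * CB_GETATTR: the server asks for attributes that a write delegation may
 * have made stale on its side.  Find the delegated inode by filehandle
 * and, if a write delegation is held, report the locally cached size and
 * change attribute (bumped if page requests are outstanding) plus the
 * cached ctime/mtime, masked by the bitmap the server asked for.
 */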
__be32 nfs4_callback_getattr(struct cb_getattrargs *args,
                             struct cb_getattrres *res,
                             struct cb_process_state *cps)
{
        struct nfs_delegation *delegation;
        struct nfs_inode *nfsi;
        struct inode *inode;

        res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
        if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
                goto out;

        res->bitmap[0] = res->bitmap[1] = 0;
        res->status = htonl(NFS4ERR_BADHANDLE);

        dprintk_rcu("NFS: GETATTR callback request from %s\n",
                rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

        inode = nfs_delegation_find_inode(cps->clp, &args->fh);
        if (inode == NULL) {
                trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
                                -ntohl(res->status));
                goto out;
        }
        nfsi = NFS_I(inode);
        rcu_read_lock();
        delegation = rcu_dereference(nfsi->delegation);
        if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
                goto out_iput;
        res->size = i_size_read(inode);
        res->change_attr = delegation->change_attr;
        if (nfsi->nrequests != 0)
                res->change_attr++;
        res->ctime = inode->i_ctime;
        res->mtime = inode->i_mtime;
        res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
                args->bitmap[0];
        res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
                args->bitmap[1];
        res->status = 0;
out_iput:
        rcu_read_unlock();
        trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
        iput(inode);
out:
        dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
        return res->status;
}

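/*
 * CB_RECALL: the server wants a delegation back.  Find the inode holding
 * the delegation and hand the actual return off to the state manager via
 * nfs_async_inode_return_delegation().  A missing inode maps to
 * NFS4ERR_BADHANDLE, an unknown stateid to NFS4ERR_BAD_STATEID.
 */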
__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
                            struct cb_process_state *cps)
{
        struct inode *inode;
        __be32 res;

        res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
        if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
                goto out;

        dprintk_rcu("NFS: RECALL callback request from %s\n",
                rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

        res = htonl(NFS4ERR_BADHANDLE);
        inode = nfs_delegation_find_inode(cps->clp, &args->fh);
        if (inode == NULL) {
                trace_nfs4_cb_recall(cps->clp, &args->fh, NULL,
                                &args->stateid, -ntohl(res));
                goto out;
        }
        /* Set up a helper thread to actually return the delegation */
        switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
        case 0:
                res = 0;
                break;
        case -ENOENT:
                res = htonl(NFS4ERR_BAD_STATEID);
                break;
        default:
                res = htonl(NFS4ERR_RESOURCE);
        }
        trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
                        &args->stateid, -ntohl(res));
        iput(inode);
out:
        dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
        return res;
}

#if defined(CONFIG_NFS_V4_1)

/*
 * Lookup a layout by filehandle.
 *
 * Note: gets a refcount on the layout hdr and on its respective inode.
 * Caller must put the layout hdr and the inode.
 *
 * TODO: keep track of all layouts (and delegations) in a hash table
 * hashed by filehandle.
 */
static struct pnfs_layout_hdr * get_layout_by_fh_locked(struct nfs_client *clp,
                struct nfs_fh *fh, nfs4_stateid *stateid)
{
        struct nfs_server *server;
        struct inode *ino;
        struct pnfs_layout_hdr *lo;

        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                list_for_each_entry(lo, &server->layouts, plh_layouts) {
                        if (!nfs4_stateid_match_other(&lo->plh_stateid, stateid))
                                continue;
                        if (nfs_compare_fh(fh, &NFS_I(lo->plh_inode)->fh))
                                continue;
                        ino = igrab(lo->plh_inode);
                        if (!ino)
                                break;
                        spin_lock(&ino->i_lock);
                        /* Is this layout in the process of being freed? */
                        if (NFS_I(ino)->layout != lo) {
                                spin_unlock(&ino->i_lock);
                                iput(ino);
                                break;
                        }
                        pnfs_get_layout_hdr(lo);
                        spin_unlock(&ino->i_lock);
                        return lo;
                }
        }

        return NULL;
}

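/*
 * Wrapper around get_layout_by_fh_locked() that takes the per-client
 * cl_lock and the RCU read lock needed to walk clp->cl_superblocks.
 */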
static struct pnfs_layout_hdr * get_layout_by_fh(struct nfs_client *clp,
                struct nfs_fh *fh, nfs4_stateid *stateid)
{
        struct pnfs_layout_hdr *lo;

        spin_lock(&clp->cl_lock);
        rcu_read_lock();
        lo = get_layout_by_fh_locked(clp, fh, stateid);
        rcu_read_unlock();
        spin_unlock(&clp->cl_lock);

        return lo;
}

/*
 * Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
 */
static bool pnfs_check_stateid_sequence(struct pnfs_layout_hdr *lo,
                                        const nfs4_stateid *new)
{
        u32 oldseq, newseq;

        oldseq = be32_to_cpu(lo->plh_stateid.seqid);
        newseq = be32_to_cpu(new->seqid);

        if (newseq > oldseq + 1)
                return false;
        return true;
}

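/*
 * Handle CB_LAYOUTRECALL for a single file: look up the layout by
 * filehandle/stateid, validate the recall stateid sequencing, kick off a
 * layoutcommit, and mark the matching layout segments for return.
 * Returns an NFSv4 status code (in host order) for the callback reply.
 */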
static u32 initiate_file_draining(struct nfs_client *clp,
                                  struct cb_layoutrecallargs *args)
{
        struct inode *ino;
        struct pnfs_layout_hdr *lo;
        u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
        LIST_HEAD(free_me_list);

        lo = get_layout_by_fh(clp, &args->cbl_fh, &args->cbl_stateid);
        if (!lo) {
                trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, NULL,
                                &args->cbl_stateid, -rv);
                goto out;
        }

        ino = lo->plh_inode;

        spin_lock(&ino->i_lock);
        if (!pnfs_check_stateid_sequence(lo, &args->cbl_stateid)) {
                rv = NFS4ERR_DELAY;
                goto unlock;
        }
        pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
        spin_unlock(&ino->i_lock);

        pnfs_layoutcommit_inode(ino, false);

        spin_lock(&ino->i_lock);
        /*
         * Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
         */
        if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
                rv = NFS4ERR_DELAY;
                goto unlock;
        }

        if (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
                                &args->cbl_range,
                                be32_to_cpu(args->cbl_stateid.seqid))) {
                rv = NFS4_OK;
                goto unlock;
        }

        if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
                NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
                                &args->cbl_range);
        }
        pnfs_mark_layout_returned_if_empty(lo);
unlock:
        spin_unlock(&ino->i_lock);
        pnfs_free_lseg_list(&free_me_list);
        /* Free all lsegs that are attached to commit buckets */
        nfs_commit_inode(ino, 0);
        pnfs_put_layout_hdr(lo);
        trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
                        &args->cbl_stateid, -rv);
        iput(ino);
out:
        return rv;
}

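/*
 * Handle a bulk CB_LAYOUTRECALL (RETURN_FSID or RETURN_ALL) by tearing
 * down every matching layout held from this server.  A non-zero result
 * from the pnfs destroy helpers is reported back as NFS4ERR_DELAY.
 */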
static u32 initiate_bulk_draining(struct nfs_client *clp,
                                  struct cb_layoutrecallargs *args)
{
        int stat;

        if (args->cbl_recall_type == RETURN_FSID)
                stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true);
        else
                stat = pnfs_destroy_layouts_byclid(clp, true);
        if (stat != 0)
                return NFS4ERR_DELAY;
        return NFS4ERR_NOMATCHING_LAYOUT;
}

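/* Dispatch a CB_LAYOUTRECALL to the per-file or bulk draining path. */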
static u32 do_callback_layoutrecall(struct nfs_client *clp,
                                    struct cb_layoutrecallargs *args)
{
        u32 res;

        dprintk("%s enter, type=%i\n", __func__, args->cbl_recall_type);
        if (args->cbl_recall_type == RETURN_FILE)
                res = initiate_file_draining(clp, args);
        else
                res = initiate_bulk_draining(clp, args);
        dprintk("%s returning %i\n", __func__, res);
        return res;
}

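/*
 * CB_LAYOUTRECALL entry point, called from callback compound processing.
 * cps->clp is only set once CB_SEQUENCE has identified the session.
 */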
__be32 nfs4_callback_layoutrecall(struct cb_layoutrecallargs *args,
                                  void *dummy, struct cb_process_state *cps)
{
        u32 res;

        dprintk("%s: -->\n", __func__);

        if (cps->clp)
                res = do_callback_layoutrecall(cps->clp, args);
        else
                res = NFS4ERR_OP_NOT_IN_SESSION;

        dprintk("%s: exit with status = %d\n", __func__, res);
        return cpu_to_be32(res);
}

static void pnfs_recall_all_layouts(struct nfs_client *clp)
{
        struct cb_layoutrecallargs args;

        /* Pretend we got a CB_LAYOUTRECALL(ALL) */
        memset(&args, 0, sizeof(args));
        args.cbl_recall_type = RETURN_ALL;
        /* FIXME we ignore errors, what should we do? */
        do_callback_layoutrecall(clp, &args);
}

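/*
 * CB_NOTIFY_DEVICEID: the server reports that device IDs have changed or
 * gone away.  For each notification, find a superblock whose layout
 * driver matches the reported layout type and purge the device ID from
 * that driver's deviceid cache.
 */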
__be32 nfs4_callback_devicenotify(struct cb_devicenotifyargs *args,
                                  void *dummy, struct cb_process_state *cps)
{
        int i;
        __be32 res = 0;
        struct nfs_client *clp = cps->clp;
        struct nfs_server *server = NULL;

        dprintk("%s: -->\n", __func__);

        if (!clp) {
                res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
                goto out;
        }

        for (i = 0; i < args->ndevs; i++) {
                struct cb_devicenotifyitem *dev = &args->devs[i];

                if (!server ||
                    server->pnfs_curr_ld->id != dev->cbd_layout_type) {
                        rcu_read_lock();
                        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
                                if (server->pnfs_curr_ld &&
                                    server->pnfs_curr_ld->id == dev->cbd_layout_type) {
                                        rcu_read_unlock();
                                        goto found;
                                }
                        rcu_read_unlock();
                        dprintk("%s: layout type %u not found\n",
                                __func__, dev->cbd_layout_type);
                        continue;
                }

        found:
                nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id);
        }

out:
        kfree(args->devs);
        dprintk("%s: exit with status = %u\n",
                __func__, be32_to_cpu(res));
        return res;
}

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound.  The caller updates the slot's
 * sequence number once the rest of the request has been validated.
 *
 * We don't yet implement a duplicate request cache; instead we set the
 * backchannel ca_maxresponsesize_cached to zero.  This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single-slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table.  The lower layer
 * guarantees a single outstanding callback request at a time.
 */
static __be32
validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
                const struct cb_sequenceargs *args)
{
        dprintk("%s enter. slotid %u seqid %u, slot table seqid: %u\n",
                __func__, args->csa_slotid, args->csa_sequenceid, slot->seq_nr);

        if (args->csa_slotid > tbl->server_highest_slotid)
                return htonl(NFS4ERR_BADSLOT);

        /* Replay */
        if (args->csa_sequenceid == slot->seq_nr) {
                dprintk("%s seqid %u is a replay\n",
                        __func__, args->csa_sequenceid);
                if (nfs4_test_locked_slot(tbl, slot->slot_nr))
                        return htonl(NFS4ERR_DELAY);
                /* Signal process_op to set this error on next op */
                if (args->csa_cachethis == 0)
                        return htonl(NFS4ERR_RETRY_UNCACHED_REP);

                /* Liar! We never allowed you to set csa_cachethis != 0 */
                return htonl(NFS4ERR_SEQ_FALSE_RETRY);
        }

        /* Wraparound */
        if (unlikely(slot->seq_nr == 0xFFFFFFFFU)) {
                if (args->csa_sequenceid == 1)
                        return htonl(NFS4_OK);
        } else if (likely(args->csa_sequenceid == slot->seq_nr + 1))
                return htonl(NFS4_OK);

        /* Misordered request */
        return htonl(NFS4ERR_SEQ_MISORDERED);
}

/*
 * For each referring call triple, check the session's slot table for
 * a match. If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static bool referring_call_exists(struct nfs_client *clp,
                                  uint32_t nrclists,
                                  struct referring_call_list *rclists)
{
        bool status = false;
        int i, j;
        struct nfs4_session *session;
        struct nfs4_slot_table *tbl;
        struct referring_call_list *rclist;
        struct referring_call *ref;

        /*
         * XXX When client trunking is implemented, this becomes
         * a session lookup from within the loop
         */
        session = clp->cl_session;
        tbl = &session->fc_slot_table;

        for (i = 0; i < nrclists; i++) {
                rclist = &rclists[i];
                if (memcmp(session->sess_id.data,
                           rclist->rcl_sessionid.data,
                           NFS4_MAX_SESSIONID_LEN) != 0)
                        continue;

                for (j = 0; j < rclist->rcl_nrefcalls; j++) {
                        ref = &rclist->rcl_refcalls[j];

                        dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u "
                                "slotid %u\n", __func__,
                                ((u32 *)&rclist->rcl_sessionid.data)[0],
                                ((u32 *)&rclist->rcl_sessionid.data)[1],
                                ((u32 *)&rclist->rcl_sessionid.data)[2],
                                ((u32 *)&rclist->rcl_sessionid.data)[3],
                                ref->rc_sequenceid, ref->rc_slotid);

                        spin_lock(&tbl->slot_tbl_lock);
                        status = (test_bit(ref->rc_slotid, tbl->used_slots) &&
                                  tbl->slots[ref->rc_slotid].seq_nr ==
                                        ref->rc_sequenceid);
                        spin_unlock(&tbl->slot_tbl_lock);
                        if (status)
                                goto out;
                }
        }

out:
        return status;
}

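/*
 * CB_SEQUENCE: validate the backchannel slot and sequence ID, reject
 * requests that ask for reply caching (we advertise no DRC), and delay
 * callbacks that refer to calls we have not yet seen answered.  On
 * success the slot is locked and recorded in cps->slot for the rest of
 * the compound.
 */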
__be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
                              struct cb_sequenceres *res,
                              struct cb_process_state *cps)
{
        struct nfs4_slot_table *tbl;
        struct nfs4_slot *slot;
        struct nfs_client *clp;
        int i;
        __be32 status = htonl(NFS4ERR_BADSESSION);

        clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
                                         &args->csa_sessionid, cps->minorversion);
        if (clp == NULL)
                goto out;

        if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
                goto out;

        tbl = &clp->cl_session->bc_slot_table;
        slot = tbl->slots + args->csa_slotid;

        /* Set up res before grabbing the spinlock */
        memcpy(&res->csr_sessionid, &args->csa_sessionid,
               sizeof(res->csr_sessionid));
        res->csr_sequenceid = args->csa_sequenceid;
        res->csr_slotid = args->csa_slotid;

        spin_lock(&tbl->slot_tbl_lock);
        /* state manager is resetting the session */
        if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
                status = htonl(NFS4ERR_DELAY);
                /* Return NFS4ERR_BADSESSION if we're draining the session
                 * in order to reset it.
                 */
                if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
                        status = htonl(NFS4ERR_BADSESSION);
                goto out_unlock;
        }

        status = htonl(NFS4ERR_BADSLOT);
        slot = nfs4_lookup_slot(tbl, args->csa_slotid);
        if (IS_ERR(slot))
                goto out_unlock;

        res->csr_highestslotid = tbl->server_highest_slotid;
        res->csr_target_highestslotid = tbl->target_highest_slotid;

        status = validate_seqid(tbl, slot, args);
        if (status)
                goto out_unlock;
        if (!nfs4_try_to_lock_slot(tbl, slot)) {
                status = htonl(NFS4ERR_DELAY);
                goto out_unlock;
        }
        cps->slot = slot;

        /* The ca_maxresponsesize_cached is 0 with no DRC */
        if (args->csa_cachethis != 0) {
                status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
                goto out_unlock;
        }

        /*
         * Check for pending referring calls. If a match is found, a
         * related callback was received before the response to the original
         * call.
         */
        if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
                status = htonl(NFS4ERR_DELAY);
                goto out_unlock;
        }

        /*
         * RFC5661 20.9.3
         * If CB_SEQUENCE returns an error, then the state of the slot
         * (sequence ID, cached reply) MUST NOT change.
         */
        slot->seq_nr = args->csa_sequenceid;
out_unlock:
        spin_unlock(&tbl->slot_tbl_lock);

out:
        cps->clp = clp; /* put in nfs4_callback_compound */
        for (i = 0; i < args->csa_nrclists; i++)
                kfree(args->csa_rclists[i].rcl_refcalls);
        kfree(args->csa_rclists);

        if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
                cps->drc_status = status;
                status = 0;
        } else
                res->csr_status = status;

        trace_nfs4_cb_sequence(args, res, status);
        dprintk("%s: exit with status = %d res->csr_status %d\n", __func__,
                ntohl(status), ntohl(res->csr_status));
        return status;
}

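/*
 * Check that a CB_RECALL_ANY type mask only contains bits we recognize
 * (i.e. bits covered by RCA4_TYPE_MASK_ALL).
 */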
static bool
validate_bitmap_values(unsigned long mask)
{
        return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}

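/*
 * CB_RECALL_ANY: the server is low on resources and asks the client to
 * voluntarily return some state.  Recall all layouts and/or expire
 * unused read/write delegations according to the bits set in
 * craa_type_mask.
 */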
__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy,
                               struct cb_process_state *cps)
{
        __be32 status;
        fmode_t flags = 0;

        status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
        if (!cps->clp) /* set in cb_sequence */
                goto out;

        dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
                rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

        status = cpu_to_be32(NFS4ERR_INVAL);
        if (!validate_bitmap_values(args->craa_type_mask))
                goto out;

        status = cpu_to_be32(NFS4_OK);
        if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *)
                     &args->craa_type_mask))
                flags = FMODE_READ;
        if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *)
                     &args->craa_type_mask))
                flags |= FMODE_WRITE;
        if (test_bit(RCA4_TYPE_MASK_FILE_LAYOUT, (const unsigned long *)
                     &args->craa_type_mask))
                pnfs_recall_all_layouts(cps->clp);
        if (flags)
                nfs_expire_unused_delegation_types(cps->clp, flags);
out:
        dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
        return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy,
                                struct cb_process_state *cps)
{
        struct nfs4_slot_table *fc_tbl;
        __be32 status;

        status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
        if (!cps->clp) /* set in cb_sequence */
                goto out;

        dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n",
                rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
                args->crsa_target_highest_slotid);

        fc_tbl = &cps->clp->cl_session->fc_slot_table;

        status = htonl(NFS4_OK);

        nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
        nfs41_notify_server(cps->clp);
out:
        dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
        return status;
}
#endif /* CONFIG_NFS_V4_1 */