NFS41: map NFS4ERR_LAYOUTUNAVAILABLE to ENODATA
fs/nfs/nfs4proc.c
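
Note on the change named in the title: the NFS4ERR_LAYOUTUNAVAILABLE -> -ENODATA mapping is not part of nfs4_map_errors() below; the hunk lives later in this file, in the pNFS LAYOUTGET error handling that falls outside this excerpt. The fragment that follows is only a rough sketch of the idea, not the verbatim hunk: the helper name is hypothetical, and the NFS4ERR_* and errno constants come from the kernel headers this file already includes.

static int nfs4_layoutget_err_sketch(int nfs4err)
{
	switch (nfs4err) {
	case -NFS4ERR_LAYOUTUNAVAILABLE:
		/* The server will not hand out a pNFS layout for this file:
		 * report -ENODATA so the caller falls back to ordinary
		 * (in-band) I/O through the MDS instead of failing. */
		return -ENODATA;
	default:
		/* Everything else continues through the generic
		 * nfs4_map_errors()/exception handling shown below. */
		return -EIO;
	}
}
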
1 /*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/file.h>
42 #include <linux/string.h>
43 #include <linux/ratelimit.h>
44 #include <linux/printk.h>
45 #include <linux/slab.h>
46 #include <linux/sunrpc/clnt.h>
47 #include <linux/nfs.h>
48 #include <linux/nfs4.h>
49 #include <linux/nfs_fs.h>
50 #include <linux/nfs_page.h>
51 #include <linux/nfs_mount.h>
52 #include <linux/namei.h>
53 #include <linux/mount.h>
54 #include <linux/module.h>
55 #include <linux/xattr.h>
56 #include <linux/utsname.h>
57 #include <linux/freezer.h>
58
59 #include "nfs4_fs.h"
60 #include "delegation.h"
61 #include "internal.h"
62 #include "iostat.h"
63 #include "callback.h"
64 #include "pnfs.h"
65 #include "netns.h"
66 #include "nfs4idmap.h"
67 #include "nfs4session.h"
68 #include "fscache.h"
69
70 #include "nfs4trace.h"
71
72 #define NFSDBG_FACILITY NFSDBG_PROC
73
74 #define NFS4_POLL_RETRY_MIN (HZ/10)
75 #define NFS4_POLL_RETRY_MAX (15*HZ)
76
77 struct nfs4_opendata;
78 static int _nfs4_proc_open(struct nfs4_opendata *data);
79 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
80 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
81 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
82 static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *label);
83 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label);
84 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
85 struct nfs_fattr *fattr, struct iattr *sattr,
86 struct nfs4_state *state, struct nfs4_label *ilabel,
87 struct nfs4_label *olabel);
88 #ifdef CONFIG_NFS_V4_1
89 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
90 struct rpc_cred *);
91 static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
92 struct rpc_cred *);
93 #endif
94
95 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
96 static inline struct nfs4_label *
97 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
98 struct iattr *sattr, struct nfs4_label *label)
99 {
100 int err;
101
102 if (label == NULL)
103 return NULL;
104
105 if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
106 return NULL;
107
108 err = security_dentry_init_security(dentry, sattr->ia_mode,
109 &dentry->d_name, (void **)&label->label, &label->len);
110 if (err == 0)
111 return label;
112
113 return NULL;
114 }
115 static inline void
116 nfs4_label_release_security(struct nfs4_label *label)
117 {
118 if (label)
119 security_release_secctx(label->label, label->len);
120 }
121 static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
122 {
123 if (label)
124 return server->attr_bitmask;
125
126 return server->attr_bitmask_nl;
127 }
128 #else
129 static inline struct nfs4_label *
130 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
131 struct iattr *sattr, struct nfs4_label *l)
132 { return NULL; }
133 static inline void
134 nfs4_label_release_security(struct nfs4_label *label)
135 { return; }
136 static inline u32 *
137 nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
138 { return server->attr_bitmask; }
139 #endif
140
141 /* Prevent leaks of NFSv4 errors into userland */
142 static int nfs4_map_errors(int err)
143 {
144 if (err >= -1000)
145 return err;
146 switch (err) {
147 case -NFS4ERR_RESOURCE:
148 case -NFS4ERR_LAYOUTTRYLATER:
149 case -NFS4ERR_RECALLCONFLICT:
150 return -EREMOTEIO;
151 case -NFS4ERR_WRONGSEC:
152 case -NFS4ERR_WRONG_CRED:
153 return -EPERM;
154 case -NFS4ERR_BADOWNER:
155 case -NFS4ERR_BADNAME:
156 return -EINVAL;
157 case -NFS4ERR_SHARE_DENIED:
158 return -EACCES;
159 case -NFS4ERR_MINOR_VERS_MISMATCH:
160 return -EPROTONOSUPPORT;
161 case -NFS4ERR_FILE_OPEN:
162 return -EBUSY;
163 default:
164 dprintk("%s could not handle NFSv4 error %d\n",
165 __func__, -err);
166 break;
167 }
168 return -EIO;
169 }
170
171 /*
172 * This is our standard bitmap for GETATTR requests.
173 */
174 const u32 nfs4_fattr_bitmap[3] = {
175 FATTR4_WORD0_TYPE
176 | FATTR4_WORD0_CHANGE
177 | FATTR4_WORD0_SIZE
178 | FATTR4_WORD0_FSID
179 | FATTR4_WORD0_FILEID,
180 FATTR4_WORD1_MODE
181 | FATTR4_WORD1_NUMLINKS
182 | FATTR4_WORD1_OWNER
183 | FATTR4_WORD1_OWNER_GROUP
184 | FATTR4_WORD1_RAWDEV
185 | FATTR4_WORD1_SPACE_USED
186 | FATTR4_WORD1_TIME_ACCESS
187 | FATTR4_WORD1_TIME_METADATA
188 | FATTR4_WORD1_TIME_MODIFY
189 | FATTR4_WORD1_MOUNTED_ON_FILEID,
190 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
191 FATTR4_WORD2_SECURITY_LABEL
192 #endif
193 };
194
195 static const u32 nfs4_pnfs_open_bitmap[3] = {
196 FATTR4_WORD0_TYPE
197 | FATTR4_WORD0_CHANGE
198 | FATTR4_WORD0_SIZE
199 | FATTR4_WORD0_FSID
200 | FATTR4_WORD0_FILEID,
201 FATTR4_WORD1_MODE
202 | FATTR4_WORD1_NUMLINKS
203 | FATTR4_WORD1_OWNER
204 | FATTR4_WORD1_OWNER_GROUP
205 | FATTR4_WORD1_RAWDEV
206 | FATTR4_WORD1_SPACE_USED
207 | FATTR4_WORD1_TIME_ACCESS
208 | FATTR4_WORD1_TIME_METADATA
209 | FATTR4_WORD1_TIME_MODIFY,
210 FATTR4_WORD2_MDSTHRESHOLD
211 };
212
213 static const u32 nfs4_open_noattr_bitmap[3] = {
214 FATTR4_WORD0_TYPE
215 | FATTR4_WORD0_CHANGE
216 | FATTR4_WORD0_FILEID,
217 };
218
219 const u32 nfs4_statfs_bitmap[3] = {
220 FATTR4_WORD0_FILES_AVAIL
221 | FATTR4_WORD0_FILES_FREE
222 | FATTR4_WORD0_FILES_TOTAL,
223 FATTR4_WORD1_SPACE_AVAIL
224 | FATTR4_WORD1_SPACE_FREE
225 | FATTR4_WORD1_SPACE_TOTAL
226 };
227
228 const u32 nfs4_pathconf_bitmap[3] = {
229 FATTR4_WORD0_MAXLINK
230 | FATTR4_WORD0_MAXNAME,
231 0
232 };
233
234 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
235 | FATTR4_WORD0_MAXREAD
236 | FATTR4_WORD0_MAXWRITE
237 | FATTR4_WORD0_LEASE_TIME,
238 FATTR4_WORD1_TIME_DELTA
239 | FATTR4_WORD1_FS_LAYOUT_TYPES,
240 FATTR4_WORD2_LAYOUT_BLKSIZE
241 | FATTR4_WORD2_CLONE_BLKSIZE
242 };
243
244 const u32 nfs4_fs_locations_bitmap[3] = {
245 FATTR4_WORD0_TYPE
246 | FATTR4_WORD0_CHANGE
247 | FATTR4_WORD0_SIZE
248 | FATTR4_WORD0_FSID
249 | FATTR4_WORD0_FILEID
250 | FATTR4_WORD0_FS_LOCATIONS,
251 FATTR4_WORD1_MODE
252 | FATTR4_WORD1_NUMLINKS
253 | FATTR4_WORD1_OWNER
254 | FATTR4_WORD1_OWNER_GROUP
255 | FATTR4_WORD1_RAWDEV
256 | FATTR4_WORD1_SPACE_USED
257 | FATTR4_WORD1_TIME_ACCESS
258 | FATTR4_WORD1_TIME_METADATA
259 | FATTR4_WORD1_TIME_MODIFY
260 | FATTR4_WORD1_MOUNTED_ON_FILEID,
261 };
262
263 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
264 struct nfs4_readdir_arg *readdir)
265 {
266 __be32 *start, *p;
267
268 if (cookie > 2) {
269 readdir->cookie = cookie;
270 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
271 return;
272 }
273
274 readdir->cookie = 0;
275 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
276 if (cookie == 2)
277 return;
278
279 /*
280 * NFSv4 servers do not return entries for '.' and '..'
281 * Therefore, we fake these entries here. We let '.'
282 * have cookie 0 and '..' have cookie 1. Note that
283 * when talking to the server, we always send cookie 0
284 * instead of 1 or 2.
285 */
286 start = p = kmap_atomic(*readdir->pages);
287
288 if (cookie == 0) {
289 *p++ = xdr_one; /* next */
290 *p++ = xdr_zero; /* cookie, first word */
291 *p++ = xdr_one; /* cookie, second word */
292 *p++ = xdr_one; /* entry len */
293 memcpy(p, ".\0\0\0", 4); /* entry */
294 p++;
295 *p++ = xdr_one; /* bitmap length */
296 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
297 *p++ = htonl(8); /* attribute buffer length */
298 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
299 }
300
301 *p++ = xdr_one; /* next */
302 *p++ = xdr_zero; /* cookie, first word */
303 *p++ = xdr_two; /* cookie, second word */
304 *p++ = xdr_two; /* entry len */
305 memcpy(p, "..\0\0", 4); /* entry */
306 p++;
307 *p++ = xdr_one; /* bitmap length */
308 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
309 *p++ = htonl(8); /* attribute buffer length */
310 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
311
312 readdir->pgbase = (char *)p - (char *)start;
313 readdir->count -= readdir->pgbase;
314 kunmap_atomic(start);
315 }
316
317 static long nfs4_update_delay(long *timeout)
318 {
319 long ret;
320 if (!timeout)
321 return NFS4_POLL_RETRY_MAX;
322 if (*timeout <= 0)
323 *timeout = NFS4_POLL_RETRY_MIN;
324 if (*timeout > NFS4_POLL_RETRY_MAX)
325 *timeout = NFS4_POLL_RETRY_MAX;
326 ret = *timeout;
327 *timeout <<= 1;
328 return ret;
329 }
330
331 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
332 {
333 int res = 0;
334
335 might_sleep();
336
337 freezable_schedule_timeout_killable_unsafe(
338 nfs4_update_delay(timeout));
339 if (fatal_signal_pending(current))
340 res = -ERESTARTSYS;
341 return res;
342 }
343
344 /* This is the error handling routine for processes that are allowed
345 * to sleep.
346 */
347 static int nfs4_do_handle_exception(struct nfs_server *server,
348 int errorcode, struct nfs4_exception *exception)
349 {
350 struct nfs_client *clp = server->nfs_client;
351 struct nfs4_state *state = exception->state;
352 struct inode *inode = exception->inode;
353 int ret = errorcode;
354
355 exception->delay = 0;
356 exception->recovering = 0;
357 exception->retry = 0;
358 switch(errorcode) {
359 case 0:
360 return 0;
361 case -NFS4ERR_OPENMODE:
362 case -NFS4ERR_DELEG_REVOKED:
363 case -NFS4ERR_ADMIN_REVOKED:
364 case -NFS4ERR_BAD_STATEID:
365 if (inode && nfs_async_inode_return_delegation(inode,
366 NULL) == 0)
367 goto wait_on_recovery;
368 if (state == NULL)
369 break;
370 ret = nfs4_schedule_stateid_recovery(server, state);
371 if (ret < 0)
372 break;
373 goto wait_on_recovery;
374 case -NFS4ERR_EXPIRED:
375 if (state != NULL) {
376 ret = nfs4_schedule_stateid_recovery(server, state);
377 if (ret < 0)
378 break;
379 }
380 case -NFS4ERR_STALE_STATEID:
381 case -NFS4ERR_STALE_CLIENTID:
382 nfs4_schedule_lease_recovery(clp);
383 goto wait_on_recovery;
384 case -NFS4ERR_MOVED:
385 ret = nfs4_schedule_migration_recovery(server);
386 if (ret < 0)
387 break;
388 goto wait_on_recovery;
389 case -NFS4ERR_LEASE_MOVED:
390 nfs4_schedule_lease_moved_recovery(clp);
391 goto wait_on_recovery;
392 #if defined(CONFIG_NFS_V4_1)
393 case -NFS4ERR_BADSESSION:
394 case -NFS4ERR_BADSLOT:
395 case -NFS4ERR_BAD_HIGH_SLOT:
396 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
397 case -NFS4ERR_DEADSESSION:
398 case -NFS4ERR_SEQ_FALSE_RETRY:
399 case -NFS4ERR_SEQ_MISORDERED:
400 dprintk("%s ERROR: %d Reset session\n", __func__,
401 errorcode);
402 nfs4_schedule_session_recovery(clp->cl_session, errorcode);
403 goto wait_on_recovery;
404 #endif /* defined(CONFIG_NFS_V4_1) */
405 case -NFS4ERR_FILE_OPEN:
406 if (exception->timeout > HZ) {
407 /* We have retried a decent amount, time to
408 * fail
409 */
410 ret = -EBUSY;
411 break;
412 }
413 case -NFS4ERR_DELAY:
414 nfs_inc_server_stats(server, NFSIOS_DELAY);
415 case -NFS4ERR_GRACE:
416 exception->delay = 1;
417 return 0;
418
419 case -NFS4ERR_RETRY_UNCACHED_REP:
420 case -NFS4ERR_OLD_STATEID:
421 exception->retry = 1;
422 break;
423 case -NFS4ERR_BADOWNER:
424 /* The following works around a Linux server bug! */
425 case -NFS4ERR_BADNAME:
426 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
427 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
428 exception->retry = 1;
429 printk(KERN_WARNING "NFS: v4 server %s "
430 "does not accept raw "
431 "uid/gids. "
432 "Reenabling the idmapper.\n",
433 server->nfs_client->cl_hostname);
434 }
435 }
436 /* We failed to handle the error */
437 return nfs4_map_errors(ret);
438 wait_on_recovery:
439 exception->recovering = 1;
440 return 0;
441 }
442
443 /* This is the error handling routine for processes that are allowed
444 * to sleep.
445 */
446 int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
447 {
448 struct nfs_client *clp = server->nfs_client;
449 int ret;
450
451 ret = nfs4_do_handle_exception(server, errorcode, exception);
452 if (exception->delay) {
453 ret = nfs4_delay(server->client, &exception->timeout);
454 goto out_retry;
455 }
456 if (exception->recovering) {
457 ret = nfs4_wait_clnt_recover(clp);
458 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
459 return -EIO;
460 goto out_retry;
461 }
462 return ret;
463 out_retry:
464 if (ret == 0)
465 exception->retry = 1;
466 return ret;
467 }
468
469 static int
470 nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
471 int errorcode, struct nfs4_exception *exception)
472 {
473 struct nfs_client *clp = server->nfs_client;
474 int ret;
475
476 ret = nfs4_do_handle_exception(server, errorcode, exception);
477 if (exception->delay) {
478 rpc_delay(task, nfs4_update_delay(&exception->timeout));
479 goto out_retry;
480 }
481 if (exception->recovering) {
482 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
483 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
484 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
485 goto out_retry;
486 }
487 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
488 ret = -EIO;
489 return ret;
490 out_retry:
491 if (ret == 0)
492 exception->retry = 1;
493 return ret;
494 }
495
496 static int
497 nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
498 struct nfs4_state *state, long *timeout)
499 {
500 struct nfs4_exception exception = {
501 .state = state,
502 };
503
504 if (task->tk_status >= 0)
505 return 0;
506 if (timeout)
507 exception.timeout = *timeout;
508 task->tk_status = nfs4_async_handle_exception(task, server,
509 task->tk_status,
510 &exception);
511 if (exception.delay && timeout)
512 *timeout = exception.timeout;
513 if (exception.retry)
514 return -EAGAIN;
515 return 0;
516 }
517
518 /*
519 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
520 * or 'false' otherwise.
521 */
522 static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
523 {
524 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
525
526 if (flavor == RPC_AUTH_GSS_KRB5I ||
527 flavor == RPC_AUTH_GSS_KRB5P)
528 return true;
529
530 return false;
531 }
532
533 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
534 {
535 spin_lock(&clp->cl_lock);
536 if (time_before(clp->cl_last_renewal,timestamp))
537 clp->cl_last_renewal = timestamp;
538 spin_unlock(&clp->cl_lock);
539 }
540
541 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
542 {
543 struct nfs_client *clp = server->nfs_client;
544
545 if (!nfs4_has_session(clp))
546 do_renew_lease(clp, timestamp);
547 }
548
549 struct nfs4_call_sync_data {
550 const struct nfs_server *seq_server;
551 struct nfs4_sequence_args *seq_args;
552 struct nfs4_sequence_res *seq_res;
553 };
554
555 void nfs4_init_sequence(struct nfs4_sequence_args *args,
556 struct nfs4_sequence_res *res, int cache_reply)
557 {
558 args->sa_slot = NULL;
559 args->sa_cache_this = cache_reply;
560 args->sa_privileged = 0;
561
562 res->sr_slot = NULL;
563 }
564
565 static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
566 {
567 args->sa_privileged = 1;
568 }
569
570 int nfs40_setup_sequence(struct nfs4_slot_table *tbl,
571 struct nfs4_sequence_args *args,
572 struct nfs4_sequence_res *res,
573 struct rpc_task *task)
574 {
575 struct nfs4_slot *slot;
576
577 /* slot already allocated? */
578 if (res->sr_slot != NULL)
579 goto out_start;
580
581 spin_lock(&tbl->slot_tbl_lock);
582 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
583 goto out_sleep;
584
585 slot = nfs4_alloc_slot(tbl);
586 if (IS_ERR(slot)) {
587 if (slot == ERR_PTR(-ENOMEM))
588 task->tk_timeout = HZ >> 2;
589 goto out_sleep;
590 }
591 spin_unlock(&tbl->slot_tbl_lock);
592
593 args->sa_slot = slot;
594 res->sr_slot = slot;
595
596 out_start:
597 rpc_call_start(task);
598 return 0;
599
600 out_sleep:
601 if (args->sa_privileged)
602 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
603 NULL, RPC_PRIORITY_PRIVILEGED);
604 else
605 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
606 spin_unlock(&tbl->slot_tbl_lock);
607 return -EAGAIN;
608 }
609 EXPORT_SYMBOL_GPL(nfs40_setup_sequence);
610
611 static int nfs40_sequence_done(struct rpc_task *task,
612 struct nfs4_sequence_res *res)
613 {
614 struct nfs4_slot *slot = res->sr_slot;
615 struct nfs4_slot_table *tbl;
616
617 if (slot == NULL)
618 goto out;
619
620 tbl = slot->table;
621 spin_lock(&tbl->slot_tbl_lock);
622 if (!nfs41_wake_and_assign_slot(tbl, slot))
623 nfs4_free_slot(tbl, slot);
624 spin_unlock(&tbl->slot_tbl_lock);
625
626 res->sr_slot = NULL;
627 out:
628 return 1;
629 }
630
631 #if defined(CONFIG_NFS_V4_1)
632
633 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
634 {
635 struct nfs4_session *session;
636 struct nfs4_slot_table *tbl;
637 struct nfs4_slot *slot = res->sr_slot;
638 bool send_new_highest_used_slotid = false;
639
640 tbl = slot->table;
641 session = tbl->session;
642
643 spin_lock(&tbl->slot_tbl_lock);
644 /* Be nice to the server: try to ensure that the last transmitted
645 * value for highest_used_slotid <= target_highest_slotid
646 */
647 if (tbl->highest_used_slotid > tbl->target_highest_slotid)
648 send_new_highest_used_slotid = true;
649
650 if (nfs41_wake_and_assign_slot(tbl, slot)) {
651 send_new_highest_used_slotid = false;
652 goto out_unlock;
653 }
654 nfs4_free_slot(tbl, slot);
655
656 if (tbl->highest_used_slotid != NFS4_NO_SLOT)
657 send_new_highest_used_slotid = false;
658 out_unlock:
659 spin_unlock(&tbl->slot_tbl_lock);
660 res->sr_slot = NULL;
661 if (send_new_highest_used_slotid)
662 nfs41_notify_server(session->clp);
663 }
664
665 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
666 {
667 struct nfs4_session *session;
668 struct nfs4_slot *slot = res->sr_slot;
669 struct nfs_client *clp;
670 bool interrupted = false;
671 int ret = 1;
672
673 if (slot == NULL)
674 goto out_noaction;
675 /* don't increment the sequence number if the task wasn't sent */
676 if (!RPC_WAS_SENT(task))
677 goto out;
678
679 session = slot->table->session;
680
681 if (slot->interrupted) {
682 slot->interrupted = 0;
683 interrupted = true;
684 }
685
686 trace_nfs4_sequence_done(session, res);
687 /* Check the SEQUENCE operation status */
688 switch (res->sr_status) {
689 case 0:
690 /* Update the slot's sequence and clientid lease timer */
691 ++slot->seq_nr;
692 clp = session->clp;
693 do_renew_lease(clp, res->sr_timestamp);
694 /* Check sequence flags */
695 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
696 nfs41_update_target_slotid(slot->table, slot, res);
697 break;
698 case 1:
699 /*
700 * sr_status remains 1 if an RPC level error occurred.
701 * The server may or may not have processed the sequence
702 * operation.
703 * Mark the slot as having hosted an interrupted RPC call.
704 */
705 slot->interrupted = 1;
706 goto out;
707 case -NFS4ERR_DELAY:
708 /* The server detected a resend of the RPC call and
709 * returned NFS4ERR_DELAY as per Section 2.10.6.2
710 * of RFC5661.
711 */
712 dprintk("%s: slot=%u seq=%u: Operation in progress\n",
713 __func__,
714 slot->slot_nr,
715 slot->seq_nr);
716 goto out_retry;
717 case -NFS4ERR_BADSLOT:
718 /*
719 * The slot id we used was probably retired. Try again
720 * using a different slot id.
721 */
722 goto retry_nowait;
723 case -NFS4ERR_SEQ_MISORDERED:
724 /*
725 * Was the last operation on this sequence interrupted?
726 * If so, retry after bumping the sequence number.
727 */
728 if (interrupted) {
729 ++slot->seq_nr;
730 goto retry_nowait;
731 }
732 /*
733 * Could this slot have been previously retired?
734 * If so, then the server may be expecting seq_nr = 1!
735 */
736 if (slot->seq_nr != 1) {
737 slot->seq_nr = 1;
738 goto retry_nowait;
739 }
740 break;
741 case -NFS4ERR_SEQ_FALSE_RETRY:
742 ++slot->seq_nr;
743 goto retry_nowait;
744 default:
745 /* Just update the slot sequence no. */
746 ++slot->seq_nr;
747 }
748 out:
749 /* The session may be reset by one of the error handlers. */
750 dprintk("%s: Error %d, freeing the slot\n", __func__, res->sr_status);
751 nfs41_sequence_free_slot(res);
752 out_noaction:
753 return ret;
754 retry_nowait:
755 if (rpc_restart_call_prepare(task)) {
756 task->tk_status = 0;
757 ret = 0;
758 }
759 goto out;
760 out_retry:
761 if (!rpc_restart_call(task))
762 goto out;
763 rpc_delay(task, NFS4_POLL_RETRY_MAX);
764 return 0;
765 }
766 EXPORT_SYMBOL_GPL(nfs41_sequence_done);
767
768 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
769 {
770 if (res->sr_slot == NULL)
771 return 1;
772 if (!res->sr_slot->table->session)
773 return nfs40_sequence_done(task, res);
774 return nfs41_sequence_done(task, res);
775 }
776 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
777
778 int nfs41_setup_sequence(struct nfs4_session *session,
779 struct nfs4_sequence_args *args,
780 struct nfs4_sequence_res *res,
781 struct rpc_task *task)
782 {
783 struct nfs4_slot *slot;
784 struct nfs4_slot_table *tbl;
785
786 dprintk("--> %s\n", __func__);
787 /* slot already allocated? */
788 if (res->sr_slot != NULL)
789 goto out_success;
790
791 tbl = &session->fc_slot_table;
792
793 task->tk_timeout = 0;
794
795 spin_lock(&tbl->slot_tbl_lock);
796 if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) &&
797 !args->sa_privileged) {
798 /* The state manager will wait until the slot table is empty */
799 dprintk("%s session is draining\n", __func__);
800 goto out_sleep;
801 }
802
803 slot = nfs4_alloc_slot(tbl);
804 if (IS_ERR(slot)) {
805 /* If out of memory, try again in 1/4 second */
806 if (slot == ERR_PTR(-ENOMEM))
807 task->tk_timeout = HZ >> 2;
808 dprintk("<-- %s: no free slots\n", __func__);
809 goto out_sleep;
810 }
811 spin_unlock(&tbl->slot_tbl_lock);
812
813 args->sa_slot = slot;
814
815 dprintk("<-- %s slotid=%u seqid=%u\n", __func__,
816 slot->slot_nr, slot->seq_nr);
817
818 res->sr_slot = slot;
819 res->sr_timestamp = jiffies;
820 res->sr_status_flags = 0;
821 /*
822 * sr_status is only set in decode_sequence, and so will remain
823 * set to 1 if an rpc level failure occurs.
824 */
825 res->sr_status = 1;
826 trace_nfs4_setup_sequence(session, args);
827 out_success:
828 rpc_call_start(task);
829 return 0;
830 out_sleep:
831 /* Privileged tasks are queued with top priority */
832 if (args->sa_privileged)
833 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
834 NULL, RPC_PRIORITY_PRIVILEGED);
835 else
836 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
837 spin_unlock(&tbl->slot_tbl_lock);
838 return -EAGAIN;
839 }
840 EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
841
842 static int nfs4_setup_sequence(const struct nfs_server *server,
843 struct nfs4_sequence_args *args,
844 struct nfs4_sequence_res *res,
845 struct rpc_task *task)
846 {
847 struct nfs4_session *session = nfs4_get_session(server);
848 int ret = 0;
849
850 if (!session)
851 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
852 args, res, task);
853
854 dprintk("--> %s clp %p session %p sr_slot %u\n",
855 __func__, session->clp, session, res->sr_slot ?
856 res->sr_slot->slot_nr : NFS4_NO_SLOT);
857
858 ret = nfs41_setup_sequence(session, args, res, task);
859
860 dprintk("<-- %s status=%d\n", __func__, ret);
861 return ret;
862 }
863
864 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
865 {
866 struct nfs4_call_sync_data *data = calldata;
867 struct nfs4_session *session = nfs4_get_session(data->seq_server);
868
869 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
870
871 nfs41_setup_sequence(session, data->seq_args, data->seq_res, task);
872 }
873
874 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
875 {
876 struct nfs4_call_sync_data *data = calldata;
877
878 nfs41_sequence_done(task, data->seq_res);
879 }
880
881 static const struct rpc_call_ops nfs41_call_sync_ops = {
882 .rpc_call_prepare = nfs41_call_sync_prepare,
883 .rpc_call_done = nfs41_call_sync_done,
884 };
885
886 #else /* !CONFIG_NFS_V4_1 */
887
888 static int nfs4_setup_sequence(const struct nfs_server *server,
889 struct nfs4_sequence_args *args,
890 struct nfs4_sequence_res *res,
891 struct rpc_task *task)
892 {
893 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
894 args, res, task);
895 }
896
897 int nfs4_sequence_done(struct rpc_task *task,
898 struct nfs4_sequence_res *res)
899 {
900 return nfs40_sequence_done(task, res);
901 }
902 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
903
904 #endif /* !CONFIG_NFS_V4_1 */
905
906 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
907 {
908 struct nfs4_call_sync_data *data = calldata;
909 nfs4_setup_sequence(data->seq_server,
910 data->seq_args, data->seq_res, task);
911 }
912
913 static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
914 {
915 struct nfs4_call_sync_data *data = calldata;
916 nfs4_sequence_done(task, data->seq_res);
917 }
918
919 static const struct rpc_call_ops nfs40_call_sync_ops = {
920 .rpc_call_prepare = nfs40_call_sync_prepare,
921 .rpc_call_done = nfs40_call_sync_done,
922 };
923
924 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
925 struct nfs_server *server,
926 struct rpc_message *msg,
927 struct nfs4_sequence_args *args,
928 struct nfs4_sequence_res *res)
929 {
930 int ret;
931 struct rpc_task *task;
932 struct nfs_client *clp = server->nfs_client;
933 struct nfs4_call_sync_data data = {
934 .seq_server = server,
935 .seq_args = args,
936 .seq_res = res,
937 };
938 struct rpc_task_setup task_setup = {
939 .rpc_client = clnt,
940 .rpc_message = msg,
941 .callback_ops = clp->cl_mvops->call_sync_ops,
942 .callback_data = &data
943 };
944
945 task = rpc_run_task(&task_setup);
946 if (IS_ERR(task))
947 ret = PTR_ERR(task);
948 else {
949 ret = task->tk_status;
950 rpc_put_task(task);
951 }
952 return ret;
953 }
954
955 int nfs4_call_sync(struct rpc_clnt *clnt,
956 struct nfs_server *server,
957 struct rpc_message *msg,
958 struct nfs4_sequence_args *args,
959 struct nfs4_sequence_res *res,
960 int cache_reply)
961 {
962 nfs4_init_sequence(args, res, cache_reply);
963 return nfs4_call_sync_sequence(clnt, server, msg, args, res);
964 }
965
966 static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
967 {
968 struct nfs_inode *nfsi = NFS_I(dir);
969
970 spin_lock(&dir->i_lock);
971 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
972 if (!cinfo->atomic || cinfo->before != dir->i_version)
973 nfs_force_lookup_revalidate(dir);
974 dir->i_version = cinfo->after;
975 nfsi->attr_gencount = nfs_inc_attr_generation_counter();
976 nfs_fscache_invalidate(dir);
977 spin_unlock(&dir->i_lock);
978 }
979
980 struct nfs4_opendata {
981 struct kref kref;
982 struct nfs_openargs o_arg;
983 struct nfs_openres o_res;
984 struct nfs_open_confirmargs c_arg;
985 struct nfs_open_confirmres c_res;
986 struct nfs4_string owner_name;
987 struct nfs4_string group_name;
988 struct nfs4_label *a_label;
989 struct nfs_fattr f_attr;
990 struct nfs4_label *f_label;
991 struct dentry *dir;
992 struct dentry *dentry;
993 struct nfs4_state_owner *owner;
994 struct nfs4_state *state;
995 struct iattr attrs;
996 unsigned long timestamp;
997 unsigned int rpc_done : 1;
998 unsigned int file_created : 1;
999 unsigned int is_recover : 1;
1000 int rpc_status;
1001 int cancelled;
1002 };
1003
1004 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
1005 int err, struct nfs4_exception *exception)
1006 {
1007 if (err != -EINVAL)
1008 return false;
1009 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1010 return false;
1011 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
1012 exception->retry = 1;
1013 return true;
1014 }
1015
1016 static u32
1017 nfs4_map_atomic_open_share(struct nfs_server *server,
1018 fmode_t fmode, int openflags)
1019 {
1020 u32 res = 0;
1021
1022 switch (fmode & (FMODE_READ | FMODE_WRITE)) {
1023 case FMODE_READ:
1024 res = NFS4_SHARE_ACCESS_READ;
1025 break;
1026 case FMODE_WRITE:
1027 res = NFS4_SHARE_ACCESS_WRITE;
1028 break;
1029 case FMODE_READ|FMODE_WRITE:
1030 res = NFS4_SHARE_ACCESS_BOTH;
1031 }
1032 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1033 goto out;
1034 /* Want no delegation if we're using O_DIRECT */
1035 if (openflags & O_DIRECT)
1036 res |= NFS4_SHARE_WANT_NO_DELEG;
1037 out:
1038 return res;
1039 }
1040
1041 static enum open_claim_type4
1042 nfs4_map_atomic_open_claim(struct nfs_server *server,
1043 enum open_claim_type4 claim)
1044 {
1045 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
1046 return claim;
1047 switch (claim) {
1048 default:
1049 return claim;
1050 case NFS4_OPEN_CLAIM_FH:
1051 return NFS4_OPEN_CLAIM_NULL;
1052 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1053 return NFS4_OPEN_CLAIM_DELEGATE_CUR;
1054 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1055 return NFS4_OPEN_CLAIM_DELEGATE_PREV;
1056 }
1057 }
1058
1059 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
1060 {
1061 p->o_res.f_attr = &p->f_attr;
1062 p->o_res.f_label = p->f_label;
1063 p->o_res.seqid = p->o_arg.seqid;
1064 p->c_res.seqid = p->c_arg.seqid;
1065 p->o_res.server = p->o_arg.server;
1066 p->o_res.access_request = p->o_arg.access;
1067 nfs_fattr_init(&p->f_attr);
1068 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
1069 }
1070
1071 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1072 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
1073 const struct iattr *attrs,
1074 struct nfs4_label *label,
1075 enum open_claim_type4 claim,
1076 gfp_t gfp_mask)
1077 {
1078 struct dentry *parent = dget_parent(dentry);
1079 struct inode *dir = d_inode(parent);
1080 struct nfs_server *server = NFS_SERVER(dir);
1081 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
1082 struct nfs4_opendata *p;
1083
1084 p = kzalloc(sizeof(*p), gfp_mask);
1085 if (p == NULL)
1086 goto err;
1087
1088 p->f_label = nfs4_label_alloc(server, gfp_mask);
1089 if (IS_ERR(p->f_label))
1090 goto err_free_p;
1091
1092 p->a_label = nfs4_label_alloc(server, gfp_mask);
1093 if (IS_ERR(p->a_label))
1094 goto err_free_f;
1095
1096 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1097 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1098 if (IS_ERR(p->o_arg.seqid))
1099 goto err_free_label;
1100 nfs_sb_active(dentry->d_sb);
1101 p->dentry = dget(dentry);
1102 p->dir = parent;
1103 p->owner = sp;
1104 atomic_inc(&sp->so_count);
1105 p->o_arg.open_flags = flags;
1106 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
1107 p->o_arg.share_access = nfs4_map_atomic_open_share(server,
1108 fmode, flags);
1109 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
1110 * will return permission denied for all bits until close */
1111 if (!(flags & O_EXCL)) {
1112 /* ask server to check for all possible rights as results
1113 * are cached */
1114 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
1115 NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
1116 }
1117 p->o_arg.clientid = server->nfs_client->cl_clientid;
1118 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
1119 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
1120 p->o_arg.name = &dentry->d_name;
1121 p->o_arg.server = server;
1122 p->o_arg.bitmask = nfs4_bitmask(server, label);
1123 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1124 p->o_arg.label = nfs4_label_copy(p->a_label, label);
1125 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1126 switch (p->o_arg.claim) {
1127 case NFS4_OPEN_CLAIM_NULL:
1128 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1129 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1130 p->o_arg.fh = NFS_FH(dir);
1131 break;
1132 case NFS4_OPEN_CLAIM_PREVIOUS:
1133 case NFS4_OPEN_CLAIM_FH:
1134 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1135 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1136 p->o_arg.fh = NFS_FH(d_inode(dentry));
1137 }
1138 if (attrs != NULL && attrs->ia_valid != 0) {
1139 __u32 verf[2];
1140
1141 p->o_arg.u.attrs = &p->attrs;
1142 memcpy(&p->attrs, attrs, sizeof(p->attrs));
1143
1144 verf[0] = jiffies;
1145 verf[1] = current->pid;
1146 memcpy(p->o_arg.u.verifier.data, verf,
1147 sizeof(p->o_arg.u.verifier.data));
1148 }
1149 p->c_arg.fh = &p->o_res.fh;
1150 p->c_arg.stateid = &p->o_res.stateid;
1151 p->c_arg.seqid = p->o_arg.seqid;
1152 nfs4_init_opendata_res(p);
1153 kref_init(&p->kref);
1154 return p;
1155
1156 err_free_label:
1157 nfs4_label_free(p->a_label);
1158 err_free_f:
1159 nfs4_label_free(p->f_label);
1160 err_free_p:
1161 kfree(p);
1162 err:
1163 dput(parent);
1164 return NULL;
1165 }
1166
1167 static void nfs4_opendata_free(struct kref *kref)
1168 {
1169 struct nfs4_opendata *p = container_of(kref,
1170 struct nfs4_opendata, kref);
1171 struct super_block *sb = p->dentry->d_sb;
1172
1173 nfs_free_seqid(p->o_arg.seqid);
1174 if (p->state != NULL)
1175 nfs4_put_open_state(p->state);
1176 nfs4_put_state_owner(p->owner);
1177
1178 nfs4_label_free(p->a_label);
1179 nfs4_label_free(p->f_label);
1180
1181 dput(p->dir);
1182 dput(p->dentry);
1183 nfs_sb_deactive(sb);
1184 nfs_fattr_free_names(&p->f_attr);
1185 kfree(p->f_attr.mdsthreshold);
1186 kfree(p);
1187 }
1188
1189 static void nfs4_opendata_put(struct nfs4_opendata *p)
1190 {
1191 if (p != NULL)
1192 kref_put(&p->kref, nfs4_opendata_free);
1193 }
1194
1195 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
1196 {
1197 int ret;
1198
1199 ret = rpc_wait_for_completion_task(task);
1200 return ret;
1201 }
1202
1203 static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
1204 fmode_t fmode)
1205 {
1206 switch(fmode & (FMODE_READ|FMODE_WRITE)) {
1207 case FMODE_READ|FMODE_WRITE:
1208 return state->n_rdwr != 0;
1209 case FMODE_WRITE:
1210 return state->n_wronly != 0;
1211 case FMODE_READ:
1212 return state->n_rdonly != 0;
1213 }
1214 WARN_ON_ONCE(1);
1215 return false;
1216 }
1217
1218 static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
1219 {
1220 int ret = 0;
1221
1222 if (open_mode & (O_EXCL|O_TRUNC))
1223 goto out;
1224 switch (mode & (FMODE_READ|FMODE_WRITE)) {
1225 case FMODE_READ:
1226 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
1227 && state->n_rdonly != 0;
1228 break;
1229 case FMODE_WRITE:
1230 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
1231 && state->n_wronly != 0;
1232 break;
1233 case FMODE_READ|FMODE_WRITE:
1234 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
1235 && state->n_rdwr != 0;
1236 }
1237 out:
1238 return ret;
1239 }
1240
1241 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
1242 enum open_claim_type4 claim)
1243 {
1244 if (delegation == NULL)
1245 return 0;
1246 if ((delegation->type & fmode) != fmode)
1247 return 0;
1248 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
1249 return 0;
1250 switch (claim) {
1251 case NFS4_OPEN_CLAIM_NULL:
1252 case NFS4_OPEN_CLAIM_FH:
1253 break;
1254 case NFS4_OPEN_CLAIM_PREVIOUS:
1255 if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
1256 break;
1257 default:
1258 return 0;
1259 }
1260 nfs_mark_delegation_referenced(delegation);
1261 return 1;
1262 }
1263
1264 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
1265 {
1266 switch (fmode) {
1267 case FMODE_WRITE:
1268 state->n_wronly++;
1269 break;
1270 case FMODE_READ:
1271 state->n_rdonly++;
1272 break;
1273 case FMODE_READ|FMODE_WRITE:
1274 state->n_rdwr++;
1275 }
1276 nfs4_state_set_mode_locked(state, state->state | fmode);
1277 }
1278
1279 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
1280 {
1281 struct nfs_client *clp = state->owner->so_server->nfs_client;
1282 bool need_recover = false;
1283
1284 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
1285 need_recover = true;
1286 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
1287 need_recover = true;
1288 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
1289 need_recover = true;
1290 if (need_recover)
1291 nfs4_state_mark_reclaim_nograce(clp, state);
1292 }
1293
1294 static bool nfs_need_update_open_stateid(struct nfs4_state *state,
1295 nfs4_stateid *stateid)
1296 {
1297 if (test_and_set_bit(NFS_OPEN_STATE, &state->flags) == 0)
1298 return true;
1299 if (!nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1300 nfs_test_and_clear_all_open_stateid(state);
1301 return true;
1302 }
1303 if (nfs4_stateid_is_newer(stateid, &state->open_stateid))
1304 return true;
1305 return false;
1306 }
1307
1308 static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1309 {
1310 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
1311 return;
1312 if (state->n_wronly)
1313 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1314 if (state->n_rdonly)
1315 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1316 if (state->n_rdwr)
1317 set_bit(NFS_O_RDWR_STATE, &state->flags);
1318 set_bit(NFS_OPEN_STATE, &state->flags);
1319 }
1320
1321 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1322 nfs4_stateid *arg_stateid,
1323 nfs4_stateid *stateid, fmode_t fmode)
1324 {
1325 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1326 switch (fmode & (FMODE_READ|FMODE_WRITE)) {
1327 case FMODE_WRITE:
1328 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1329 break;
1330 case FMODE_READ:
1331 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1332 break;
1333 case 0:
1334 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1335 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1336 clear_bit(NFS_OPEN_STATE, &state->flags);
1337 }
1338 if (stateid == NULL)
1339 return;
1340 /* Handle races with OPEN */
1341 if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) ||
1342 (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
1343 !nfs4_stateid_is_newer(stateid, &state->open_stateid))) {
1344 nfs_resync_open_stateid_locked(state);
1345 return;
1346 }
1347 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1348 nfs4_stateid_copy(&state->stateid, stateid);
1349 nfs4_stateid_copy(&state->open_stateid, stateid);
1350 }
1351
1352 static void nfs_clear_open_stateid(struct nfs4_state *state,
1353 nfs4_stateid *arg_stateid,
1354 nfs4_stateid *stateid, fmode_t fmode)
1355 {
1356 write_seqlock(&state->seqlock);
1357 nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode);
1358 write_sequnlock(&state->seqlock);
1359 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1360 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1361 }
1362
1363 static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
1364 {
1365 switch (fmode) {
1366 case FMODE_READ:
1367 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1368 break;
1369 case FMODE_WRITE:
1370 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1371 break;
1372 case FMODE_READ|FMODE_WRITE:
1373 set_bit(NFS_O_RDWR_STATE, &state->flags);
1374 }
1375 if (!nfs_need_update_open_stateid(state, stateid))
1376 return;
1377 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1378 nfs4_stateid_copy(&state->stateid, stateid);
1379 nfs4_stateid_copy(&state->open_stateid, stateid);
1380 }
1381
1382 static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
1383 {
1384 /*
1385 * Protect the call to nfs4_state_set_mode_locked and
1386 * serialise the stateid update
1387 */
1388 write_seqlock(&state->seqlock);
1389 if (deleg_stateid != NULL) {
1390 nfs4_stateid_copy(&state->stateid, deleg_stateid);
1391 set_bit(NFS_DELEGATED_STATE, &state->flags);
1392 }
1393 if (open_stateid != NULL)
1394 nfs_set_open_stateid_locked(state, open_stateid, fmode);
1395 write_sequnlock(&state->seqlock);
1396 spin_lock(&state->owner->so_lock);
1397 update_open_stateflags(state, fmode);
1398 spin_unlock(&state->owner->so_lock);
1399 }
1400
1401 static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
1402 {
1403 struct nfs_inode *nfsi = NFS_I(state->inode);
1404 struct nfs_delegation *deleg_cur;
1405 int ret = 0;
1406
1407 fmode &= (FMODE_READ|FMODE_WRITE);
1408
1409 rcu_read_lock();
1410 deleg_cur = rcu_dereference(nfsi->delegation);
1411 if (deleg_cur == NULL)
1412 goto no_delegation;
1413
1414 spin_lock(&deleg_cur->lock);
1415 if (rcu_dereference(nfsi->delegation) != deleg_cur ||
1416 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
1417 (deleg_cur->type & fmode) != fmode)
1418 goto no_delegation_unlock;
1419
1420 if (delegation == NULL)
1421 delegation = &deleg_cur->stateid;
1422 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
1423 goto no_delegation_unlock;
1424
1425 nfs_mark_delegation_referenced(deleg_cur);
1426 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
1427 ret = 1;
1428 no_delegation_unlock:
1429 spin_unlock(&deleg_cur->lock);
1430 no_delegation:
1431 rcu_read_unlock();
1432
1433 if (!ret && open_stateid != NULL) {
1434 __update_open_stateid(state, open_stateid, NULL, fmode);
1435 ret = 1;
1436 }
1437 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1438 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1439
1440 return ret;
1441 }
1442
1443 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
1444 const nfs4_stateid *stateid)
1445 {
1446 struct nfs4_state *state = lsp->ls_state;
1447 bool ret = false;
1448
1449 spin_lock(&state->state_lock);
1450 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
1451 goto out_noupdate;
1452 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
1453 goto out_noupdate;
1454 nfs4_stateid_copy(&lsp->ls_stateid, stateid);
1455 ret = true;
1456 out_noupdate:
1457 spin_unlock(&state->state_lock);
1458 return ret;
1459 }
1460
1461 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1462 {
1463 struct nfs_delegation *delegation;
1464
1465 rcu_read_lock();
1466 delegation = rcu_dereference(NFS_I(inode)->delegation);
1467 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1468 rcu_read_unlock();
1469 return;
1470 }
1471 rcu_read_unlock();
1472 nfs4_inode_return_delegation(inode);
1473 }
1474
1475 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1476 {
1477 struct nfs4_state *state = opendata->state;
1478 struct nfs_inode *nfsi = NFS_I(state->inode);
1479 struct nfs_delegation *delegation;
1480 int open_mode = opendata->o_arg.open_flags;
1481 fmode_t fmode = opendata->o_arg.fmode;
1482 enum open_claim_type4 claim = opendata->o_arg.claim;
1483 nfs4_stateid stateid;
1484 int ret = -EAGAIN;
1485
1486 for (;;) {
1487 spin_lock(&state->owner->so_lock);
1488 if (can_open_cached(state, fmode, open_mode)) {
1489 update_open_stateflags(state, fmode);
1490 spin_unlock(&state->owner->so_lock);
1491 goto out_return_state;
1492 }
1493 spin_unlock(&state->owner->so_lock);
1494 rcu_read_lock();
1495 delegation = rcu_dereference(nfsi->delegation);
1496 if (!can_open_delegated(delegation, fmode, claim)) {
1497 rcu_read_unlock();
1498 break;
1499 }
1500 /* Save the delegation */
1501 nfs4_stateid_copy(&stateid, &delegation->stateid);
1502 rcu_read_unlock();
1503 nfs_release_seqid(opendata->o_arg.seqid);
1504 if (!opendata->is_recover) {
1505 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1506 if (ret != 0)
1507 goto out;
1508 }
1509 ret = -EAGAIN;
1510
1511 /* Try to update the stateid using the delegation */
1512 if (update_open_stateid(state, NULL, &stateid, fmode))
1513 goto out_return_state;
1514 }
1515 out:
1516 return ERR_PTR(ret);
1517 out_return_state:
1518 atomic_inc(&state->count);
1519 return state;
1520 }
1521
1522 static void
1523 nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1524 {
1525 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
1526 struct nfs_delegation *delegation;
1527 int delegation_flags = 0;
1528
1529 rcu_read_lock();
1530 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1531 if (delegation)
1532 delegation_flags = delegation->flags;
1533 rcu_read_unlock();
1534 switch (data->o_arg.claim) {
1535 default:
1536 break;
1537 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1538 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1539 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1540 "returning a delegation for "
1541 "OPEN(CLAIM_DELEGATE_CUR)\n",
1542 clp->cl_hostname);
1543 return;
1544 }
1545 if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1546 nfs_inode_set_delegation(state->inode,
1547 data->owner->so_cred,
1548 &data->o_res);
1549 else
1550 nfs_inode_reclaim_delegation(state->inode,
1551 data->owner->so_cred,
1552 &data->o_res);
1553 }
1554
1555 /*
1556 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
1557 * and update the nfs4_state.
1558 */
1559 static struct nfs4_state *
1560 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
1561 {
1562 struct inode *inode = data->state->inode;
1563 struct nfs4_state *state = data->state;
1564 int ret;
1565
1566 if (!data->rpc_done) {
1567 if (data->rpc_status) {
1568 ret = data->rpc_status;
1569 goto err;
1570 }
1571 /* cached opens have already been processed */
1572 goto update;
1573 }
1574
1575 ret = nfs_refresh_inode(inode, &data->f_attr);
1576 if (ret)
1577 goto err;
1578
1579 if (data->o_res.delegation_type != 0)
1580 nfs4_opendata_check_deleg(data, state);
1581 update:
1582 update_open_stateid(state, &data->o_res.stateid, NULL,
1583 data->o_arg.fmode);
1584 atomic_inc(&state->count);
1585
1586 return state;
1587 err:
1588 return ERR_PTR(ret);
1589
1590 }
1591
1592 static struct nfs4_state *
1593 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1594 {
1595 struct inode *inode;
1596 struct nfs4_state *state = NULL;
1597 int ret;
1598
1599 if (!data->rpc_done) {
1600 state = nfs4_try_open_cached(data);
1601 trace_nfs4_cached_open(data->state);
1602 goto out;
1603 }
1604
1605 ret = -EAGAIN;
1606 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1607 goto err;
1608 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr, data->f_label);
1609 ret = PTR_ERR(inode);
1610 if (IS_ERR(inode))
1611 goto err;
1612 ret = -ENOMEM;
1613 state = nfs4_get_open_state(inode, data->owner);
1614 if (state == NULL)
1615 goto err_put_inode;
1616 if (data->o_res.delegation_type != 0)
1617 nfs4_opendata_check_deleg(data, state);
1618 update_open_stateid(state, &data->o_res.stateid, NULL,
1619 data->o_arg.fmode);
1620 iput(inode);
1621 out:
1622 nfs_release_seqid(data->o_arg.seqid);
1623 return state;
1624 err_put_inode:
1625 iput(inode);
1626 err:
1627 return ERR_PTR(ret);
1628 }
1629
1630 static struct nfs4_state *
1631 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1632 {
1633 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
1634 return _nfs4_opendata_reclaim_to_nfs4_state(data);
1635 return _nfs4_opendata_to_nfs4_state(data);
1636 }
1637
1638 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1639 {
1640 struct nfs_inode *nfsi = NFS_I(state->inode);
1641 struct nfs_open_context *ctx;
1642
1643 spin_lock(&state->inode->i_lock);
1644 list_for_each_entry(ctx, &nfsi->open_files, list) {
1645 if (ctx->state != state)
1646 continue;
1647 get_nfs_open_context(ctx);
1648 spin_unlock(&state->inode->i_lock);
1649 return ctx;
1650 }
1651 spin_unlock(&state->inode->i_lock);
1652 return ERR_PTR(-ENOENT);
1653 }
1654
1655 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
1656 struct nfs4_state *state, enum open_claim_type4 claim)
1657 {
1658 struct nfs4_opendata *opendata;
1659
1660 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
1661 NULL, NULL, claim, GFP_NOFS);
1662 if (opendata == NULL)
1663 return ERR_PTR(-ENOMEM);
1664 opendata->state = state;
1665 atomic_inc(&state->count);
1666 return opendata;
1667 }
1668
1669 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
1670 fmode_t fmode)
1671 {
1672 struct nfs4_state *newstate;
1673 int ret;
1674
1675 if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
1676 return 0;
1677 opendata->o_arg.open_flags = 0;
1678 opendata->o_arg.fmode = fmode;
1679 opendata->o_arg.share_access = nfs4_map_atomic_open_share(
1680 NFS_SB(opendata->dentry->d_sb),
1681 fmode, 0);
1682 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1683 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1684 nfs4_init_opendata_res(opendata);
1685 ret = _nfs4_recover_proc_open(opendata);
1686 if (ret != 0)
1687 return ret;
1688 newstate = nfs4_opendata_to_nfs4_state(opendata);
1689 if (IS_ERR(newstate))
1690 return PTR_ERR(newstate);
1691 if (newstate != opendata->state)
1692 ret = -ESTALE;
1693 nfs4_close_state(newstate, fmode);
1694 return ret;
1695 }
1696
1697 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1698 {
1699 int ret;
1700
1701 /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
1702 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1703 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1704 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1705 /* memory barrier prior to reading state->n_* */
1706 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1707 clear_bit(NFS_OPEN_STATE, &state->flags);
1708 smp_rmb();
1709 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
1710 if (ret != 0)
1711 return ret;
1712 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
1713 if (ret != 0)
1714 return ret;
1715 ret = nfs4_open_recover_helper(opendata, FMODE_READ);
1716 if (ret != 0)
1717 return ret;
1718 /*
1719 * We may have performed cached opens for all three recoveries.
1720 * Check if we need to update the current stateid.
1721 */
1722 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1723 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
1724 write_seqlock(&state->seqlock);
1725 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1726 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1727 write_sequnlock(&state->seqlock);
1728 }
1729 return 0;
1730 }
1731
1732 /*
1733 * OPEN_RECLAIM:
1734 * reclaim state on the server after a reboot.
1735 */
1736 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1737 {
1738 struct nfs_delegation *delegation;
1739 struct nfs4_opendata *opendata;
1740 fmode_t delegation_type = 0;
1741 int status;
1742
1743 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1744 NFS4_OPEN_CLAIM_PREVIOUS);
1745 if (IS_ERR(opendata))
1746 return PTR_ERR(opendata);
1747 rcu_read_lock();
1748 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1749 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1750 delegation_type = delegation->type;
1751 rcu_read_unlock();
1752 opendata->o_arg.u.delegation_type = delegation_type;
1753 status = nfs4_open_recover(opendata, state);
1754 nfs4_opendata_put(opendata);
1755 return status;
1756 }
1757
1758 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1759 {
1760 struct nfs_server *server = NFS_SERVER(state->inode);
1761 struct nfs4_exception exception = { };
1762 int err;
1763 do {
1764 err = _nfs4_do_open_reclaim(ctx, state);
1765 trace_nfs4_open_reclaim(ctx, 0, err);
1766 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
1767 continue;
1768 if (err != -NFS4ERR_DELAY)
1769 break;
1770 nfs4_handle_exception(server, err, &exception);
1771 } while (exception.retry);
1772 return err;
1773 }
1774
1775 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
1776 {
1777 struct nfs_open_context *ctx;
1778 int ret;
1779
1780 ctx = nfs4_state_find_open_context(state);
1781 if (IS_ERR(ctx))
1782 return -EAGAIN;
1783 ret = nfs4_do_open_reclaim(ctx, state);
1784 put_nfs_open_context(ctx);
1785 return ret;
1786 }
1787
1788 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
1789 {
1790 switch (err) {
1791 default:
1792 printk(KERN_ERR "NFS: %s: unhandled error "
1793 "%d.\n", __func__, err);
1794 case 0:
1795 case -ENOENT:
1796 case -EAGAIN:
1797 case -ESTALE:
1798 break;
1799 case -NFS4ERR_BADSESSION:
1800 case -NFS4ERR_BADSLOT:
1801 case -NFS4ERR_BAD_HIGH_SLOT:
1802 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1803 case -NFS4ERR_DEADSESSION:
1804 set_bit(NFS_DELEGATED_STATE, &state->flags);
1805 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
1806 return -EAGAIN;
1807 case -NFS4ERR_STALE_CLIENTID:
1808 case -NFS4ERR_STALE_STATEID:
1809 set_bit(NFS_DELEGATED_STATE, &state->flags);
1810 case -NFS4ERR_EXPIRED:
1811 /* Don't recall a delegation if it was lost */
1812 nfs4_schedule_lease_recovery(server->nfs_client);
1813 return -EAGAIN;
1814 case -NFS4ERR_MOVED:
1815 nfs4_schedule_migration_recovery(server);
1816 return -EAGAIN;
1817 case -NFS4ERR_LEASE_MOVED:
1818 nfs4_schedule_lease_moved_recovery(server->nfs_client);
1819 return -EAGAIN;
1820 case -NFS4ERR_DELEG_REVOKED:
1821 case -NFS4ERR_ADMIN_REVOKED:
1822 case -NFS4ERR_BAD_STATEID:
1823 case -NFS4ERR_OPENMODE:
1824 nfs_inode_find_state_and_recover(state->inode,
1825 stateid);
1826 nfs4_schedule_stateid_recovery(server, state);
1827 return -EAGAIN;
1828 case -NFS4ERR_DELAY:
1829 case -NFS4ERR_GRACE:
1830 set_bit(NFS_DELEGATED_STATE, &state->flags);
1831 ssleep(1);
1832 return -EAGAIN;
1833 case -ENOMEM:
1834 case -NFS4ERR_DENIED:
1835 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
1836 return 0;
1837 }
1838 return err;
1839 }
1840
1841 int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
1842 struct nfs4_state *state, const nfs4_stateid *stateid,
1843 fmode_t type)
1844 {
1845 struct nfs_server *server = NFS_SERVER(state->inode);
1846 struct nfs4_opendata *opendata;
1847 int err = 0;
1848
1849 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1850 NFS4_OPEN_CLAIM_DELEG_CUR_FH);
1851 if (IS_ERR(opendata))
1852 return PTR_ERR(opendata);
1853 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
1854 write_seqlock(&state->seqlock);
1855 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1856 write_sequnlock(&state->seqlock);
1857 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1858 switch (type & (FMODE_READ|FMODE_WRITE)) {
1859 case FMODE_READ|FMODE_WRITE:
1860 case FMODE_WRITE:
1861 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
1862 if (err)
1863 break;
1864 err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
1865 if (err)
1866 break;
1867 case FMODE_READ:
1868 err = nfs4_open_recover_helper(opendata, FMODE_READ);
1869 }
1870 nfs4_opendata_put(opendata);
1871 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
1872 }
1873
1874 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
1875 {
1876 struct nfs4_opendata *data = calldata;
1877
1878 nfs40_setup_sequence(data->o_arg.server->nfs_client->cl_slot_tbl,
1879 &data->c_arg.seq_args, &data->c_res.seq_res, task);
1880 }
1881
1882 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1883 {
1884 struct nfs4_opendata *data = calldata;
1885
1886 nfs40_sequence_done(task, &data->c_res.seq_res);
1887
1888 data->rpc_status = task->tk_status;
1889 if (data->rpc_status == 0) {
1890 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
1891 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1892 renew_lease(data->o_res.server, data->timestamp);
1893 data->rpc_done = 1;
1894 }
1895 }
1896
1897 static void nfs4_open_confirm_release(void *calldata)
1898 {
1899 struct nfs4_opendata *data = calldata;
1900 struct nfs4_state *state = NULL;
1901
1902 /* If this request hasn't been cancelled, do nothing */
1903 if (data->cancelled == 0)
1904 goto out_free;
1905 /* In case of error, no cleanup! */
1906 if (!data->rpc_done)
1907 goto out_free;
1908 state = nfs4_opendata_to_nfs4_state(data);
1909 if (!IS_ERR(state))
1910 nfs4_close_state(state, data->o_arg.fmode);
1911 out_free:
1912 nfs4_opendata_put(data);
1913 }
1914
1915 static const struct rpc_call_ops nfs4_open_confirm_ops = {
1916 .rpc_call_prepare = nfs4_open_confirm_prepare,
1917 .rpc_call_done = nfs4_open_confirm_done,
1918 .rpc_release = nfs4_open_confirm_release,
1919 };
1920
1921 /*
1922 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
1923 */
1924 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
1925 {
1926 struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
1927 struct rpc_task *task;
1928 struct rpc_message msg = {
1929 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
1930 .rpc_argp = &data->c_arg,
1931 .rpc_resp = &data->c_res,
1932 .rpc_cred = data->owner->so_cred,
1933 };
1934 struct rpc_task_setup task_setup_data = {
1935 .rpc_client = server->client,
1936 .rpc_message = &msg,
1937 .callback_ops = &nfs4_open_confirm_ops,
1938 .callback_data = data,
1939 .workqueue = nfsiod_workqueue,
1940 .flags = RPC_TASK_ASYNC,
1941 };
1942 int status;
1943
1944 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1);
1945 kref_get(&data->kref);
1946 data->rpc_done = 0;
1947 data->rpc_status = 0;
1948 data->timestamp = jiffies;
1949 if (data->is_recover)
1950 nfs4_set_sequence_privileged(&data->c_arg.seq_args);
1951 task = rpc_run_task(&task_setup_data);
1952 if (IS_ERR(task))
1953 return PTR_ERR(task);
1954 status = nfs4_wait_for_completion_rpc_task(task);
1955 if (status != 0) {
1956 data->cancelled = 1;
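		/*
		 * Presumably smp_wmb() is here to publish ->cancelled to the
		 * async rpc_release callback, which checks it before cleanup.
		 */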
1957 smp_wmb();
1958 } else
1959 status = data->rpc_status;
1960 rpc_put_task(task);
1961 return status;
1962 }
1963
1964 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1965 {
1966 struct nfs4_opendata *data = calldata;
1967 struct nfs4_state_owner *sp = data->owner;
1968 struct nfs_client *clp = sp->so_server->nfs_client;
1969 enum open_claim_type4 claim = data->o_arg.claim;
1970
1971 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
1972 goto out_wait;
1973 /*
1974 * Check if we still need to send an OPEN call, or if we can use
1975 * a delegation instead.
1976 */
1977 if (data->state != NULL) {
1978 struct nfs_delegation *delegation;
1979
1980 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
1981 goto out_no_action;
1982 rcu_read_lock();
1983 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
1984 if (can_open_delegated(delegation, data->o_arg.fmode, claim))
1985 goto unlock_no_action;
1986 rcu_read_unlock();
1987 }
1988 /* Update client id. */
1989 data->o_arg.clientid = clp->cl_clientid;
1990 switch (claim) {
1991 default:
1992 break;
1993 case NFS4_OPEN_CLAIM_PREVIOUS:
1994 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1995 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1996 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
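		/* Fall through */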
1997 case NFS4_OPEN_CLAIM_FH:
1998 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
1999 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
2000 }
2001 data->timestamp = jiffies;
2002 if (nfs4_setup_sequence(data->o_arg.server,
2003 &data->o_arg.seq_args,
2004 &data->o_res.seq_res,
2005 task) != 0)
2006 nfs_release_seqid(data->o_arg.seqid);
2007
2008 /* Set the create mode (note dependency on the session type) */
2009 data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
2010 if (data->o_arg.open_flags & O_EXCL) {
2011 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
2012 if (nfs4_has_persistent_session(clp))
2013 data->o_arg.createmode = NFS4_CREATE_GUARDED;
2014 else if (clp->cl_mvops->minor_version > 0)
2015 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
2016 }
2017 return;
2018 unlock_no_action:
2019 trace_nfs4_cached_open(data->state);
2020 rcu_read_unlock();
2021 out_no_action:
2022 task->tk_action = NULL;
2023 out_wait:
2024 nfs4_sequence_done(task, &data->o_res.seq_res);
2025 }
2026
2027 static void nfs4_open_done(struct rpc_task *task, void *calldata)
2028 {
2029 struct nfs4_opendata *data = calldata;
2030
2031 data->rpc_status = task->tk_status;
2032
2033 if (!nfs4_sequence_done(task, &data->o_res.seq_res))
2034 return;
2035
2036 if (task->tk_status == 0) {
2037 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
2038 switch (data->o_res.f_attr->mode & S_IFMT) {
2039 case S_IFREG:
2040 break;
2041 case S_IFLNK:
2042 data->rpc_status = -ELOOP;
2043 break;
2044 case S_IFDIR:
2045 data->rpc_status = -EISDIR;
2046 break;
2047 default:
2048 data->rpc_status = -ENOTDIR;
2049 }
2050 }
2051 renew_lease(data->o_res.server, data->timestamp);
2052 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
2053 nfs_confirm_seqid(&data->owner->so_seqid, 0);
2054 }
2055 data->rpc_done = 1;
2056 }
2057
2058 static void nfs4_open_release(void *calldata)
2059 {
2060 struct nfs4_opendata *data = calldata;
2061 struct nfs4_state *state = NULL;
2062
2063 /* If this request hasn't been cancelled, do nothing */
2064 if (data->cancelled == 0)
2065 goto out_free;
2066 /* In case of error, no cleanup! */
2067 if (data->rpc_status != 0 || !data->rpc_done)
2068 goto out_free;
2069 /* In case we need an open_confirm, no cleanup! */
2070 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
2071 goto out_free;
2072 state = nfs4_opendata_to_nfs4_state(data);
2073 if (!IS_ERR(state))
2074 nfs4_close_state(state, data->o_arg.fmode);
2075 out_free:
2076 nfs4_opendata_put(data);
2077 }
2078
2079 static const struct rpc_call_ops nfs4_open_ops = {
2080 .rpc_call_prepare = nfs4_open_prepare,
2081 .rpc_call_done = nfs4_open_done,
2082 .rpc_release = nfs4_open_release,
2083 };
2084
2085 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
2086 {
2087 struct inode *dir = d_inode(data->dir);
2088 struct nfs_server *server = NFS_SERVER(dir);
2089 struct nfs_openargs *o_arg = &data->o_arg;
2090 struct nfs_openres *o_res = &data->o_res;
2091 struct rpc_task *task;
2092 struct rpc_message msg = {
2093 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
2094 .rpc_argp = o_arg,
2095 .rpc_resp = o_res,
2096 .rpc_cred = data->owner->so_cred,
2097 };
2098 struct rpc_task_setup task_setup_data = {
2099 .rpc_client = server->client,
2100 .rpc_message = &msg,
2101 .callback_ops = &nfs4_open_ops,
2102 .callback_data = data,
2103 .workqueue = nfsiod_workqueue,
2104 .flags = RPC_TASK_ASYNC,
2105 };
2106 int status;
2107
2108 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
2109 kref_get(&data->kref);
2110 data->rpc_done = 0;
2111 data->rpc_status = 0;
2112 data->cancelled = 0;
2113 data->is_recover = 0;
2114 if (isrecover) {
2115 nfs4_set_sequence_privileged(&o_arg->seq_args);
2116 data->is_recover = 1;
2117 }
2118 task = rpc_run_task(&task_setup_data);
2119 if (IS_ERR(task))
2120 return PTR_ERR(task);
2121 status = nfs4_wait_for_completion_rpc_task(task);
2122 if (status != 0) {
2123 data->cancelled = 1;
2124 smp_wmb();
2125 } else
2126 status = data->rpc_status;
2127 rpc_put_task(task);
2128
2129 return status;
2130 }
2131
2132 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
2133 {
2134 struct inode *dir = d_inode(data->dir);
2135 struct nfs_openres *o_res = &data->o_res;
2136 int status;
2137
2138 status = nfs4_run_open_task(data, 1);
2139 if (status != 0 || !data->rpc_done)
2140 return status;
2141
2142 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
2143
2144 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2145 status = _nfs4_proc_open_confirm(data);
2146 if (status != 0)
2147 return status;
2148 }
2149
2150 return status;
2151 }
2152
2153 /*
2154 * Additional permission checks in order to distinguish between an
2155 * open for read, and an open for execute. This works around the
2156 * fact that NFSv4 OPEN treats read and execute permissions as being
2157 * the same.
2158 * Note that in the non-execute case, we want to turn off permission
2159 * checking if we just created a new file (POSIX open() semantics).
2160 */
2161 static int nfs4_opendata_access(struct rpc_cred *cred,
2162 struct nfs4_opendata *opendata,
2163 struct nfs4_state *state, fmode_t fmode,
2164 int openflags)
2165 {
2166 struct nfs_access_entry cache;
2167 u32 mask;
2168
2169 /* access call failed or for some reason the server doesn't
2170 * support any access modes -- defer access call until later */
2171 if (opendata->o_res.access_supported == 0)
2172 return 0;
2173
2174 mask = 0;
2175 /*
2176 * Use openflags to check for exec, because fmode won't
2177 	 * always have FMODE_EXEC set when a file is opened for execute.
2178 */
2179 if (openflags & __FMODE_EXEC) {
2180 /* ONLY check for exec rights */
2181 mask = MAY_EXEC;
2182 } else if ((fmode & FMODE_READ) && !opendata->file_created)
2183 mask = MAY_READ;
2184
2185 cache.cred = cred;
2186 cache.jiffies = jiffies;
2187 nfs_access_set_mask(&cache, opendata->o_res.access_result);
2188 nfs_access_add_cache(state->inode, &cache);
2189
2190 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
2191 return 0;
2192
2193 /* even though OPEN succeeded, access is denied. Close the file */
2194 nfs4_close_state(state, fmode);
2195 return -EACCES;
2196 }
2197
2198 /*
2199 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
2200 */
2201 static int _nfs4_proc_open(struct nfs4_opendata *data)
2202 {
2203 struct inode *dir = d_inode(data->dir);
2204 struct nfs_server *server = NFS_SERVER(dir);
2205 struct nfs_openargs *o_arg = &data->o_arg;
2206 struct nfs_openres *o_res = &data->o_res;
2207 int status;
2208
2209 status = nfs4_run_open_task(data, 0);
2210 if (!data->rpc_done)
2211 return status;
2212 if (status != 0) {
2213 if (status == -NFS4ERR_BADNAME &&
2214 !(o_arg->open_flags & O_CREAT))
2215 return -ENOENT;
2216 return status;
2217 }
2218
2219 nfs_fattr_map_and_free_names(server, &data->f_attr);
2220
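	/*
	 * Work out whether this OPEN actually created the file: O_EXCL
	 * implies that it did; otherwise infer it from the change in the
	 * directory's change attribute.
	 */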
2221 if (o_arg->open_flags & O_CREAT) {
2222 update_changeattr(dir, &o_res->cinfo);
2223 if (o_arg->open_flags & O_EXCL)
2224 data->file_created = 1;
2225 else if (o_res->cinfo.before != o_res->cinfo.after)
2226 data->file_created = 1;
2227 }
2228 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
2229 server->caps &= ~NFS_CAP_POSIX_LOCK;
2230 	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2231 status = _nfs4_proc_open_confirm(data);
2232 if (status != 0)
2233 return status;
2234 }
2235 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
2236 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label);
2237 return 0;
2238 }
2239
2240 static int nfs4_recover_expired_lease(struct nfs_server *server)
2241 {
2242 return nfs4_client_recover_expired_lease(server->nfs_client);
2243 }
2244
2245 /*
2246 * OPEN_EXPIRED:
2247 * reclaim state on the server after a network partition.
2248 * Assumes caller holds the appropriate lock
2249 */
2250 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2251 {
2252 struct nfs4_opendata *opendata;
2253 int ret;
2254
2255 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2256 NFS4_OPEN_CLAIM_FH);
2257 if (IS_ERR(opendata))
2258 return PTR_ERR(opendata);
2259 ret = nfs4_open_recover(opendata, state);
2260 if (ret == -ESTALE)
2261 d_drop(ctx->dentry);
2262 nfs4_opendata_put(opendata);
2263 return ret;
2264 }
2265
2266 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2267 {
2268 struct nfs_server *server = NFS_SERVER(state->inode);
2269 struct nfs4_exception exception = { };
2270 int err;
2271
2272 do {
2273 err = _nfs4_open_expired(ctx, state);
2274 trace_nfs4_open_expired(ctx, 0, err);
2275 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2276 continue;
2277 switch (err) {
2278 default:
2279 goto out;
2280 case -NFS4ERR_GRACE:
2281 case -NFS4ERR_DELAY:
2282 nfs4_handle_exception(server, err, &exception);
2283 err = 0;
2284 }
2285 } while (exception.retry);
2286 out:
2287 return err;
2288 }
2289
2290 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2291 {
2292 struct nfs_open_context *ctx;
2293 int ret;
2294
2295 ctx = nfs4_state_find_open_context(state);
2296 if (IS_ERR(ctx))
2297 return -EAGAIN;
2298 ret = nfs4_do_open_expired(ctx, state);
2299 put_nfs_open_context(ctx);
2300 return ret;
2301 }
2302
2303 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state)
2304 {
2305 nfs_remove_bad_delegation(state->inode);
2306 write_seqlock(&state->seqlock);
2307 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2308 write_sequnlock(&state->seqlock);
2309 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2310 }
2311
2312 static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
2313 {
2314 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
2315 nfs_finish_clear_delegation_stateid(state);
2316 }
2317
2318 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2319 {
2320 /* NFSv4.0 doesn't allow for delegation recovery on open expire */
2321 nfs40_clear_delegation_stateid(state);
2322 return nfs4_open_expired(sp, state);
2323 }
2324
2325 #if defined(CONFIG_NFS_V4_1)
2326 static void nfs41_check_delegation_stateid(struct nfs4_state *state)
2327 {
2328 struct nfs_server *server = NFS_SERVER(state->inode);
2329 nfs4_stateid stateid;
2330 struct nfs_delegation *delegation;
2331 struct rpc_cred *cred;
2332 int status;
2333
2334 /* Get the delegation credential for use by test/free_stateid */
2335 rcu_read_lock();
2336 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2337 if (delegation == NULL) {
2338 rcu_read_unlock();
2339 return;
2340 }
2341
2342 nfs4_stateid_copy(&stateid, &delegation->stateid);
2343 cred = get_rpccred(delegation->cred);
2344 rcu_read_unlock();
2345 status = nfs41_test_stateid(server, &stateid, cred);
2346 trace_nfs4_test_delegation_stateid(state, NULL, status);
2347
2348 if (status != NFS_OK) {
2349 /* Free the stateid unless the server explicitly
2350 * informs us the stateid is unrecognized. */
2351 if (status != -NFS4ERR_BAD_STATEID)
2352 nfs41_free_stateid(server, &stateid, cred);
2353 nfs_finish_clear_delegation_stateid(state);
2354 }
2355
2356 put_rpccred(cred);
2357 }
2358
2359 /**
2360 * nfs41_check_open_stateid - possibly free an open stateid
2361 *
2362 * @state: NFSv4 state for an inode
2363 *
2364 * Returns NFS_OK if recovery for this stateid is now finished.
2365 * Otherwise a negative NFS4ERR value is returned.
2366 */
2367 static int nfs41_check_open_stateid(struct nfs4_state *state)
2368 {
2369 struct nfs_server *server = NFS_SERVER(state->inode);
2370 nfs4_stateid *stateid = &state->open_stateid;
2371 struct rpc_cred *cred = state->owner->so_cred;
2372 int status;
2373
2374 /* If a state reset has been done, test_stateid is unneeded */
2375 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) &&
2376 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) &&
2377 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0))
2378 return -NFS4ERR_BAD_STATEID;
2379
2380 status = nfs41_test_stateid(server, stateid, cred);
2381 trace_nfs4_test_open_stateid(state, NULL, status);
2382 if (status != NFS_OK) {
2383 /* Free the stateid unless the server explicitly
2384 * informs us the stateid is unrecognized. */
2385 if (status != -NFS4ERR_BAD_STATEID)
2386 nfs41_free_stateid(server, stateid, cred);
2387
2388 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2389 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2390 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2391 clear_bit(NFS_OPEN_STATE, &state->flags);
2392 }
2393 return status;
2394 }
2395
2396 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2397 {
2398 int status;
2399
2400 nfs41_check_delegation_stateid(state);
2401 status = nfs41_check_open_stateid(state);
2402 if (status != NFS_OK)
2403 status = nfs4_open_expired(sp, state);
2404 return status;
2405 }
2406 #endif
2407
2408 /*
2409  * On an EXCLUSIVE create, the server should send back a bitmask with the
2410  * FATTR4_* fields corresponding to the attributes that were used to store
2411  * the verifier. Make sure we clobber those fields in the later setattr call.
2412 */
2413 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
2414 struct iattr *sattr, struct nfs4_label **label)
2415 {
2416 const u32 *attrset = opendata->o_res.attrset;
2417
2418 if ((attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
2419 !(sattr->ia_valid & ATTR_ATIME_SET))
2420 sattr->ia_valid |= ATTR_ATIME;
2421
2422 if ((attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
2423 !(sattr->ia_valid & ATTR_MTIME_SET))
2424 sattr->ia_valid |= ATTR_MTIME;
2425
2426 	/* Except for MODE, setting an attribute twice seems harmless. */
2427 if ((attrset[1] & FATTR4_WORD1_MODE))
2428 sattr->ia_valid &= ~ATTR_MODE;
2429
2430 if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL)
2431 *label = NULL;
2432 }
2433
2434 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
2435 fmode_t fmode,
2436 int flags,
2437 struct nfs_open_context *ctx)
2438 {
2439 struct nfs4_state_owner *sp = opendata->owner;
2440 struct nfs_server *server = sp->so_server;
2441 struct dentry *dentry;
2442 struct nfs4_state *state;
2443 unsigned int seq;
2444 int ret;
2445
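	/*
	 * Snapshot the state owner's reclaim seqcount so that we can detect
	 * state recovery running concurrently with this OPEN (checked with
	 * read_seqcount_retry() once the open has completed).
	 */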
2446 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
2447
2448 ret = _nfs4_proc_open(opendata);
2449 if (ret != 0)
2450 goto out;
2451
2452 state = nfs4_opendata_to_nfs4_state(opendata);
2453 ret = PTR_ERR(state);
2454 if (IS_ERR(state))
2455 goto out;
2456 if (server->caps & NFS_CAP_POSIX_LOCK)
2457 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
2458
2459 dentry = opendata->dentry;
2460 if (d_really_is_negative(dentry)) {
2461 /* FIXME: Is this d_drop() ever needed? */
2462 d_drop(dentry);
2463 dentry = d_add_unique(dentry, igrab(state->inode));
2464 if (dentry == NULL) {
2465 dentry = opendata->dentry;
2466 } else if (dentry != ctx->dentry) {
2467 dput(ctx->dentry);
2468 ctx->dentry = dget(dentry);
2469 }
2470 nfs_set_verifier(dentry,
2471 nfs_save_change_attribute(d_inode(opendata->dir)));
2472 }
2473
2474 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags);
2475 if (ret != 0)
2476 goto out;
2477
2478 ctx->state = state;
2479 if (d_inode(dentry) == state->inode) {
2480 nfs_inode_attach_open_context(ctx);
2481 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
2482 nfs4_schedule_stateid_recovery(server, state);
2483 }
2484 out:
2485 return ret;
2486 }
2487
2488 /*
2489 * Returns a referenced nfs4_state
2490 */
2491 static int _nfs4_do_open(struct inode *dir,
2492 struct nfs_open_context *ctx,
2493 int flags,
2494 struct iattr *sattr,
2495 struct nfs4_label *label,
2496 int *opened)
2497 {
2498 struct nfs4_state_owner *sp;
2499 struct nfs4_state *state = NULL;
2500 struct nfs_server *server = NFS_SERVER(dir);
2501 struct nfs4_opendata *opendata;
2502 struct dentry *dentry = ctx->dentry;
2503 struct rpc_cred *cred = ctx->cred;
2504 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
2505 fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
2506 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
2507 struct nfs4_label *olabel = NULL;
2508 int status;
2509
2510 /* Protect against reboot recovery conflicts */
2511 status = -ENOMEM;
2512 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
2513 if (sp == NULL) {
2514 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
2515 goto out_err;
2516 }
2517 status = nfs4_recover_expired_lease(server);
2518 if (status != 0)
2519 goto err_put_state_owner;
2520 if (d_really_is_positive(dentry))
2521 nfs4_return_incompatible_delegation(d_inode(dentry), fmode);
2522 status = -ENOMEM;
2523 if (d_really_is_positive(dentry))
2524 claim = NFS4_OPEN_CLAIM_FH;
2525 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr,
2526 label, claim, GFP_KERNEL);
2527 if (opendata == NULL)
2528 goto err_put_state_owner;
2529
2530 if (label) {
2531 olabel = nfs4_label_alloc(server, GFP_KERNEL);
2532 if (IS_ERR(olabel)) {
2533 status = PTR_ERR(olabel);
2534 goto err_opendata_put;
2535 }
2536 }
2537
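	/*
	 * If the server advertises the mdsthreshold attribute, ask for the
	 * pNFS layout thresholds as part of the OPEN.
	 */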
2538 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
2539 if (!opendata->f_attr.mdsthreshold) {
2540 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
2541 if (!opendata->f_attr.mdsthreshold)
2542 goto err_free_label;
2543 }
2544 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
2545 }
2546 if (d_really_is_positive(dentry))
2547 opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
2548
2549 status = _nfs4_open_and_get_state(opendata, fmode, flags, ctx);
2550 if (status != 0)
2551 goto err_free_label;
2552 state = ctx->state;
2553
2554 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
2555 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
2556 nfs4_exclusive_attrset(opendata, sattr, &label);
2557
2558 nfs_fattr_init(opendata->o_res.f_attr);
2559 status = nfs4_do_setattr(state->inode, cred,
2560 opendata->o_res.f_attr, sattr,
2561 state, label, olabel);
2562 if (status == 0) {
2563 nfs_setattr_update_inode(state->inode, sattr,
2564 opendata->o_res.f_attr);
2565 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
2566 }
2567 }
2568 if (opened && opendata->file_created)
2569 *opened |= FILE_CREATED;
2570
2571 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
2572 *ctx_th = opendata->f_attr.mdsthreshold;
2573 opendata->f_attr.mdsthreshold = NULL;
2574 }
2575
2576 nfs4_label_free(olabel);
2577
2578 nfs4_opendata_put(opendata);
2579 nfs4_put_state_owner(sp);
2580 return 0;
2581 err_free_label:
2582 nfs4_label_free(olabel);
2583 err_opendata_put:
2584 nfs4_opendata_put(opendata);
2585 err_put_state_owner:
2586 nfs4_put_state_owner(sp);
2587 out_err:
2588 return status;
2589 }
2590
2591
2592 static struct nfs4_state *nfs4_do_open(struct inode *dir,
2593 struct nfs_open_context *ctx,
2594 int flags,
2595 struct iattr *sattr,
2596 struct nfs4_label *label,
2597 int *opened)
2598 {
2599 struct nfs_server *server = NFS_SERVER(dir);
2600 struct nfs4_exception exception = { };
2601 struct nfs4_state *res;
2602 int status;
2603
2604 do {
2605 status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened);
2606 res = ctx->state;
2607 trace_nfs4_open_file(ctx, flags, status);
2608 if (status == 0)
2609 break;
2610 /* NOTE: BAD_SEQID means the server and client disagree about the
2611 * book-keeping w.r.t. state-changing operations
2612 * (OPEN/CLOSE/LOCK/LOCKU...)
2613 * It is actually a sign of a bug on the client or on the server.
2614 *
2615 * If we receive a BAD_SEQID error in the particular case of
2616 * doing an OPEN, we assume that nfs_increment_open_seqid() will
2617 * have unhashed the old state_owner for us, and that we can
2618 * therefore safely retry using a new one. We should still warn
2619 * the user though...
2620 */
2621 if (status == -NFS4ERR_BAD_SEQID) {
2622 pr_warn_ratelimited("NFS: v4 server %s "
2623 				"returned a bad sequence-id error!\n",
2624 NFS_SERVER(dir)->nfs_client->cl_hostname);
2625 exception.retry = 1;
2626 continue;
2627 }
2628 /*
2629 * BAD_STATEID on OPEN means that the server cancelled our
2630 * state before it received the OPEN_CONFIRM.
2631 * Recover by retrying the request as per the discussion
2632 * on Page 181 of RFC3530.
2633 */
2634 if (status == -NFS4ERR_BAD_STATEID) {
2635 exception.retry = 1;
2636 continue;
2637 }
2638 if (status == -EAGAIN) {
2639 /* We must have found a delegation */
2640 exception.retry = 1;
2641 continue;
2642 }
2643 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
2644 continue;
2645 res = ERR_PTR(nfs4_handle_exception(server,
2646 status, &exception));
2647 } while (exception.retry);
2648 return res;
2649 }
2650
2651 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2652 struct nfs_fattr *fattr, struct iattr *sattr,
2653 struct nfs4_state *state, struct nfs4_label *ilabel,
2654 struct nfs4_label *olabel)
2655 {
2656 struct nfs_server *server = NFS_SERVER(inode);
2657 struct nfs_setattrargs arg = {
2658 .fh = NFS_FH(inode),
2659 .iap = sattr,
2660 .server = server,
2661 .bitmask = server->attr_bitmask,
2662 .label = ilabel,
2663 };
2664 struct nfs_setattrres res = {
2665 .fattr = fattr,
2666 .label = olabel,
2667 .server = server,
2668 };
2669 struct rpc_message msg = {
2670 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
2671 .rpc_argp = &arg,
2672 .rpc_resp = &res,
2673 .rpc_cred = cred,
2674 };
2675 unsigned long timestamp = jiffies;
2676 fmode_t fmode;
2677 bool truncate;
2678 int status;
2679
2680 arg.bitmask = nfs4_bitmask(server, ilabel);
2681 if (ilabel)
2682 arg.bitmask = nfs4_bitmask(server, olabel);
2683
2684 nfs_fattr_init(fattr);
2685
2686 /* Servers should only apply open mode checks for file size changes */
2687 truncate = (sattr->ia_valid & ATTR_SIZE) ? true : false;
2688 fmode = truncate ? FMODE_WRITE : FMODE_READ;
2689
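	/*
	 * Select a stateid for the SETATTR: prefer a delegation stateid,
	 * then (for size changes) an open/lock stateid, and fall back to
	 * the zero stateid.
	 */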
2690 if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) {
2691 /* Use that stateid */
2692 } else if (truncate && state != NULL) {
2693 struct nfs_lockowner lockowner = {
2694 .l_owner = current->files,
2695 .l_pid = current->tgid,
2696 };
2697 if (!nfs4_valid_open_stateid(state))
2698 return -EBADF;
2699 if (nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
2700 &lockowner) == -EIO)
2701 return -EBADF;
2702 } else
2703 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
2704
2705 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2706 if (status == 0 && state != NULL)
2707 renew_lease(server, timestamp);
2708 trace_nfs4_setattr(inode, &arg.stateid, status);
2709 return status;
2710 }
2711
2712 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2713 struct nfs_fattr *fattr, struct iattr *sattr,
2714 struct nfs4_state *state, struct nfs4_label *ilabel,
2715 struct nfs4_label *olabel)
2716 {
2717 struct nfs_server *server = NFS_SERVER(inode);
2718 struct nfs4_exception exception = {
2719 .state = state,
2720 .inode = inode,
2721 };
2722 int err;
2723 do {
2724 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state, ilabel, olabel);
2725 switch (err) {
2726 case -NFS4ERR_OPENMODE:
2727 if (!(sattr->ia_valid & ATTR_SIZE)) {
2728 pr_warn_once("NFSv4: server %s is incorrectly "
2729 "applying open mode checks to "
2730 "a SETATTR that is not "
2731 "changing file size.\n",
2732 server->nfs_client->cl_hostname);
2733 }
2734 if (state && !(state->state & FMODE_WRITE)) {
2735 err = -EBADF;
2736 if (sattr->ia_valid & ATTR_OPEN)
2737 err = -EACCES;
2738 goto out;
2739 }
2740 }
2741 err = nfs4_handle_exception(server, err, &exception);
2742 } while (exception.retry);
2743 out:
2744 return err;
2745 }
2746
2747 static bool
2748 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
2749 {
2750 if (inode == NULL || !nfs_have_layout(inode))
2751 return false;
2752
2753 return pnfs_wait_on_layoutreturn(inode, task);
2754 }
2755
2756 struct nfs4_closedata {
2757 struct inode *inode;
2758 struct nfs4_state *state;
2759 struct nfs_closeargs arg;
2760 struct nfs_closeres res;
2761 struct nfs_fattr fattr;
2762 unsigned long timestamp;
2763 bool roc;
2764 u32 roc_barrier;
2765 };
2766
2767 static void nfs4_free_closedata(void *data)
2768 {
2769 struct nfs4_closedata *calldata = data;
2770 struct nfs4_state_owner *sp = calldata->state->owner;
2771 struct super_block *sb = calldata->state->inode->i_sb;
2772
2773 if (calldata->roc)
2774 pnfs_roc_release(calldata->state->inode);
2775 nfs4_put_open_state(calldata->state);
2776 nfs_free_seqid(calldata->arg.seqid);
2777 nfs4_put_state_owner(sp);
2778 nfs_sb_deactive(sb);
2779 kfree(calldata);
2780 }
2781
2782 static void nfs4_close_done(struct rpc_task *task, void *data)
2783 {
2784 struct nfs4_closedata *calldata = data;
2785 struct nfs4_state *state = calldata->state;
2786 struct nfs_server *server = NFS_SERVER(calldata->inode);
2787 nfs4_stateid *res_stateid = NULL;
2788
2789 dprintk("%s: begin!\n", __func__);
2790 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
2791 return;
2792 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
2793 /* hmm. we are done with the inode, and in the process of freeing
2794 * the state_owner. we keep this around to process errors
2795 */
2796 switch (task->tk_status) {
2797 case 0:
2798 res_stateid = &calldata->res.stateid;
2799 if (calldata->roc)
2800 pnfs_roc_set_barrier(state->inode,
2801 calldata->roc_barrier);
2802 renew_lease(server, calldata->timestamp);
2803 break;
2804 case -NFS4ERR_ADMIN_REVOKED:
2805 case -NFS4ERR_STALE_STATEID:
2806 case -NFS4ERR_OLD_STATEID:
2807 case -NFS4ERR_BAD_STATEID:
2808 case -NFS4ERR_EXPIRED:
2809 if (!nfs4_stateid_match(&calldata->arg.stateid,
2810 &state->open_stateid)) {
2811 rpc_restart_call_prepare(task);
2812 goto out_release;
2813 }
2814 if (calldata->arg.fmode == 0)
2815 break;
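		/* Fall through */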
2816 default:
2817 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) {
2818 rpc_restart_call_prepare(task);
2819 goto out_release;
2820 }
2821 }
2822 nfs_clear_open_stateid(state, &calldata->arg.stateid,
2823 res_stateid, calldata->arg.fmode);
2824 out_release:
2825 nfs_release_seqid(calldata->arg.seqid);
2826 nfs_refresh_inode(calldata->inode, calldata->res.fattr);
2827 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
2828 }
2829
2830 static void nfs4_close_prepare(struct rpc_task *task, void *data)
2831 {
2832 struct nfs4_closedata *calldata = data;
2833 struct nfs4_state *state = calldata->state;
2834 struct inode *inode = calldata->inode;
2835 bool is_rdonly, is_wronly, is_rdwr;
2836 int call_close = 0;
2837
2838 dprintk("%s: begin!\n", __func__);
2839 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
2840 goto out_wait;
2841
2842 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
2843 spin_lock(&state->owner->so_lock);
2844 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
2845 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
2846 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
2847 nfs4_stateid_copy(&calldata->arg.stateid, &state->open_stateid);
2848 /* Calculate the change in open mode */
2849 calldata->arg.fmode = 0;
2850 if (state->n_rdwr == 0) {
2851 if (state->n_rdonly == 0)
2852 call_close |= is_rdonly;
2853 else if (is_rdonly)
2854 calldata->arg.fmode |= FMODE_READ;
2855 if (state->n_wronly == 0)
2856 call_close |= is_wronly;
2857 else if (is_wronly)
2858 calldata->arg.fmode |= FMODE_WRITE;
2859 } else if (is_rdwr)
2860 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
2861
2862 if (calldata->arg.fmode == 0)
2863 call_close |= is_rdwr;
2864
2865 if (!nfs4_valid_open_stateid(state))
2866 call_close = 0;
2867 spin_unlock(&state->owner->so_lock);
2868
2869 if (!call_close) {
2870 /* Note: exit _without_ calling nfs4_close_done */
2871 goto out_no_action;
2872 }
2873
2874 if (nfs4_wait_on_layoutreturn(inode, task)) {
2875 nfs_release_seqid(calldata->arg.seqid);
2876 goto out_wait;
2877 }
2878
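	/*
	 * If no open modes remain, send a full CLOSE rather than the
	 * OPEN_DOWNGRADE set up above.
	 */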
2879 if (calldata->arg.fmode == 0)
2880 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
2881 if (calldata->roc)
2882 pnfs_roc_get_barrier(inode, &calldata->roc_barrier);
2883
2884 calldata->arg.share_access =
2885 nfs4_map_atomic_open_share(NFS_SERVER(inode),
2886 calldata->arg.fmode, 0);
2887
2888 nfs_fattr_init(calldata->res.fattr);
2889 calldata->timestamp = jiffies;
2890 if (nfs4_setup_sequence(NFS_SERVER(inode),
2891 &calldata->arg.seq_args,
2892 &calldata->res.seq_res,
2893 task) != 0)
2894 nfs_release_seqid(calldata->arg.seqid);
2895 dprintk("%s: done!\n", __func__);
2896 return;
2897 out_no_action:
2898 task->tk_action = NULL;
2899 out_wait:
2900 nfs4_sequence_done(task, &calldata->res.seq_res);
2901 }
2902
2903 static const struct rpc_call_ops nfs4_close_ops = {
2904 .rpc_call_prepare = nfs4_close_prepare,
2905 .rpc_call_done = nfs4_close_done,
2906 .rpc_release = nfs4_free_closedata,
2907 };
2908
2909 static bool nfs4_roc(struct inode *inode)
2910 {
2911 if (!nfs_have_layout(inode))
2912 return false;
2913 return pnfs_roc(inode);
2914 }
2915
2916 /*
2917 * It is possible for data to be read/written from a mem-mapped file
2918 * after the sys_close call (which hits the vfs layer as a flush).
2919 * This means that we can't safely call nfsv4 close on a file until
2920 * the inode is cleared. This in turn means that we are not good
2921  * NFSv4 citizens - we do not tell the server to update the file's
2922  * share state even when we are done with one of the three share
2923  * stateids in the inode.
2924 *
2925 * NOTE: Caller must be holding the sp->so_owner semaphore!
2926 */
2927 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
2928 {
2929 struct nfs_server *server = NFS_SERVER(state->inode);
2930 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
2931 struct nfs4_closedata *calldata;
2932 struct nfs4_state_owner *sp = state->owner;
2933 struct rpc_task *task;
2934 struct rpc_message msg = {
2935 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
2936 .rpc_cred = state->owner->so_cred,
2937 };
2938 struct rpc_task_setup task_setup_data = {
2939 .rpc_client = server->client,
2940 .rpc_message = &msg,
2941 .callback_ops = &nfs4_close_ops,
2942 .workqueue = nfsiod_workqueue,
2943 .flags = RPC_TASK_ASYNC,
2944 };
2945 int status = -ENOMEM;
2946
2947 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
2948 &task_setup_data.rpc_client, &msg);
2949
2950 calldata = kzalloc(sizeof(*calldata), gfp_mask);
2951 if (calldata == NULL)
2952 goto out;
2953 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
2954 calldata->inode = state->inode;
2955 calldata->state = state;
2956 calldata->arg.fh = NFS_FH(state->inode);
2957 /* Serialization for the sequence id */
2958 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
2959 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
2960 if (IS_ERR(calldata->arg.seqid))
2961 goto out_free_calldata;
2962 calldata->arg.fmode = 0;
2963 calldata->arg.bitmask = server->cache_consistency_bitmask;
2964 calldata->res.fattr = &calldata->fattr;
2965 calldata->res.seqid = calldata->arg.seqid;
2966 calldata->res.server = server;
2967 calldata->roc = nfs4_roc(state->inode);
2968 nfs_sb_active(calldata->inode->i_sb);
2969
2970 msg.rpc_argp = &calldata->arg;
2971 msg.rpc_resp = &calldata->res;
2972 task_setup_data.callback_data = calldata;
2973 task = rpc_run_task(&task_setup_data);
2974 if (IS_ERR(task))
2975 return PTR_ERR(task);
2976 status = 0;
2977 if (wait)
2978 status = rpc_wait_for_completion_task(task);
2979 rpc_put_task(task);
2980 return status;
2981 out_free_calldata:
2982 kfree(calldata);
2983 out:
2984 nfs4_put_open_state(state);
2985 nfs4_put_state_owner(sp);
2986 return status;
2987 }
2988
2989 static struct inode *
2990 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
2991 int open_flags, struct iattr *attr, int *opened)
2992 {
2993 struct nfs4_state *state;
2994 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
2995
2996 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
2997
2998 /* Protect against concurrent sillydeletes */
2999 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
3000
3001 nfs4_label_release_security(label);
3002
3003 if (IS_ERR(state))
3004 return ERR_CAST(state);
3005 return state->inode;
3006 }
3007
3008 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
3009 {
3010 if (ctx->state == NULL)
3011 return;
3012 if (is_sync)
3013 nfs4_close_sync(ctx->state, ctx->mode);
3014 else
3015 nfs4_close_state(ctx->state, ctx->mode);
3016 }
3017
3018 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
3019 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
3020 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_SECURITY_LABEL - 1UL)
3021
3022 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3023 {
3024 u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion;
3025 struct nfs4_server_caps_arg args = {
3026 .fhandle = fhandle,
3027 .bitmask = bitmask,
3028 };
3029 struct nfs4_server_caps_res res = {};
3030 struct rpc_message msg = {
3031 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
3032 .rpc_argp = &args,
3033 .rpc_resp = &res,
3034 };
3035 int status;
3036
3037 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS |
3038 FATTR4_WORD0_FH_EXPIRE_TYPE |
3039 FATTR4_WORD0_LINK_SUPPORT |
3040 FATTR4_WORD0_SYMLINK_SUPPORT |
3041 FATTR4_WORD0_ACLSUPPORT;
3042 if (minorversion)
3043 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT;
3044
3045 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3046 if (status == 0) {
3047 /* Sanity check the server answers */
3048 switch (minorversion) {
3049 case 0:
3050 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
3051 res.attr_bitmask[2] = 0;
3052 break;
3053 case 1:
3054 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
3055 break;
3056 case 2:
3057 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
3058 }
3059 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
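		/*
		 * Recompute the capabilities advertised by this server from
		 * the supported-attribute bitmask returned above.
		 */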
3060 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
3061 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
3062 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
3063 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
3064 NFS_CAP_CTIME|NFS_CAP_MTIME|
3065 NFS_CAP_SECURITY_LABEL);
3066 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
3067 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3068 server->caps |= NFS_CAP_ACLS;
3069 if (res.has_links != 0)
3070 server->caps |= NFS_CAP_HARDLINKS;
3071 if (res.has_symlinks != 0)
3072 server->caps |= NFS_CAP_SYMLINKS;
3073 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
3074 server->caps |= NFS_CAP_FILEID;
3075 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
3076 server->caps |= NFS_CAP_MODE;
3077 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
3078 server->caps |= NFS_CAP_NLINK;
3079 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
3080 server->caps |= NFS_CAP_OWNER;
3081 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
3082 server->caps |= NFS_CAP_OWNER_GROUP;
3083 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
3084 server->caps |= NFS_CAP_ATIME;
3085 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
3086 server->caps |= NFS_CAP_CTIME;
3087 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
3088 server->caps |= NFS_CAP_MTIME;
3089 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
3090 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
3091 server->caps |= NFS_CAP_SECURITY_LABEL;
3092 #endif
3093 memcpy(server->attr_bitmask_nl, res.attr_bitmask,
3094 sizeof(server->attr_bitmask));
3095 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
3096
3097 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
3098 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
3099 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
3100 server->cache_consistency_bitmask[2] = 0;
3101 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
3102 sizeof(server->exclcreat_bitmask));
3103 server->acl_bitmask = res.acl_bitmask;
3104 server->fh_expire_type = res.fh_expire_type;
3105 }
3106
3107 return status;
3108 }
3109
3110 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3111 {
3112 struct nfs4_exception exception = { };
3113 int err;
3114 do {
3115 err = nfs4_handle_exception(server,
3116 _nfs4_server_capabilities(server, fhandle),
3117 &exception);
3118 } while (exception.retry);
3119 return err;
3120 }
3121
3122 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
3123 struct nfs_fsinfo *info)
3124 {
3125 u32 bitmask[3];
3126 struct nfs4_lookup_root_arg args = {
3127 .bitmask = bitmask,
3128 };
3129 struct nfs4_lookup_res res = {
3130 .server = server,
3131 .fattr = info->fattr,
3132 .fh = fhandle,
3133 };
3134 struct rpc_message msg = {
3135 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
3136 .rpc_argp = &args,
3137 .rpc_resp = &res,
3138 };
3139
3140 bitmask[0] = nfs4_fattr_bitmap[0];
3141 bitmask[1] = nfs4_fattr_bitmap[1];
3142 /*
3143 * Process the label in the upcoming getfattr
3144 */
3145 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
3146
3147 nfs_fattr_init(info->fattr);
3148 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3149 }
3150
3151 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
3152 struct nfs_fsinfo *info)
3153 {
3154 struct nfs4_exception exception = { };
3155 int err;
3156 do {
3157 err = _nfs4_lookup_root(server, fhandle, info);
3158 trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
3159 switch (err) {
3160 case 0:
3161 case -NFS4ERR_WRONGSEC:
3162 goto out;
3163 default:
3164 err = nfs4_handle_exception(server, err, &exception);
3165 }
3166 } while (exception.retry);
3167 out:
3168 return err;
3169 }
3170
3171 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3172 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
3173 {
3174 struct rpc_auth_create_args auth_args = {
3175 .pseudoflavor = flavor,
3176 };
3177 struct rpc_auth *auth;
3178 int ret;
3179
3180 auth = rpcauth_create(&auth_args, server->client);
3181 if (IS_ERR(auth)) {
3182 ret = -EACCES;
3183 goto out;
3184 }
3185 ret = nfs4_lookup_root(server, fhandle, info);
3186 out:
3187 return ret;
3188 }
3189
3190 /*
3191 * Retry pseudoroot lookup with various security flavors. We do this when:
3192 *
3193 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
3194 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation
3195 *
3196 * Returns zero on success, or a negative NFS4ERR value, or a
3197 * negative errno value.
3198 */
3199 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3200 struct nfs_fsinfo *info)
3201 {
3202 /* Per 3530bis 15.33.5 */
3203 static const rpc_authflavor_t flav_array[] = {
3204 RPC_AUTH_GSS_KRB5P,
3205 RPC_AUTH_GSS_KRB5I,
3206 RPC_AUTH_GSS_KRB5,
3207 RPC_AUTH_UNIX, /* courtesy */
3208 RPC_AUTH_NULL,
3209 };
3210 int status = -EPERM;
3211 size_t i;
3212
3213 if (server->auth_info.flavor_len > 0) {
3214 /* try each flavor specified by user */
3215 for (i = 0; i < server->auth_info.flavor_len; i++) {
3216 status = nfs4_lookup_root_sec(server, fhandle, info,
3217 server->auth_info.flavors[i]);
3218 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3219 continue;
3220 break;
3221 }
3222 } else {
3223 /* no flavors specified by user, try default list */
3224 for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
3225 status = nfs4_lookup_root_sec(server, fhandle, info,
3226 flav_array[i]);
3227 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3228 continue;
3229 break;
3230 }
3231 }
3232
3233 /*
3234 	 * -EACCES could mean that the user doesn't have correct permissions
3235 * to access the mount. It could also mean that we tried to mount
3236 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
3237 * existing mount programs don't handle -EACCES very well so it should
3238 * be mapped to -EPERM instead.
3239 */
3240 if (status == -EACCES)
3241 status = -EPERM;
3242 return status;
3243 }
3244
3245 static int nfs4_do_find_root_sec(struct nfs_server *server,
3246 struct nfs_fh *fhandle, struct nfs_fsinfo *info)
3247 {
3248 int mv = server->nfs_client->cl_minorversion;
3249 return nfs_v4_minor_ops[mv]->find_root_sec(server, fhandle, info);
3250 }
3251
3252 /**
3253 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
3254 * @server: initialized nfs_server handle
3255 * @fhandle: we fill in the pseudo-fs root file handle
3256 * @info: we fill in an FSINFO struct
3257 * @auth_probe: probe the auth flavours
3258 *
3259 * Returns zero on success, or a negative errno.
3260 */
3261 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
3262 struct nfs_fsinfo *info,
3263 bool auth_probe)
3264 {
3265 int status = 0;
3266
3267 if (!auth_probe)
3268 status = nfs4_lookup_root(server, fhandle, info);
3269
3270 	if (auth_probe || status == -NFS4ERR_WRONGSEC)
3271 status = nfs4_do_find_root_sec(server, fhandle, info);
3272
3273 if (status == 0)
3274 status = nfs4_server_capabilities(server, fhandle);
3275 if (status == 0)
3276 status = nfs4_do_fsinfo(server, fhandle, info);
3277
3278 return nfs4_map_errors(status);
3279 }
3280
3281 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
3282 struct nfs_fsinfo *info)
3283 {
3284 int error;
3285 struct nfs_fattr *fattr = info->fattr;
3286 struct nfs4_label *label = NULL;
3287
3288 error = nfs4_server_capabilities(server, mntfh);
3289 if (error < 0) {
3290 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
3291 return error;
3292 }
3293
3294 label = nfs4_label_alloc(server, GFP_KERNEL);
3295 if (IS_ERR(label))
3296 return PTR_ERR(label);
3297
3298 error = nfs4_proc_getattr(server, mntfh, fattr, label);
3299 if (error < 0) {
3300 dprintk("nfs4_get_root: getattr error = %d\n", -error);
3301 goto err_free_label;
3302 }
3303
3304 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
3305 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
3306 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
3307
3308 err_free_label:
3309 nfs4_label_free(label);
3310
3311 return error;
3312 }
3313
3314 /*
3315 * Get locations and (maybe) other attributes of a referral.
3316 * Note that we'll actually follow the referral later when
3317 * we detect fsid mismatch in inode revalidation
3318 */
3319 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
3320 const struct qstr *name, struct nfs_fattr *fattr,
3321 struct nfs_fh *fhandle)
3322 {
3323 int status = -ENOMEM;
3324 struct page *page = NULL;
3325 struct nfs4_fs_locations *locations = NULL;
3326
3327 page = alloc_page(GFP_KERNEL);
3328 if (page == NULL)
3329 goto out;
3330 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
3331 if (locations == NULL)
3332 goto out;
3333
3334 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
3335 if (status != 0)
3336 goto out;
3337
3338 /*
3339 * If the fsid didn't change, this is a migration event, not a
3340 * referral. Cause us to drop into the exception handler, which
3341 * will kick off migration recovery.
3342 */
3343 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
3344 dprintk("%s: server did not return a different fsid for"
3345 " a referral at %s\n", __func__, name->name);
3346 status = -NFS4ERR_MOVED;
3347 goto out;
3348 }
3349 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
3350 nfs_fixup_referral_attributes(&locations->fattr);
3351
3352 /* replace the lookup nfs_fattr with the locations nfs_fattr */
3353 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
3354 memset(fhandle, 0, sizeof(struct nfs_fh));
3355 out:
3356 if (page)
3357 __free_page(page);
3358 kfree(locations);
3359 return status;
3360 }
3361
3362 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3363 struct nfs_fattr *fattr, struct nfs4_label *label)
3364 {
3365 struct nfs4_getattr_arg args = {
3366 .fh = fhandle,
3367 .bitmask = server->attr_bitmask,
3368 };
3369 struct nfs4_getattr_res res = {
3370 .fattr = fattr,
3371 .label = label,
3372 .server = server,
3373 };
3374 struct rpc_message msg = {
3375 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
3376 .rpc_argp = &args,
3377 .rpc_resp = &res,
3378 };
3379
3380 args.bitmask = nfs4_bitmask(server, label);
3381
3382 nfs_fattr_init(fattr);
3383 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3384 }
3385
3386 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3387 struct nfs_fattr *fattr, struct nfs4_label *label)
3388 {
3389 struct nfs4_exception exception = { };
3390 int err;
3391 do {
3392 err = _nfs4_proc_getattr(server, fhandle, fattr, label);
3393 trace_nfs4_getattr(server, fhandle, fattr, err);
3394 err = nfs4_handle_exception(server, err,
3395 &exception);
3396 } while (exception.retry);
3397 return err;
3398 }
3399
3400 /*
3401  * The file is not closed if it is opened due to a request to change
3402 * the size of the file. The open call will not be needed once the
3403 * VFS layer lookup-intents are implemented.
3404 *
3405 * Close is called when the inode is destroyed.
3406 * If we haven't opened the file for O_WRONLY, we
3407  * need to do so in the size_change case to obtain a stateid.
3408 *
3409 * Got race?
3410 * Because OPEN is always done by name in nfsv4, it is
3411 * possible that we opened a different file by the same
3412 * name. We can recognize this race condition, but we
3413 * can't do anything about it besides returning an error.
3414 *
3415 * This will be fixed with VFS changes (lookup-intent).
3416 */
3417 static int
3418 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
3419 struct iattr *sattr)
3420 {
3421 struct inode *inode = d_inode(dentry);
3422 struct rpc_cred *cred = NULL;
3423 struct nfs4_state *state = NULL;
3424 struct nfs4_label *label = NULL;
3425 int status;
3426
3427 if (pnfs_ld_layoutret_on_setattr(inode) &&
3428 sattr->ia_valid & ATTR_SIZE &&
3429 sattr->ia_size < i_size_read(inode))
3430 pnfs_commit_and_return_layout(inode);
3431
3432 nfs_fattr_init(fattr);
3433
3434 /* Deal with open(O_TRUNC) */
3435 if (sattr->ia_valid & ATTR_OPEN)
3436 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
3437
3438 /* Optimization: if the end result is no change, don't RPC */
3439 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
3440 return 0;
3441
3442 /* Search for an existing open(O_WRITE) file */
3443 if (sattr->ia_valid & ATTR_FILE) {
3444 struct nfs_open_context *ctx;
3445
3446 ctx = nfs_file_open_context(sattr->ia_file);
3447 if (ctx) {
3448 cred = ctx->cred;
3449 state = ctx->state;
3450 }
3451 }
3452
3453 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
3454 if (IS_ERR(label))
3455 return PTR_ERR(label);
3456
3457 status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label);
3458 if (status == 0) {
3459 nfs_setattr_update_inode(inode, sattr, fattr);
3460 nfs_setsecurity(inode, fattr, label);
3461 }
3462 nfs4_label_free(label);
3463 return status;
3464 }
3465
3466 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
3467 const struct qstr *name, struct nfs_fh *fhandle,
3468 struct nfs_fattr *fattr, struct nfs4_label *label)
3469 {
3470 struct nfs_server *server = NFS_SERVER(dir);
3471 int status;
3472 struct nfs4_lookup_arg args = {
3473 .bitmask = server->attr_bitmask,
3474 .dir_fh = NFS_FH(dir),
3475 .name = name,
3476 };
3477 struct nfs4_lookup_res res = {
3478 .server = server,
3479 .fattr = fattr,
3480 .label = label,
3481 .fh = fhandle,
3482 };
3483 struct rpc_message msg = {
3484 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
3485 .rpc_argp = &args,
3486 .rpc_resp = &res,
3487 };
3488
3489 args.bitmask = nfs4_bitmask(server, label);
3490
3491 nfs_fattr_init(fattr);
3492
3493 dprintk("NFS call lookup %s\n", name->name);
3494 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
3495 dprintk("NFS reply lookup: %d\n", status);
3496 return status;
3497 }
3498
3499 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
3500 {
3501 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
3502 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
3503 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
3504 fattr->nlink = 2;
3505 }
3506
3507 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
3508 struct qstr *name, struct nfs_fh *fhandle,
3509 struct nfs_fattr *fattr, struct nfs4_label *label)
3510 {
3511 struct nfs4_exception exception = { };
3512 struct rpc_clnt *client = *clnt;
3513 int err;
3514 do {
3515 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr, label);
3516 trace_nfs4_lookup(dir, name, err);
3517 switch (err) {
3518 case -NFS4ERR_BADNAME:
3519 err = -ENOENT;
3520 goto out;
3521 case -NFS4ERR_MOVED:
3522 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
3523 if (err == -NFS4ERR_MOVED)
3524 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3525 goto out;
3526 case -NFS4ERR_WRONGSEC:
3527 err = -EPERM;
3528 if (client != *clnt)
3529 goto out;
3530 client = nfs4_negotiate_security(client, dir, name);
3531 if (IS_ERR(client))
3532 return PTR_ERR(client);
3533
3534 exception.retry = 1;
3535 break;
3536 default:
3537 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3538 }
3539 } while (exception.retry);
3540
3541 out:
3542 if (err == 0)
3543 *clnt = client;
3544 else if (client != *clnt)
3545 rpc_shutdown_client(client);
3546
3547 return err;
3548 }
3549
3550 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
3551 struct nfs_fh *fhandle, struct nfs_fattr *fattr,
3552 struct nfs4_label *label)
3553 {
3554 int status;
3555 struct rpc_clnt *client = NFS_CLIENT(dir);
3556
3557 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, label);
3558 if (client != NFS_CLIENT(dir)) {
3559 rpc_shutdown_client(client);
3560 nfs_fixup_secinfo_attributes(fattr);
3561 }
3562 return status;
3563 }
3564
3565 struct rpc_clnt *
3566 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
3567 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
3568 {
3569 struct rpc_clnt *client = NFS_CLIENT(dir);
3570 int status;
3571
3572 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL);
3573 if (status < 0)
3574 return ERR_PTR(status);
3575 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
3576 }
3577
3578 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3579 {
3580 struct nfs_server *server = NFS_SERVER(inode);
3581 struct nfs4_accessargs args = {
3582 .fh = NFS_FH(inode),
3583 .bitmask = server->cache_consistency_bitmask,
3584 };
3585 struct nfs4_accessres res = {
3586 .server = server,
3587 };
3588 struct rpc_message msg = {
3589 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
3590 .rpc_argp = &args,
3591 .rpc_resp = &res,
3592 .rpc_cred = entry->cred,
3593 };
3594 int mode = entry->mask;
3595 int status = 0;
3596
3597 /*
3598 * Determine which access bits we want to ask for...
3599 */
3600 if (mode & MAY_READ)
3601 args.access |= NFS4_ACCESS_READ;
3602 if (S_ISDIR(inode->i_mode)) {
3603 if (mode & MAY_WRITE)
3604 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
3605 if (mode & MAY_EXEC)
3606 args.access |= NFS4_ACCESS_LOOKUP;
3607 } else {
3608 if (mode & MAY_WRITE)
3609 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
3610 if (mode & MAY_EXEC)
3611 args.access |= NFS4_ACCESS_EXECUTE;
3612 }
3613
3614 res.fattr = nfs_alloc_fattr();
3615 if (res.fattr == NULL)
3616 return -ENOMEM;
3617
3618 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3619 if (!status) {
3620 nfs_access_set_mask(entry, res.access);
3621 nfs_refresh_inode(inode, res.fattr);
3622 }
3623 nfs_free_fattr(res.fattr);
3624 return status;
3625 }
3626
3627 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3628 {
3629 struct nfs4_exception exception = { };
3630 int err;
3631 do {
3632 err = _nfs4_proc_access(inode, entry);
3633 trace_nfs4_access(inode, err);
3634 err = nfs4_handle_exception(NFS_SERVER(inode), err,
3635 &exception);
3636 } while (exception.retry);
3637 return err;
3638 }
3639
3640 /*
3641 * TODO: For the time being, we don't try to get any attributes
3642 * along with any of the zero-copy operations READ, READDIR,
3643 * READLINK, WRITE.
3644 *
3645 * In the case of the first three, we want to put the GETATTR
3646 * after the read-type operation -- this is because it is hard
3647 * to predict the length of a GETATTR response in v4, and thus
3648 * align the READ data correctly. This means that the GETATTR
3649 * may end up partially falling into the page cache, and we should
3650 * shift it into the 'tail' of the xdr_buf before processing.
3651 * To do this efficiently, we need to know the total length
3652 * of data received, which doesn't seem to be available outside
3653 * of the RPC layer.
3654 *
3655 * In the case of WRITE, we also want to put the GETATTR after
3656 * the operation -- in this case because we want to make sure
3657 * we get the post-operation mtime and size.
3658 *
3659 * Both of these changes to the XDR layer would in fact be quite
3660 * minor, but I decided to leave them for a subsequent patch.
3661 */
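/*
 * Rough illustration (a sketch, not a description of the current decoder):
 * a READ reply is received into an xdr_buf laid out as
 *
 *	head  -> RPC and COMPOUND headers plus the READ op header
 *	pages -> the file data itself, received zero-copy
 *	tail  -> trailing XDR padding
 *
 * A GETATTR placed before the READ has a reply of unpredictable length, so
 * the file data could no longer land page-aligned in 'pages'; a GETATTR
 * placed after the READ lands partly in 'pages' and would have to be
 * shifted into 'tail' before it could be decoded, as described above.
 */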
3662 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
3663 unsigned int pgbase, unsigned int pglen)
3664 {
3665 struct nfs4_readlink args = {
3666 .fh = NFS_FH(inode),
3667 .pgbase = pgbase,
3668 .pglen = pglen,
3669 .pages = &page,
3670 };
3671 struct nfs4_readlink_res res;
3672 struct rpc_message msg = {
3673 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
3674 .rpc_argp = &args,
3675 .rpc_resp = &res,
3676 };
3677
3678 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
3679 }
3680
3681 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
3682 unsigned int pgbase, unsigned int pglen)
3683 {
3684 struct nfs4_exception exception = { };
3685 int err;
3686 do {
3687 err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
3688 trace_nfs4_readlink(inode, err);
3689 err = nfs4_handle_exception(NFS_SERVER(inode), err,
3690 &exception);
3691 } while (exception.retry);
3692 return err;
3693 }
3694
3695 /*
3696 * This is just for mknod. open(O_CREAT) will always do ->open_context().
3697 */
3698 static int
3699 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
3700 int flags)
3701 {
3702 struct nfs4_label l, *ilabel = NULL;
3703 struct nfs_open_context *ctx;
3704 struct nfs4_state *state;
3705 int status = 0;
3706
3707 ctx = alloc_nfs_open_context(dentry, FMODE_READ);
3708 if (IS_ERR(ctx))
3709 return PTR_ERR(ctx);
3710
3711 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
3712
3713 sattr->ia_mode &= ~current_umask();
3714 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL);
3715 if (IS_ERR(state)) {
3716 status = PTR_ERR(state);
3717 goto out;
3718 }
3719 out:
3720 nfs4_label_release_security(ilabel);
3721 put_nfs_open_context(ctx);
3722 return status;
3723 }
3724
3725 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
3726 {
3727 struct nfs_server *server = NFS_SERVER(dir);
3728 struct nfs_removeargs args = {
3729 .fh = NFS_FH(dir),
3730 .name = *name,
3731 };
3732 struct nfs_removeres res = {
3733 .server = server,
3734 };
3735 struct rpc_message msg = {
3736 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
3737 .rpc_argp = &args,
3738 .rpc_resp = &res,
3739 };
3740 int status;
3741
3742 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
3743 if (status == 0)
3744 update_changeattr(dir, &res.cinfo);
3745 return status;
3746 }
3747
3748 static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
3749 {
3750 struct nfs4_exception exception = { };
3751 int err;
3752 do {
3753 err = _nfs4_proc_remove(dir, name);
3754 trace_nfs4_remove(dir, name, err);
3755 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3756 &exception);
3757 } while (exception.retry);
3758 return err;
3759 }
3760
3761 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
3762 {
3763 struct nfs_server *server = NFS_SERVER(dir);
3764 struct nfs_removeargs *args = msg->rpc_argp;
3765 struct nfs_removeres *res = msg->rpc_resp;
3766
3767 res->server = server;
3768 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
3769 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1);
3770
3771 nfs_fattr_init(res->dir_attr);
3772 }
3773
3774 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
3775 {
3776 nfs4_setup_sequence(NFS_SERVER(data->dir),
3777 &data->args.seq_args,
3778 &data->res.seq_res,
3779 task);
3780 }
3781
3782 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
3783 {
3784 struct nfs_unlinkdata *data = task->tk_calldata;
3785 struct nfs_removeres *res = &data->res;
3786
3787 if (!nfs4_sequence_done(task, &res->seq_res))
3788 return 0;
3789 if (nfs4_async_handle_error(task, res->server, NULL,
3790 &data->timeout) == -EAGAIN)
3791 return 0;
3792 update_changeattr(dir, &res->cinfo);
3793 return 1;
3794 }
3795
3796 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
3797 {
3798 struct nfs_server *server = NFS_SERVER(dir);
3799 struct nfs_renameargs *arg = msg->rpc_argp;
3800 struct nfs_renameres *res = msg->rpc_resp;
3801
3802 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
3803 res->server = server;
3804 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1);
3805 }
3806
3807 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
3808 {
3809 nfs4_setup_sequence(NFS_SERVER(data->old_dir),
3810 &data->args.seq_args,
3811 &data->res.seq_res,
3812 task);
3813 }
3814
3815 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
3816 struct inode *new_dir)
3817 {
3818 struct nfs_renamedata *data = task->tk_calldata;
3819 struct nfs_renameres *res = &data->res;
3820
3821 if (!nfs4_sequence_done(task, &res->seq_res))
3822 return 0;
3823 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
3824 return 0;
3825
3826 update_changeattr(old_dir, &res->old_cinfo);
3827 update_changeattr(new_dir, &res->new_cinfo);
3828 return 1;
3829 }
3830
3831 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3832 {
3833 struct nfs_server *server = NFS_SERVER(inode);
3834 struct nfs4_link_arg arg = {
3835 .fh = NFS_FH(inode),
3836 .dir_fh = NFS_FH(dir),
3837 .name = name,
3838 .bitmask = server->attr_bitmask,
3839 };
3840 struct nfs4_link_res res = {
3841 .server = server,
3842 .label = NULL,
3843 };
3844 struct rpc_message msg = {
3845 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
3846 .rpc_argp = &arg,
3847 .rpc_resp = &res,
3848 };
3849 int status = -ENOMEM;
3850
3851 res.fattr = nfs_alloc_fattr();
3852 if (res.fattr == NULL)
3853 goto out;
3854
3855 res.label = nfs4_label_alloc(server, GFP_KERNEL);
3856 if (IS_ERR(res.label)) {
3857 status = PTR_ERR(res.label);
3858 goto out;
3859 }
3860 arg.bitmask = nfs4_bitmask(server, res.label);
3861
3862 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3863 if (!status) {
3864 update_changeattr(dir, &res.cinfo);
3865 status = nfs_post_op_update_inode(inode, res.fattr);
3866 if (!status)
3867 nfs_setsecurity(inode, res.fattr, res.label);
3868 }
3869
3870
3871 nfs4_label_free(res.label);
3872
3873 out:
3874 nfs_free_fattr(res.fattr);
3875 return status;
3876 }
3877
3878 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3879 {
3880 struct nfs4_exception exception = { };
3881 int err;
3882 do {
3883 err = nfs4_handle_exception(NFS_SERVER(inode),
3884 _nfs4_proc_link(inode, dir, name),
3885 &exception);
3886 } while (exception.retry);
3887 return err;
3888 }
3889
3890 struct nfs4_createdata {
3891 struct rpc_message msg;
3892 struct nfs4_create_arg arg;
3893 struct nfs4_create_res res;
3894 struct nfs_fh fh;
3895 struct nfs_fattr fattr;
3896 struct nfs4_label *label;
3897 };
3898
3899 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
3900 struct qstr *name, struct iattr *sattr, u32 ftype)
3901 {
3902 struct nfs4_createdata *data;
3903
3904 data = kzalloc(sizeof(*data), GFP_KERNEL);
3905 if (data != NULL) {
3906 struct nfs_server *server = NFS_SERVER(dir);
3907
3908 data->label = nfs4_label_alloc(server, GFP_KERNEL);
3909 if (IS_ERR(data->label))
3910 goto out_free;
3911
3912 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
3913 data->msg.rpc_argp = &data->arg;
3914 data->msg.rpc_resp = &data->res;
3915 data->arg.dir_fh = NFS_FH(dir);
3916 data->arg.server = server;
3917 data->arg.name = name;
3918 data->arg.attrs = sattr;
3919 data->arg.ftype = ftype;
3920 data->arg.bitmask = nfs4_bitmask(server, data->label);
3921 data->res.server = server;
3922 data->res.fh = &data->fh;
3923 data->res.fattr = &data->fattr;
3924 data->res.label = data->label;
3925 nfs_fattr_init(data->res.fattr);
3926 }
3927 return data;
3928 out_free:
3929 kfree(data);
3930 return NULL;
3931 }
3932
3933 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
3934 {
3935 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
3936 &data->arg.seq_args, &data->res.seq_res, 1);
3937 if (status == 0) {
3938 update_changeattr(dir, &data->res.dir_cinfo);
3939 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label);
3940 }
3941 return status;
3942 }
3943
3944 static void nfs4_free_createdata(struct nfs4_createdata *data)
3945 {
3946 nfs4_label_free(data->label);
3947 kfree(data);
3948 }
3949
3950 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3951 struct page *page, unsigned int len, struct iattr *sattr,
3952 struct nfs4_label *label)
3953 {
3954 struct nfs4_createdata *data;
3955 int status = -ENAMETOOLONG;
3956
3957 if (len > NFS4_MAXPATHLEN)
3958 goto out;
3959
3960 status = -ENOMEM;
3961 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
3962 if (data == NULL)
3963 goto out;
3964
3965 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
3966 data->arg.u.symlink.pages = &page;
3967 data->arg.u.symlink.len = len;
3968 data->arg.label = label;
3969
3970 status = nfs4_do_create(dir, dentry, data);
3971
3972 nfs4_free_createdata(data);
3973 out:
3974 return status;
3975 }
3976
3977 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3978 struct page *page, unsigned int len, struct iattr *sattr)
3979 {
3980 struct nfs4_exception exception = { };
3981 struct nfs4_label l, *label = NULL;
3982 int err;
3983
3984 label = nfs4_label_init_security(dir, dentry, sattr, &l);
3985
3986 do {
3987 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label);
3988 trace_nfs4_symlink(dir, &dentry->d_name, err);
3989 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3990 &exception);
3991 } while (exception.retry);
3992
3993 nfs4_label_release_security(label);
3994 return err;
3995 }
3996
3997 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3998 struct iattr *sattr, struct nfs4_label *label)
3999 {
4000 struct nfs4_createdata *data;
4001 int status = -ENOMEM;
4002
4003 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
4004 if (data == NULL)
4005 goto out;
4006
4007 data->arg.label = label;
4008 status = nfs4_do_create(dir, dentry, data);
4009
4010 nfs4_free_createdata(data);
4011 out:
4012 return status;
4013 }
4014
4015 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
4016 struct iattr *sattr)
4017 {
4018 struct nfs4_exception exception = { };
4019 struct nfs4_label l, *label = NULL;
4020 int err;
4021
4022 label = nfs4_label_init_security(dir, dentry, sattr, &l);
4023
4024 sattr->ia_mode &= ~current_umask();
4025 do {
4026 err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
4027 trace_nfs4_mkdir(dir, &dentry->d_name, err);
4028 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4029 &exception);
4030 } while (exception.retry);
4031 nfs4_label_release_security(label);
4032
4033 return err;
4034 }
4035
4036 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
4037 u64 cookie, struct page **pages, unsigned int count, int plus)
4038 {
4039 struct inode *dir = d_inode(dentry);
4040 struct nfs4_readdir_arg args = {
4041 .fh = NFS_FH(dir),
4042 .pages = pages,
4043 .pgbase = 0,
4044 .count = count,
4045 .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask,
4046 .plus = plus,
4047 };
4048 struct nfs4_readdir_res res;
4049 struct rpc_message msg = {
4050 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
4051 .rpc_argp = &args,
4052 .rpc_resp = &res,
4053 .rpc_cred = cred,
4054 };
4055 int status;
4056
4057 dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__,
4058 dentry,
4059 (unsigned long long)cookie);
4060 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
4061 res.pgbase = args.pgbase;
4062 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
4063 if (status >= 0) {
4064 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
4065 status += args.pgbase;
4066 }
4067
4068 nfs_invalidate_atime(dir);
4069
4070 dprintk("%s: returns %d\n", __func__, status);
4071 return status;
4072 }
4073
4074 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
4075 u64 cookie, struct page **pages, unsigned int count, int plus)
4076 {
4077 struct nfs4_exception exception = { };
4078 int err;
4079 do {
4080 err = _nfs4_proc_readdir(dentry, cred, cookie,
4081 pages, count, plus);
4082 trace_nfs4_readdir(d_inode(dentry), err);
4083 err = nfs4_handle_exception(NFS_SERVER(d_inode(dentry)), err,
4084 &exception);
4085 } while (exception.retry);
4086 return err;
4087 }
4088
4089 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
4090 struct iattr *sattr, struct nfs4_label *label, dev_t rdev)
4091 {
4092 struct nfs4_createdata *data;
4093 int mode = sattr->ia_mode;
4094 int status = -ENOMEM;
4095
4096 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
4097 if (data == NULL)
4098 goto out;
4099
4100 if (S_ISFIFO(mode))
4101 data->arg.ftype = NF4FIFO;
4102 else if (S_ISBLK(mode)) {
4103 data->arg.ftype = NF4BLK;
4104 data->arg.u.device.specdata1 = MAJOR(rdev);
4105 data->arg.u.device.specdata2 = MINOR(rdev);
4106 }
4107 else if (S_ISCHR(mode)) {
4108 data->arg.ftype = NF4CHR;
4109 data->arg.u.device.specdata1 = MAJOR(rdev);
4110 data->arg.u.device.specdata2 = MINOR(rdev);
4111 } else if (!S_ISSOCK(mode)) {
4112 status = -EINVAL;
4113 goto out_free;
4114 }
4115
4116 data->arg.label = label;
4117 status = nfs4_do_create(dir, dentry, data);
4118 out_free:
4119 nfs4_free_createdata(data);
4120 out:
4121 return status;
4122 }
4123
4124 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
4125 struct iattr *sattr, dev_t rdev)
4126 {
4127 struct nfs4_exception exception = { };
4128 struct nfs4_label l, *label = NULL;
4129 int err;
4130
4131 label = nfs4_label_init_security(dir, dentry, sattr, &l);
4132
4133 sattr->ia_mode &= ~current_umask();
4134 do {
4135 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
4136 trace_nfs4_mknod(dir, &dentry->d_name, err);
4137 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4138 &exception);
4139 } while (exception.retry);
4140
4141 nfs4_label_release_security(label);
4142
4143 return err;
4144 }
4145
4146 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
4147 struct nfs_fsstat *fsstat)
4148 {
4149 struct nfs4_statfs_arg args = {
4150 .fh = fhandle,
4151 .bitmask = server->attr_bitmask,
4152 };
4153 struct nfs4_statfs_res res = {
4154 .fsstat = fsstat,
4155 };
4156 struct rpc_message msg = {
4157 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
4158 .rpc_argp = &args,
4159 .rpc_resp = &res,
4160 };
4161
4162 nfs_fattr_init(fsstat->fattr);
4163 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4164 }
4165
4166 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
4167 {
4168 struct nfs4_exception exception = { };
4169 int err;
4170 do {
4171 err = nfs4_handle_exception(server,
4172 _nfs4_proc_statfs(server, fhandle, fsstat),
4173 &exception);
4174 } while (exception.retry);
4175 return err;
4176 }
4177
4178 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
4179 struct nfs_fsinfo *fsinfo)
4180 {
4181 struct nfs4_fsinfo_arg args = {
4182 .fh = fhandle,
4183 .bitmask = server->attr_bitmask,
4184 };
4185 struct nfs4_fsinfo_res res = {
4186 .fsinfo = fsinfo,
4187 };
4188 struct rpc_message msg = {
4189 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
4190 .rpc_argp = &args,
4191 .rpc_resp = &res,
4192 };
4193
4194 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4195 }
4196
4197 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4198 {
4199 struct nfs4_exception exception = { };
4200 unsigned long now = jiffies;
4201 int err;
4202
4203 do {
4204 err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
4205 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
4206 if (err == 0) {
4207 struct nfs_client *clp = server->nfs_client;
4208
4209 spin_lock(&clp->cl_lock);
4210 clp->cl_lease_time = fsinfo->lease_time * HZ;
4211 clp->cl_last_renewal = now;
4212 spin_unlock(&clp->cl_lock);
4213 break;
4214 }
4215 err = nfs4_handle_exception(server, err, &exception);
4216 } while (exception.retry);
4217 return err;
4218 }
4219
4220 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4221 {
4222 int error;
4223
4224 nfs_fattr_init(fsinfo->fattr);
4225 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
4226 if (error == 0) {
4227 /* The pNFS block layout driver checks this! */
4228 server->pnfs_blksize = fsinfo->blksize;
4229 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype);
4230 }
4231
4232 return error;
4233 }
4234
4235 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4236 struct nfs_pathconf *pathconf)
4237 {
4238 struct nfs4_pathconf_arg args = {
4239 .fh = fhandle,
4240 .bitmask = server->attr_bitmask,
4241 };
4242 struct nfs4_pathconf_res res = {
4243 .pathconf = pathconf,
4244 };
4245 struct rpc_message msg = {
4246 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
4247 .rpc_argp = &args,
4248 .rpc_resp = &res,
4249 };
4250
4251 /* None of the pathconf attributes are mandatory to implement */
4252 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
4253 memset(pathconf, 0, sizeof(*pathconf));
4254 return 0;
4255 }
4256
4257 nfs_fattr_init(pathconf->fattr);
4258 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4259 }
4260
4261 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4262 struct nfs_pathconf *pathconf)
4263 {
4264 struct nfs4_exception exception = { };
4265 int err;
4266
4267 do {
4268 err = nfs4_handle_exception(server,
4269 _nfs4_proc_pathconf(server, fhandle, pathconf),
4270 &exception);
4271 } while (exception.retry);
4272 return err;
4273 }
4274
4275 int nfs4_set_rw_stateid(nfs4_stateid *stateid,
4276 const struct nfs_open_context *ctx,
4277 const struct nfs_lock_context *l_ctx,
4278 fmode_t fmode)
4279 {
4280 const struct nfs_lockowner *lockowner = NULL;
4281
4282 if (l_ctx != NULL)
4283 lockowner = &l_ctx->lockowner;
4284 return nfs4_select_rw_stateid(stateid, ctx->state, fmode, lockowner);
4285 }
4286 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
4287
4288 static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
4289 const struct nfs_open_context *ctx,
4290 const struct nfs_lock_context *l_ctx,
4291 fmode_t fmode)
4292 {
4293 nfs4_stateid current_stateid;
4294
4295 /* If the current stateid represents a lost lock, then exit */
4296 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
4297 return true;
4298 return nfs4_stateid_match(stateid, &current_stateid);
4299 }
4300
4301 static bool nfs4_error_stateid_expired(int err)
4302 {
4303 switch (err) {
4304 case -NFS4ERR_DELEG_REVOKED:
4305 case -NFS4ERR_ADMIN_REVOKED:
4306 case -NFS4ERR_BAD_STATEID:
4307 case -NFS4ERR_STALE_STATEID:
4308 case -NFS4ERR_OLD_STATEID:
4309 case -NFS4ERR_OPENMODE:
4310 case -NFS4ERR_EXPIRED:
4311 return true;
4312 }
4313 return false;
4314 }
4315
4316 void __nfs4_read_done_cb(struct nfs_pgio_header *hdr)
4317 {
4318 nfs_invalidate_atime(hdr->inode);
4319 }
4320
4321 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
4322 {
4323 struct nfs_server *server = NFS_SERVER(hdr->inode);
4324
4325 trace_nfs4_read(hdr, task->tk_status);
4326 if (nfs4_async_handle_error(task, server,
4327 hdr->args.context->state,
4328 NULL) == -EAGAIN) {
4329 rpc_restart_call_prepare(task);
4330 return -EAGAIN;
4331 }
4332
4333 __nfs4_read_done_cb(hdr);
4334 if (task->tk_status > 0)
4335 renew_lease(server, hdr->timestamp);
4336 return 0;
4337 }
4338
4339 static bool nfs4_read_stateid_changed(struct rpc_task *task,
4340 struct nfs_pgio_args *args)
4341 {
4342
4343 if (!nfs4_error_stateid_expired(task->tk_status) ||
4344 nfs4_stateid_is_current(&args->stateid,
4345 args->context,
4346 args->lock_context,
4347 FMODE_READ))
4348 return false;
4349 rpc_restart_call_prepare(task);
4350 return true;
4351 }
4352
4353 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4354 {
4355
4356 dprintk("--> %s\n", __func__);
4357
4358 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4359 return -EAGAIN;
4360 if (nfs4_read_stateid_changed(task, &hdr->args))
4361 return -EAGAIN;
4362 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4363 nfs4_read_done_cb(task, hdr);
4364 }
4365
4366 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
4367 struct rpc_message *msg)
4368 {
4369 hdr->timestamp = jiffies;
4370 hdr->pgio_done_cb = nfs4_read_done_cb;
4371 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
4372 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0);
4373 }
4374
4375 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
4376 struct nfs_pgio_header *hdr)
4377 {
4378 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode),
4379 &hdr->args.seq_args,
4380 &hdr->res.seq_res,
4381 task))
4382 return 0;
4383 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
4384 hdr->args.lock_context,
4385 hdr->rw_ops->rw_mode) == -EIO)
4386 return -EIO;
4387 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags)))
4388 return -EIO;
4389 return 0;
4390 }
4391
4392 static int nfs4_write_done_cb(struct rpc_task *task,
4393 struct nfs_pgio_header *hdr)
4394 {
4395 struct inode *inode = hdr->inode;
4396
4397 trace_nfs4_write(hdr, task->tk_status);
4398 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
4399 hdr->args.context->state,
4400 NULL) == -EAGAIN) {
4401 rpc_restart_call_prepare(task);
4402 return -EAGAIN;
4403 }
4404 if (task->tk_status >= 0) {
4405 renew_lease(NFS_SERVER(inode), hdr->timestamp);
4406 nfs_writeback_update_inode(hdr);
4407 }
4408 return 0;
4409 }
4410
4411 static bool nfs4_write_stateid_changed(struct rpc_task *task,
4412 struct nfs_pgio_args *args)
4413 {
4414
4415 if (!nfs4_error_stateid_expired(task->tk_status) ||
4416 nfs4_stateid_is_current(&args->stateid,
4417 args->context,
4418 args->lock_context,
4419 FMODE_WRITE))
4420 return false;
4421 rpc_restart_call_prepare(task);
4422 return true;
4423 }
4424
4425 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4426 {
4427 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4428 return -EAGAIN;
4429 if (nfs4_write_stateid_changed(task, &hdr->args))
4430 return -EAGAIN;
4431 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4432 nfs4_write_done_cb(task, hdr);
4433 }
4434
4435 static
4436 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
4437 {
4438 /* Don't request attributes for pNFS or O_DIRECT writes */
4439 if (hdr->ds_clp != NULL || hdr->dreq != NULL)
4440 return false;
4441 /* Otherwise, request attributes if and only if we don't hold
4442 * a delegation
4443 */
4444 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
4445 }
4446
4447 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
4448 struct rpc_message *msg)
4449 {
4450 struct nfs_server *server = NFS_SERVER(hdr->inode);
4451
4452 if (!nfs4_write_need_cache_consistency_data(hdr)) {
4453 hdr->args.bitmask = NULL;
4454 hdr->res.fattr = NULL;
4455 } else
4456 hdr->args.bitmask = server->cache_consistency_bitmask;
4457
4458 if (!hdr->pgio_done_cb)
4459 hdr->pgio_done_cb = nfs4_write_done_cb;
4460 hdr->res.server = server;
4461 hdr->timestamp = jiffies;
4462
4463 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
4464 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1);
4465 }
4466
4467 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
4468 {
4469 nfs4_setup_sequence(NFS_SERVER(data->inode),
4470 &data->args.seq_args,
4471 &data->res.seq_res,
4472 task);
4473 }
4474
4475 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
4476 {
4477 struct inode *inode = data->inode;
4478
4479 trace_nfs4_commit(data, task->tk_status);
4480 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
4481 NULL, NULL) == -EAGAIN) {
4482 rpc_restart_call_prepare(task);
4483 return -EAGAIN;
4484 }
4485 return 0;
4486 }
4487
4488 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
4489 {
4490 if (!nfs4_sequence_done(task, &data->res.seq_res))
4491 return -EAGAIN;
4492 return data->commit_done_cb(task, data);
4493 }
4494
4495 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
4496 {
4497 struct nfs_server *server = NFS_SERVER(data->inode);
4498
4499 if (data->commit_done_cb == NULL)
4500 data->commit_done_cb = nfs4_commit_done_cb;
4501 data->res.server = server;
4502 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
4503 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
4504 }
4505
4506 struct nfs4_renewdata {
4507 struct nfs_client *client;
4508 unsigned long timestamp;
4509 };
4510
4511 /*
4512 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
4513 * standalone procedure for queueing an asynchronous RENEW.
4514 */
4515 static void nfs4_renew_release(void *calldata)
4516 {
4517 struct nfs4_renewdata *data = calldata;
4518 struct nfs_client *clp = data->client;
4519
4520 if (atomic_read(&clp->cl_count) > 1)
4521 nfs4_schedule_state_renewal(clp);
4522 nfs_put_client(clp);
4523 kfree(data);
4524 }
4525
4526 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
4527 {
4528 struct nfs4_renewdata *data = calldata;
4529 struct nfs_client *clp = data->client;
4530 unsigned long timestamp = data->timestamp;
4531
4532 trace_nfs4_renew_async(clp, task->tk_status);
4533 switch (task->tk_status) {
4534 case 0:
4535 break;
4536 case -NFS4ERR_LEASE_MOVED:
4537 nfs4_schedule_lease_moved_recovery(clp);
4538 break;
4539 default:
4540 /* Unless we're shutting down, schedule state recovery! */
4541 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
4542 return;
4543 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
4544 nfs4_schedule_lease_recovery(clp);
4545 return;
4546 }
4547 nfs4_schedule_path_down_recovery(clp);
4548 }
4549 do_renew_lease(clp, timestamp);
4550 }
4551
4552 static const struct rpc_call_ops nfs4_renew_ops = {
4553 .rpc_call_done = nfs4_renew_done,
4554 .rpc_release = nfs4_renew_release,
4555 };
4556
4557 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
4558 {
4559 struct rpc_message msg = {
4560 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
4561 .rpc_argp = clp,
4562 .rpc_cred = cred,
4563 };
4564 struct nfs4_renewdata *data;
4565
4566 if (renew_flags == 0)
4567 return 0;
4568 if (!atomic_inc_not_zero(&clp->cl_count))
4569 return -EIO;
4570 data = kmalloc(sizeof(*data), GFP_NOFS);
4571 if (data == NULL)
4572 return -ENOMEM;
4573 data->client = clp;
4574 data->timestamp = jiffies;
4575 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
4576 &nfs4_renew_ops, data);
4577 }
4578
4579 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
4580 {
4581 struct rpc_message msg = {
4582 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
4583 .rpc_argp = clp,
4584 .rpc_cred = cred,
4585 };
4586 unsigned long now = jiffies;
4587 int status;
4588
4589 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4590 if (status < 0)
4591 return status;
4592 do_renew_lease(clp, now);
4593 return 0;
4594 }
4595
4596 static inline int nfs4_server_supports_acls(struct nfs_server *server)
4597 {
4598 return server->caps & NFS_CAP_ACLS;
4599 }
4600
4601 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
4602 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
4603 * the stack.
4604 */
4605 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
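/*
 * Worked example, assuming the usual values: with XATTR_SIZE_MAX == 65536
 * and PAGE_SIZE == 4096, NFS4ACL_MAXPAGES is 16, so the on-stack arrays of
 * page pointers below take 16 * sizeof(struct page *) == 128 bytes on a
 * 64-bit kernel.
 */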
4606
4607 static int buf_to_pages_noslab(const void *buf, size_t buflen,
4608 struct page **pages)
4609 {
4610 struct page *newpage, **spages;
4611 int rc = 0;
4612 size_t len;
4613 spages = pages;
4614
4615 do {
4616 len = min_t(size_t, PAGE_SIZE, buflen);
4617 newpage = alloc_page(GFP_KERNEL);
4618
4619 if (newpage == NULL)
4620 goto unwind;
4621 memcpy(page_address(newpage), buf, len);
4622 buf += len;
4623 buflen -= len;
4624 *pages++ = newpage;
4625 rc++;
4626 } while (buflen != 0);
4627
4628 return rc;
4629
4630 unwind:
4631 for (; rc > 0; rc--)
4632 __free_page(spages[rc-1]);
4633 return -ENOMEM;
4634 }
4635
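/*
 * In-memory ACL cache attached to the nfs_inode.  When 'cached' is set,
 * 'data' holds the complete ACL of 'len' bytes; when it is clear, only the
 * length is known (the ACL did not fit in a single page) and 'data' is
 * unused.
 */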
4636 struct nfs4_cached_acl {
4637 int cached;
4638 size_t len;
4639 char data[0];
4640 };
4641
4642 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
4643 {
4644 struct nfs_inode *nfsi = NFS_I(inode);
4645
4646 spin_lock(&inode->i_lock);
4647 kfree(nfsi->nfs4_acl);
4648 nfsi->nfs4_acl = acl;
4649 spin_unlock(&inode->i_lock);
4650 }
4651
4652 static void nfs4_zap_acl_attr(struct inode *inode)
4653 {
4654 nfs4_set_cached_acl(inode, NULL);
4655 }
4656
4657 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
4658 {
4659 struct nfs_inode *nfsi = NFS_I(inode);
4660 struct nfs4_cached_acl *acl;
4661 int ret = -ENOENT;
4662
4663 spin_lock(&inode->i_lock);
4664 acl = nfsi->nfs4_acl;
4665 if (acl == NULL)
4666 goto out;
4667 if (buf == NULL) /* user is just asking for length */
4668 goto out_len;
4669 if (acl->cached == 0)
4670 goto out;
4671 ret = -ERANGE; /* see getxattr(2) man page */
4672 if (acl->len > buflen)
4673 goto out;
4674 memcpy(buf, acl->data, acl->len);
4675 out_len:
4676 ret = acl->len;
4677 out:
4678 spin_unlock(&inode->i_lock);
4679 return ret;
4680 }
4681
4682 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
4683 {
4684 struct nfs4_cached_acl *acl;
4685 size_t buflen = sizeof(*acl) + acl_len;
4686
4687 if (buflen <= PAGE_SIZE) {
4688 acl = kmalloc(buflen, GFP_KERNEL);
4689 if (acl == NULL)
4690 goto out;
4691 acl->cached = 1;
4692 _copy_from_pages(acl->data, pages, pgbase, acl_len);
4693 } else {
4694 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
4695 if (acl == NULL)
4696 goto out;
4697 acl->cached = 0;
4698 }
4699 acl->len = acl_len;
4700 out:
4701 nfs4_set_cached_acl(inode, acl);
4702 }
4703
4704 /*
4705 * The getxattr API returns the required buffer length when called with a
4706 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
4707 * the required buf. On a NULL buf, we send a page of data to the server
4708 * guessing that the ACL request can be serviced by a page. If so, we cache
4709 * up to the page of ACL data, and the second call to getxattr is serviced by
4710 * the cache. If not, we throw away the page and cache only the required
4711 * length. The next getxattr call will then produce another round trip to
4712 * the server, this time with the input buf of the required size.
4713 */
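/*
 * From userspace the exchange looks roughly like this (a sketch of the
 * usual two-call getxattr(2) pattern, not kernel code):
 *
 *	len = getxattr(path, "system.nfs4_acl", NULL, 0);	<- length probe
 *	buf = malloc(len);
 *	getxattr(path, "system.nfs4_acl", buf, len);		<- actual fetch
 *
 * If the ACL fit in one page, the second call is answered from the cache
 * filled by the first; otherwise only the length was cached and the second
 * call causes one more GETACL round trip to the server.
 */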
4714 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4715 {
4716 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
4717 struct nfs_getaclargs args = {
4718 .fh = NFS_FH(inode),
4719 .acl_pages = pages,
4720 .acl_len = buflen,
4721 };
4722 struct nfs_getaclres res = {
4723 .acl_len = buflen,
4724 };
4725 struct rpc_message msg = {
4726 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
4727 .rpc_argp = &args,
4728 .rpc_resp = &res,
4729 };
4730 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4731 int ret = -ENOMEM, i;
4732
4733 /* As long as we're doing a round trip to the server anyway,
4734 * let's be prepared for a page of acl data. */
4735 if (npages == 0)
4736 npages = 1;
4737 if (npages > ARRAY_SIZE(pages))
4738 return -ERANGE;
4739
4740 for (i = 0; i < npages; i++) {
4741 pages[i] = alloc_page(GFP_KERNEL);
4742 if (!pages[i])
4743 goto out_free;
4744 }
4745
4746 /* for decoding across pages */
4747 res.acl_scratch = alloc_page(GFP_KERNEL);
4748 if (!res.acl_scratch)
4749 goto out_free;
4750
4751 args.acl_len = npages * PAGE_SIZE;
4752
4753 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
4754 __func__, buf, buflen, npages, args.acl_len);
4755 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
4756 &msg, &args.seq_args, &res.seq_res, 0);
4757 if (ret)
4758 goto out_free;
4759
4760 /* Handle the case where the passed-in buffer is too short */
4761 if (res.acl_flags & NFS4_ACL_TRUNC) {
4762 /* Did the user only issue a request for the acl length? */
4763 if (buf == NULL)
4764 goto out_ok;
4765 ret = -ERANGE;
4766 goto out_free;
4767 }
4768 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
4769 if (buf) {
4770 if (res.acl_len > buflen) {
4771 ret = -ERANGE;
4772 goto out_free;
4773 }
4774 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
4775 }
4776 out_ok:
4777 ret = res.acl_len;
4778 out_free:
4779 for (i = 0; i < npages; i++)
4780 if (pages[i])
4781 __free_page(pages[i]);
4782 if (res.acl_scratch)
4783 __free_page(res.acl_scratch);
4784 return ret;
4785 }
4786
4787 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4788 {
4789 struct nfs4_exception exception = { };
4790 ssize_t ret;
4791 do {
4792 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
4793 trace_nfs4_get_acl(inode, ret);
4794 if (ret >= 0)
4795 break;
4796 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
4797 } while (exception.retry);
4798 return ret;
4799 }
4800
4801 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
4802 {
4803 struct nfs_server *server = NFS_SERVER(inode);
4804 int ret;
4805
4806 if (!nfs4_server_supports_acls(server))
4807 return -EOPNOTSUPP;
4808 ret = nfs_revalidate_inode(server, inode);
4809 if (ret < 0)
4810 return ret;
4811 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
4812 nfs_zap_acl_cache(inode);
4813 ret = nfs4_read_cached_acl(inode, buf, buflen);
4814 if (ret != -ENOENT)
4815 /* -ENOENT is returned if there is no ACL, or if there is an ACL
4816 * but no cached ACL data (only the ACL length is cached) */
4817 return ret;
4818 return nfs4_get_acl_uncached(inode, buf, buflen);
4819 }
4820
4821 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4822 {
4823 struct nfs_server *server = NFS_SERVER(inode);
4824 struct page *pages[NFS4ACL_MAXPAGES];
4825 struct nfs_setaclargs arg = {
4826 .fh = NFS_FH(inode),
4827 .acl_pages = pages,
4828 .acl_len = buflen,
4829 };
4830 struct nfs_setaclres res;
4831 struct rpc_message msg = {
4832 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
4833 .rpc_argp = &arg,
4834 .rpc_resp = &res,
4835 };
4836 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4837 int ret, i;
4838
4839 if (!nfs4_server_supports_acls(server))
4840 return -EOPNOTSUPP;
4841 if (npages > ARRAY_SIZE(pages))
4842 return -ERANGE;
4843 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages);
4844 if (i < 0)
4845 return i;
4846 nfs4_inode_return_delegation(inode);
4847 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4848
4849 /*
4850 * Free each page after transmission, so the only reference left is
4851 * the one held by the network stack
4852 */
4853 for (; i > 0; i--)
4854 put_page(pages[i-1]);
4855
4856 /*
4857 * An ACL update can result in an inode attribute update,
4858 * so mark the attribute cache invalid.
4859 */
4860 spin_lock(&inode->i_lock);
4861 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
4862 spin_unlock(&inode->i_lock);
4863 nfs_access_zap_cache(inode);
4864 nfs_zap_acl_cache(inode);
4865 return ret;
4866 }
4867
4868 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4869 {
4870 struct nfs4_exception exception = { };
4871 int err;
4872 do {
4873 err = __nfs4_proc_set_acl(inode, buf, buflen);
4874 trace_nfs4_set_acl(inode, err);
4875 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4876 &exception);
4877 } while (exception.retry);
4878 return err;
4879 }
4880
4881 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
4882 static int _nfs4_get_security_label(struct inode *inode, void *buf,
4883 size_t buflen)
4884 {
4885 struct nfs_server *server = NFS_SERVER(inode);
4886 struct nfs_fattr fattr;
4887 struct nfs4_label label = {0, 0, buflen, buf};
4888
4889 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
4890 struct nfs4_getattr_arg arg = {
4891 .fh = NFS_FH(inode),
4892 .bitmask = bitmask,
4893 };
4894 struct nfs4_getattr_res res = {
4895 .fattr = &fattr,
4896 .label = &label,
4897 .server = server,
4898 };
4899 struct rpc_message msg = {
4900 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
4901 .rpc_argp = &arg,
4902 .rpc_resp = &res,
4903 };
4904 int ret;
4905
4906 nfs_fattr_init(&fattr);
4907
4908 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
4909 if (ret)
4910 return ret;
4911 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
4912 return -ENOENT;
4913 if (buflen < label.len)
4914 return -ERANGE;
4915 return 0;
4916 }
4917
4918 static int nfs4_get_security_label(struct inode *inode, void *buf,
4919 size_t buflen)
4920 {
4921 struct nfs4_exception exception = { };
4922 int err;
4923
4924 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
4925 return -EOPNOTSUPP;
4926
4927 do {
4928 err = _nfs4_get_security_label(inode, buf, buflen);
4929 trace_nfs4_get_security_label(inode, err);
4930 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4931 &exception);
4932 } while (exception.retry);
4933 return err;
4934 }
4935
4936 static int _nfs4_do_set_security_label(struct inode *inode,
4937 struct nfs4_label *ilabel,
4938 struct nfs_fattr *fattr,
4939 struct nfs4_label *olabel)
4940 {
4941
4942 struct iattr sattr = {0};
4943 struct nfs_server *server = NFS_SERVER(inode);
4944 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
4945 struct nfs_setattrargs arg = {
4946 .fh = NFS_FH(inode),
4947 .iap = &sattr,
4948 .server = server,
4949 .bitmask = bitmask,
4950 .label = ilabel,
4951 };
4952 struct nfs_setattrres res = {
4953 .fattr = fattr,
4954 .label = olabel,
4955 .server = server,
4956 };
4957 struct rpc_message msg = {
4958 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
4959 .rpc_argp = &arg,
4960 .rpc_resp = &res,
4961 };
4962 int status;
4963
4964 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
4965
4966 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4967 if (status)
4968 dprintk("%s failed: %d\n", __func__, status);
4969
4970 return status;
4971 }
4972
4973 static int nfs4_do_set_security_label(struct inode *inode,
4974 struct nfs4_label *ilabel,
4975 struct nfs_fattr *fattr,
4976 struct nfs4_label *olabel)
4977 {
4978 struct nfs4_exception exception = { };
4979 int err;
4980
4981 do {
4982 err = _nfs4_do_set_security_label(inode, ilabel,
4983 fattr, olabel);
4984 trace_nfs4_set_security_label(inode, err);
4985 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4986 &exception);
4987 } while (exception.retry);
4988 return err;
4989 }
4990
4991 static int
4992 nfs4_set_security_label(struct dentry *dentry, const void *buf, size_t buflen)
4993 {
4994 struct nfs4_label ilabel, *olabel = NULL;
4995 struct nfs_fattr fattr;
4996 struct rpc_cred *cred;
4997 struct inode *inode = d_inode(dentry);
4998 int status;
4999
5000 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
5001 return -EOPNOTSUPP;
5002
5003 nfs_fattr_init(&fattr);
5004
5005 ilabel.pi = 0;
5006 ilabel.lfs = 0;
5007 ilabel.label = (char *)buf;
5008 ilabel.len = buflen;
5009
5010 cred = rpc_lookup_cred();
5011 if (IS_ERR(cred))
5012 return PTR_ERR(cred);
5013
5014 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
5015 if (IS_ERR(olabel)) {
5016 status = PTR_ERR(olabel);
5017 goto out;
5018 }
5019
5020 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel);
5021 if (status == 0)
5022 nfs_setsecurity(inode, &fattr, olabel);
5023
5024 nfs4_label_free(olabel);
5025 out:
5026 put_rpccred(cred);
5027 return status;
5028 }
5029 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */
5030
5031
5032 static void nfs4_init_boot_verifier(const struct nfs_client *clp,
5033 nfs4_verifier *bootverf)
5034 {
5035 __be32 verf[2];
5036
5037 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
5038 /* An impossible timestamp guarantees this value
5039 * will never match a generated boot time. */
5040 verf[0] = 0;
5041 verf[1] = cpu_to_be32(NSEC_PER_SEC + 1);
5042 } else {
5043 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
5044 verf[0] = cpu_to_be32(nn->boot_time.tv_sec);
5045 verf[1] = cpu_to_be32(nn->boot_time.tv_nsec);
5046 }
5047 memcpy(bootverf->data, verf, sizeof(bootverf->data));
5048 }
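/*
 * Example of why the purge-state verifier above cannot collide with a real
 * one: a generated verifier packs the recorded boot time as
 * { tv_sec, tv_nsec }, and a valid tv_nsec is always in [0, NSEC_PER_SEC),
 * so the pair { 0, NSEC_PER_SEC + 1 } can never be produced by the normal
 * path.
 */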
5049
5050 static int
5051 nfs4_init_nonuniform_client_string(struct nfs_client *clp)
5052 {
5053 size_t len;
5054 char *str;
5055
5056 if (clp->cl_owner_id != NULL)
5057 return 0;
5058
5059 rcu_read_lock();
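/*
 * Length budget for the "Linux NFSv4.0 <ipaddr>/<address> <proto>" string
 * built below: 14 is strlen("Linux NFSv4.0 "), the two lone 1s in the
 * middle cover the '/' and ' ' separators, and the final 1 is the
 * terminating NUL.
 */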
5060 len = 14 + strlen(clp->cl_ipaddr) + 1 +
5061 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
5062 1 +
5063 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)) +
5064 1;
5065 rcu_read_unlock();
5066
5067 if (len > NFS4_OPAQUE_LIMIT + 1)
5068 return -EINVAL;
5069
5070 /*
5071 * Since this string is allocated at mount time, and held until the
5072 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5073 * about a memory-reclaim deadlock.
5074 */
5075 str = kmalloc(len, GFP_KERNEL);
5076 if (!str)
5077 return -ENOMEM;
5078
5079 rcu_read_lock();
5080 scnprintf(str, len, "Linux NFSv4.0 %s/%s %s",
5081 clp->cl_ipaddr,
5082 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
5083 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO));
5084 rcu_read_unlock();
5085
5086 clp->cl_owner_id = str;
5087 return 0;
5088 }
5089
5090 static int
5091 nfs4_init_uniquifier_client_string(struct nfs_client *clp)
5092 {
5093 size_t len;
5094 char *str;
5095
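/*
 * Length budget for the "Linux NFSv<vers>.<minor> <uniquifier>/<nodename>"
 * string built below: 10 is strlen("Linux NFSv"), each %u can be at most
 * 10 decimal digits, the lone 1s cover the '.', ' ' and '/' separators,
 * and the final 1 is the terminating NUL.
 */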
5096 len = 10 + 10 + 1 + 10 + 1 +
5097 strlen(nfs4_client_id_uniquifier) + 1 +
5098 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5099
5100 if (len > NFS4_OPAQUE_LIMIT + 1)
5101 return -EINVAL;
5102
5103 /*
5104 * Since this string is allocated at mount time, and held until the
5105 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5106 * about a memory-reclaim deadlock.
5107 */
5108 str = kmalloc(len, GFP_KERNEL);
5109 if (!str)
5110 return -ENOMEM;
5111
5112 scnprintf(str, len, "Linux NFSv%u.%u %s/%s",
5113 clp->rpc_ops->version, clp->cl_minorversion,
5114 nfs4_client_id_uniquifier,
5115 clp->cl_rpcclient->cl_nodename);
5116 clp->cl_owner_id = str;
5117 return 0;
5118 }
5119
5120 static int
5121 nfs4_init_uniform_client_string(struct nfs_client *clp)
5122 {
5123 size_t len;
5124 char *str;
5125
5126 if (clp->cl_owner_id != NULL)
5127 return 0;
5128
5129 if (nfs4_client_id_uniquifier[0] != '\0')
5130 return nfs4_init_uniquifier_client_string(clp);
5131
5132 len = 10 + 10 + 1 + 10 + 1 +
5133 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5134
5135 if (len > NFS4_OPAQUE_LIMIT + 1)
5136 return -EINVAL;
5137
5138 /*
5139 * Since this string is allocated at mount time, and held until the
5140 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5141 * about a memory-reclaim deadlock.
5142 */
5143 str = kmalloc(len, GFP_KERNEL);
5144 if (!str)
5145 return -ENOMEM;
5146
5147 scnprintf(str, len, "Linux NFSv%u.%u %s",
5148 clp->rpc_ops->version, clp->cl_minorversion,
5149 clp->cl_rpcclient->cl_nodename);
5150 clp->cl_owner_id = str;
5151 return 0;
5152 }
5153
5154 /*
5155 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
5156 * services. Advertise one based on the address family of the
5157 * clientaddr.
5158 */
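/*
 * For example: a clientaddr of "192.0.2.1" contains no ':' and yields
 * "tcp", while "2001:db8::1" yields "tcp6".
 */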
5159 static unsigned int
5160 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
5161 {
5162 if (strchr(clp->cl_ipaddr, ':') != NULL)
5163 return scnprintf(buf, len, "tcp6");
5164 else
5165 return scnprintf(buf, len, "tcp");
5166 }
5167
5168 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
5169 {
5170 struct nfs4_setclientid *sc = calldata;
5171
5172 if (task->tk_status == 0)
5173 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
5174 }
5175
5176 static const struct rpc_call_ops nfs4_setclientid_ops = {
5177 .rpc_call_done = nfs4_setclientid_done,
5178 };
5179
5180 /**
5181 * nfs4_proc_setclientid - Negotiate client ID
5182 * @clp: state data structure
5183 * @program: RPC program for NFSv4 callback service
5184 * @port: IP port number for NFS4 callback service
5185 * @cred: RPC credential to use for this call
5186 * @res: where to place the result
5187 *
5188 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5189 */
5190 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
5191 unsigned short port, struct rpc_cred *cred,
5192 struct nfs4_setclientid_res *res)
5193 {
5194 nfs4_verifier sc_verifier;
5195 struct nfs4_setclientid setclientid = {
5196 .sc_verifier = &sc_verifier,
5197 .sc_prog = program,
5198 .sc_clnt = clp,
5199 };
5200 struct rpc_message msg = {
5201 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
5202 .rpc_argp = &setclientid,
5203 .rpc_resp = res,
5204 .rpc_cred = cred,
5205 };
5206 struct rpc_task *task;
5207 struct rpc_task_setup task_setup_data = {
5208 .rpc_client = clp->cl_rpcclient,
5209 .rpc_message = &msg,
5210 .callback_ops = &nfs4_setclientid_ops,
5211 .callback_data = &setclientid,
5212 .flags = RPC_TASK_TIMEOUT,
5213 };
5214 int status;
5215
5216 /* nfs_client_id4 */
5217 nfs4_init_boot_verifier(clp, &sc_verifier);
5218
5219 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
5220 status = nfs4_init_uniform_client_string(clp);
5221 else
5222 status = nfs4_init_nonuniform_client_string(clp);
5223
5224 if (status)
5225 goto out;
5226
5227 /* cb_client4 */
5228 setclientid.sc_netid_len =
5229 nfs4_init_callback_netid(clp,
5230 setclientid.sc_netid,
5231 sizeof(setclientid.sc_netid));
5232 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
5233 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
5234 clp->cl_ipaddr, port >> 8, port & 255);
5235
5236 dprintk("NFS call setclientid auth=%s, '%s'\n",
5237 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5238 clp->cl_owner_id);
5239 task = rpc_run_task(&task_setup_data);
5240 if (IS_ERR(task)) {
5241 status = PTR_ERR(task);
5242 goto out;
5243 }
5244 status = task->tk_status;
5245 if (setclientid.sc_cred) {
5246 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
5247 put_rpccred(setclientid.sc_cred);
5248 }
5249 rpc_put_task(task);
5250 out:
5251 trace_nfs4_setclientid(clp, status);
5252 dprintk("NFS reply setclientid: %d\n", status);
5253 return status;
5254 }
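/*
 * The NFSv4.0 lease is established in two steps; a caller (in practice the
 * state manager) uses this routine together with
 * nfs4_proc_setclientid_confirm() below, roughly as in this sketch:
 *
 *	struct nfs4_setclientid_res clid = { };
 *
 *	status = nfs4_proc_setclientid(clp, program, port, cred, &clid);
 *	if (status == 0)
 *		status = nfs4_proc_setclientid_confirm(clp, &clid, cred);
 */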
5255
5256 /**
5257 * nfs4_proc_setclientid_confirm - Confirm client ID
5258 * @clp: state data structure
5259 * @res: result of a previous SETCLIENTID
5260 * @cred: RPC credential to use for this call
5261 *
5262 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5263 */
5264 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
5265 struct nfs4_setclientid_res *arg,
5266 struct rpc_cred *cred)
5267 {
5268 struct rpc_message msg = {
5269 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
5270 .rpc_argp = arg,
5271 .rpc_cred = cred,
5272 };
5273 int status;
5274
5275 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
5276 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5277 clp->cl_clientid);
5278 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5279 trace_nfs4_setclientid_confirm(clp, status);
5280 dprintk("NFS reply setclientid_confirm: %d\n", status);
5281 return status;
5282 }
5283
5284 struct nfs4_delegreturndata {
5285 struct nfs4_delegreturnargs args;
5286 struct nfs4_delegreturnres res;
5287 struct nfs_fh fh;
5288 nfs4_stateid stateid;
5289 unsigned long timestamp;
5290 struct nfs_fattr fattr;
5291 int rpc_status;
5292 struct inode *inode;
5293 bool roc;
5294 u32 roc_barrier;
5295 };
5296
5297 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
5298 {
5299 struct nfs4_delegreturndata *data = calldata;
5300
5301 if (!nfs4_sequence_done(task, &data->res.seq_res))
5302 return;
5303
5304 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
5305 switch (task->tk_status) {
5306 case 0:
5307 renew_lease(data->res.server, data->timestamp);
5308 case -NFS4ERR_ADMIN_REVOKED:
5309 case -NFS4ERR_DELEG_REVOKED:
5310 case -NFS4ERR_BAD_STATEID:
5311 case -NFS4ERR_OLD_STATEID:
5312 case -NFS4ERR_STALE_STATEID:
5313 case -NFS4ERR_EXPIRED:
5314 task->tk_status = 0;
5315 if (data->roc)
5316 pnfs_roc_set_barrier(data->inode, data->roc_barrier);
5317 break;
5318 default:
5319 if (nfs4_async_handle_error(task, data->res.server,
5320 NULL, NULL) == -EAGAIN) {
5321 rpc_restart_call_prepare(task);
5322 return;
5323 }
5324 }
5325 data->rpc_status = task->tk_status;
5326 }
5327
5328 static void nfs4_delegreturn_release(void *calldata)
5329 {
5330 struct nfs4_delegreturndata *data = calldata;
5331 struct inode *inode = data->inode;
5332
5333 if (inode) {
5334 if (data->roc)
5335 pnfs_roc_release(inode);
5336 nfs_iput_and_deactive(inode);
5337 }
5338 kfree(calldata);
5339 }
5340
5341 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
5342 {
5343 struct nfs4_delegreturndata *d_data;
5344
5345 d_data = (struct nfs4_delegreturndata *)data;
5346
5347 if (nfs4_wait_on_layoutreturn(d_data->inode, task))
5348 return;
5349
5350 if (d_data->roc)
5351 pnfs_roc_get_barrier(d_data->inode, &d_data->roc_barrier);
5352
5353 nfs4_setup_sequence(d_data->res.server,
5354 &d_data->args.seq_args,
5355 &d_data->res.seq_res,
5356 task);
5357 }
5358
5359 static const struct rpc_call_ops nfs4_delegreturn_ops = {
5360 .rpc_call_prepare = nfs4_delegreturn_prepare,
5361 .rpc_call_done = nfs4_delegreturn_done,
5362 .rpc_release = nfs4_delegreturn_release,
5363 };
5364
5365 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5366 {
5367 struct nfs4_delegreturndata *data;
5368 struct nfs_server *server = NFS_SERVER(inode);
5369 struct rpc_task *task;
5370 struct rpc_message msg = {
5371 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
5372 .rpc_cred = cred,
5373 };
5374 struct rpc_task_setup task_setup_data = {
5375 .rpc_client = server->client,
5376 .rpc_message = &msg,
5377 .callback_ops = &nfs4_delegreturn_ops,
5378 .flags = RPC_TASK_ASYNC,
5379 };
5380 int status = 0;
5381
5382 data = kzalloc(sizeof(*data), GFP_NOFS);
5383 if (data == NULL)
5384 return -ENOMEM;
5385 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
5386 data->args.fhandle = &data->fh;
5387 data->args.stateid = &data->stateid;
5388 data->args.bitmask = server->cache_consistency_bitmask;
5389 nfs_copy_fh(&data->fh, NFS_FH(inode));
5390 nfs4_stateid_copy(&data->stateid, stateid);
5391 data->res.fattr = &data->fattr;
5392 data->res.server = server;
5393 nfs_fattr_init(data->res.fattr);
5394 data->timestamp = jiffies;
5395 data->rpc_status = 0;
5396 data->inode = nfs_igrab_and_active(inode);
5397 if (data->inode)
5398 data->roc = nfs4_roc(inode);
5399
5400 task_setup_data.callback_data = data;
5401 msg.rpc_argp = &data->args;
5402 msg.rpc_resp = &data->res;
5403 task = rpc_run_task(&task_setup_data);
5404 if (IS_ERR(task))
5405 return PTR_ERR(task);
5406 if (!issync)
5407 goto out;
5408 status = nfs4_wait_for_completion_rpc_task(task);
5409 if (status != 0)
5410 goto out;
5411 status = data->rpc_status;
5412 if (status == 0)
5413 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
5414 else
5415 nfs_refresh_inode(inode, &data->fattr);
5416 out:
5417 rpc_put_task(task);
5418 return status;
5419 }
5420
5421 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5422 {
5423 struct nfs_server *server = NFS_SERVER(inode);
5424 struct nfs4_exception exception = { };
5425 int err;
5426 do {
5427 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
5428 trace_nfs4_delegreturn(inode, stateid, err);
5429 switch (err) {
5430 case -NFS4ERR_STALE_STATEID:
5431 case -NFS4ERR_EXPIRED:
5432 case 0:
5433 return 0;
5434 }
5435 err = nfs4_handle_exception(server, err, &exception);
5436 } while (exception.retry);
5437 return err;
5438 }
5439
5440 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
5441 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
5442
5443 /*
5444 * sleep, with exponential backoff, and retry the LOCK operation.
5445 */
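/*
 * For example, starting from NFS4_LOCK_MINTIMEOUT the successive delays
 * are 1s, 2s, 4s, 8s, 16s and then stay capped at NFS4_LOCK_MAXTIMEOUT
 * (30s).
 */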
5446 static unsigned long
5447 nfs4_set_lock_task_retry(unsigned long timeout)
5448 {
5449 freezable_schedule_timeout_killable_unsafe(timeout);
5450 timeout <<= 1;
5451 if (timeout > NFS4_LOCK_MAXTIMEOUT)
5452 return NFS4_LOCK_MAXTIMEOUT;
5453 return timeout;
5454 }
5455
5456 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5457 {
5458 struct inode *inode = state->inode;
5459 struct nfs_server *server = NFS_SERVER(inode);
5460 struct nfs_client *clp = server->nfs_client;
5461 struct nfs_lockt_args arg = {
5462 .fh = NFS_FH(inode),
5463 .fl = request,
5464 };
5465 struct nfs_lockt_res res = {
5466 .denied = request,
5467 };
5468 struct rpc_message msg = {
5469 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
5470 .rpc_argp = &arg,
5471 .rpc_resp = &res,
5472 .rpc_cred = state->owner->so_cred,
5473 };
5474 struct nfs4_lock_state *lsp;
5475 int status;
5476
5477 arg.lock_owner.clientid = clp->cl_clientid;
5478 status = nfs4_set_lock_state(state, request);
5479 if (status != 0)
5480 goto out;
5481 lsp = request->fl_u.nfs4_fl.owner;
5482 arg.lock_owner.id = lsp->ls_seqid.owner_id;
5483 arg.lock_owner.s_dev = server->s_dev;
5484 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
5485 switch (status) {
5486 case 0:
5487 request->fl_type = F_UNLCK;
5488 break;
5489 case -NFS4ERR_DENIED:
5490 status = 0;
5491 }
5492 request->fl_ops->fl_release_private(request);
5493 request->fl_ops = NULL;
5494 out:
5495 return status;
5496 }
5497
5498 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5499 {
5500 struct nfs4_exception exception = { };
5501 int err;
5502
5503 do {
5504 err = _nfs4_proc_getlk(state, cmd, request);
5505 trace_nfs4_get_lock(request, state, cmd, err);
5506 err = nfs4_handle_exception(NFS_SERVER(state->inode), err,
5507 &exception);
5508 } while (exception.retry);
5509 return err;
5510 }
5511
5512 static int do_vfs_lock(struct inode *inode, struct file_lock *fl)
5513 {
5514 return locks_lock_inode_wait(inode, fl);
5515 }
5516
5517 struct nfs4_unlockdata {
5518 struct nfs_locku_args arg;
5519 struct nfs_locku_res res;
5520 struct nfs4_lock_state *lsp;
5521 struct nfs_open_context *ctx;
5522 struct file_lock fl;
5523 struct nfs_server *server;
5524 unsigned long timestamp;
5525 };
5526
5527 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
5528 struct nfs_open_context *ctx,
5529 struct nfs4_lock_state *lsp,
5530 struct nfs_seqid *seqid)
5531 {
5532 struct nfs4_unlockdata *p;
5533 struct inode *inode = lsp->ls_state->inode;
5534
5535 p = kzalloc(sizeof(*p), GFP_NOFS);
5536 if (p == NULL)
5537 return NULL;
5538 p->arg.fh = NFS_FH(inode);
5539 p->arg.fl = &p->fl;
5540 p->arg.seqid = seqid;
5541 p->res.seqid = seqid;
5542 p->lsp = lsp;
5543 atomic_inc(&lsp->ls_count);
5544 /* Ensure we don't close the file until we're done freeing locks! */
5545 p->ctx = get_nfs_open_context(ctx);
5546 memcpy(&p->fl, fl, sizeof(p->fl));
5547 p->server = NFS_SERVER(inode);
5548 return p;
5549 }
5550
5551 static void nfs4_locku_release_calldata(void *data)
5552 {
5553 struct nfs4_unlockdata *calldata = data;
5554 nfs_free_seqid(calldata->arg.seqid);
5555 nfs4_put_lock_state(calldata->lsp);
5556 put_nfs_open_context(calldata->ctx);
5557 kfree(calldata);
5558 }
5559
5560 static void nfs4_locku_done(struct rpc_task *task, void *data)
5561 {
5562 struct nfs4_unlockdata *calldata = data;
5563
5564 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
5565 return;
5566 switch (task->tk_status) {
5567 case 0:
5568 renew_lease(calldata->server, calldata->timestamp);
5569 do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl);
5570 if (nfs4_update_lock_stateid(calldata->lsp,
5571 &calldata->res.stateid))
5572 break;
5573 case -NFS4ERR_BAD_STATEID:
5574 case -NFS4ERR_OLD_STATEID:
5575 case -NFS4ERR_STALE_STATEID:
5576 case -NFS4ERR_EXPIRED:
5577 if (!nfs4_stateid_match(&calldata->arg.stateid,
5578 &calldata->lsp->ls_stateid))
5579 rpc_restart_call_prepare(task);
5580 break;
5581 default:
5582 if (nfs4_async_handle_error(task, calldata->server,
5583 NULL, NULL) == -EAGAIN)
5584 rpc_restart_call_prepare(task);
5585 }
5586 nfs_release_seqid(calldata->arg.seqid);
5587 }
5588
5589 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
5590 {
5591 struct nfs4_unlockdata *calldata = data;
5592
5593 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
5594 goto out_wait;
5595 nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid);
5596 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
5597 /* Note: exit _without_ running nfs4_locku_done */
5598 goto out_no_action;
5599 }
5600 calldata->timestamp = jiffies;
5601 if (nfs4_setup_sequence(calldata->server,
5602 &calldata->arg.seq_args,
5603 &calldata->res.seq_res,
5604 task) != 0)
5605 nfs_release_seqid(calldata->arg.seqid);
5606 return;
5607 out_no_action:
5608 task->tk_action = NULL;
5609 out_wait:
5610 nfs4_sequence_done(task, &calldata->res.seq_res);
5611 }
5612
5613 static const struct rpc_call_ops nfs4_locku_ops = {
5614 .rpc_call_prepare = nfs4_locku_prepare,
5615 .rpc_call_done = nfs4_locku_done,
5616 .rpc_release = nfs4_locku_release_calldata,
5617 };
5618
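/*
 * Issue an asynchronous LOCKU. On allocation failure the seqid is freed
 * and an ERR_PTR is returned; otherwise the running rpc_task is returned
 * so the caller can wait for (or detach from) it.
 */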
5619 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
5620 struct nfs_open_context *ctx,
5621 struct nfs4_lock_state *lsp,
5622 struct nfs_seqid *seqid)
5623 {
5624 struct nfs4_unlockdata *data;
5625 struct rpc_message msg = {
5626 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
5627 .rpc_cred = ctx->cred,
5628 };
5629 struct rpc_task_setup task_setup_data = {
5630 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
5631 .rpc_message = &msg,
5632 .callback_ops = &nfs4_locku_ops,
5633 .workqueue = nfsiod_workqueue,
5634 .flags = RPC_TASK_ASYNC,
5635 };
5636
5637 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
5638 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
5639
5640 /* Ensure this is an unlock - when canceling a lock, the
5641 * canceled lock is passed in, and it won't be an unlock.
5642 */
5643 fl->fl_type = F_UNLCK;
5644
5645 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
5646 if (data == NULL) {
5647 nfs_free_seqid(seqid);
5648 return ERR_PTR(-ENOMEM);
5649 }
5650
5651 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
5652 msg.rpc_argp = &data->arg;
5653 msg.rpc_resp = &data->res;
5654 task_setup_data.callback_data = data;
5655 return rpc_run_task(&task_setup_data);
5656 }
5657
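/*
 * Release a byte-range lock: drop the lock locally first so that a lock
 * the server has already released is never left pinned in the VFS, then
 * send LOCKU unless the lock was only ever held under a delegation and
 * was never seen by the server.
 */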
5658 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
5659 {
5660 struct inode *inode = state->inode;
5661 struct nfs4_state_owner *sp = state->owner;
5662 struct nfs_inode *nfsi = NFS_I(inode);
5663 struct nfs_seqid *seqid;
5664 struct nfs4_lock_state *lsp;
5665 struct rpc_task *task;
5666 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
5667 int status = 0;
5668 unsigned char fl_flags = request->fl_flags;
5669
5670 status = nfs4_set_lock_state(state, request);
5671 /* Unlock _before_ we do the RPC call */
5672 request->fl_flags |= FL_EXISTS;
5673 /* Exclude nfs_delegation_claim_locks() */
5674 mutex_lock(&sp->so_delegreturn_mutex);
5675 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
5676 down_read(&nfsi->rwsem);
5677 if (do_vfs_lock(inode, request) == -ENOENT) {
5678 up_read(&nfsi->rwsem);
5679 mutex_unlock(&sp->so_delegreturn_mutex);
5680 goto out;
5681 }
5682 up_read(&nfsi->rwsem);
5683 mutex_unlock(&sp->so_delegreturn_mutex);
5684 if (status != 0)
5685 goto out;
5686 /* Is this a delegated lock? */
5687 lsp = request->fl_u.nfs4_fl.owner;
5688 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
5689 goto out;
5690 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
5691 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
5692 status = -ENOMEM;
5693 if (IS_ERR(seqid))
5694 goto out;
5695 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
5696 status = PTR_ERR(task);
5697 if (IS_ERR(task))
5698 goto out;
5699 status = nfs4_wait_for_completion_rpc_task(task);
5700 rpc_put_task(task);
5701 out:
5702 request->fl_flags = fl_flags;
5703 trace_nfs4_unlock(request, state, F_SETLK, status);
5704 return status;
5705 }
5706
5707 struct nfs4_lockdata {
5708 struct nfs_lock_args arg;
5709 struct nfs_lock_res res;
5710 struct nfs4_lock_state *lsp;
5711 struct nfs_open_context *ctx;
5712 struct file_lock fl;
5713 unsigned long timestamp;
5714 int rpc_status;
5715 int cancelled;
5716 struct nfs_server *server;
5717 };
5718
5719 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
5720 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
5721 gfp_t gfp_mask)
5722 {
5723 struct nfs4_lockdata *p;
5724 struct inode *inode = lsp->ls_state->inode;
5725 struct nfs_server *server = NFS_SERVER(inode);
5726 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
5727
5728 p = kzalloc(sizeof(*p), gfp_mask);
5729 if (p == NULL)
5730 return NULL;
5731
5732 p->arg.fh = NFS_FH(inode);
5733 p->arg.fl = &p->fl;
5734 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
5735 if (IS_ERR(p->arg.open_seqid))
5736 goto out_free;
5737 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
5738 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
5739 if (IS_ERR(p->arg.lock_seqid))
5740 goto out_free_seqid;
5741 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
5742 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
5743 p->arg.lock_owner.s_dev = server->s_dev;
5744 p->res.lock_seqid = p->arg.lock_seqid;
5745 p->lsp = lsp;
5746 p->server = server;
5747 atomic_inc(&lsp->ls_count);
5748 p->ctx = get_nfs_open_context(ctx);
5749 get_file(fl->fl_file);
5750 memcpy(&p->fl, fl, sizeof(p->fl));
5751 return p;
5752 out_free_seqid:
5753 nfs_free_seqid(p->arg.open_seqid);
5754 out_free:
5755 kfree(p);
5756 return NULL;
5757 }
5758
5759 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
5760 {
5761 struct nfs4_lockdata *data = calldata;
5762 struct nfs4_state *state = data->lsp->ls_state;
5763
5764 dprintk("%s: begin!\n", __func__);
5765 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
5766 goto out_wait;
5767 /* Do we need to do an open_to_lock_owner? */
5768 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
5769 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
5770 goto out_release_lock_seqid;
5771 }
5772 nfs4_stateid_copy(&data->arg.open_stateid,
5773 &state->open_stateid);
5774 data->arg.new_lock_owner = 1;
5775 data->res.open_seqid = data->arg.open_seqid;
5776 } else {
5777 data->arg.new_lock_owner = 0;
5778 nfs4_stateid_copy(&data->arg.lock_stateid,
5779 &data->lsp->ls_stateid);
5780 }
5781 if (!nfs4_valid_open_stateid(state)) {
5782 data->rpc_status = -EBADF;
5783 task->tk_action = NULL;
5784 goto out_release_open_seqid;
5785 }
5786 data->timestamp = jiffies;
5787 if (nfs4_setup_sequence(data->server,
5788 &data->arg.seq_args,
5789 &data->res.seq_res,
5790 task) == 0)
5791 return;
5792 out_release_open_seqid:
5793 nfs_release_seqid(data->arg.open_seqid);
5794 out_release_lock_seqid:
5795 nfs_release_seqid(data->arg.lock_seqid);
5796 out_wait:
5797 nfs4_sequence_done(task, &data->res.seq_res);
5798 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
5799 }
5800
5801 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
5802 {
5803 struct nfs4_lockdata *data = calldata;
5804 struct nfs4_lock_state *lsp = data->lsp;
5805
5806 dprintk("%s: begin!\n", __func__);
5807
5808 if (!nfs4_sequence_done(task, &data->res.seq_res))
5809 return;
5810
5811 data->rpc_status = task->tk_status;
5812 switch (task->tk_status) {
5813 case 0:
5814 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
5815 data->timestamp);
5816 if (data->arg.new_lock) {
5817 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
5818 if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) {
5819 rpc_restart_call_prepare(task);
5820 break;
5821 }
5822 }
5823 if (data->arg.new_lock_owner != 0) {
5824 nfs_confirm_seqid(&lsp->ls_seqid, 0);
5825 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
5826 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
5827 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
5828 rpc_restart_call_prepare(task);
5829 break;
5830 case -NFS4ERR_BAD_STATEID:
5831 case -NFS4ERR_OLD_STATEID:
5832 case -NFS4ERR_STALE_STATEID:
5833 case -NFS4ERR_EXPIRED:
5834 if (data->arg.new_lock_owner != 0) {
5835 if (!nfs4_stateid_match(&data->arg.open_stateid,
5836 &lsp->ls_state->open_stateid))
5837 rpc_restart_call_prepare(task);
5838 } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
5839 &lsp->ls_stateid))
5840 rpc_restart_call_prepare(task);
5841 }
5842 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
5843 }
5844
5845 static void nfs4_lock_release(void *calldata)
5846 {
5847 struct nfs4_lockdata *data = calldata;
5848
5849 dprintk("%s: begin!\n", __func__);
5850 nfs_free_seqid(data->arg.open_seqid);
5851 if (data->cancelled != 0) {
5852 struct rpc_task *task;
5853 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
5854 data->arg.lock_seqid);
5855 if (!IS_ERR(task))
5856 rpc_put_task_async(task);
5857 dprintk("%s: cancelling lock!\n", __func__);
5858 } else
5859 nfs_free_seqid(data->arg.lock_seqid);
5860 nfs4_put_lock_state(data->lsp);
5861 put_nfs_open_context(data->ctx);
5862 fput(data->fl.fl_file);
5863 kfree(data);
5864 dprintk("%s: done!\n", __func__);
5865 }
5866
5867 static const struct rpc_call_ops nfs4_lock_ops = {
5868 .rpc_call_prepare = nfs4_lock_prepare,
5869 .rpc_call_done = nfs4_lock_done,
5870 .rpc_release = nfs4_lock_release,
5871 };
5872
5873 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
5874 {
5875 switch (error) {
5876 case -NFS4ERR_ADMIN_REVOKED:
5877 case -NFS4ERR_BAD_STATEID:
5878 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
5879 if (new_lock_owner != 0 ||
5880 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
5881 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
5882 break;
5883 case -NFS4ERR_STALE_STATEID:
5884 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
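		/* Fall through: a stale stateid also requires lease recovery */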
5885 case -NFS4ERR_EXPIRED:
5886 nfs4_schedule_lease_recovery(server->nfs_client);
5887 }
5888 }
5889
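/*
 * Send a LOCK request and wait for completion. recovery_type selects a
 * new lock, a reclaim after server restart, or recovery of a lock whose
 * lease expired; the recovery variants use a privileged sequence slot so
 * that they can run while state recovery is in progress.
 */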
5890 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
5891 {
5892 struct nfs4_lockdata *data;
5893 struct rpc_task *task;
5894 struct rpc_message msg = {
5895 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
5896 .rpc_cred = state->owner->so_cred,
5897 };
5898 struct rpc_task_setup task_setup_data = {
5899 .rpc_client = NFS_CLIENT(state->inode),
5900 .rpc_message = &msg,
5901 .callback_ops = &nfs4_lock_ops,
5902 .workqueue = nfsiod_workqueue,
5903 .flags = RPC_TASK_ASYNC,
5904 };
5905 int ret;
5906
5907 dprintk("%s: begin!\n", __func__);
5908 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
5909 fl->fl_u.nfs4_fl.owner,
5910 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
5911 if (data == NULL)
5912 return -ENOMEM;
5913 if (IS_SETLKW(cmd))
5914 data->arg.block = 1;
5915 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
5916 msg.rpc_argp = &data->arg;
5917 msg.rpc_resp = &data->res;
5918 task_setup_data.callback_data = data;
5919 if (recovery_type > NFS_LOCK_NEW) {
5920 if (recovery_type == NFS_LOCK_RECLAIM)
5921 data->arg.reclaim = NFS_LOCK_RECLAIM;
5922 nfs4_set_sequence_privileged(&data->arg.seq_args);
5923 } else
5924 data->arg.new_lock = 1;
5925 task = rpc_run_task(&task_setup_data);
5926 if (IS_ERR(task))
5927 return PTR_ERR(task);
5928 ret = nfs4_wait_for_completion_rpc_task(task);
5929 if (ret == 0) {
5930 ret = data->rpc_status;
5931 if (ret)
5932 nfs4_handle_setlk_error(data->server, data->lsp,
5933 data->arg.new_lock_owner, ret);
5934 } else
5935 data->cancelled = 1;
5936 rpc_put_task(task);
5937 dprintk("%s: done, ret = %d!\n", __func__, ret);
5938 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
5939 return ret;
5940 }
5941
5942 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
5943 {
5944 struct nfs_server *server = NFS_SERVER(state->inode);
5945 struct nfs4_exception exception = {
5946 .inode = state->inode,
5947 };
5948 int err;
5949
5950 do {
5951 /* Cache the lock if possible... */
5952 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
5953 return 0;
5954 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
5955 if (err != -NFS4ERR_DELAY)
5956 break;
5957 nfs4_handle_exception(server, err, &exception);
5958 } while (exception.retry);
5959 return err;
5960 }
5961
5962 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
5963 {
5964 struct nfs_server *server = NFS_SERVER(state->inode);
5965 struct nfs4_exception exception = {
5966 .inode = state->inode,
5967 };
5968 int err;
5969
5970 err = nfs4_set_lock_state(state, request);
5971 if (err != 0)
5972 return err;
5973 if (!recover_lost_locks) {
5974 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
5975 return 0;
5976 }
5977 do {
5978 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
5979 return 0;
5980 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
5981 switch (err) {
5982 default:
5983 goto out;
5984 case -NFS4ERR_GRACE:
5985 case -NFS4ERR_DELAY:
5986 nfs4_handle_exception(server, err, &exception);
5987 err = 0;
5988 }
5989 } while (exception.retry);
5990 out:
5991 return err;
5992 }
5993
5994 #if defined(CONFIG_NFS_V4_1)
5995 /**
5996 * nfs41_check_expired_locks - possibly free a lock stateid
5997 *
5998 * @state: NFSv4 state for an inode
5999 *
6000 * Returns NFS_OK if recovery for this stateid is now finished.
6001 * Otherwise a negative NFS4ERR value is returned.
6002 */
6003 static int nfs41_check_expired_locks(struct nfs4_state *state)
6004 {
6005 int status, ret = -NFS4ERR_BAD_STATEID;
6006 struct nfs4_lock_state *lsp;
6007 struct nfs_server *server = NFS_SERVER(state->inode);
6008
6009 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
6010 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
6011 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
6012
6013 status = nfs41_test_stateid(server,
6014 &lsp->ls_stateid,
6015 cred);
6016 trace_nfs4_test_lock_stateid(state, lsp, status);
6017 if (status != NFS_OK) {
6018 /* Free the stateid unless the server
6019 * informs us the stateid is unrecognized. */
6020 if (status != -NFS4ERR_BAD_STATEID)
6021 nfs41_free_stateid(server,
6022 &lsp->ls_stateid,
6023 cred);
6024 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
6025 ret = status;
6026 }
6027 }
6028 }
6029
6030 return ret;
6031 }
6032
6033 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
6034 {
6035 int status = NFS_OK;
6036
6037 if (test_bit(LK_STATE_IN_USE, &state->flags))
6038 status = nfs41_check_expired_locks(state);
6039 if (status != NFS_OK)
6040 status = nfs4_lock_expired(state, request);
6041 return status;
6042 }
6043 #endif
6044
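/*
 * Set a lock: probe for local conflicts first (FL_ACCESS), then either
 * cache the lock locally if we hold a delegation, or obtain it from the
 * server with a LOCK request.
 */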
6045 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6046 {
6047 struct nfs_inode *nfsi = NFS_I(state->inode);
6048 unsigned char fl_flags = request->fl_flags;
6049 int status = -ENOLCK;
6050
6051 if ((fl_flags & FL_POSIX) &&
6052 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
6053 goto out;
6054 /* Is this a delegated open? */
6055 status = nfs4_set_lock_state(state, request);
6056 if (status != 0)
6057 goto out;
6058 request->fl_flags |= FL_ACCESS;
6059 status = do_vfs_lock(state->inode, request);
6060 if (status < 0)
6061 goto out;
6062 down_read(&nfsi->rwsem);
6063 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
6064 /* Yes: cache locks! */
6065 /* ...but avoid races with delegation recall... */
6066 request->fl_flags = fl_flags & ~FL_SLEEP;
6067 status = do_vfs_lock(state->inode, request);
6068 up_read(&nfsi->rwsem);
6069 goto out;
6070 }
6071 up_read(&nfsi->rwsem);
6072 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
6073 out:
6074 request->fl_flags = fl_flags;
6075 return status;
6076 }
6077
6078 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6079 {
6080 struct nfs4_exception exception = {
6081 .state = state,
6082 .inode = state->inode,
6083 };
6084 int err;
6085
6086 do {
6087 err = _nfs4_proc_setlk(state, cmd, request);
6088 if (err == -NFS4ERR_DENIED)
6089 err = -EAGAIN;
6090 err = nfs4_handle_exception(NFS_SERVER(state->inode),
6091 err, &exception);
6092 } while (exception.retry);
6093 return err;
6094 }
6095
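/*
 * Top-level lock entry point: dispatches F_GETLK, unlock and setlk
 * requests, and retries blocking (SETLKW) requests that are denied until
 * they are granted or the process is signalled.
 */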
6096 static int
6097 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
6098 {
6099 struct nfs_open_context *ctx;
6100 struct nfs4_state *state;
6101 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
6102 int status;
6103
6104 /* verify open state */
6105 ctx = nfs_file_open_context(filp);
6106 state = ctx->state;
6107
6108 if (request->fl_start < 0 || request->fl_end < 0)
6109 return -EINVAL;
6110
6111 if (IS_GETLK(cmd)) {
6112 if (state != NULL)
6113 return nfs4_proc_getlk(state, F_GETLK, request);
6114 return 0;
6115 }
6116
6117 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
6118 return -EINVAL;
6119
6120 if (request->fl_type == F_UNLCK) {
6121 if (state != NULL)
6122 return nfs4_proc_unlck(state, cmd, request);
6123 return 0;
6124 }
6125
6126 if (state == NULL)
6127 return -ENOLCK;
6128 /*
6129 * Don't rely on the VFS having checked the file open mode,
6130 * since it won't do this for flock() locks.
6131 */
6132 switch (request->fl_type) {
6133 case F_RDLCK:
6134 if (!(filp->f_mode & FMODE_READ))
6135 return -EBADF;
6136 break;
6137 case F_WRLCK:
6138 if (!(filp->f_mode & FMODE_WRITE))
6139 return -EBADF;
6140 }
6141
6142 do {
6143 status = nfs4_proc_setlk(state, cmd, request);
6144 if ((status != -EAGAIN) || IS_SETLK(cmd))
6145 break;
6146 timeout = nfs4_set_lock_task_retry(timeout);
6147 status = -ERESTARTSYS;
6148 if (signalled())
6149 break;
6150 } while (status < 0);
6151 return status;
6152 }
6153
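/*
 * Called while a delegation is being recalled: push a lock that was only
 * recorded locally under the delegation out to the server before the
 * delegation is returned.
 */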
6154 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
6155 {
6156 struct nfs_server *server = NFS_SERVER(state->inode);
6157 int err;
6158
6159 err = nfs4_set_lock_state(state, fl);
6160 if (err != 0)
6161 return err;
6162 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
6163 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
6164 }
6165
6166 struct nfs_release_lockowner_data {
6167 struct nfs4_lock_state *lsp;
6168 struct nfs_server *server;
6169 struct nfs_release_lockowner_args args;
6170 struct nfs_release_lockowner_res res;
6171 unsigned long timestamp;
6172 };
6173
6174 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
6175 {
6176 struct nfs_release_lockowner_data *data = calldata;
6177 struct nfs_server *server = data->server;
6178 nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
6179 &data->args.seq_args, &data->res.seq_res, task);
6180 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6181 data->timestamp = jiffies;
6182 }
6183
6184 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
6185 {
6186 struct nfs_release_lockowner_data *data = calldata;
6187 struct nfs_server *server = data->server;
6188
6189 nfs40_sequence_done(task, &data->res.seq_res);
6190
6191 switch (task->tk_status) {
6192 case 0:
6193 renew_lease(server, data->timestamp);
6194 break;
6195 case -NFS4ERR_STALE_CLIENTID:
6196 case -NFS4ERR_EXPIRED:
6197 nfs4_schedule_lease_recovery(server->nfs_client);
6198 break;
6199 case -NFS4ERR_LEASE_MOVED:
6200 case -NFS4ERR_DELAY:
6201 if (nfs4_async_handle_error(task, server,
6202 NULL, NULL) == -EAGAIN)
6203 rpc_restart_call_prepare(task);
6204 }
6205 }
6206
6207 static void nfs4_release_lockowner_release(void *calldata)
6208 {
6209 struct nfs_release_lockowner_data *data = calldata;
6210 nfs4_free_lock_state(data->server, data->lsp);
6211 kfree(calldata);
6212 }
6213
6214 static const struct rpc_call_ops nfs4_release_lockowner_ops = {
6215 .rpc_call_prepare = nfs4_release_lockowner_prepare,
6216 .rpc_call_done = nfs4_release_lockowner_done,
6217 .rpc_release = nfs4_release_lockowner_release,
6218 };
6219
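/*
 * Notify the server that a lock owner is no longer in use so it can
 * release the associated state. RELEASE_LOCKOWNER only exists in
 * NFSv4.0; the call is asynchronous and best-effort.
 */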
6220 static void
6221 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
6222 {
6223 struct nfs_release_lockowner_data *data;
6224 struct rpc_message msg = {
6225 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
6226 };
6227
6228 if (server->nfs_client->cl_mvops->minor_version != 0)
6229 return;
6230
6231 data = kmalloc(sizeof(*data), GFP_NOFS);
6232 if (!data)
6233 return;
6234 data->lsp = lsp;
6235 data->server = server;
6236 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6237 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
6238 data->args.lock_owner.s_dev = server->s_dev;
6239
6240 msg.rpc_argp = &data->args;
6241 msg.rpc_resp = &data->res;
6242 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
6243 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
6244 }
6245
6246 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
6247
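/*
 * The "system.nfs4_acl" extended attribute exposes the raw NFSv4 ACL:
 * get and set simply hand the user buffer to the ACL GETATTR/SETATTR
 * based procedures.
 */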
6248 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
6249 struct dentry *dentry, const char *key,
6250 const void *buf, size_t buflen,
6251 int flags)
6252 {
6253 if (strcmp(key, "") != 0)
6254 return -EINVAL;
6255
6256 return nfs4_proc_set_acl(d_inode(dentry), buf, buflen);
6257 }
6258
6259 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
6260 struct dentry *dentry, const char *key,
6261 void *buf, size_t buflen)
6262 {
6263 if (strcmp(key, "") != 0)
6264 return -EINVAL;
6265
6266 return nfs4_proc_get_acl(d_inode(dentry), buf, buflen);
6267 }
6268
6269 static size_t nfs4_xattr_list_nfs4_acl(const struct xattr_handler *handler,
6270 struct dentry *dentry, char *list,
6271 size_t list_len, const char *name,
6272 size_t name_len)
6273 {
6274 size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
6275
6276 if (!nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry))))
6277 return 0;
6278
6279 if (list && len <= list_len)
6280 memcpy(list, XATTR_NAME_NFSV4_ACL, len);
6281 return len;
6282 }
6283
6284 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
6285 static inline int nfs4_server_supports_labels(struct nfs_server *server)
6286 {
6287 return server->caps & NFS_CAP_SECURITY_LABEL;
6288 }
6289
6290 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
6291 struct dentry *dentry, const char *key,
6292 const void *buf, size_t buflen,
6293 int flags)
6294 {
6295 if (security_ismaclabel(key))
6296 return nfs4_set_security_label(dentry, buf, buflen);
6297
6298 return -EOPNOTSUPP;
6299 }
6300
6301 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler,
6302 struct dentry *dentry, const char *key,
6303 void *buf, size_t buflen)
6304 {
6305 if (security_ismaclabel(key))
6306 return nfs4_get_security_label(d_inode(dentry), buf, buflen);
6307 return -EOPNOTSUPP;
6308 }
6309
6310 static size_t nfs4_xattr_list_nfs4_label(const struct xattr_handler *handler,
6311 struct dentry *dentry, char *list,
6312 size_t list_len, const char *name,
6313 size_t name_len)
6314 {
6315 size_t len = 0;
6316
6317 if (nfs_server_capable(d_inode(dentry), NFS_CAP_SECURITY_LABEL)) {
6318 len = security_inode_listsecurity(d_inode(dentry), NULL, 0);
6319 if (list && len <= list_len)
6320 security_inode_listsecurity(d_inode(dentry), list, len);
6321 }
6322 return len;
6323 }
6324
6325 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = {
6326 .prefix = XATTR_SECURITY_PREFIX,
6327 .list = nfs4_xattr_list_nfs4_label,
6328 .get = nfs4_xattr_get_nfs4_label,
6329 .set = nfs4_xattr_set_nfs4_label,
6330 };
6331 #endif
6332
6333
6334 /*
6335 * nfs_fhget will use either the mounted_on_fileid or the fileid
6336 */
6337 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
6338 {
6339 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
6340 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
6341 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
6342 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
6343 return;
6344
6345 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
6346 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
6347 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
6348 fattr->nlink = 2;
6349 }
6350
6351 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6352 const struct qstr *name,
6353 struct nfs4_fs_locations *fs_locations,
6354 struct page *page)
6355 {
6356 struct nfs_server *server = NFS_SERVER(dir);
6357 u32 bitmask[3] = {
6358 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6359 };
6360 struct nfs4_fs_locations_arg args = {
6361 .dir_fh = NFS_FH(dir),
6362 .name = name,
6363 .page = page,
6364 .bitmask = bitmask,
6365 };
6366 struct nfs4_fs_locations_res res = {
6367 .fs_locations = fs_locations,
6368 };
6369 struct rpc_message msg = {
6370 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6371 .rpc_argp = &args,
6372 .rpc_resp = &res,
6373 };
6374 int status;
6375
6376 dprintk("%s: start\n", __func__);
6377
6378 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
6379 * is not supported */
6380 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
6381 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
6382 else
6383 bitmask[0] |= FATTR4_WORD0_FILEID;
6384
6385 nfs_fattr_init(&fs_locations->fattr);
6386 fs_locations->server = server;
6387 fs_locations->nlocations = 0;
6388 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
6389 dprintk("%s: returned status = %d\n", __func__, status);
6390 return status;
6391 }
6392
6393 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6394 const struct qstr *name,
6395 struct nfs4_fs_locations *fs_locations,
6396 struct page *page)
6397 {
6398 struct nfs4_exception exception = { };
6399 int err;
6400 do {
6401 err = _nfs4_proc_fs_locations(client, dir, name,
6402 fs_locations, page);
6403 trace_nfs4_get_fs_locations(dir, name, err);
6404 err = nfs4_handle_exception(NFS_SERVER(dir), err,
6405 &exception);
6406 } while (exception.retry);
6407 return err;
6408 }
6409
6410 /*
6411 * This operation also signals the server that this client is
6412 * performing migration recovery. The server can stop returning
6413 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is
6414 * appended to this compound to identify the client ID which is
6415 * performing recovery.
6416 */
6417 static int _nfs40_proc_get_locations(struct inode *inode,
6418 struct nfs4_fs_locations *locations,
6419 struct page *page, struct rpc_cred *cred)
6420 {
6421 struct nfs_server *server = NFS_SERVER(inode);
6422 struct rpc_clnt *clnt = server->client;
6423 u32 bitmask[2] = {
6424 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6425 };
6426 struct nfs4_fs_locations_arg args = {
6427 .clientid = server->nfs_client->cl_clientid,
6428 .fh = NFS_FH(inode),
6429 .page = page,
6430 .bitmask = bitmask,
6431 .migration = 1, /* skip LOOKUP */
6432 .renew = 1, /* append RENEW */
6433 };
6434 struct nfs4_fs_locations_res res = {
6435 .fs_locations = locations,
6436 .migration = 1,
6437 .renew = 1,
6438 };
6439 struct rpc_message msg = {
6440 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6441 .rpc_argp = &args,
6442 .rpc_resp = &res,
6443 .rpc_cred = cred,
6444 };
6445 unsigned long now = jiffies;
6446 int status;
6447
6448 nfs_fattr_init(&locations->fattr);
6449 locations->server = server;
6450 locations->nlocations = 0;
6451
6452 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6453 nfs4_set_sequence_privileged(&args.seq_args);
6454 status = nfs4_call_sync_sequence(clnt, server, &msg,
6455 &args.seq_args, &res.seq_res);
6456 if (status)
6457 return status;
6458
6459 renew_lease(server, now);
6460 return 0;
6461 }
6462
6463 #ifdef CONFIG_NFS_V4_1
6464
6465 /*
6466 * This operation also signals the server that this client is
6467 * performing migration recovery. The server can stop asserting
6468 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID
6469 * performing this operation is identified in the SEQUENCE
6470 * operation in this compound.
6471 *
6472 * When the client supports GETATTR(fs_locations_info), it can
6473 * be plumbed in here.
6474 */
6475 static int _nfs41_proc_get_locations(struct inode *inode,
6476 struct nfs4_fs_locations *locations,
6477 struct page *page, struct rpc_cred *cred)
6478 {
6479 struct nfs_server *server = NFS_SERVER(inode);
6480 struct rpc_clnt *clnt = server->client;
6481 u32 bitmask[2] = {
6482 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6483 };
6484 struct nfs4_fs_locations_arg args = {
6485 .fh = NFS_FH(inode),
6486 .page = page,
6487 .bitmask = bitmask,
6488 .migration = 1, /* skip LOOKUP */
6489 };
6490 struct nfs4_fs_locations_res res = {
6491 .fs_locations = locations,
6492 .migration = 1,
6493 };
6494 struct rpc_message msg = {
6495 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6496 .rpc_argp = &args,
6497 .rpc_resp = &res,
6498 .rpc_cred = cred,
6499 };
6500 int status;
6501
6502 nfs_fattr_init(&locations->fattr);
6503 locations->server = server;
6504 locations->nlocations = 0;
6505
6506 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6507 nfs4_set_sequence_privileged(&args.seq_args);
6508 status = nfs4_call_sync_sequence(clnt, server, &msg,
6509 &args.seq_args, &res.seq_res);
6510 if (status == NFS4_OK &&
6511 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
6512 status = -NFS4ERR_LEASE_MOVED;
6513 return status;
6514 }
6515
6516 #endif /* CONFIG_NFS_V4_1 */
6517
6518 /**
6519 * nfs4_proc_get_locations - discover locations for a migrated FSID
6520 * @inode: inode on FSID that is migrating
6521 * @locations: result of query
6522 * @page: buffer
6523 * @cred: credential to use for this operation
6524 *
6525 * Returns NFS4_OK on success, a negative NFS4ERR status code if the
6526 * operation failed, or a negative errno if a local error occurred.
6527 *
6528 * On success, "locations" is filled in, but if the server has
6529 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
6530 * asserted.
6531 *
6532 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases
6533 * from this client that require migration recovery.
6534 */
6535 int nfs4_proc_get_locations(struct inode *inode,
6536 struct nfs4_fs_locations *locations,
6537 struct page *page, struct rpc_cred *cred)
6538 {
6539 struct nfs_server *server = NFS_SERVER(inode);
6540 struct nfs_client *clp = server->nfs_client;
6541 const struct nfs4_mig_recovery_ops *ops =
6542 clp->cl_mvops->mig_recovery_ops;
6543 struct nfs4_exception exception = { };
6544 int status;
6545
6546 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
6547 (unsigned long long)server->fsid.major,
6548 (unsigned long long)server->fsid.minor,
6549 clp->cl_hostname);
6550 nfs_display_fhandle(NFS_FH(inode), __func__);
6551
6552 do {
6553 status = ops->get_locations(inode, locations, page, cred);
6554 if (status != -NFS4ERR_DELAY)
6555 break;
6556 nfs4_handle_exception(server, status, &exception);
6557 } while (exception.retry);
6558 return status;
6559 }
6560
6561 /*
6562 * This operation also signals the server that this client is
6563 * performing "lease moved" recovery. The server can stop
6564 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation
6565 * is appended to this compound to identify the client ID which is
6566 * performing recovery.
6567 */
6568 static int _nfs40_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6569 {
6570 struct nfs_server *server = NFS_SERVER(inode);
6571 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
6572 struct rpc_clnt *clnt = server->client;
6573 struct nfs4_fsid_present_arg args = {
6574 .fh = NFS_FH(inode),
6575 .clientid = clp->cl_clientid,
6576 .renew = 1, /* append RENEW */
6577 };
6578 struct nfs4_fsid_present_res res = {
6579 .renew = 1,
6580 };
6581 struct rpc_message msg = {
6582 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
6583 .rpc_argp = &args,
6584 .rpc_resp = &res,
6585 .rpc_cred = cred,
6586 };
6587 unsigned long now = jiffies;
6588 int status;
6589
6590 res.fh = nfs_alloc_fhandle();
6591 if (res.fh == NULL)
6592 return -ENOMEM;
6593
6594 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6595 nfs4_set_sequence_privileged(&args.seq_args);
6596 status = nfs4_call_sync_sequence(clnt, server, &msg,
6597 &args.seq_args, &res.seq_res);
6598 nfs_free_fhandle(res.fh);
6599 if (status)
6600 return status;
6601
6602 do_renew_lease(clp, now);
6603 return 0;
6604 }
6605
6606 #ifdef CONFIG_NFS_V4_1
6607
6608 /*
6609 * This operation also signals the server that this client is
6610 * performing "lease moved" recovery. The server can stop asserting
6611 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing
6612 * this operation is identified in the SEQUENCE operation in this
6613 * compound.
6614 */
6615 static int _nfs41_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6616 {
6617 struct nfs_server *server = NFS_SERVER(inode);
6618 struct rpc_clnt *clnt = server->client;
6619 struct nfs4_fsid_present_arg args = {
6620 .fh = NFS_FH(inode),
6621 };
6622 struct nfs4_fsid_present_res res = {
6623 };
6624 struct rpc_message msg = {
6625 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
6626 .rpc_argp = &args,
6627 .rpc_resp = &res,
6628 .rpc_cred = cred,
6629 };
6630 int status;
6631
6632 res.fh = nfs_alloc_fhandle();
6633 if (res.fh == NULL)
6634 return -ENOMEM;
6635
6636 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6637 nfs4_set_sequence_privileged(&args.seq_args);
6638 status = nfs4_call_sync_sequence(clnt, server, &msg,
6639 &args.seq_args, &res.seq_res);
6640 nfs_free_fhandle(res.fh);
6641 if (status == NFS4_OK &&
6642 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
6643 status = -NFS4ERR_LEASE_MOVED;
6644 return status;
6645 }
6646
6647 #endif /* CONFIG_NFS_V4_1 */
6648
6649 /**
6650 * nfs4_proc_fsid_present - Is this FSID present or absent on server?
6651 * @inode: inode on FSID to check
6652 * @cred: credential to use for this operation
6653 *
6654 * Server indicates whether the FSID is present, moved, or not
6655 * recognized. This operation is necessary to clear a LEASE_MOVED
6656 * condition for this client ID.
6657 *
6658 * Returns NFS4_OK if the FSID is present on this server,
6659 * -NFS4ERR_MOVED if the FSID is no longer present, a negative
6660 * NFS4ERR code if some error occurred on the server, or a
6661 * negative errno if a local failure occurred.
6662 */
6663 int nfs4_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6664 {
6665 struct nfs_server *server = NFS_SERVER(inode);
6666 struct nfs_client *clp = server->nfs_client;
6667 const struct nfs4_mig_recovery_ops *ops =
6668 clp->cl_mvops->mig_recovery_ops;
6669 struct nfs4_exception exception = { };
6670 int status;
6671
6672 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
6673 (unsigned long long)server->fsid.major,
6674 (unsigned long long)server->fsid.minor,
6675 clp->cl_hostname);
6676 nfs_display_fhandle(NFS_FH(inode), __func__);
6677
6678 do {
6679 status = ops->fsid_present(inode, cred);
6680 if (status != -NFS4ERR_DELAY)
6681 break;
6682 nfs4_handle_exception(server, status, &exception);
6683 } while (exception.retry);
6684 return status;
6685 }
6686
6687 /*
6688 * If 'use_integrity' is true and the state management nfs_client
6689 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
6690 * and the machine credential as per RFC3530bis and RFC5661 Security
6691 * Considerations sections. Otherwise, just use the user cred with the
6692 * filesystem's rpc_client.
6693 */
6694 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
6695 {
6696 int status;
6697 struct nfs4_secinfo_arg args = {
6698 .dir_fh = NFS_FH(dir),
6699 .name = name,
6700 };
6701 struct nfs4_secinfo_res res = {
6702 .flavors = flavors,
6703 };
6704 struct rpc_message msg = {
6705 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
6706 .rpc_argp = &args,
6707 .rpc_resp = &res,
6708 };
6709 struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
6710 struct rpc_cred *cred = NULL;
6711
6712 if (use_integrity) {
6713 clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient;
6714 cred = nfs4_get_clid_cred(NFS_SERVER(dir)->nfs_client);
6715 msg.rpc_cred = cred;
6716 }
6717
6718 dprintk("NFS call secinfo %s\n", name->name);
6719
6720 nfs4_state_protect(NFS_SERVER(dir)->nfs_client,
6721 NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
6722
6723 status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args,
6724 &res.seq_res, 0);
6725 dprintk("NFS reply secinfo: %d\n", status);
6726
6727 if (cred)
6728 put_rpccred(cred);
6729
6730 return status;
6731 }
6732
6733 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
6734 struct nfs4_secinfo_flavors *flavors)
6735 {
6736 struct nfs4_exception exception = { };
6737 int err;
6738 do {
6739 err = -NFS4ERR_WRONGSEC;
6740
6741 /* try to use integrity protection with machine cred */
6742 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client))
6743 err = _nfs4_proc_secinfo(dir, name, flavors, true);
6744
6745 /*
6746 * if unable to use integrity protection, or SECINFO with
6747 * integrity protection returns NFS4ERR_WRONGSEC (which is
6748 * disallowed by spec, but exists in deployed servers) use
6749 * the current filesystem's rpc_client and the user cred.
6750 */
6751 if (err == -NFS4ERR_WRONGSEC)
6752 err = _nfs4_proc_secinfo(dir, name, flavors, false);
6753
6754 trace_nfs4_secinfo(dir, name, err);
6755 err = nfs4_handle_exception(NFS_SERVER(dir), err,
6756 &exception);
6757 } while (exception.retry);
6758 return err;
6759 }
6760
6761 #ifdef CONFIG_NFS_V4_1
6762 /*
6763 * Check the exchange flags returned by the server for invalid flags, having
6764 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or
6765 * DS flags set.
6766 */
6767 static int nfs4_check_cl_exchange_flags(u32 flags)
6768 {
6769 if (flags & ~EXCHGID4_FLAG_MASK_R)
6770 goto out_inval;
6771 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
6772 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
6773 goto out_inval;
6774 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
6775 goto out_inval;
6776 return NFS_OK;
6777 out_inval:
6778 return -NFS4ERR_INVAL;
6779 }
6780
6781 static bool
6782 nfs41_same_server_scope(struct nfs41_server_scope *a,
6783 struct nfs41_server_scope *b)
6784 {
6785 if (a->server_scope_sz == b->server_scope_sz &&
6786 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
6787 return true;
6788
6789 return false;
6790 }
6791
6792 /*
6793 * nfs4_proc_bind_conn_to_session()
6794 *
6795 * The 4.1 client currently uses the same TCP connection for the
6796 * fore and backchannel.
6797 */
6798 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
6799 {
6800 int status;
6801 struct nfs41_bind_conn_to_session_args args = {
6802 .client = clp,
6803 .dir = NFS4_CDFC4_FORE_OR_BOTH,
6804 };
6805 struct nfs41_bind_conn_to_session_res res;
6806 struct rpc_message msg = {
6807 .rpc_proc =
6808 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
6809 .rpc_argp = &args,
6810 .rpc_resp = &res,
6811 .rpc_cred = cred,
6812 };
6813
6814 dprintk("--> %s\n", __func__);
6815
6816 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
6817 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
6818 args.dir = NFS4_CDFC4_FORE;
6819
6820 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
6821 trace_nfs4_bind_conn_to_session(clp, status);
6822 if (status == 0) {
6823 if (memcmp(res.sessionid.data,
6824 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
6825 dprintk("NFS: %s: Session ID mismatch\n", __func__);
6826 status = -EIO;
6827 goto out;
6828 }
6829 if ((res.dir & args.dir) != res.dir || res.dir == 0) {
6830 dprintk("NFS: %s: Unexpected direction from server\n",
6831 __func__);
6832 status = -EIO;
6833 goto out;
6834 }
6835 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
6836 dprintk("NFS: %s: Server returned RDMA mode = true\n",
6837 __func__);
6838 status = -EIO;
6839 goto out;
6840 }
6841 }
6842 out:
6843 dprintk("<-- %s status= %d\n", __func__, status);
6844 return status;
6845 }
6846
6847 /*
6848 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map
6849 * and operations we'd like to see to enable certain features in the allow map
6850 */
6851 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = {
6852 .how = SP4_MACH_CRED,
6853 .enforce.u.words = {
6854 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
6855 1 << (OP_EXCHANGE_ID - 32) |
6856 1 << (OP_CREATE_SESSION - 32) |
6857 1 << (OP_DESTROY_SESSION - 32) |
6858 1 << (OP_DESTROY_CLIENTID - 32)
6859 },
6860 .allow.u.words = {
6861 [0] = 1 << (OP_CLOSE) |
6862 1 << (OP_LOCKU) |
6863 1 << (OP_COMMIT),
6864 [1] = 1 << (OP_SECINFO - 32) |
6865 1 << (OP_SECINFO_NO_NAME - 32) |
6866 1 << (OP_TEST_STATEID - 32) |
6867 1 << (OP_FREE_STATEID - 32) |
6868 1 << (OP_WRITE - 32)
6869 }
6870 };
6871
6872 /*
6873 * Select the state protection mode for client `clp' given the server results
6874 * from exchange_id in `sp'.
6875 *
6876 * Returns 0 on success, negative errno otherwise.
6877 */
6878 static int nfs4_sp4_select_mode(struct nfs_client *clp,
6879 struct nfs41_state_protection *sp)
6880 {
6881 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = {
6882 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
6883 1 << (OP_EXCHANGE_ID - 32) |
6884 1 << (OP_CREATE_SESSION - 32) |
6885 1 << (OP_DESTROY_SESSION - 32) |
6886 1 << (OP_DESTROY_CLIENTID - 32)
6887 };
6888 unsigned int i;
6889
6890 if (sp->how == SP4_MACH_CRED) {
6891 /* Print state protect result */
6892 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n");
6893 for (i = 0; i <= LAST_NFS4_OP; i++) {
6894 if (test_bit(i, sp->enforce.u.longs))
6895 dfprintk(MOUNT, " enforce op %d\n", i);
6896 if (test_bit(i, sp->allow.u.longs))
6897 dfprintk(MOUNT, " allow op %d\n", i);
6898 }
6899
6900 /* make sure nothing is on enforce list that isn't supported */
6901 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) {
6902 if (sp->enforce.u.words[i] & ~supported_enforce[i]) {
6903 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
6904 return -EINVAL;
6905 }
6906 }
6907
6908 /*
6909 * Minimal mode - state operations are allowed to use machine
6910 * credential. Note this already happens by default, so the
6911 * client doesn't have to do anything more than the negotiation.
6912 *
6913 * NOTE: we don't care if EXCHANGE_ID is in the list -
6914 * we're already using the machine cred for exchange_id
6915 * and will never use a different cred.
6916 */
6917 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) &&
6918 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) &&
6919 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) &&
6920 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) {
6921 dfprintk(MOUNT, "sp4_mach_cred:\n");
6922 dfprintk(MOUNT, " minimal mode enabled\n");
6923 set_bit(NFS_SP4_MACH_CRED_MINIMAL, &clp->cl_sp4_flags);
6924 } else {
6925 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
6926 return -EINVAL;
6927 }
6928
6929 if (test_bit(OP_CLOSE, sp->allow.u.longs) &&
6930 test_bit(OP_LOCKU, sp->allow.u.longs)) {
6931 dfprintk(MOUNT, " cleanup mode enabled\n");
6932 set_bit(NFS_SP4_MACH_CRED_CLEANUP, &clp->cl_sp4_flags);
6933 }
6934
6935 if (test_bit(OP_SECINFO, sp->allow.u.longs) &&
6936 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) {
6937 dfprintk(MOUNT, " secinfo mode enabled\n");
6938 set_bit(NFS_SP4_MACH_CRED_SECINFO, &clp->cl_sp4_flags);
6939 }
6940
6941 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) &&
6942 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) {
6943 dfprintk(MOUNT, " stateid mode enabled\n");
6944 set_bit(NFS_SP4_MACH_CRED_STATEID, &clp->cl_sp4_flags);
6945 }
6946
6947 if (test_bit(OP_WRITE, sp->allow.u.longs)) {
6948 dfprintk(MOUNT, " write mode enabled\n");
6949 set_bit(NFS_SP4_MACH_CRED_WRITE, &clp->cl_sp4_flags);
6950 }
6951
6952 if (test_bit(OP_COMMIT, sp->allow.u.longs)) {
6953 dfprintk(MOUNT, " commit mode enabled\n");
6954 set_bit(NFS_SP4_MACH_CRED_COMMIT, &clp->cl_sp4_flags);
6955 }
6956 }
6957
6958 return 0;
6959 }
6960
6961 /*
6962 * _nfs4_proc_exchange_id()
6963 *
6964 * Wrapper for EXCHANGE_ID operation.
6965 */
6966 static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
6967 u32 sp4_how)
6968 {
6969 nfs4_verifier verifier;
6970 struct nfs41_exchange_id_args args = {
6971 .verifier = &verifier,
6972 .client = clp,
6973 #ifdef CONFIG_NFS_V4_1_MIGRATION
6974 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
6975 EXCHGID4_FLAG_BIND_PRINC_STATEID |
6976 EXCHGID4_FLAG_SUPP_MOVED_MIGR,
6977 #else
6978 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
6979 EXCHGID4_FLAG_BIND_PRINC_STATEID,
6980 #endif
6981 };
6982 struct nfs41_exchange_id_res res = {
6983 0
6984 };
6985 int status;
6986 struct rpc_message msg = {
6987 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
6988 .rpc_argp = &args,
6989 .rpc_resp = &res,
6990 .rpc_cred = cred,
6991 };
6992
6993 nfs4_init_boot_verifier(clp, &verifier);
6994
6995 status = nfs4_init_uniform_client_string(clp);
6996 if (status)
6997 goto out;
6998
6999 dprintk("NFS call exchange_id auth=%s, '%s'\n",
7000 clp->cl_rpcclient->cl_auth->au_ops->au_name,
7001 clp->cl_owner_id);
7002
7003 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
7004 GFP_NOFS);
7005 if (unlikely(res.server_owner == NULL)) {
7006 status = -ENOMEM;
7007 goto out;
7008 }
7009
7010 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
7011 GFP_NOFS);
7012 if (unlikely(res.server_scope == NULL)) {
7013 status = -ENOMEM;
7014 goto out_server_owner;
7015 }
7016
7017 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
7018 if (unlikely(res.impl_id == NULL)) {
7019 status = -ENOMEM;
7020 goto out_server_scope;
7021 }
7022
7023 switch (sp4_how) {
7024 case SP4_NONE:
7025 args.state_protect.how = SP4_NONE;
7026 break;
7027
7028 case SP4_MACH_CRED:
7029 args.state_protect = nfs4_sp4_mach_cred_request;
7030 break;
7031
7032 default:
7033 /* unsupported! */
7034 WARN_ON_ONCE(1);
7035 status = -EINVAL;
7036 goto out_impl_id;
7037 }
7038
7039 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7040 trace_nfs4_exchange_id(clp, status);
7041 if (status == 0)
7042 status = nfs4_check_cl_exchange_flags(res.flags);
7043
7044 if (status == 0)
7045 status = nfs4_sp4_select_mode(clp, &res.state_protect);
7046
7047 if (status == 0) {
7048 clp->cl_clientid = res.clientid;
7049 clp->cl_exchange_flags = res.flags;
7050 /* Client ID is not confirmed */
7051 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) {
7052 clear_bit(NFS4_SESSION_ESTABLISHED,
7053 &clp->cl_session->session_state);
7054 clp->cl_seqid = res.seqid;
7055 }
7056
7057 kfree(clp->cl_serverowner);
7058 clp->cl_serverowner = res.server_owner;
7059 res.server_owner = NULL;
7060
7061 /* use the most recent implementation id */
7062 kfree(clp->cl_implid);
7063 clp->cl_implid = res.impl_id;
7064 res.impl_id = NULL;
7065
7066 if (clp->cl_serverscope != NULL &&
7067 !nfs41_same_server_scope(clp->cl_serverscope,
7068 res.server_scope)) {
7069 dprintk("%s: server_scope mismatch detected\n",
7070 __func__);
7071 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
7072 kfree(clp->cl_serverscope);
7073 clp->cl_serverscope = NULL;
7074 }
7075
7076 if (clp->cl_serverscope == NULL) {
7077 clp->cl_serverscope = res.server_scope;
7078 res.server_scope = NULL;
7079 }
7080 }
7081
7082 out_impl_id:
7083 kfree(res.impl_id);
7084 out_server_scope:
7085 kfree(res.server_scope);
7086 out_server_owner:
7087 kfree(res.server_owner);
7088 out:
7089 if (clp->cl_implid != NULL)
7090 dprintk("NFS reply exchange_id: Server Implementation ID: "
7091 "domain: %s, name: %s, date: %llu,%u\n",
7092 clp->cl_implid->domain, clp->cl_implid->name,
7093 clp->cl_implid->date.seconds,
7094 clp->cl_implid->date.nseconds);
7095 dprintk("NFS reply exchange_id: %d\n", status);
7096 return status;
7097 }
7098
7099 /*
7100 * nfs4_proc_exchange_id()
7101 *
7102 * Returns zero, a negative errno, or a negative NFS4ERR status code.
7103 *
7104 * Since the clientid has expired, all compounds using sessions
7105 * associated with the stale clientid will be returning
7106 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
7107 * be in some phase of session reset.
7108 *
7109 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
7110 */
7111 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
7112 {
7113 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
7114 int status;
7115
7116 /* try SP4_MACH_CRED if krb5i/p */
7117 if (authflavor == RPC_AUTH_GSS_KRB5I ||
7118 authflavor == RPC_AUTH_GSS_KRB5P) {
7119 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
7120 if (!status)
7121 return 0;
7122 }
7123
7124 /* try SP4_NONE */
7125 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
7126 }
7127
7128 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
7129 struct rpc_cred *cred)
7130 {
7131 struct rpc_message msg = {
7132 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
7133 .rpc_argp = clp,
7134 .rpc_cred = cred,
7135 };
7136 int status;
7137
7138 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7139 trace_nfs4_destroy_clientid(clp, status);
7140 if (status)
7141 dprintk("NFS: Got error %d from the server %s on "
7142 "DESTROY_CLIENTID.", status, clp->cl_hostname);
7143 return status;
7144 }
7145
7146 static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
7147 struct rpc_cred *cred)
7148 {
7149 unsigned int loop;
7150 int ret;
7151
7152 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
7153 ret = _nfs4_proc_destroy_clientid(clp, cred);
7154 switch (ret) {
7155 case -NFS4ERR_DELAY:
7156 case -NFS4ERR_CLIENTID_BUSY:
7157 ssleep(1);
7158 break;
7159 default:
7160 return ret;
7161 }
7162 }
7163 return 0;
7164 }
7165
7166 int nfs4_destroy_clientid(struct nfs_client *clp)
7167 {
7168 struct rpc_cred *cred;
7169 int ret = 0;
7170
7171 if (clp->cl_mvops->minor_version < 1)
7172 goto out;
7173 if (clp->cl_exchange_flags == 0)
7174 goto out;
7175 if (clp->cl_preserve_clid)
7176 goto out;
7177 cred = nfs4_get_clid_cred(clp);
7178 ret = nfs4_proc_destroy_clientid(clp, cred);
7179 if (cred)
7180 put_rpccred(cred);
7181 switch (ret) {
7182 case 0:
7183 case -NFS4ERR_STALE_CLIENTID:
7184 clp->cl_exchange_flags = 0;
7185 }
7186 out:
7187 return ret;
7188 }
7189
7190 struct nfs4_get_lease_time_data {
7191 struct nfs4_get_lease_time_args *args;
7192 struct nfs4_get_lease_time_res *res;
7193 struct nfs_client *clp;
7194 };
7195
7196 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
7197 void *calldata)
7198 {
7199 struct nfs4_get_lease_time_data *data =
7200 (struct nfs4_get_lease_time_data *)calldata;
7201
7202 dprintk("--> %s\n", __func__);
7203 /* just setup sequence, do not trigger session recovery
7204 since we're invoked within one */
7205 nfs41_setup_sequence(data->clp->cl_session,
7206 &data->args->la_seq_args,
7207 &data->res->lr_seq_res,
7208 task);
7209 dprintk("<-- %s\n", __func__);
7210 }
7211
7212 /*
7213 * Called from nfs4_state_manager thread for session setup, so don't recover
7214 * from sequence operation or clientid errors.
7215 */
7216 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
7217 {
7218 struct nfs4_get_lease_time_data *data =
7219 (struct nfs4_get_lease_time_data *)calldata;
7220
7221 dprintk("--> %s\n", __func__);
7222 if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
7223 return;
7224 switch (task->tk_status) {
7225 case -NFS4ERR_DELAY:
7226 case -NFS4ERR_GRACE:
7227 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
7228 rpc_delay(task, NFS4_POLL_RETRY_MIN);
7229 task->tk_status = 0;
7230 /* fall through */
7231 case -NFS4ERR_RETRY_UNCACHED_REP:
7232 rpc_restart_call_prepare(task);
7233 return;
7234 }
7235 dprintk("<-- %s\n", __func__);
7236 }
7237
7238 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
7239 .rpc_call_prepare = nfs4_get_lease_time_prepare,
7240 .rpc_call_done = nfs4_get_lease_time_done,
7241 };
7242
7243 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
7244 {
7245 struct rpc_task *task;
7246 struct nfs4_get_lease_time_args args;
7247 struct nfs4_get_lease_time_res res = {
7248 .lr_fsinfo = fsinfo,
7249 };
7250 struct nfs4_get_lease_time_data data = {
7251 .args = &args,
7252 .res = &res,
7253 .clp = clp,
7254 };
7255 struct rpc_message msg = {
7256 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
7257 .rpc_argp = &args,
7258 .rpc_resp = &res,
7259 };
7260 struct rpc_task_setup task_setup = {
7261 .rpc_client = clp->cl_rpcclient,
7262 .rpc_message = &msg,
7263 .callback_ops = &nfs4_get_lease_time_ops,
7264 .callback_data = &data,
7265 .flags = RPC_TASK_TIMEOUT,
7266 };
7267 int status;
7268
7269 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
7270 nfs4_set_sequence_privileged(&args.la_seq_args);
7271 dprintk("--> %s\n", __func__);
7272 task = rpc_run_task(&task_setup);
7273
7274 if (IS_ERR(task))
7275 status = PTR_ERR(task);
7276 else {
7277 status = task->tk_status;
7278 rpc_put_task(task);
7279 }
7280 dprintk("<-- %s return %d\n", __func__, status);
7281
7282 return status;
7283 }
7284
7285 /*
7286 * Initialize the values to be used by the client in CREATE_SESSION
7287 * If nfs4_init_session set the fore channel request and response sizes,
7288 * use them.
7289 *
7290 * Set the back channel max_resp_sz_cached to zero to force the client to
7291 * always set csa_cachethis to FALSE because the current implementation
7292 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
7293 */
7294 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
7295 {
7296 unsigned int max_rqst_sz, max_resp_sz;
7297
7298 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
7299 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
7300
7301 /* Fore channel attributes */
7302 args->fc_attrs.max_rqst_sz = max_rqst_sz;
7303 args->fc_attrs.max_resp_sz = max_resp_sz;
7304 args->fc_attrs.max_ops = NFS4_MAX_OPS;
7305 args->fc_attrs.max_reqs = max_session_slots;
7306
7307 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
7308 "max_ops=%u max_reqs=%u\n",
7309 __func__,
7310 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
7311 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
7312
7313 /* Back channel attributes */
7314 args->bc_attrs.max_rqst_sz = PAGE_SIZE;
7315 args->bc_attrs.max_resp_sz = PAGE_SIZE;
7316 args->bc_attrs.max_resp_sz_cached = 0;
7317 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
7318 args->bc_attrs.max_reqs = 1;
7319
7320 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
7321 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
7322 __func__,
7323 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
7324 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
7325 args->bc_attrs.max_reqs);
7326 }
7327
7328 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
7329 struct nfs41_create_session_res *res)
7330 {
7331 struct nfs4_channel_attrs *sent = &args->fc_attrs;
7332 struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
7333
7334 if (rcvd->max_resp_sz > sent->max_resp_sz)
7335 return -EINVAL;
7336 /*
7337 * Our requested max_ops is the minimum we need; we're not
7338 * prepared to break up compounds into smaller pieces than that.
7339 * So, no point even trying to continue if the server won't
7340 * cooperate:
7341 */
7342 if (rcvd->max_ops < sent->max_ops)
7343 return -EINVAL;
7344 if (rcvd->max_reqs == 0)
7345 return -EINVAL;
7346 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
7347 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
7348 return 0;
7349 }
7350
7351 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
7352 struct nfs41_create_session_res *res)
7353 {
7354 struct nfs4_channel_attrs *sent = &args->bc_attrs;
7355 struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
7356
7357 if (!(res->flags & SESSION4_BACK_CHAN))
7358 goto out;
7359 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
7360 return -EINVAL;
7361 if (rcvd->max_resp_sz < sent->max_resp_sz)
7362 return -EINVAL;
7363 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
7364 return -EINVAL;
7365 /* These would render the backchannel useless: */
7366 if (rcvd->max_ops != sent->max_ops)
7367 return -EINVAL;
7368 if (rcvd->max_reqs != sent->max_reqs)
7369 return -EINVAL;
7370 out:
7371 return 0;
7372 }
7373
7374 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
7375 struct nfs41_create_session_res *res)
7376 {
7377 int ret;
7378
7379 ret = nfs4_verify_fore_channel_attrs(args, res);
7380 if (ret)
7381 return ret;
7382 return nfs4_verify_back_channel_attrs(args, res);
7383 }
7384
7385 static void nfs4_update_session(struct nfs4_session *session,
7386 struct nfs41_create_session_res *res)
7387 {
7388 nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
7389 /* Mark client id and session as being confirmed */
7390 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
7391 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
7392 session->flags = res->flags;
7393 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
7394 if (res->flags & SESSION4_BACK_CHAN)
7395 memcpy(&session->bc_attrs, &res->bc_attrs,
7396 sizeof(session->bc_attrs));
7397 }
7398
7399 static int _nfs4_proc_create_session(struct nfs_client *clp,
7400 struct rpc_cred *cred)
7401 {
7402 struct nfs4_session *session = clp->cl_session;
7403 struct nfs41_create_session_args args = {
7404 .client = clp,
7405 .clientid = clp->cl_clientid,
7406 .seqid = clp->cl_seqid,
7407 .cb_program = NFS4_CALLBACK,
7408 };
7409 struct nfs41_create_session_res res;
7410
7411 struct rpc_message msg = {
7412 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
7413 .rpc_argp = &args,
7414 .rpc_resp = &res,
7415 .rpc_cred = cred,
7416 };
7417 int status;
7418
7419 nfs4_init_channel_attrs(&args);
7420 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
7421
7422 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7423 trace_nfs4_create_session(clp, status);
7424
7425 if (!status) {
7426 /* Verify the session's negotiated channel_attrs values */
7427 status = nfs4_verify_channel_attrs(&args, &res);
7428 /* Increment the clientid slot sequence id */
7429 if (clp->cl_seqid == res.seqid)
7430 clp->cl_seqid++;
7431 if (status)
7432 goto out;
7433 nfs4_update_session(session, &res);
7434 }
7435 out:
7436 return status;
7437 }
7438
7439 /*
7440 * Issues a CREATE_SESSION operation to the server.
7441 * It is the responsibility of the caller to verify the session is
7442 * expired before calling this routine.
7443 */
7444 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
7445 {
7446 int status;
7447 unsigned *ptr;
7448 struct nfs4_session *session = clp->cl_session;
7449
7450 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
7451
7452 status = _nfs4_proc_create_session(clp, cred);
7453 if (status)
7454 goto out;
7455
7456 /* Init or reset the session slot tables */
7457 status = nfs4_setup_session_slot_tables(session);
7458 dprintk("slot table setup returned %d\n", status);
7459 if (status)
7460 goto out;
7461
7462 ptr = (unsigned *)&session->sess_id.data[0];
7463 dprintk("%s clp->cl_seqid %d sessionid %u:%u:%u:%u\n", __func__,
7464 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
7465 out:
7466 dprintk("<-- %s\n", __func__);
7467 return status;
7468 }
7469
7470 /*
7471 * Issue the over-the-wire RPC DESTROY_SESSION.
7472 * The caller must serialize access to this routine.
7473 */
7474 int nfs4_proc_destroy_session(struct nfs4_session *session,
7475 struct rpc_cred *cred)
7476 {
7477 struct rpc_message msg = {
7478 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
7479 .rpc_argp = session,
7480 .rpc_cred = cred,
7481 };
7482 int status = 0;
7483
7484 dprintk("--> nfs4_proc_destroy_session\n");
7485
7486 /* session is still being set up */
7487 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
7488 return 0;
7489
7490 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7491 trace_nfs4_destroy_session(session->clp, status);
7492
7493 if (status)
7494 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
7495 "Session has been destroyed regardless...\n", status);
7496
7497 dprintk("<-- nfs4_proc_destroy_session\n");
7498 return status;
7499 }
7500
7501 /*
7502 * Renew the cl_session lease.
7503 */
7504 struct nfs4_sequence_data {
7505 struct nfs_client *clp;
7506 struct nfs4_sequence_args args;
7507 struct nfs4_sequence_res res;
7508 };
7509
7510 static void nfs41_sequence_release(void *data)
7511 {
7512 struct nfs4_sequence_data *calldata = data;
7513 struct nfs_client *clp = calldata->clp;
7514
7515 if (atomic_read(&clp->cl_count) > 1)
7516 nfs4_schedule_state_renewal(clp);
7517 nfs_put_client(clp);
7518 kfree(calldata);
7519 }
7520
7521 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
7522 {
7523 switch(task->tk_status) {
7524 case -NFS4ERR_DELAY:
7525 rpc_delay(task, NFS4_POLL_RETRY_MAX);
7526 return -EAGAIN;
7527 default:
7528 nfs4_schedule_lease_recovery(clp);
7529 }
7530 return 0;
7531 }
7532
7533 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
7534 {
7535 struct nfs4_sequence_data *calldata = data;
7536 struct nfs_client *clp = calldata->clp;
7537
7538 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
7539 return;
7540
7541 trace_nfs4_sequence(clp, task->tk_status);
7542 if (task->tk_status < 0) {
7543 dprintk("%s ERROR %d\n", __func__, task->tk_status);
7544 if (atomic_read(&clp->cl_count) == 1)
7545 goto out;
7546
7547 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
7548 rpc_restart_call_prepare(task);
7549 return;
7550 }
7551 }
7552 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
7553 out:
7554 dprintk("<-- %s\n", __func__);
7555 }
7556
7557 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
7558 {
7559 struct nfs4_sequence_data *calldata = data;
7560 struct nfs_client *clp = calldata->clp;
7561 struct nfs4_sequence_args *args;
7562 struct nfs4_sequence_res *res;
7563
7564 args = task->tk_msg.rpc_argp;
7565 res = task->tk_msg.rpc_resp;
7566
7567 nfs41_setup_sequence(clp->cl_session, args, res, task);
7568 }
7569
7570 static const struct rpc_call_ops nfs41_sequence_ops = {
7571 .rpc_call_done = nfs41_sequence_call_done,
7572 .rpc_call_prepare = nfs41_sequence_prepare,
7573 .rpc_release = nfs41_sequence_release,
7574 };
7575
7576 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
7577 struct rpc_cred *cred,
7578 bool is_privileged)
7579 {
7580 struct nfs4_sequence_data *calldata;
7581 struct rpc_message msg = {
7582 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
7583 .rpc_cred = cred,
7584 };
7585 struct rpc_task_setup task_setup_data = {
7586 .rpc_client = clp->cl_rpcclient,
7587 .rpc_message = &msg,
7588 .callback_ops = &nfs41_sequence_ops,
7589 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
7590 };
7591
7592 if (!atomic_inc_not_zero(&clp->cl_count))
7593 return ERR_PTR(-EIO);
7594 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7595 if (calldata == NULL) {
7596 nfs_put_client(clp);
7597 return ERR_PTR(-ENOMEM);
7598 }
7599 nfs4_init_sequence(&calldata->args, &calldata->res, 0);
7600 if (is_privileged)
7601 nfs4_set_sequence_privileged(&calldata->args);
7602 msg.rpc_argp = &calldata->args;
7603 msg.rpc_resp = &calldata->res;
7604 calldata->clp = clp;
7605 task_setup_data.callback_data = calldata;
7606
7607 return rpc_run_task(&task_setup_data);
7608 }
7609
7610 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
7611 {
7612 struct rpc_task *task;
7613 int ret = 0;
7614
7615 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
7616 return -EAGAIN;
7617 task = _nfs41_proc_sequence(clp, cred, false);
7618 if (IS_ERR(task))
7619 ret = PTR_ERR(task);
7620 else
7621 rpc_put_task_async(task);
7622 dprintk("<-- %s status=%d\n", __func__, ret);
7623 return ret;
7624 }
7625
7626 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
7627 {
7628 struct rpc_task *task;
7629 int ret;
7630
7631 task = _nfs41_proc_sequence(clp, cred, true);
7632 if (IS_ERR(task)) {
7633 ret = PTR_ERR(task);
7634 goto out;
7635 }
7636 ret = rpc_wait_for_completion_task(task);
7637 if (!ret)
7638 ret = task->tk_status;
7639 rpc_put_task(task);
7640 out:
7641 dprintk("<-- %s status=%d\n", __func__, ret);
7642 return ret;
7643 }
7644
7645 struct nfs4_reclaim_complete_data {
7646 struct nfs_client *clp;
7647 struct nfs41_reclaim_complete_args arg;
7648 struct nfs41_reclaim_complete_res res;
7649 };
7650
7651 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
7652 {
7653 struct nfs4_reclaim_complete_data *calldata = data;
7654
7655 nfs41_setup_sequence(calldata->clp->cl_session,
7656 &calldata->arg.seq_args,
7657 &calldata->res.seq_res,
7658 task);
7659 }
7660
7661 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
7662 {
7663 switch(task->tk_status) {
7664 case 0:
7665 case -NFS4ERR_COMPLETE_ALREADY:
7666 case -NFS4ERR_WRONG_CRED: /* What to do here? */
7667 break;
7668 case -NFS4ERR_DELAY:
7669 rpc_delay(task, NFS4_POLL_RETRY_MAX);
7670 /* fall through */
7671 case -NFS4ERR_RETRY_UNCACHED_REP:
7672 return -EAGAIN;
7673 default:
7674 nfs4_schedule_lease_recovery(clp);
7675 }
7676 return 0;
7677 }
7678
7679 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
7680 {
7681 struct nfs4_reclaim_complete_data *calldata = data;
7682 struct nfs_client *clp = calldata->clp;
7683 struct nfs4_sequence_res *res = &calldata->res.seq_res;
7684
7685 dprintk("--> %s\n", __func__);
7686 if (!nfs41_sequence_done(task, res))
7687 return;
7688
7689 trace_nfs4_reclaim_complete(clp, task->tk_status);
7690 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
7691 rpc_restart_call_prepare(task);
7692 return;
7693 }
7694 dprintk("<-- %s\n", __func__);
7695 }
7696
7697 static void nfs4_free_reclaim_complete_data(void *data)
7698 {
7699 struct nfs4_reclaim_complete_data *calldata = data;
7700
7701 kfree(calldata);
7702 }
7703
7704 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
7705 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
7706 .rpc_call_done = nfs4_reclaim_complete_done,
7707 .rpc_release = nfs4_free_reclaim_complete_data,
7708 };
7709
7710 /*
7711 * Issue a global reclaim complete.
7712 */
7713 static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
7714 struct rpc_cred *cred)
7715 {
7716 struct nfs4_reclaim_complete_data *calldata;
7717 struct rpc_task *task;
7718 struct rpc_message msg = {
7719 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
7720 .rpc_cred = cred,
7721 };
7722 struct rpc_task_setup task_setup_data = {
7723 .rpc_client = clp->cl_rpcclient,
7724 .rpc_message = &msg,
7725 .callback_ops = &nfs4_reclaim_complete_call_ops,
7726 .flags = RPC_TASK_ASYNC,
7727 };
7728 int status = -ENOMEM;
7729
7730 dprintk("--> %s\n", __func__);
7731 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7732 if (calldata == NULL)
7733 goto out;
7734 calldata->clp = clp;
7735 calldata->arg.one_fs = 0;
7736
7737 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
7738 nfs4_set_sequence_privileged(&calldata->arg.seq_args);
7739 msg.rpc_argp = &calldata->arg;
7740 msg.rpc_resp = &calldata->res;
7741 task_setup_data.callback_data = calldata;
7742 task = rpc_run_task(&task_setup_data);
7743 if (IS_ERR(task)) {
7744 status = PTR_ERR(task);
7745 goto out;
7746 }
7747 status = nfs4_wait_for_completion_rpc_task(task);
7748 if (status == 0)
7749 status = task->tk_status;
7750 rpc_put_task(task);
7751 return 0;
7752 out:
7753 dprintk("<-- %s status=%d\n", __func__, status);
7754 return status;
7755 }
7756
7757 static void
7758 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
7759 {
7760 struct nfs4_layoutget *lgp = calldata;
7761 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
7762 struct nfs4_session *session = nfs4_get_session(server);
7763
7764 dprintk("--> %s\n", __func__);
7765 /* Note there is a race here, where a CB_LAYOUTRECALL can come in
7766 * right now covering the LAYOUTGET we are about to send.
7767 * However, that is not so catastrophic, and there seems
7768 * to be no way to prevent it completely.
7769 */
7770 if (nfs41_setup_sequence(session, &lgp->args.seq_args,
7771 &lgp->res.seq_res, task))
7772 return;
7773 if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
7774 NFS_I(lgp->args.inode)->layout,
7775 &lgp->args.range,
7776 lgp->args.ctx->state)) {
7777 rpc_exit(task, NFS4_OK);
7778 }
7779 }
7780
7781 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
7782 {
7783 struct nfs4_layoutget *lgp = calldata;
7784 struct inode *inode = lgp->args.inode;
7785 struct nfs_server *server = NFS_SERVER(inode);
7786 struct pnfs_layout_hdr *lo;
7787 struct nfs4_state *state = NULL;
7788 unsigned long timeo, now, giveup;
7789
7790 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
7791
7792 if (!nfs41_sequence_done(task, &lgp->res.seq_res))
7793 goto out;
7794
7795 switch (task->tk_status) {
7796 case 0:
7797 goto out;
7798
7799 /*
7800 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs
7801 * on the file. Set tk_status to -ENODATA to tell the upper layer to
7802 * retry the I/O inband through the MDS.
7803 */
7804 case -NFS4ERR_LAYOUTUNAVAILABLE:
7805 task->tk_status = -ENODATA;
7806 goto out;
7807 /*
7808 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of
7809 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3).
7810 */
7811 case -NFS4ERR_BADLAYOUT:
7812 goto out_overflow;
7813 /*
7814 * NFS4ERR_LAYOUTTRYLATER indicates a conflict with another client
7815 * (or clients) writing to the same RAID stripe, except when the
7816 * minlength argument is 0 (see RFC5661 section 18.43.3).
7817 */
7818 case -NFS4ERR_LAYOUTTRYLATER:
7819 if (lgp->args.minlength == 0)
7820 goto out_overflow;
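/* Fall through: a non-zero minlength gets the RECALLCONFLICT backoff below */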
7821 /*
7822 * NFS4ERR_RECALLCONFLICT means a conflict with ourselves: we must
7823 * recall an existing layout before getting a new one.
7824 */
7825 case -NFS4ERR_RECALLCONFLICT:
7826 timeo = rpc_get_timeout(task->tk_client);
7827 giveup = lgp->args.timestamp + timeo;
7828 now = jiffies;
7829 if (time_after(giveup, now)) {
7830 unsigned long delay;
7831
7832 /* Delay for:
7833 * - not less than NFS4_POLL_RETRY_MIN,
7834 * - one last retry a jiffy before we give up,
7835 * - exponential backoff (time_now minus start_attempt).
7836 */
7837 delay = max_t(unsigned long, NFS4_POLL_RETRY_MIN,
7838 min((giveup - now - 1),
7839 now - lgp->args.timestamp));
7840
7841 dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n",
7842 __func__, delay);
7843 rpc_delay(task, delay);
7844 /* Do not call nfs4_async_handle_error() */
7845 goto out_restart;
7846 }
7847 break;
7848 case -NFS4ERR_EXPIRED:
7849 case -NFS4ERR_BAD_STATEID:
7850 spin_lock(&inode->i_lock);
7851 if (nfs4_stateid_match(&lgp->args.stateid,
7852 &lgp->args.ctx->state->stateid)) {
7853 spin_unlock(&inode->i_lock);
7854 /* If the open stateid was bad, then recover it. */
7855 state = lgp->args.ctx->state;
7856 break;
7857 }
7858 lo = NFS_I(inode)->layout;
7859 if (lo && nfs4_stateid_match(&lgp->args.stateid,
7860 &lo->plh_stateid)) {
7861 LIST_HEAD(head);
7862
7863 /*
7864 * Mark the bad layout state as invalid, then retry
7865 * with the current stateid.
7866 */
7867 set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
7868 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
7869 spin_unlock(&inode->i_lock);
7870 pnfs_free_lseg_list(&head);
7871 } else
7872 spin_unlock(&inode->i_lock);
7873 goto out_restart;
7874 }
7875 if (nfs4_async_handle_error(task, server, state, &lgp->timeout) == -EAGAIN)
7876 goto out_restart;
7877 out:
7878 dprintk("<-- %s\n", __func__);
7879 return;
7880 out_restart:
7881 task->tk_status = 0;
7882 rpc_restart_call_prepare(task);
7883 return;
7884 out_overflow:
7885 task->tk_status = -EOVERFLOW;
7886 goto out;
7887 }
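/*
 * Illustrative sketch (not part of the build): the NFS4ERR_RECALLCONFLICT
 * clamp above, pulled out on its own.  Assuming HZ=1000 for illustration,
 * NFS4_POLL_RETRY_MIN is 100 jiffies; an attempt that started 50 jiffies
 * ago with 5000 jiffies left before give-up waits max(100, min(4999, 50))
 * = 100 jiffies, while one that has already spent 800 jiffies (with time
 * to spare) waits 800.
 */
#if 0
static unsigned long example_recallconflict_delay(unsigned long now,
						  unsigned long start,
						  unsigned long giveup)
{
	/* same arithmetic as nfs4_layoutget_done() above */
	return max_t(unsigned long, NFS4_POLL_RETRY_MIN,
		     min(giveup - now - 1, now - start));
}
#endif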
7888
7889 static size_t max_response_pages(struct nfs_server *server)
7890 {
7891 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
7892 return nfs_page_array_len(0, max_resp_sz);
7893 }
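/*
 * Illustrative sketch (not part of the build): with a 4 KiB PAGE_SIZE and
 * a negotiated max_resp_sz of roughly 1 MiB plus overhead, the LAYOUTGET
 * reply buffer needs on the order of 257 pages.  nfs_page_array_len()
 * does the real calculation; the helper below is an assumed approximation
 * for a zero page offset.
 */
#if 0
static size_t example_reply_pages(u32 max_resp_sz)
{
	return DIV_ROUND_UP(max_resp_sz, PAGE_SIZE);	/* bytes rounded up to pages */
}
#endif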
7894
7895 static void nfs4_free_pages(struct page **pages, size_t size)
7896 {
7897 int i;
7898
7899 if (!pages)
7900 return;
7901
7902 for (i = 0; i < size; i++) {
7903 if (!pages[i])
7904 break;
7905 __free_page(pages[i]);
7906 }
7907 kfree(pages);
7908 }
7909
7910 static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
7911 {
7912 struct page **pages;
7913 int i;
7914
7915 pages = kcalloc(size, sizeof(struct page *), gfp_flags);
7916 if (!pages) {
7917 dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
7918 return NULL;
7919 }
7920
7921 for (i = 0; i < size; i++) {
7922 pages[i] = alloc_page(gfp_flags);
7923 if (!pages[i]) {
7924 dprintk("%s: failed to allocate page\n", __func__);
7925 nfs4_free_pages(pages, size);
7926 return NULL;
7927 }
7928 }
7929
7930 return pages;
7931 }
7932
7933 static void nfs4_layoutget_release(void *calldata)
7934 {
7935 struct nfs4_layoutget *lgp = calldata;
7936 struct inode *inode = lgp->args.inode;
7937 struct nfs_server *server = NFS_SERVER(inode);
7938 size_t max_pages = max_response_pages(server);
7939
7940 dprintk("--> %s\n", __func__);
7941 nfs4_free_pages(lgp->args.layout.pages, max_pages);
7942 pnfs_put_layout_hdr(NFS_I(inode)->layout);
7943 put_nfs_open_context(lgp->args.ctx);
7944 kfree(calldata);
7945 dprintk("<-- %s\n", __func__);
7946 }
7947
7948 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
7949 .rpc_call_prepare = nfs4_layoutget_prepare,
7950 .rpc_call_done = nfs4_layoutget_done,
7951 .rpc_release = nfs4_layoutget_release,
7952 };
7953
7954 struct pnfs_layout_segment *
7955 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
7956 {
7957 struct inode *inode = lgp->args.inode;
7958 struct nfs_server *server = NFS_SERVER(inode);
7959 size_t max_pages = max_response_pages(server);
7960 struct rpc_task *task;
7961 struct rpc_message msg = {
7962 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
7963 .rpc_argp = &lgp->args,
7964 .rpc_resp = &lgp->res,
7965 .rpc_cred = lgp->cred,
7966 };
7967 struct rpc_task_setup task_setup_data = {
7968 .rpc_client = server->client,
7969 .rpc_message = &msg,
7970 .callback_ops = &nfs4_layoutget_call_ops,
7971 .callback_data = lgp,
7972 .flags = RPC_TASK_ASYNC,
7973 };
7974 struct pnfs_layout_segment *lseg = NULL;
7975 int status = 0;
7976
7977 dprintk("--> %s\n", __func__);
7978
7979 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
7980 pnfs_get_layout_hdr(NFS_I(inode)->layout);
7981
7982 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
7983 if (!lgp->args.layout.pages) {
7984 nfs4_layoutget_release(lgp);
7985 return ERR_PTR(-ENOMEM);
7986 }
7987 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
7988 lgp->args.timestamp = jiffies;
7989
7990 lgp->res.layoutp = &lgp->args.layout;
7991 lgp->res.seq_res.sr_slot = NULL;
7992 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
7993
7994 task = rpc_run_task(&task_setup_data);
7995 if (IS_ERR(task))
7996 return ERR_CAST(task);
7997 status = nfs4_wait_for_completion_rpc_task(task);
7998 if (status == 0)
7999 status = task->tk_status;
8000 trace_nfs4_layoutget(lgp->args.ctx,
8001 &lgp->args.range,
8002 &lgp->res.range,
8003 &lgp->res.stateid,
8004 status);
8005 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
8006 if (status == 0 && lgp->res.layoutp->len)
8007 lseg = pnfs_layout_process(lgp);
8008 rpc_put_task(task);
8009 dprintk("<-- %s status=%d\n", __func__, status);
8010 if (status)
8011 return ERR_PTR(status);
8012 return lseg;
8013 }
8014
8015 static void
8016 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
8017 {
8018 struct nfs4_layoutreturn *lrp = calldata;
8019
8020 dprintk("--> %s\n", __func__);
8021 nfs41_setup_sequence(lrp->clp->cl_session,
8022 &lrp->args.seq_args,
8023 &lrp->res.seq_res,
8024 task);
8025 }
8026
8027 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
8028 {
8029 struct nfs4_layoutreturn *lrp = calldata;
8030 struct nfs_server *server;
8031
8032 dprintk("--> %s\n", __func__);
8033
8034 if (!nfs41_sequence_done(task, &lrp->res.seq_res))
8035 return;
8036
8037 server = NFS_SERVER(lrp->args.inode);
8038 switch (task->tk_status) {
8039 default:
8040 task->tk_status = 0;
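/* Fall through: other errors are ignored and the return treated as done */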
8041 case 0:
8042 break;
8043 case -NFS4ERR_DELAY:
8044 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
8045 break;
8046 rpc_restart_call_prepare(task);
8047 return;
8048 }
8049 dprintk("<-- %s\n", __func__);
8050 }
8051
8052 static void nfs4_layoutreturn_release(void *calldata)
8053 {
8054 struct nfs4_layoutreturn *lrp = calldata;
8055 struct pnfs_layout_hdr *lo = lrp->args.layout;
8056 LIST_HEAD(freeme);
8057
8058 dprintk("--> %s\n", __func__);
8059 spin_lock(&lo->plh_inode->i_lock);
8060 if (lrp->res.lrs_present)
8061 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
8062 pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range);
8063 pnfs_clear_layoutreturn_waitbit(lo);
8064 lo->plh_block_lgets--;
8065 spin_unlock(&lo->plh_inode->i_lock);
8066 pnfs_free_lseg_list(&freeme);
8067 pnfs_put_layout_hdr(lrp->args.layout);
8068 nfs_iput_and_deactive(lrp->inode);
8069 kfree(calldata);
8070 dprintk("<-- %s\n", __func__);
8071 }
8072
8073 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
8074 .rpc_call_prepare = nfs4_layoutreturn_prepare,
8075 .rpc_call_done = nfs4_layoutreturn_done,
8076 .rpc_release = nfs4_layoutreturn_release,
8077 };
8078
8079 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
8080 {
8081 struct rpc_task *task;
8082 struct rpc_message msg = {
8083 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
8084 .rpc_argp = &lrp->args,
8085 .rpc_resp = &lrp->res,
8086 .rpc_cred = lrp->cred,
8087 };
8088 struct rpc_task_setup task_setup_data = {
8089 .rpc_client = NFS_SERVER(lrp->args.inode)->client,
8090 .rpc_message = &msg,
8091 .callback_ops = &nfs4_layoutreturn_call_ops,
8092 .callback_data = lrp,
8093 };
8094 int status = 0;
8095
8096 dprintk("--> %s\n", __func__);
8097 if (!sync) {
8098 lrp->inode = nfs_igrab_and_active(lrp->args.inode);
8099 if (!lrp->inode) {
8100 nfs4_layoutreturn_release(lrp);
8101 return -EAGAIN;
8102 }
8103 task_setup_data.flags |= RPC_TASK_ASYNC;
8104 }
8105 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
8106 task = rpc_run_task(&task_setup_data);
8107 if (IS_ERR(task))
8108 return PTR_ERR(task);
8109 if (sync)
8110 status = task->tk_status;
8111 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status);
8112 dprintk("<-- %s status=%d\n", __func__, status);
8113 rpc_put_task(task);
8114 return status;
8115 }
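/*
 * Illustrative sketch (not part of the build): a hypothetical caller
 * returning a whole-file layout synchronously.  Allocating the
 * nfs4_layoutreturn structure and filling in the layout, stateid, inode
 * and cred fields is the caller's responsibility and is omitted here;
 * IOMODE_ANY and NFS4_MAX_UINT64 come from the pNFS headers.
 */
#if 0
static int example_return_whole_file(struct nfs4_layoutreturn *lrp)
{
	lrp->args.range.iomode = IOMODE_ANY;
	lrp->args.range.offset = 0;
	lrp->args.range.length = NFS4_MAX_UINT64;
	return nfs4_proc_layoutreturn(lrp, true);	/* sync: wait for the reply */
}
#endif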
8116
8117 static int
8118 _nfs4_proc_getdeviceinfo(struct nfs_server *server,
8119 struct pnfs_device *pdev,
8120 struct rpc_cred *cred)
8121 {
8122 struct nfs4_getdeviceinfo_args args = {
8123 .pdev = pdev,
8124 .notify_types = NOTIFY_DEVICEID4_CHANGE |
8125 NOTIFY_DEVICEID4_DELETE,
8126 };
8127 struct nfs4_getdeviceinfo_res res = {
8128 .pdev = pdev,
8129 };
8130 struct rpc_message msg = {
8131 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
8132 .rpc_argp = &args,
8133 .rpc_resp = &res,
8134 .rpc_cred = cred,
8135 };
8136 int status;
8137
8138 dprintk("--> %s\n", __func__);
8139 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
8140 if (res.notification & ~args.notify_types)
8141 dprintk("%s: unsupported notification\n", __func__);
8142 if (res.notification != args.notify_types)
8143 pdev->nocache = 1;
8144
8145 dprintk("<-- %s status=%d\n", __func__, status);
8146
8147 return status;
8148 }
8149
8150 int nfs4_proc_getdeviceinfo(struct nfs_server *server,
8151 struct pnfs_device *pdev,
8152 struct rpc_cred *cred)
8153 {
8154 struct nfs4_exception exception = { };
8155 int err;
8156
8157 do {
8158 err = nfs4_handle_exception(server,
8159 _nfs4_proc_getdeviceinfo(server, pdev, cred),
8160 &exception);
8161 } while (exception.retry);
8162 return err;
8163 }
8164 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
8165
8166 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
8167 {
8168 struct nfs4_layoutcommit_data *data = calldata;
8169 struct nfs_server *server = NFS_SERVER(data->args.inode);
8170 struct nfs4_session *session = nfs4_get_session(server);
8171
8172 nfs41_setup_sequence(session,
8173 &data->args.seq_args,
8174 &data->res.seq_res,
8175 task);
8176 }
8177
8178 static void
8179 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
8180 {
8181 struct nfs4_layoutcommit_data *data = calldata;
8182 struct nfs_server *server = NFS_SERVER(data->args.inode);
8183
8184 if (!nfs41_sequence_done(task, &data->res.seq_res))
8185 return;
8186
8187 switch (task->tk_status) { /* Just ignore these failures */
8188 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
8189 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
8190 case -NFS4ERR_BADLAYOUT: /* no layout */
8191 case -NFS4ERR_GRACE: /* loca_reclaim is always false */
8192 task->tk_status = 0;
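/* Fall through */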
8193 case 0:
8194 break;
8195 default:
8196 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
8197 rpc_restart_call_prepare(task);
8198 return;
8199 }
8200 }
8201 }
8202
8203 static void nfs4_layoutcommit_release(void *calldata)
8204 {
8205 struct nfs4_layoutcommit_data *data = calldata;
8206
8207 pnfs_cleanup_layoutcommit(data);
8208 nfs_post_op_update_inode_force_wcc(data->args.inode,
8209 data->res.fattr);
8210 put_rpccred(data->cred);
8211 nfs_iput_and_deactive(data->inode);
8212 kfree(data);
8213 }
8214
8215 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
8216 .rpc_call_prepare = nfs4_layoutcommit_prepare,
8217 .rpc_call_done = nfs4_layoutcommit_done,
8218 .rpc_release = nfs4_layoutcommit_release,
8219 };
8220
8221 int
8222 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
8223 {
8224 struct rpc_message msg = {
8225 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
8226 .rpc_argp = &data->args,
8227 .rpc_resp = &data->res,
8228 .rpc_cred = data->cred,
8229 };
8230 struct rpc_task_setup task_setup_data = {
8231 .task = &data->task,
8232 .rpc_client = NFS_CLIENT(data->args.inode),
8233 .rpc_message = &msg,
8234 .callback_ops = &nfs4_layoutcommit_ops,
8235 .callback_data = data,
8236 };
8237 struct rpc_task *task;
8238 int status = 0;
8239
8240 dprintk("NFS: initiating layoutcommit call. sync %d "
8241 "lbw: %llu inode %lu\n", sync,
8242 data->args.lastbytewritten,
8243 data->args.inode->i_ino);
8244
8245 if (!sync) {
8246 data->inode = nfs_igrab_and_active(data->args.inode);
8247 if (data->inode == NULL) {
8248 nfs4_layoutcommit_release(data);
8249 return -EAGAIN;
8250 }
8251 task_setup_data.flags = RPC_TASK_ASYNC;
8252 }
8253 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
8254 task = rpc_run_task(&task_setup_data);
8255 if (IS_ERR(task))
8256 return PTR_ERR(task);
8257 if (sync)
8258 status = task->tk_status;
8259 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status);
8260 dprintk("%s: status %d\n", __func__, status);
8261 rpc_put_task(task);
8262 return status;
8263 }
8264
8265 /**
8266 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if
8267 * possible) as per RFC3530bis and RFC5661 Security Considerations sections
8268 */
8269 static int
8270 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8271 struct nfs_fsinfo *info,
8272 struct nfs4_secinfo_flavors *flavors, bool use_integrity)
8273 {
8274 struct nfs41_secinfo_no_name_args args = {
8275 .style = SECINFO_STYLE_CURRENT_FH,
8276 };
8277 struct nfs4_secinfo_res res = {
8278 .flavors = flavors,
8279 };
8280 struct rpc_message msg = {
8281 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
8282 .rpc_argp = &args,
8283 .rpc_resp = &res,
8284 };
8285 struct rpc_clnt *clnt = server->client;
8286 struct rpc_cred *cred = NULL;
8287 int status;
8288
8289 if (use_integrity) {
8290 clnt = server->nfs_client->cl_rpcclient;
8291 cred = nfs4_get_clid_cred(server->nfs_client);
8292 msg.rpc_cred = cred;
8293 }
8294
8295 dprintk("--> %s\n", __func__);
8296 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
8297 &res.seq_res, 0);
8298 dprintk("<-- %s status=%d\n", __func__, status);
8299
8300 if (cred)
8301 put_rpccred(cred);
8302
8303 return status;
8304 }
8305
8306 static int
8307 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8308 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
8309 {
8310 struct nfs4_exception exception = { };
8311 int err;
8312 do {
8313 /* first try using integrity protection */
8314 err = -NFS4ERR_WRONGSEC;
8315
8316 /* try to use integrity protection with machine cred */
8317 if (_nfs4_is_integrity_protected(server->nfs_client))
8318 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
8319 flavors, true);
8320
8321 /*
8322 * If unable to use integrity protection, or if SECINFO with
8323 * integrity protection returns NFS4ERR_WRONGSEC (which is
8324 * disallowed by the spec but seen from deployed servers), use
8325 * the current filesystem's rpc_client and the user cred.
8326 */
8327 if (err == -NFS4ERR_WRONGSEC)
8328 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
8329 flavors, false);
8330
8331 switch (err) {
8332 case 0:
8333 case -NFS4ERR_WRONGSEC:
8334 case -ENOTSUPP:
8335 goto out;
8336 default:
8337 err = nfs4_handle_exception(server, err, &exception);
8338 }
8339 } while (exception.retry);
8340 out:
8341 return err;
8342 }
8343
8344 static int
8345 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
8346 struct nfs_fsinfo *info)
8347 {
8348 int err;
8349 struct page *page;
8350 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
8351 struct nfs4_secinfo_flavors *flavors;
8352 struct nfs4_secinfo4 *secinfo;
8353 int i;
8354
8355 page = alloc_page(GFP_KERNEL);
8356 if (!page) {
8357 err = -ENOMEM;
8358 goto out;
8359 }
8360
8361 flavors = page_address(page);
8362 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
8363
8364 /*
8365 * Fall back on "guess and check" method if
8366 * the server doesn't support SECINFO_NO_NAME
8367 */
8368 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
8369 err = nfs4_find_root_sec(server, fhandle, info);
8370 goto out_freepage;
8371 }
8372 if (err)
8373 goto out_freepage;
8374
8375 for (i = 0; i < flavors->num_flavors; i++) {
8376 secinfo = &flavors->flavors[i];
8377
8378 switch (secinfo->flavor) {
8379 case RPC_AUTH_NULL:
8380 case RPC_AUTH_UNIX:
8381 case RPC_AUTH_GSS:
8382 flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
8383 &secinfo->flavor_info);
8384 break;
8385 default:
8386 flavor = RPC_AUTH_MAXFLAVOR;
8387 break;
8388 }
8389
8390 if (!nfs_auth_info_match(&server->auth_info, flavor))
8391 flavor = RPC_AUTH_MAXFLAVOR;
8392
8393 if (flavor != RPC_AUTH_MAXFLAVOR) {
8394 err = nfs4_lookup_root_sec(server, fhandle,
8395 info, flavor);
8396 if (!err)
8397 break;
8398 }
8399 }
8400
8401 if (flavor == RPC_AUTH_MAXFLAVOR)
8402 err = -EPERM;
8403
8404 out_freepage:
8405 put_page(page);
8406 if (err == -EACCES)
8407 return -EPERM;
8408 out:
8409 return err;
8410 }
8411
8412 static int _nfs41_test_stateid(struct nfs_server *server,
8413 nfs4_stateid *stateid,
8414 struct rpc_cred *cred)
8415 {
8416 int status;
8417 struct nfs41_test_stateid_args args = {
8418 .stateid = stateid,
8419 };
8420 struct nfs41_test_stateid_res res;
8421 struct rpc_message msg = {
8422 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
8423 .rpc_argp = &args,
8424 .rpc_resp = &res,
8425 .rpc_cred = cred,
8426 };
8427 struct rpc_clnt *rpc_client = server->client;
8428
8429 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
8430 &rpc_client, &msg);
8431
8432 dprintk("NFS call test_stateid %p\n", stateid);
8433 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
8434 nfs4_set_sequence_privileged(&args.seq_args);
8435 status = nfs4_call_sync_sequence(rpc_client, server, &msg,
8436 &args.seq_args, &res.seq_res);
8437 if (status != NFS_OK) {
8438 dprintk("NFS reply test_stateid: failed, %d\n", status);
8439 return status;
8440 }
8441 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
8442 return -res.status;
8443 }
8444
8445 /**
8446 * nfs41_test_stateid - perform a TEST_STATEID operation
8447 *
8448 * @server: server / transport on which to perform the operation
8449 * @stateid: state ID to test
8450 * @cred: credential
8451 *
8452 * Returns NFS_OK if the server recognizes that "stateid" is valid.
8453 * Otherwise a negative NFS4ERR value is returned if the operation
8454 * failed or the state ID is not currently valid.
8455 */
8456 static int nfs41_test_stateid(struct nfs_server *server,
8457 nfs4_stateid *stateid,
8458 struct rpc_cred *cred)
8459 {
8460 struct nfs4_exception exception = { };
8461 int err;
8462 do {
8463 err = _nfs41_test_stateid(server, stateid, cred);
8464 if (err != -NFS4ERR_DELAY)
8465 break;
8466 nfs4_handle_exception(server, err, &exception);
8467 } while (exception.retry);
8468 return err;
8469 }
8470
8471 struct nfs_free_stateid_data {
8472 struct nfs_server *server;
8473 struct nfs41_free_stateid_args args;
8474 struct nfs41_free_stateid_res res;
8475 };
8476
8477 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
8478 {
8479 struct nfs_free_stateid_data *data = calldata;
8480 nfs41_setup_sequence(nfs4_get_session(data->server),
8481 &data->args.seq_args,
8482 &data->res.seq_res,
8483 task);
8484 }
8485
8486 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
8487 {
8488 struct nfs_free_stateid_data *data = calldata;
8489
8490 nfs41_sequence_done(task, &data->res.seq_res);
8491
8492 switch (task->tk_status) {
8493 case -NFS4ERR_DELAY:
8494 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
8495 rpc_restart_call_prepare(task);
8496 }
8497 }
8498
8499 static void nfs41_free_stateid_release(void *calldata)
8500 {
8501 kfree(calldata);
8502 }
8503
8504 static const struct rpc_call_ops nfs41_free_stateid_ops = {
8505 .rpc_call_prepare = nfs41_free_stateid_prepare,
8506 .rpc_call_done = nfs41_free_stateid_done,
8507 .rpc_release = nfs41_free_stateid_release,
8508 };
8509
8510 static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server,
8511 nfs4_stateid *stateid,
8512 struct rpc_cred *cred,
8513 bool privileged)
8514 {
8515 struct rpc_message msg = {
8516 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
8517 .rpc_cred = cred,
8518 };
8519 struct rpc_task_setup task_setup = {
8520 .rpc_client = server->client,
8521 .rpc_message = &msg,
8522 .callback_ops = &nfs41_free_stateid_ops,
8523 .flags = RPC_TASK_ASYNC,
8524 };
8525 struct nfs_free_stateid_data *data;
8526
8527 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
8528 &task_setup.rpc_client, &msg);
8529
8530 dprintk("NFS call free_stateid %p\n", stateid);
8531 data = kmalloc(sizeof(*data), GFP_NOFS);
8532 if (!data)
8533 return ERR_PTR(-ENOMEM);
8534 data->server = server;
8535 nfs4_stateid_copy(&data->args.stateid, stateid);
8536
8537 task_setup.callback_data = data;
8538
8539 msg.rpc_argp = &data->args;
8540 msg.rpc_resp = &data->res;
8541 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
8542 if (privileged)
8543 nfs4_set_sequence_privileged(&data->args.seq_args);
8544
8545 return rpc_run_task(&task_setup);
8546 }
8547
8548 /**
8549 * nfs41_free_stateid - perform a FREE_STATEID operation
8550 *
8551 * @server: server / transport on which to perform the operation
8552 * @stateid: state ID to release
8553 * @cred: credential
8554 *
8555 * Returns NFS_OK if the server freed "stateid". Otherwise a
8556 * negative NFS4ERR value is returned.
8557 */
8558 static int nfs41_free_stateid(struct nfs_server *server,
8559 nfs4_stateid *stateid,
8560 struct rpc_cred *cred)
8561 {
8562 struct rpc_task *task;
8563 int ret;
8564
8565 task = _nfs41_free_stateid(server, stateid, cred, true);
8566 if (IS_ERR(task))
8567 return PTR_ERR(task);
8568 ret = rpc_wait_for_completion_task(task);
8569 if (!ret)
8570 ret = task->tk_status;
8571 rpc_put_task(task);
8572 return ret;
8573 }
8574
8575 static void
8576 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
8577 {
8578 struct rpc_task *task;
8579 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
8580
8581 task = _nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
8582 nfs4_free_lock_state(server, lsp);
8583 if (IS_ERR(task))
8584 return;
8585 rpc_put_task(task);
8586 }
8587
8588 static bool nfs41_match_stateid(const nfs4_stateid *s1,
8589 const nfs4_stateid *s2)
8590 {
8591 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
8592 return false;
8593
8594 if (s1->seqid == s2->seqid)
8595 return true;
8596 if (s1->seqid == 0 || s2->seqid == 0)
8597 return true;
8598
8599 return false;
8600 }
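/*
 * Illustrative sketch (not part of the build): NFSv4.1 stateid matching
 * treats a zero seqid as "any sequence", so a stateid presented with
 * seqid 0 matches the current incarnation of the same open/lock state.
 * The values below are made up for illustration.
 */
#if 0
static void example_stateid_match(void)
{
	nfs4_stateid a, b;

	memset(&a, 0, sizeof(a));
	memset(&b, 0, sizeof(b));		/* identical 'other' fields */

	a.seqid = cpu_to_be32(3);
	b.seqid = 0;				/* wildcard seqid */
	WARN_ON(!nfs41_match_stateid(&a, &b));	/* matches */

	b.seqid = cpu_to_be32(4);
	WARN_ON(nfs41_match_stateid(&a, &b));	/* 3 != 4, neither zero: no match */
}
#endif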
8601
8602 #endif /* CONFIG_NFS_V4_1 */
8603
8604 static bool nfs4_match_stateid(const nfs4_stateid *s1,
8605 const nfs4_stateid *s2)
8606 {
8607 return nfs4_stateid_match(s1, s2);
8608 }
8609
8610
8611 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
8612 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
8613 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
8614 .recover_open = nfs4_open_reclaim,
8615 .recover_lock = nfs4_lock_reclaim,
8616 .establish_clid = nfs4_init_clientid,
8617 .detect_trunking = nfs40_discover_server_trunking,
8618 };
8619
8620 #if defined(CONFIG_NFS_V4_1)
8621 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
8622 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
8623 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
8624 .recover_open = nfs4_open_reclaim,
8625 .recover_lock = nfs4_lock_reclaim,
8626 .establish_clid = nfs41_init_clientid,
8627 .reclaim_complete = nfs41_proc_reclaim_complete,
8628 .detect_trunking = nfs41_discover_server_trunking,
8629 };
8630 #endif /* CONFIG_NFS_V4_1 */
8631
8632 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
8633 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
8634 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
8635 .recover_open = nfs40_open_expired,
8636 .recover_lock = nfs4_lock_expired,
8637 .establish_clid = nfs4_init_clientid,
8638 };
8639
8640 #if defined(CONFIG_NFS_V4_1)
8641 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
8642 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
8643 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
8644 .recover_open = nfs41_open_expired,
8645 .recover_lock = nfs41_lock_expired,
8646 .establish_clid = nfs41_init_clientid,
8647 };
8648 #endif /* CONFIG_NFS_V4_1 */
8649
8650 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
8651 .sched_state_renewal = nfs4_proc_async_renew,
8652 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
8653 .renew_lease = nfs4_proc_renew,
8654 };
8655
8656 #if defined(CONFIG_NFS_V4_1)
8657 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
8658 .sched_state_renewal = nfs41_proc_async_sequence,
8659 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
8660 .renew_lease = nfs4_proc_sequence,
8661 };
8662 #endif
8663
8664 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
8665 .get_locations = _nfs40_proc_get_locations,
8666 .fsid_present = _nfs40_proc_fsid_present,
8667 };
8668
8669 #if defined(CONFIG_NFS_V4_1)
8670 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
8671 .get_locations = _nfs41_proc_get_locations,
8672 .fsid_present = _nfs41_proc_fsid_present,
8673 };
8674 #endif /* CONFIG_NFS_V4_1 */
8675
8676 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
8677 .minor_version = 0,
8678 .init_caps = NFS_CAP_READDIRPLUS
8679 | NFS_CAP_ATOMIC_OPEN
8680 | NFS_CAP_POSIX_LOCK,
8681 .init_client = nfs40_init_client,
8682 .shutdown_client = nfs40_shutdown_client,
8683 .match_stateid = nfs4_match_stateid,
8684 .find_root_sec = nfs4_find_root_sec,
8685 .free_lock_state = nfs4_release_lockowner,
8686 .alloc_seqid = nfs_alloc_seqid,
8687 .call_sync_ops = &nfs40_call_sync_ops,
8688 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
8689 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
8690 .state_renewal_ops = &nfs40_state_renewal_ops,
8691 .mig_recovery_ops = &nfs40_mig_recovery_ops,
8692 };
8693
8694 #if defined(CONFIG_NFS_V4_1)
8695 static struct nfs_seqid *
8696 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
8697 {
8698 return NULL;
8699 }
8700
8701 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
8702 .minor_version = 1,
8703 .init_caps = NFS_CAP_READDIRPLUS
8704 | NFS_CAP_ATOMIC_OPEN
8705 | NFS_CAP_POSIX_LOCK
8706 | NFS_CAP_STATEID_NFSV41
8707 | NFS_CAP_ATOMIC_OPEN_V1,
8708 .init_client = nfs41_init_client,
8709 .shutdown_client = nfs41_shutdown_client,
8710 .match_stateid = nfs41_match_stateid,
8711 .find_root_sec = nfs41_find_root_sec,
8712 .free_lock_state = nfs41_free_lock_state,
8713 .alloc_seqid = nfs_alloc_no_seqid,
8714 .call_sync_ops = &nfs41_call_sync_ops,
8715 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
8716 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
8717 .state_renewal_ops = &nfs41_state_renewal_ops,
8718 .mig_recovery_ops = &nfs41_mig_recovery_ops,
8719 };
8720 #endif
8721
8722 #if defined(CONFIG_NFS_V4_2)
8723 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
8724 .minor_version = 2,
8725 .init_caps = NFS_CAP_READDIRPLUS
8726 | NFS_CAP_ATOMIC_OPEN
8727 | NFS_CAP_POSIX_LOCK
8728 | NFS_CAP_STATEID_NFSV41
8729 | NFS_CAP_ATOMIC_OPEN_V1
8730 | NFS_CAP_ALLOCATE
8731 | NFS_CAP_DEALLOCATE
8732 | NFS_CAP_SEEK
8733 | NFS_CAP_LAYOUTSTATS
8734 | NFS_CAP_CLONE,
8735 .init_client = nfs41_init_client,
8736 .shutdown_client = nfs41_shutdown_client,
8737 .match_stateid = nfs41_match_stateid,
8738 .find_root_sec = nfs41_find_root_sec,
8739 .free_lock_state = nfs41_free_lock_state,
8740 .call_sync_ops = &nfs41_call_sync_ops,
8741 .alloc_seqid = nfs_alloc_no_seqid,
8742 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
8743 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
8744 .state_renewal_ops = &nfs41_state_renewal_ops,
8745 .mig_recovery_ops = &nfs41_mig_recovery_ops,
8746 };
8747 #endif
8748
8749 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
8750 [0] = &nfs_v4_0_minor_ops,
8751 #if defined(CONFIG_NFS_V4_1)
8752 [1] = &nfs_v4_1_minor_ops,
8753 #endif
8754 #if defined(CONFIG_NFS_V4_2)
8755 [2] = &nfs_v4_2_minor_ops,
8756 #endif
8757 };
8758
8759 static const struct inode_operations nfs4_dir_inode_operations = {
8760 .create = nfs_create,
8761 .lookup = nfs_lookup,
8762 .atomic_open = nfs_atomic_open,
8763 .link = nfs_link,
8764 .unlink = nfs_unlink,
8765 .symlink = nfs_symlink,
8766 .mkdir = nfs_mkdir,
8767 .rmdir = nfs_rmdir,
8768 .mknod = nfs_mknod,
8769 .rename = nfs_rename,
8770 .permission = nfs_permission,
8771 .getattr = nfs_getattr,
8772 .setattr = nfs_setattr,
8773 .getxattr = generic_getxattr,
8774 .setxattr = generic_setxattr,
8775 .listxattr = generic_listxattr,
8776 .removexattr = generic_removexattr,
8777 };
8778
8779 static const struct inode_operations nfs4_file_inode_operations = {
8780 .permission = nfs_permission,
8781 .getattr = nfs_getattr,
8782 .setattr = nfs_setattr,
8783 .getxattr = generic_getxattr,
8784 .setxattr = generic_setxattr,
8785 .listxattr = generic_listxattr,
8786 .removexattr = generic_removexattr,
8787 };
8788
8789 const struct nfs_rpc_ops nfs_v4_clientops = {
8790 .version = 4, /* protocol version */
8791 .dentry_ops = &nfs4_dentry_operations,
8792 .dir_inode_ops = &nfs4_dir_inode_operations,
8793 .file_inode_ops = &nfs4_file_inode_operations,
8794 .file_ops = &nfs4_file_operations,
8795 .getroot = nfs4_proc_get_root,
8796 .submount = nfs4_submount,
8797 .try_mount = nfs4_try_mount,
8798 .getattr = nfs4_proc_getattr,
8799 .setattr = nfs4_proc_setattr,
8800 .lookup = nfs4_proc_lookup,
8801 .access = nfs4_proc_access,
8802 .readlink = nfs4_proc_readlink,
8803 .create = nfs4_proc_create,
8804 .remove = nfs4_proc_remove,
8805 .unlink_setup = nfs4_proc_unlink_setup,
8806 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
8807 .unlink_done = nfs4_proc_unlink_done,
8808 .rename_setup = nfs4_proc_rename_setup,
8809 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
8810 .rename_done = nfs4_proc_rename_done,
8811 .link = nfs4_proc_link,
8812 .symlink = nfs4_proc_symlink,
8813 .mkdir = nfs4_proc_mkdir,
8814 .rmdir = nfs4_proc_remove,
8815 .readdir = nfs4_proc_readdir,
8816 .mknod = nfs4_proc_mknod,
8817 .statfs = nfs4_proc_statfs,
8818 .fsinfo = nfs4_proc_fsinfo,
8819 .pathconf = nfs4_proc_pathconf,
8820 .set_capabilities = nfs4_server_capabilities,
8821 .decode_dirent = nfs4_decode_dirent,
8822 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
8823 .read_setup = nfs4_proc_read_setup,
8824 .read_done = nfs4_read_done,
8825 .write_setup = nfs4_proc_write_setup,
8826 .write_done = nfs4_write_done,
8827 .commit_setup = nfs4_proc_commit_setup,
8828 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
8829 .commit_done = nfs4_commit_done,
8830 .lock = nfs4_proc_lock,
8831 .clear_acl_cache = nfs4_zap_acl_attr,
8832 .close_context = nfs4_close_context,
8833 .open_context = nfs4_atomic_open,
8834 .have_delegation = nfs4_have_delegation,
8835 .return_delegation = nfs4_inode_return_delegation,
8836 .alloc_client = nfs4_alloc_client,
8837 .init_client = nfs4_init_client,
8838 .free_client = nfs4_free_client,
8839 .create_server = nfs4_create_server,
8840 .clone_server = nfs_clone_server,
8841 };
8842
8843 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
8844 .prefix = XATTR_NAME_NFSV4_ACL,
8845 .list = nfs4_xattr_list_nfs4_acl,
8846 .get = nfs4_xattr_get_nfs4_acl,
8847 .set = nfs4_xattr_set_nfs4_acl,
8848 };
8849
8850 const struct xattr_handler *nfs4_xattr_handlers[] = {
8851 &nfs4_xattr_nfs4_acl_handler,
8852 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
8853 &nfs4_xattr_nfs4_label_handler,
8854 #endif
8855 NULL
8856 };
8857
8858 /*
8859 * Local variables:
8860 * c-basic-offset: 8
8861 * End:
8862 */