Adding stateid information to tracepoints
1 /*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/file.h>
42 #include <linux/string.h>
43 #include <linux/ratelimit.h>
44 #include <linux/printk.h>
45 #include <linux/slab.h>
46 #include <linux/sunrpc/clnt.h>
47 #include <linux/nfs.h>
48 #include <linux/nfs4.h>
49 #include <linux/nfs_fs.h>
50 #include <linux/nfs_page.h>
51 #include <linux/nfs_mount.h>
52 #include <linux/namei.h>
53 #include <linux/mount.h>
54 #include <linux/module.h>
55 #include <linux/xattr.h>
56 #include <linux/utsname.h>
57 #include <linux/freezer.h>
58
59 #include "nfs4_fs.h"
60 #include "delegation.h"
61 #include "internal.h"
62 #include "iostat.h"
63 #include "callback.h"
64 #include "pnfs.h"
65 #include "netns.h"
66 #include "nfs4idmap.h"
67 #include "nfs4session.h"
68 #include "fscache.h"
69
70 #include "nfs4trace.h"
71
72 #define NFSDBG_FACILITY NFSDBG_PROC
73
74 #define NFS4_POLL_RETRY_MIN (HZ/10)
75 #define NFS4_POLL_RETRY_MAX (15*HZ)
76
77 struct nfs4_opendata;
78 static int _nfs4_proc_open(struct nfs4_opendata *data);
79 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
80 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
81 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
82 static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *label);
83 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label);
84 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
85 struct nfs_fattr *fattr, struct iattr *sattr,
86 struct nfs4_state *state, struct nfs4_label *ilabel,
87 struct nfs4_label *olabel);
88 #ifdef CONFIG_NFS_V4_1
89 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
90 struct rpc_cred *);
91 static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
92 struct rpc_cred *);
93 #endif
94
95 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
96 static inline struct nfs4_label *
97 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
98 struct iattr *sattr, struct nfs4_label *label)
99 {
100 int err;
101
102 if (label == NULL)
103 return NULL;
104
105 if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
106 return NULL;
107
108 err = security_dentry_init_security(dentry, sattr->ia_mode,
109 &dentry->d_name, (void **)&label->label, &label->len);
110 if (err == 0)
111 return label;
112
113 return NULL;
114 }
115 static inline void
116 nfs4_label_release_security(struct nfs4_label *label)
117 {
118 if (label)
119 security_release_secctx(label->label, label->len);
120 }
121 static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
122 {
123 if (label)
124 return server->attr_bitmask;
125
126 return server->attr_bitmask_nl;
127 }
128 #else
129 static inline struct nfs4_label *
130 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
131 struct iattr *sattr, struct nfs4_label *l)
132 { return NULL; }
133 static inline void
134 nfs4_label_release_security(struct nfs4_label *label)
135 { return; }
136 static inline u32 *
137 nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
138 { return server->attr_bitmask; }
139 #endif
140
141 /* Prevent leaks of NFSv4 errors into userland */
142 static int nfs4_map_errors(int err)
143 {
144 if (err >= -1000)
145 return err;
146 switch (err) {
147 case -NFS4ERR_RESOURCE:
148 case -NFS4ERR_LAYOUTTRYLATER:
149 case -NFS4ERR_RECALLCONFLICT:
150 return -EREMOTEIO;
151 case -NFS4ERR_WRONGSEC:
152 case -NFS4ERR_WRONG_CRED:
153 return -EPERM;
154 case -NFS4ERR_BADOWNER:
155 case -NFS4ERR_BADNAME:
156 return -EINVAL;
157 case -NFS4ERR_SHARE_DENIED:
158 return -EACCES;
159 case -NFS4ERR_MINOR_VERS_MISMATCH:
160 return -EPROTONOSUPPORT;
161 case -NFS4ERR_FILE_OPEN:
162 return -EBUSY;
163 default:
164 dprintk("%s could not handle NFSv4 error %d\n",
165 __func__, -err);
166 break;
167 }
168 return -EIO;
169 }
170
171 /*
172 * This is our standard bitmap for GETATTR requests.
173 */
174 const u32 nfs4_fattr_bitmap[3] = {
175 FATTR4_WORD0_TYPE
176 | FATTR4_WORD0_CHANGE
177 | FATTR4_WORD0_SIZE
178 | FATTR4_WORD0_FSID
179 | FATTR4_WORD0_FILEID,
180 FATTR4_WORD1_MODE
181 | FATTR4_WORD1_NUMLINKS
182 | FATTR4_WORD1_OWNER
183 | FATTR4_WORD1_OWNER_GROUP
184 | FATTR4_WORD1_RAWDEV
185 | FATTR4_WORD1_SPACE_USED
186 | FATTR4_WORD1_TIME_ACCESS
187 | FATTR4_WORD1_TIME_METADATA
188 | FATTR4_WORD1_TIME_MODIFY
189 | FATTR4_WORD1_MOUNTED_ON_FILEID,
190 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
191 FATTR4_WORD2_SECURITY_LABEL
192 #endif
193 };
194
195 static const u32 nfs4_pnfs_open_bitmap[3] = {
196 FATTR4_WORD0_TYPE
197 | FATTR4_WORD0_CHANGE
198 | FATTR4_WORD0_SIZE
199 | FATTR4_WORD0_FSID
200 | FATTR4_WORD0_FILEID,
201 FATTR4_WORD1_MODE
202 | FATTR4_WORD1_NUMLINKS
203 | FATTR4_WORD1_OWNER
204 | FATTR4_WORD1_OWNER_GROUP
205 | FATTR4_WORD1_RAWDEV
206 | FATTR4_WORD1_SPACE_USED
207 | FATTR4_WORD1_TIME_ACCESS
208 | FATTR4_WORD1_TIME_METADATA
209 | FATTR4_WORD1_TIME_MODIFY,
210 FATTR4_WORD2_MDSTHRESHOLD
211 };
212
213 static const u32 nfs4_open_noattr_bitmap[3] = {
214 FATTR4_WORD0_TYPE
215 | FATTR4_WORD0_CHANGE
216 | FATTR4_WORD0_FILEID,
217 };
218
219 const u32 nfs4_statfs_bitmap[3] = {
220 FATTR4_WORD0_FILES_AVAIL
221 | FATTR4_WORD0_FILES_FREE
222 | FATTR4_WORD0_FILES_TOTAL,
223 FATTR4_WORD1_SPACE_AVAIL
224 | FATTR4_WORD1_SPACE_FREE
225 | FATTR4_WORD1_SPACE_TOTAL
226 };
227
228 const u32 nfs4_pathconf_bitmap[3] = {
229 FATTR4_WORD0_MAXLINK
230 | FATTR4_WORD0_MAXNAME,
231 0
232 };
233
234 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
235 | FATTR4_WORD0_MAXREAD
236 | FATTR4_WORD0_MAXWRITE
237 | FATTR4_WORD0_LEASE_TIME,
238 FATTR4_WORD1_TIME_DELTA
239 | FATTR4_WORD1_FS_LAYOUT_TYPES,
240 FATTR4_WORD2_LAYOUT_BLKSIZE
241 | FATTR4_WORD2_CLONE_BLKSIZE
242 };
243
244 const u32 nfs4_fs_locations_bitmap[3] = {
245 FATTR4_WORD0_TYPE
246 | FATTR4_WORD0_CHANGE
247 | FATTR4_WORD0_SIZE
248 | FATTR4_WORD0_FSID
249 | FATTR4_WORD0_FILEID
250 | FATTR4_WORD0_FS_LOCATIONS,
251 FATTR4_WORD1_MODE
252 | FATTR4_WORD1_NUMLINKS
253 | FATTR4_WORD1_OWNER
254 | FATTR4_WORD1_OWNER_GROUP
255 | FATTR4_WORD1_RAWDEV
256 | FATTR4_WORD1_SPACE_USED
257 | FATTR4_WORD1_TIME_ACCESS
258 | FATTR4_WORD1_TIME_METADATA
259 | FATTR4_WORD1_TIME_MODIFY
260 | FATTR4_WORD1_MOUNTED_ON_FILEID,
261 };
262
263 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
264 struct nfs4_readdir_arg *readdir)
265 {
266 __be32 *start, *p;
267
268 if (cookie > 2) {
269 readdir->cookie = cookie;
270 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
271 return;
272 }
273
274 readdir->cookie = 0;
275 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
276 if (cookie == 2)
277 return;
278
279 /*
280 * NFSv4 servers do not return entries for '.' and '..'
281 * Therefore, we fake these entries here. We let '.'
282 * have cookie 0 and '..' have cookie 1. Note that
283 * when talking to the server, we always send cookie 0
284 * instead of 1 or 2.
285 */
286 start = p = kmap_atomic(*readdir->pages);
287
288 if (cookie == 0) {
289 *p++ = xdr_one; /* next */
290 *p++ = xdr_zero; /* cookie, first word */
291 *p++ = xdr_one; /* cookie, second word */
292 *p++ = xdr_one; /* entry len */
293 memcpy(p, ".\0\0\0", 4); /* entry */
294 p++;
295 *p++ = xdr_one; /* bitmap length */
296 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
297 *p++ = htonl(8); /* attribute buffer length */
298 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
299 }
300
301 *p++ = xdr_one; /* next */
302 *p++ = xdr_zero; /* cookie, first word */
303 *p++ = xdr_two; /* cookie, second word */
304 *p++ = xdr_two; /* entry len */
305 memcpy(p, "..\0\0", 4); /* entry */
306 p++;
307 *p++ = xdr_one; /* bitmap length */
308 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
309 *p++ = htonl(8); /* attribute buffer length */
310 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
311
312 readdir->pgbase = (char *)p - (char *)start;
313 readdir->count -= readdir->pgbase;
314 kunmap_atomic(start);
315 }
316
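/*
 * Return the current retry delay and double the stored timeout for the
 * next attempt, clamping the value to the interval
 * [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX].  A NULL timeout pointer
 * simply yields the maximum delay.
 */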
317 static long nfs4_update_delay(long *timeout)
318 {
319 long ret;
320 if (!timeout)
321 return NFS4_POLL_RETRY_MAX;
322 if (*timeout <= 0)
323 *timeout = NFS4_POLL_RETRY_MIN;
324 if (*timeout > NFS4_POLL_RETRY_MAX)
325 *timeout = NFS4_POLL_RETRY_MAX;
326 ret = *timeout;
327 *timeout <<= 1;
328 return ret;
329 }
330
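/*
 * Sleep for the current backoff interval (freezable and killable).
 * Returns -ERESTARTSYS if a fatal signal arrived while sleeping,
 * otherwise 0.
 */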
331 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
332 {
333 int res = 0;
334
335 might_sleep();
336
337 freezable_schedule_timeout_killable_unsafe(
338 nfs4_update_delay(timeout));
339 if (fatal_signal_pending(current))
340 res = -ERESTARTSYS;
341 return res;
342 }
343
344 /* Common error classification helper shared by the synchronous and
345  * asynchronous error handlers below.
346  */
347 static int nfs4_do_handle_exception(struct nfs_server *server,
348 int errorcode, struct nfs4_exception *exception)
349 {
350 struct nfs_client *clp = server->nfs_client;
351 struct nfs4_state *state = exception->state;
352 struct inode *inode = exception->inode;
353 int ret = errorcode;
354
355 exception->delay = 0;
356 exception->recovering = 0;
357 exception->retry = 0;
358 switch(errorcode) {
359 case 0:
360 return 0;
361 case -NFS4ERR_OPENMODE:
362 case -NFS4ERR_DELEG_REVOKED:
363 case -NFS4ERR_ADMIN_REVOKED:
364 case -NFS4ERR_BAD_STATEID:
365 if (inode && nfs_async_inode_return_delegation(inode,
366 NULL) == 0)
367 goto wait_on_recovery;
368 if (state == NULL)
369 break;
370 ret = nfs4_schedule_stateid_recovery(server, state);
371 if (ret < 0)
372 break;
373 goto wait_on_recovery;
374 case -NFS4ERR_EXPIRED:
375 if (state != NULL) {
376 ret = nfs4_schedule_stateid_recovery(server, state);
377 if (ret < 0)
378 break;
379 }
380 case -NFS4ERR_STALE_STATEID:
381 case -NFS4ERR_STALE_CLIENTID:
382 nfs4_schedule_lease_recovery(clp);
383 goto wait_on_recovery;
384 case -NFS4ERR_MOVED:
385 ret = nfs4_schedule_migration_recovery(server);
386 if (ret < 0)
387 break;
388 goto wait_on_recovery;
389 case -NFS4ERR_LEASE_MOVED:
390 nfs4_schedule_lease_moved_recovery(clp);
391 goto wait_on_recovery;
392 #if defined(CONFIG_NFS_V4_1)
393 case -NFS4ERR_BADSESSION:
394 case -NFS4ERR_BADSLOT:
395 case -NFS4ERR_BAD_HIGH_SLOT:
396 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
397 case -NFS4ERR_DEADSESSION:
398 case -NFS4ERR_SEQ_FALSE_RETRY:
399 case -NFS4ERR_SEQ_MISORDERED:
400 dprintk("%s ERROR: %d Reset session\n", __func__,
401 errorcode);
402 nfs4_schedule_session_recovery(clp->cl_session, errorcode);
403 goto wait_on_recovery;
404 #endif /* defined(CONFIG_NFS_V4_1) */
405 case -NFS4ERR_FILE_OPEN:
406 if (exception->timeout > HZ) {
407 /* We have retried a decent amount, time to
408 * fail
409 */
410 ret = -EBUSY;
411 break;
412 }
413 case -NFS4ERR_DELAY:
414 nfs_inc_server_stats(server, NFSIOS_DELAY);
415 case -NFS4ERR_GRACE:
416 exception->delay = 1;
417 return 0;
418
419 case -NFS4ERR_RETRY_UNCACHED_REP:
420 case -NFS4ERR_OLD_STATEID:
421 exception->retry = 1;
422 break;
423 case -NFS4ERR_BADOWNER:
424 /* The following works around a Linux server bug! */
425 case -NFS4ERR_BADNAME:
426 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
427 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
428 exception->retry = 1;
429 printk(KERN_WARNING "NFS: v4 server %s "
430 "does not accept raw "
431 "uid/gids. "
432 "Reenabling the idmapper.\n",
433 server->nfs_client->cl_hostname);
434 }
435 }
436 /* We failed to handle the error */
437 return nfs4_map_errors(ret);
438 wait_on_recovery:
439 exception->recovering = 1;
440 return 0;
441 }
442
443 /* This is the error handling routine for processes that are allowed
444 * to sleep.
445 */
446 int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
447 {
448 struct nfs_client *clp = server->nfs_client;
449 int ret;
450
451 ret = nfs4_do_handle_exception(server, errorcode, exception);
452 if (exception->delay) {
453 ret = nfs4_delay(server->client, &exception->timeout);
454 goto out_retry;
455 }
456 if (exception->recovering) {
457 ret = nfs4_wait_clnt_recover(clp);
458 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
459 return -EIO;
460 goto out_retry;
461 }
462 return ret;
463 out_retry:
464 if (ret == 0)
465 exception->retry = 1;
466 return ret;
467 }
468
469 static int
470 nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
471 int errorcode, struct nfs4_exception *exception)
472 {
473 struct nfs_client *clp = server->nfs_client;
474 int ret;
475
476 ret = nfs4_do_handle_exception(server, errorcode, exception);
477 if (exception->delay) {
478 rpc_delay(task, nfs4_update_delay(&exception->timeout));
479 goto out_retry;
480 }
481 if (exception->recovering) {
482 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
483 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
484 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
485 goto out_retry;
486 }
487 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
488 ret = -EIO;
489 return ret;
490 out_retry:
491 if (ret == 0)
492 exception->retry = 1;
493 return ret;
494 }
495
496 static int
497 nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
498 struct nfs4_state *state, long *timeout)
499 {
500 struct nfs4_exception exception = {
501 .state = state,
502 };
503
504 if (task->tk_status >= 0)
505 return 0;
506 if (timeout)
507 exception.timeout = *timeout;
508 task->tk_status = nfs4_async_handle_exception(task, server,
509 task->tk_status,
510 &exception);
511 if (exception.delay && timeout)
512 *timeout = exception.timeout;
513 if (exception.retry)
514 return -EAGAIN;
515 return 0;
516 }
517
518 /*
519 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
520 * or 'false' otherwise.
521 */
522 static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
523 {
524 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
525
526 if (flavor == RPC_AUTH_GSS_KRB5I ||
527 flavor == RPC_AUTH_GSS_KRB5P)
528 return true;
529
530 return false;
531 }
532
533 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
534 {
535 spin_lock(&clp->cl_lock);
536 if (time_before(clp->cl_last_renewal,timestamp))
537 clp->cl_last_renewal = timestamp;
538 spin_unlock(&clp->cl_lock);
539 }
540
541 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
542 {
543 struct nfs_client *clp = server->nfs_client;
544
545 if (!nfs4_has_session(clp))
546 do_renew_lease(clp, timestamp);
547 }
548
549 struct nfs4_call_sync_data {
550 const struct nfs_server *seq_server;
551 struct nfs4_sequence_args *seq_args;
552 struct nfs4_sequence_res *seq_res;
553 };
554
555 void nfs4_init_sequence(struct nfs4_sequence_args *args,
556 struct nfs4_sequence_res *res, int cache_reply)
557 {
558 args->sa_slot = NULL;
559 args->sa_cache_this = cache_reply;
560 args->sa_privileged = 0;
561
562 res->sr_slot = NULL;
563 }
564
565 static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
566 {
567 args->sa_privileged = 1;
568 }
569
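/*
 * Reserve an NFSv4.0 call slot before starting the RPC.  If the slot
 * table is draining (and the request is not privileged) or no slot can
 * be allocated, the task is put to sleep on the slot table wait queue
 * and -EAGAIN is returned so that the call is retried later.
 */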
570 int nfs40_setup_sequence(struct nfs4_slot_table *tbl,
571 struct nfs4_sequence_args *args,
572 struct nfs4_sequence_res *res,
573 struct rpc_task *task)
574 {
575 struct nfs4_slot *slot;
576
577 /* slot already allocated? */
578 if (res->sr_slot != NULL)
579 goto out_start;
580
581 spin_lock(&tbl->slot_tbl_lock);
582 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
583 goto out_sleep;
584
585 slot = nfs4_alloc_slot(tbl);
586 if (IS_ERR(slot)) {
587 if (slot == ERR_PTR(-ENOMEM))
588 task->tk_timeout = HZ >> 2;
589 goto out_sleep;
590 }
591 spin_unlock(&tbl->slot_tbl_lock);
592
593 args->sa_slot = slot;
594 res->sr_slot = slot;
595
596 out_start:
597 rpc_call_start(task);
598 return 0;
599
600 out_sleep:
601 if (args->sa_privileged)
602 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
603 NULL, RPC_PRIORITY_PRIVILEGED);
604 else
605 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
606 spin_unlock(&tbl->slot_tbl_lock);
607 return -EAGAIN;
608 }
609 EXPORT_SYMBOL_GPL(nfs40_setup_sequence);
610
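/*
 * Release the NFSv4.0 slot once the RPC has completed, handing it
 * straight to a waiting task where possible instead of freeing it.
 */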
611 static int nfs40_sequence_done(struct rpc_task *task,
612 struct nfs4_sequence_res *res)
613 {
614 struct nfs4_slot *slot = res->sr_slot;
615 struct nfs4_slot_table *tbl;
616
617 if (slot == NULL)
618 goto out;
619
620 tbl = slot->table;
621 spin_lock(&tbl->slot_tbl_lock);
622 if (!nfs41_wake_and_assign_slot(tbl, slot))
623 nfs4_free_slot(tbl, slot);
624 spin_unlock(&tbl->slot_tbl_lock);
625
626 res->sr_slot = NULL;
627 out:
628 return 1;
629 }
630
631 #if defined(CONFIG_NFS_V4_1)
632
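/*
 * Release an NFSv4.1 session slot.  If the slot cannot be handed to a
 * waiting task and the table ends up empty while the last transmitted
 * highest_used_slotid exceeded the server's target, notify the server
 * of the new, lower highest_used_slotid.
 */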
633 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
634 {
635 struct nfs4_session *session;
636 struct nfs4_slot_table *tbl;
637 struct nfs4_slot *slot = res->sr_slot;
638 bool send_new_highest_used_slotid = false;
639
640 tbl = slot->table;
641 session = tbl->session;
642
643 spin_lock(&tbl->slot_tbl_lock);
644 /* Be nice to the server: try to ensure that the last transmitted
645          * value for highest_used_slotid <= target_highest_slotid
646 */
647 if (tbl->highest_used_slotid > tbl->target_highest_slotid)
648 send_new_highest_used_slotid = true;
649
650 if (nfs41_wake_and_assign_slot(tbl, slot)) {
651 send_new_highest_used_slotid = false;
652 goto out_unlock;
653 }
654 nfs4_free_slot(tbl, slot);
655
656 if (tbl->highest_used_slotid != NFS4_NO_SLOT)
657 send_new_highest_used_slotid = false;
658 out_unlock:
659 spin_unlock(&tbl->slot_tbl_lock);
660 res->sr_slot = NULL;
661 if (send_new_highest_used_slotid)
662 nfs41_notify_server(session->clp);
663 }
664
665 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
666 {
667 struct nfs4_session *session;
668 struct nfs4_slot *slot = res->sr_slot;
669 struct nfs_client *clp;
670 bool interrupted = false;
671 int ret = 1;
672
673 if (slot == NULL)
674 goto out_noaction;
675 /* don't increment the sequence number if the task wasn't sent */
676 if (!RPC_WAS_SENT(task))
677 goto out;
678
679 session = slot->table->session;
680
681 if (slot->interrupted) {
682 slot->interrupted = 0;
683 interrupted = true;
684 }
685
686 trace_nfs4_sequence_done(session, res);
687 /* Check the SEQUENCE operation status */
688 switch (res->sr_status) {
689 case 0:
690 /* Update the slot's sequence and clientid lease timer */
691 ++slot->seq_nr;
692 clp = session->clp;
693 do_renew_lease(clp, res->sr_timestamp);
694 /* Check sequence flags */
695 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
696 nfs41_update_target_slotid(slot->table, slot, res);
697 break;
698 case 1:
699 /*
700 * sr_status remains 1 if an RPC level error occurred.
701 * The server may or may not have processed the sequence
702                  * operation.
703 * Mark the slot as having hosted an interrupted RPC call.
704 */
705 slot->interrupted = 1;
706 goto out;
707 case -NFS4ERR_DELAY:
708 /* The server detected a resend of the RPC call and
709 * returned NFS4ERR_DELAY as per Section 2.10.6.2
710 * of RFC5661.
711 */
712 dprintk("%s: slot=%u seq=%u: Operation in progress\n",
713 __func__,
714 slot->slot_nr,
715 slot->seq_nr);
716 goto out_retry;
717 case -NFS4ERR_BADSLOT:
718 /*
719 * The slot id we used was probably retired. Try again
720 * using a different slot id.
721 */
722 goto retry_nowait;
723 case -NFS4ERR_SEQ_MISORDERED:
724 /*
725 * Was the last operation on this sequence interrupted?
726 * If so, retry after bumping the sequence number.
727 */
728 if (interrupted) {
729 ++slot->seq_nr;
730 goto retry_nowait;
731 }
732 /*
733 * Could this slot have been previously retired?
734 * If so, then the server may be expecting seq_nr = 1!
735 */
736 if (slot->seq_nr != 1) {
737 slot->seq_nr = 1;
738 goto retry_nowait;
739 }
740 break;
741 case -NFS4ERR_SEQ_FALSE_RETRY:
742 ++slot->seq_nr;
743 goto retry_nowait;
744 default:
745 /* Just update the slot sequence no. */
746 ++slot->seq_nr;
747 }
748 out:
749 /* The session may be reset by one of the error handlers. */
750         dprintk("%s: Error %d, freeing slot\n", __func__, res->sr_status);
751 nfs41_sequence_free_slot(res);
752 out_noaction:
753 return ret;
754 retry_nowait:
755 if (rpc_restart_call_prepare(task)) {
756 task->tk_status = 0;
757 ret = 0;
758 }
759 goto out;
760 out_retry:
761 if (!rpc_restart_call(task))
762 goto out;
763 rpc_delay(task, NFS4_POLL_RETRY_MAX);
764 return 0;
765 }
766 EXPORT_SYMBOL_GPL(nfs41_sequence_done);
767
768 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
769 {
770 if (res->sr_slot == NULL)
771 return 1;
772 if (!res->sr_slot->table->session)
773 return nfs40_sequence_done(task, res);
774 return nfs41_sequence_done(task, res);
775 }
776 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
777
778 int nfs41_setup_sequence(struct nfs4_session *session,
779 struct nfs4_sequence_args *args,
780 struct nfs4_sequence_res *res,
781 struct rpc_task *task)
782 {
783 struct nfs4_slot *slot;
784 struct nfs4_slot_table *tbl;
785
786 dprintk("--> %s\n", __func__);
787 /* slot already allocated? */
788 if (res->sr_slot != NULL)
789 goto out_success;
790
791 tbl = &session->fc_slot_table;
792
793 task->tk_timeout = 0;
794
795 spin_lock(&tbl->slot_tbl_lock);
796 if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) &&
797 !args->sa_privileged) {
798 /* The state manager will wait until the slot table is empty */
799 dprintk("%s session is draining\n", __func__);
800 goto out_sleep;
801 }
802
803 slot = nfs4_alloc_slot(tbl);
804 if (IS_ERR(slot)) {
805 /* If out of memory, try again in 1/4 second */
806 if (slot == ERR_PTR(-ENOMEM))
807 task->tk_timeout = HZ >> 2;
808 dprintk("<-- %s: no free slots\n", __func__);
809 goto out_sleep;
810 }
811 spin_unlock(&tbl->slot_tbl_lock);
812
813 args->sa_slot = slot;
814
815 dprintk("<-- %s slotid=%u seqid=%u\n", __func__,
816 slot->slot_nr, slot->seq_nr);
817
818 res->sr_slot = slot;
819 res->sr_timestamp = jiffies;
820 res->sr_status_flags = 0;
821 /*
822 * sr_status is only set in decode_sequence, and so will remain
823 * set to 1 if an rpc level failure occurs.
824 */
825 res->sr_status = 1;
826 trace_nfs4_setup_sequence(session, args);
827 out_success:
828 rpc_call_start(task);
829 return 0;
830 out_sleep:
831 /* Privileged tasks are queued with top priority */
832 if (args->sa_privileged)
833 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
834 NULL, RPC_PRIORITY_PRIVILEGED);
835 else
836 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
837 spin_unlock(&tbl->slot_tbl_lock);
838 return -EAGAIN;
839 }
840 EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
841
842 static int nfs4_setup_sequence(const struct nfs_server *server,
843 struct nfs4_sequence_args *args,
844 struct nfs4_sequence_res *res,
845 struct rpc_task *task)
846 {
847 struct nfs4_session *session = nfs4_get_session(server);
848 int ret = 0;
849
850 if (!session)
851 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
852 args, res, task);
853
854 dprintk("--> %s clp %p session %p sr_slot %u\n",
855 __func__, session->clp, session, res->sr_slot ?
856 res->sr_slot->slot_nr : NFS4_NO_SLOT);
857
858 ret = nfs41_setup_sequence(session, args, res, task);
859
860 dprintk("<-- %s status=%d\n", __func__, ret);
861 return ret;
862 }
863
864 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
865 {
866 struct nfs4_call_sync_data *data = calldata;
867 struct nfs4_session *session = nfs4_get_session(data->seq_server);
868
869 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
870
871 nfs41_setup_sequence(session, data->seq_args, data->seq_res, task);
872 }
873
874 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
875 {
876 struct nfs4_call_sync_data *data = calldata;
877
878 nfs41_sequence_done(task, data->seq_res);
879 }
880
881 static const struct rpc_call_ops nfs41_call_sync_ops = {
882 .rpc_call_prepare = nfs41_call_sync_prepare,
883 .rpc_call_done = nfs41_call_sync_done,
884 };
885
886 #else /* !CONFIG_NFS_V4_1 */
887
888 static int nfs4_setup_sequence(const struct nfs_server *server,
889 struct nfs4_sequence_args *args,
890 struct nfs4_sequence_res *res,
891 struct rpc_task *task)
892 {
893 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
894 args, res, task);
895 }
896
897 int nfs4_sequence_done(struct rpc_task *task,
898 struct nfs4_sequence_res *res)
899 {
900 return nfs40_sequence_done(task, res);
901 }
902 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
903
904 #endif /* !CONFIG_NFS_V4_1 */
905
906 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
907 {
908 struct nfs4_call_sync_data *data = calldata;
909 nfs4_setup_sequence(data->seq_server,
910 data->seq_args, data->seq_res, task);
911 }
912
913 static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
914 {
915 struct nfs4_call_sync_data *data = calldata;
916 nfs4_sequence_done(task, data->seq_res);
917 }
918
919 static const struct rpc_call_ops nfs40_call_sync_ops = {
920 .rpc_call_prepare = nfs40_call_sync_prepare,
921 .rpc_call_done = nfs40_call_sync_done,
922 };
923
924 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
925 struct nfs_server *server,
926 struct rpc_message *msg,
927 struct nfs4_sequence_args *args,
928 struct nfs4_sequence_res *res)
929 {
930 int ret;
931 struct rpc_task *task;
932 struct nfs_client *clp = server->nfs_client;
933 struct nfs4_call_sync_data data = {
934 .seq_server = server,
935 .seq_args = args,
936 .seq_res = res,
937 };
938 struct rpc_task_setup task_setup = {
939 .rpc_client = clnt,
940 .rpc_message = msg,
941 .callback_ops = clp->cl_mvops->call_sync_ops,
942 .callback_data = &data
943 };
944
945 task = rpc_run_task(&task_setup);
946 if (IS_ERR(task))
947 ret = PTR_ERR(task);
948 else {
949 ret = task->tk_status;
950 rpc_put_task(task);
951 }
952 return ret;
953 }
954
955 int nfs4_call_sync(struct rpc_clnt *clnt,
956 struct nfs_server *server,
957 struct rpc_message *msg,
958 struct nfs4_sequence_args *args,
959 struct nfs4_sequence_res *res,
960 int cache_reply)
961 {
962 nfs4_init_sequence(args, res, cache_reply);
963 return nfs4_call_sync_sequence(clnt, server, msg, args, res);
964 }
965
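/*
 * Apply a directory change_info4 result: invalidate the cached
 * attributes and readdir data, bump the attribute generation counter,
 * and force dentry revalidation unless the change was atomic and the
 * pre-change value matched our cached change attribute.
 */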
966 static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
967 {
968 struct nfs_inode *nfsi = NFS_I(dir);
969
970 spin_lock(&dir->i_lock);
971 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
972 if (!cinfo->atomic || cinfo->before != dir->i_version)
973 nfs_force_lookup_revalidate(dir);
974 dir->i_version = cinfo->after;
975 nfsi->attr_gencount = nfs_inc_attr_generation_counter();
976 nfs_fscache_invalidate(dir);
977 spin_unlock(&dir->i_lock);
978 }
979
980 struct nfs4_opendata {
981 struct kref kref;
982 struct nfs_openargs o_arg;
983 struct nfs_openres o_res;
984 struct nfs_open_confirmargs c_arg;
985 struct nfs_open_confirmres c_res;
986 struct nfs4_string owner_name;
987 struct nfs4_string group_name;
988 struct nfs4_label *a_label;
989 struct nfs_fattr f_attr;
990 struct nfs4_label *f_label;
991 struct dentry *dir;
992 struct dentry *dentry;
993 struct nfs4_state_owner *owner;
994 struct nfs4_state *state;
995 struct iattr attrs;
996 unsigned long timestamp;
997 unsigned int rpc_done : 1;
998 unsigned int file_created : 1;
999 unsigned int is_recover : 1;
1000 int rpc_status;
1001 int cancelled;
1002 };
1003
1004 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
1005 int err, struct nfs4_exception *exception)
1006 {
1007 if (err != -EINVAL)
1008 return false;
1009 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1010 return false;
1011 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
1012 exception->retry = 1;
1013 return true;
1014 }
1015
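/*
 * Map the VFS fmode and open flags onto OPEN4 share_access bits.  On
 * servers that support NFSv4.1 atomic opens, O_DIRECT additionally
 * requests that no delegation be handed out.
 */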
1016 static u32
1017 nfs4_map_atomic_open_share(struct nfs_server *server,
1018 fmode_t fmode, int openflags)
1019 {
1020 u32 res = 0;
1021
1022 switch (fmode & (FMODE_READ | FMODE_WRITE)) {
1023 case FMODE_READ:
1024 res = NFS4_SHARE_ACCESS_READ;
1025 break;
1026 case FMODE_WRITE:
1027 res = NFS4_SHARE_ACCESS_WRITE;
1028 break;
1029 case FMODE_READ|FMODE_WRITE:
1030 res = NFS4_SHARE_ACCESS_BOTH;
1031 }
1032 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1033 goto out;
1034 /* Want no delegation if we're using O_DIRECT */
1035 if (openflags & O_DIRECT)
1036 res |= NFS4_SHARE_WANT_NO_DELEG;
1037 out:
1038 return res;
1039 }
1040
1041 static enum open_claim_type4
1042 nfs4_map_atomic_open_claim(struct nfs_server *server,
1043 enum open_claim_type4 claim)
1044 {
1045 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
1046 return claim;
1047 switch (claim) {
1048 default:
1049 return claim;
1050 case NFS4_OPEN_CLAIM_FH:
1051 return NFS4_OPEN_CLAIM_NULL;
1052 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1053 return NFS4_OPEN_CLAIM_DELEGATE_CUR;
1054 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1055 return NFS4_OPEN_CLAIM_DELEGATE_PREV;
1056 }
1057 }
1058
1059 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
1060 {
1061 p->o_res.f_attr = &p->f_attr;
1062 p->o_res.f_label = p->f_label;
1063 p->o_res.seqid = p->o_arg.seqid;
1064 p->c_res.seqid = p->c_arg.seqid;
1065 p->o_res.server = p->o_arg.server;
1066 p->o_res.access_request = p->o_arg.access;
1067 nfs_fattr_init(&p->f_attr);
1068 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
1069 }
1070
1071 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1072 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
1073 const struct iattr *attrs,
1074 struct nfs4_label *label,
1075 enum open_claim_type4 claim,
1076 gfp_t gfp_mask)
1077 {
1078 struct dentry *parent = dget_parent(dentry);
1079 struct inode *dir = d_inode(parent);
1080 struct nfs_server *server = NFS_SERVER(dir);
1081 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
1082 struct nfs4_opendata *p;
1083
1084 p = kzalloc(sizeof(*p), gfp_mask);
1085 if (p == NULL)
1086 goto err;
1087
1088 p->f_label = nfs4_label_alloc(server, gfp_mask);
1089 if (IS_ERR(p->f_label))
1090 goto err_free_p;
1091
1092 p->a_label = nfs4_label_alloc(server, gfp_mask);
1093 if (IS_ERR(p->a_label))
1094 goto err_free_f;
1095
1096 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1097 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1098 if (IS_ERR(p->o_arg.seqid))
1099 goto err_free_label;
1100 nfs_sb_active(dentry->d_sb);
1101 p->dentry = dget(dentry);
1102 p->dir = parent;
1103 p->owner = sp;
1104 atomic_inc(&sp->so_count);
1105 p->o_arg.open_flags = flags;
1106 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
1107 p->o_arg.share_access = nfs4_map_atomic_open_share(server,
1108 fmode, flags);
1109 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
1110 * will return permission denied for all bits until close */
1111 if (!(flags & O_EXCL)) {
1112 /* ask server to check for all possible rights as results
1113 * are cached */
1114 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
1115 NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
1116 }
1117 p->o_arg.clientid = server->nfs_client->cl_clientid;
1118 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
1119 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
1120 p->o_arg.name = &dentry->d_name;
1121 p->o_arg.server = server;
1122 p->o_arg.bitmask = nfs4_bitmask(server, label);
1123 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1124 p->o_arg.label = nfs4_label_copy(p->a_label, label);
1125 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1126 switch (p->o_arg.claim) {
1127 case NFS4_OPEN_CLAIM_NULL:
1128 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1129 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1130 p->o_arg.fh = NFS_FH(dir);
1131 break;
1132 case NFS4_OPEN_CLAIM_PREVIOUS:
1133 case NFS4_OPEN_CLAIM_FH:
1134 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1135 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1136 p->o_arg.fh = NFS_FH(d_inode(dentry));
1137 }
1138 if (attrs != NULL && attrs->ia_valid != 0) {
1139 __u32 verf[2];
1140
1141 p->o_arg.u.attrs = &p->attrs;
1142 memcpy(&p->attrs, attrs, sizeof(p->attrs));
1143
1144 verf[0] = jiffies;
1145 verf[1] = current->pid;
1146 memcpy(p->o_arg.u.verifier.data, verf,
1147 sizeof(p->o_arg.u.verifier.data));
1148 }
1149 p->c_arg.fh = &p->o_res.fh;
1150 p->c_arg.stateid = &p->o_res.stateid;
1151 p->c_arg.seqid = p->o_arg.seqid;
1152 nfs4_init_opendata_res(p);
1153 kref_init(&p->kref);
1154 return p;
1155
1156 err_free_label:
1157 nfs4_label_free(p->a_label);
1158 err_free_f:
1159 nfs4_label_free(p->f_label);
1160 err_free_p:
1161 kfree(p);
1162 err:
1163 dput(parent);
1164 return NULL;
1165 }
1166
1167 static void nfs4_opendata_free(struct kref *kref)
1168 {
1169 struct nfs4_opendata *p = container_of(kref,
1170 struct nfs4_opendata, kref);
1171 struct super_block *sb = p->dentry->d_sb;
1172
1173 nfs_free_seqid(p->o_arg.seqid);
1174 if (p->state != NULL)
1175 nfs4_put_open_state(p->state);
1176 nfs4_put_state_owner(p->owner);
1177
1178 nfs4_label_free(p->a_label);
1179 nfs4_label_free(p->f_label);
1180
1181 dput(p->dir);
1182 dput(p->dentry);
1183 nfs_sb_deactive(sb);
1184 nfs_fattr_free_names(&p->f_attr);
1185 kfree(p->f_attr.mdsthreshold);
1186 kfree(p);
1187 }
1188
1189 static void nfs4_opendata_put(struct nfs4_opendata *p)
1190 {
1191 if (p != NULL)
1192 kref_put(&p->kref, nfs4_opendata_free);
1193 }
1194
1195 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
1196 {
1197 int ret;
1198
1199 ret = rpc_wait_for_completion_task(task);
1200 return ret;
1201 }
1202
1203 static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
1204 fmode_t fmode)
1205 {
1206 switch(fmode & (FMODE_READ|FMODE_WRITE)) {
1207 case FMODE_READ|FMODE_WRITE:
1208 return state->n_rdwr != 0;
1209 case FMODE_WRITE:
1210 return state->n_wronly != 0;
1211 case FMODE_READ:
1212 return state->n_rdonly != 0;
1213 }
1214 WARN_ON_ONCE(1);
1215 return false;
1216 }
1217
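/*
 * Return non-zero if an existing open stateid already covers the
 * requested open mode, so the open can be satisfied from cache without
 * another OPEN on the wire.  O_EXCL and O_TRUNC always force a new
 * OPEN call.
 */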
1218 static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
1219 {
1220 int ret = 0;
1221
1222 if (open_mode & (O_EXCL|O_TRUNC))
1223 goto out;
1224 switch (mode & (FMODE_READ|FMODE_WRITE)) {
1225 case FMODE_READ:
1226 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
1227 && state->n_rdonly != 0;
1228 break;
1229 case FMODE_WRITE:
1230 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
1231 && state->n_wronly != 0;
1232 break;
1233 case FMODE_READ|FMODE_WRITE:
1234 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
1235 && state->n_rdwr != 0;
1236 }
1237 out:
1238 return ret;
1239 }
1240
1241 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
1242 enum open_claim_type4 claim)
1243 {
1244 if (delegation == NULL)
1245 return 0;
1246 if ((delegation->type & fmode) != fmode)
1247 return 0;
1248 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
1249 return 0;
1250 switch (claim) {
1251 case NFS4_OPEN_CLAIM_NULL:
1252 case NFS4_OPEN_CLAIM_FH:
1253 break;
1254 case NFS4_OPEN_CLAIM_PREVIOUS:
1255 if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
1256 break;
1257 default:
1258 return 0;
1259 }
1260 nfs_mark_delegation_referenced(delegation);
1261 return 1;
1262 }
1263
1264 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
1265 {
1266 switch (fmode) {
1267 case FMODE_WRITE:
1268 state->n_wronly++;
1269 break;
1270 case FMODE_READ:
1271 state->n_rdonly++;
1272 break;
1273 case FMODE_READ|FMODE_WRITE:
1274 state->n_rdwr++;
1275 }
1276 nfs4_state_set_mode_locked(state, state->state | fmode);
1277 }
1278
1279 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
1280 {
1281 struct nfs_client *clp = state->owner->so_server->nfs_client;
1282 bool need_recover = false;
1283
1284 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
1285 need_recover = true;
1286 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
1287 need_recover = true;
1288 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
1289 need_recover = true;
1290 if (need_recover)
1291 nfs4_state_mark_reclaim_nograce(clp, state);
1292 }
1293
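/*
 * Decide whether a freshly returned open stateid should replace the one
 * we have cached: yes if no open stateid has been recorded yet, if the
 * "other" field differs (in which case any stale open modes are marked
 * for recovery), or if the sequence id is newer.
 */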
1294 static bool nfs_need_update_open_stateid(struct nfs4_state *state,
1295 nfs4_stateid *stateid)
1296 {
1297 if (test_and_set_bit(NFS_OPEN_STATE, &state->flags) == 0)
1298 return true;
1299 if (!nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1300 nfs_test_and_clear_all_open_stateid(state);
1301 return true;
1302 }
1303 if (nfs4_stateid_is_newer(stateid, &state->open_stateid))
1304 return true;
1305 return false;
1306 }
1307
1308 static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1309 {
1310 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
1311 return;
1312 if (state->n_wronly)
1313 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1314 if (state->n_rdonly)
1315 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1316 if (state->n_rdwr)
1317 set_bit(NFS_O_RDWR_STATE, &state->flags);
1318 set_bit(NFS_OPEN_STATE, &state->flags);
1319 }
1320
1321 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1322 nfs4_stateid *arg_stateid,
1323 nfs4_stateid *stateid, fmode_t fmode)
1324 {
1325 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1326 switch (fmode & (FMODE_READ|FMODE_WRITE)) {
1327 case FMODE_WRITE:
1328 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1329 break;
1330 case FMODE_READ:
1331 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1332 break;
1333 case 0:
1334 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1335 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1336 clear_bit(NFS_OPEN_STATE, &state->flags);
1337 }
1338 if (stateid == NULL)
1339 return;
1340 /* Handle races with OPEN */
1341 if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) ||
1342 (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
1343 !nfs4_stateid_is_newer(stateid, &state->open_stateid))) {
1344 nfs_resync_open_stateid_locked(state);
1345 return;
1346 }
1347 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1348 nfs4_stateid_copy(&state->stateid, stateid);
1349 nfs4_stateid_copy(&state->open_stateid, stateid);
1350 }
1351
1352 static void nfs_clear_open_stateid(struct nfs4_state *state,
1353 nfs4_stateid *arg_stateid,
1354 nfs4_stateid *stateid, fmode_t fmode)
1355 {
1356 write_seqlock(&state->seqlock);
1357 nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode);
1358 write_sequnlock(&state->seqlock);
1359 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1360 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1361 }
1362
1363 static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
1364 {
1365 switch (fmode) {
1366 case FMODE_READ:
1367 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1368 break;
1369 case FMODE_WRITE:
1370 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1371 break;
1372 case FMODE_READ|FMODE_WRITE:
1373 set_bit(NFS_O_RDWR_STATE, &state->flags);
1374 }
1375 if (!nfs_need_update_open_stateid(state, stateid))
1376 return;
1377 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1378 nfs4_stateid_copy(&state->stateid, stateid);
1379 nfs4_stateid_copy(&state->open_stateid, stateid);
1380 }
1381
1382 static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
1383 {
1384 /*
1385 * Protect the call to nfs4_state_set_mode_locked and
1386 * serialise the stateid update
1387 */
1388 write_seqlock(&state->seqlock);
1389 if (deleg_stateid != NULL) {
1390 nfs4_stateid_copy(&state->stateid, deleg_stateid);
1391 set_bit(NFS_DELEGATED_STATE, &state->flags);
1392 }
1393 if (open_stateid != NULL)
1394 nfs_set_open_stateid_locked(state, open_stateid, fmode);
1395 write_sequnlock(&state->seqlock);
1396 spin_lock(&state->owner->so_lock);
1397 update_open_stateflags(state, fmode);
1398 spin_unlock(&state->owner->so_lock);
1399 }
1400
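/*
 * Record the result of an OPEN in the nfs4_state.  If we hold a
 * delegation that covers the requested open mode, its stateid is copied
 * in as well; otherwise only the open stateid is updated.  Returns 1 if
 * the state was updated.
 */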
1401 static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
1402 {
1403 struct nfs_inode *nfsi = NFS_I(state->inode);
1404 struct nfs_delegation *deleg_cur;
1405 int ret = 0;
1406
1407 fmode &= (FMODE_READ|FMODE_WRITE);
1408
1409 rcu_read_lock();
1410 deleg_cur = rcu_dereference(nfsi->delegation);
1411 if (deleg_cur == NULL)
1412 goto no_delegation;
1413
1414 spin_lock(&deleg_cur->lock);
1415 if (rcu_dereference(nfsi->delegation) != deleg_cur ||
1416 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
1417 (deleg_cur->type & fmode) != fmode)
1418 goto no_delegation_unlock;
1419
1420 if (delegation == NULL)
1421 delegation = &deleg_cur->stateid;
1422 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
1423 goto no_delegation_unlock;
1424
1425 nfs_mark_delegation_referenced(deleg_cur);
1426 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
1427 ret = 1;
1428 no_delegation_unlock:
1429 spin_unlock(&deleg_cur->lock);
1430 no_delegation:
1431 rcu_read_unlock();
1432
1433 if (!ret && open_stateid != NULL) {
1434 __update_open_stateid(state, open_stateid, NULL, fmode);
1435 ret = 1;
1436 }
1437 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1438 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1439
1440 return ret;
1441 }
1442
1443 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
1444 const nfs4_stateid *stateid)
1445 {
1446 struct nfs4_state *state = lsp->ls_state;
1447 bool ret = false;
1448
1449 spin_lock(&state->state_lock);
1450 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
1451 goto out_noupdate;
1452 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
1453 goto out_noupdate;
1454 nfs4_stateid_copy(&lsp->ls_stateid, stateid);
1455 ret = true;
1456 out_noupdate:
1457 spin_unlock(&state->state_lock);
1458 return ret;
1459 }
1460
1461 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1462 {
1463 struct nfs_delegation *delegation;
1464
1465 rcu_read_lock();
1466 delegation = rcu_dereference(NFS_I(inode)->delegation);
1467 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1468 rcu_read_unlock();
1469 return;
1470 }
1471 rcu_read_unlock();
1472 nfs4_inode_return_delegation(inode);
1473 }
1474
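/*
 * Try to satisfy an open from state we already hold: either a cached
 * open of the right mode or an outstanding delegation.  Returns the
 * referenced nfs4_state on success, or an ERR_PTR (usually -EAGAIN,
 * meaning a real OPEN call is required).
 */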
1475 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1476 {
1477 struct nfs4_state *state = opendata->state;
1478 struct nfs_inode *nfsi = NFS_I(state->inode);
1479 struct nfs_delegation *delegation;
1480 int open_mode = opendata->o_arg.open_flags;
1481 fmode_t fmode = opendata->o_arg.fmode;
1482 enum open_claim_type4 claim = opendata->o_arg.claim;
1483 nfs4_stateid stateid;
1484 int ret = -EAGAIN;
1485
1486 for (;;) {
1487 spin_lock(&state->owner->so_lock);
1488 if (can_open_cached(state, fmode, open_mode)) {
1489 update_open_stateflags(state, fmode);
1490 spin_unlock(&state->owner->so_lock);
1491 goto out_return_state;
1492 }
1493 spin_unlock(&state->owner->so_lock);
1494 rcu_read_lock();
1495 delegation = rcu_dereference(nfsi->delegation);
1496 if (!can_open_delegated(delegation, fmode, claim)) {
1497 rcu_read_unlock();
1498 break;
1499 }
1500 /* Save the delegation */
1501 nfs4_stateid_copy(&stateid, &delegation->stateid);
1502 rcu_read_unlock();
1503 nfs_release_seqid(opendata->o_arg.seqid);
1504 if (!opendata->is_recover) {
1505 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1506 if (ret != 0)
1507 goto out;
1508 }
1509 ret = -EAGAIN;
1510
1511 /* Try to update the stateid using the delegation */
1512 if (update_open_stateid(state, NULL, &stateid, fmode))
1513 goto out_return_state;
1514 }
1515 out:
1516 return ERR_PTR(ret);
1517 out_return_state:
1518 atomic_inc(&state->count);
1519 return state;
1520 }
1521
1522 static void
1523 nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1524 {
1525 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
1526 struct nfs_delegation *delegation;
1527 int delegation_flags = 0;
1528
1529 rcu_read_lock();
1530 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1531 if (delegation)
1532 delegation_flags = delegation->flags;
1533 rcu_read_unlock();
1534 switch (data->o_arg.claim) {
1535 default:
1536 break;
1537 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1538 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1539 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1540 "returning a delegation for "
1541 "OPEN(CLAIM_DELEGATE_CUR)\n",
1542 clp->cl_hostname);
1543 return;
1544 }
1545 if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1546 nfs_inode_set_delegation(state->inode,
1547 data->owner->so_cred,
1548 &data->o_res);
1549 else
1550 nfs_inode_reclaim_delegation(state->inode,
1551 data->owner->so_cred,
1552 &data->o_res);
1553 }
1554
1555 /*
1556 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
1557 * and update the nfs4_state.
1558 */
1559 static struct nfs4_state *
1560 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
1561 {
1562 struct inode *inode = data->state->inode;
1563 struct nfs4_state *state = data->state;
1564 int ret;
1565
1566 if (!data->rpc_done) {
1567 if (data->rpc_status) {
1568 ret = data->rpc_status;
1569 goto err;
1570 }
1571 /* cached opens have already been processed */
1572 goto update;
1573 }
1574
1575 ret = nfs_refresh_inode(inode, &data->f_attr);
1576 if (ret)
1577 goto err;
1578
1579 if (data->o_res.delegation_type != 0)
1580 nfs4_opendata_check_deleg(data, state);
1581 update:
1582 update_open_stateid(state, &data->o_res.stateid, NULL,
1583 data->o_arg.fmode);
1584 atomic_inc(&state->count);
1585
1586 return state;
1587 err:
1588 return ERR_PTR(ret);
1589
1590 }
1591
1592 static struct nfs4_state *
1593 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1594 {
1595 struct inode *inode;
1596 struct nfs4_state *state = NULL;
1597 int ret;
1598
1599 if (!data->rpc_done) {
1600 state = nfs4_try_open_cached(data);
1601 goto out;
1602 }
1603
1604 ret = -EAGAIN;
1605 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1606 goto err;
1607 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr, data->f_label);
1608 ret = PTR_ERR(inode);
1609 if (IS_ERR(inode))
1610 goto err;
1611 ret = -ENOMEM;
1612 state = nfs4_get_open_state(inode, data->owner);
1613 if (state == NULL)
1614 goto err_put_inode;
1615 if (data->o_res.delegation_type != 0)
1616 nfs4_opendata_check_deleg(data, state);
1617 update_open_stateid(state, &data->o_res.stateid, NULL,
1618 data->o_arg.fmode);
1619 iput(inode);
1620 out:
1621 nfs_release_seqid(data->o_arg.seqid);
1622 return state;
1623 err_put_inode:
1624 iput(inode);
1625 err:
1626 return ERR_PTR(ret);
1627 }
1628
1629 static struct nfs4_state *
1630 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1631 {
1632 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
1633 return _nfs4_opendata_reclaim_to_nfs4_state(data);
1634 return _nfs4_opendata_to_nfs4_state(data);
1635 }
1636
1637 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1638 {
1639 struct nfs_inode *nfsi = NFS_I(state->inode);
1640 struct nfs_open_context *ctx;
1641
1642 spin_lock(&state->inode->i_lock);
1643 list_for_each_entry(ctx, &nfsi->open_files, list) {
1644 if (ctx->state != state)
1645 continue;
1646 get_nfs_open_context(ctx);
1647 spin_unlock(&state->inode->i_lock);
1648 return ctx;
1649 }
1650 spin_unlock(&state->inode->i_lock);
1651 return ERR_PTR(-ENOENT);
1652 }
1653
1654 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
1655 struct nfs4_state *state, enum open_claim_type4 claim)
1656 {
1657 struct nfs4_opendata *opendata;
1658
1659 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
1660 NULL, NULL, claim, GFP_NOFS);
1661 if (opendata == NULL)
1662 return ERR_PTR(-ENOMEM);
1663 opendata->state = state;
1664 atomic_inc(&state->count);
1665 return opendata;
1666 }
1667
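/*
 * Re-send an OPEN for one share mode during state recovery, but only if
 * that mode is actually in use.  The recovered state must map back to
 * the state being recovered, otherwise -ESTALE is returned.
 */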
1668 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
1669 fmode_t fmode)
1670 {
1671 struct nfs4_state *newstate;
1672 int ret;
1673
1674 if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
1675 return 0;
1676 opendata->o_arg.open_flags = 0;
1677 opendata->o_arg.fmode = fmode;
1678 opendata->o_arg.share_access = nfs4_map_atomic_open_share(
1679 NFS_SB(opendata->dentry->d_sb),
1680 fmode, 0);
1681 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1682 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1683 nfs4_init_opendata_res(opendata);
1684 ret = _nfs4_recover_proc_open(opendata);
1685 if (ret != 0)
1686 return ret;
1687 newstate = nfs4_opendata_to_nfs4_state(opendata);
1688 if (IS_ERR(newstate))
1689 return PTR_ERR(newstate);
1690 if (newstate != opendata->state)
1691 ret = -ESTALE;
1692 nfs4_close_state(newstate, fmode);
1693 return ret;
1694 }
1695
1696 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1697 {
1698 int ret;
1699
1700 /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
1701 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1702 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1703 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1704 /* memory barrier prior to reading state->n_* */
1705 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1706 clear_bit(NFS_OPEN_STATE, &state->flags);
1707 smp_rmb();
1708 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
1709 if (ret != 0)
1710 return ret;
1711 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
1712 if (ret != 0)
1713 return ret;
1714 ret = nfs4_open_recover_helper(opendata, FMODE_READ);
1715 if (ret != 0)
1716 return ret;
1717 /*
1718 * We may have performed cached opens for all three recoveries.
1719 * Check if we need to update the current stateid.
1720 */
1721 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1722 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
1723 write_seqlock(&state->seqlock);
1724 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1725 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1726 write_sequnlock(&state->seqlock);
1727 }
1728 return 0;
1729 }
1730
1731 /*
1732 * OPEN_RECLAIM:
1733 * reclaim state on the server after a reboot.
1734 */
1735 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1736 {
1737 struct nfs_delegation *delegation;
1738 struct nfs4_opendata *opendata;
1739 fmode_t delegation_type = 0;
1740 int status;
1741
1742 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1743 NFS4_OPEN_CLAIM_PREVIOUS);
1744 if (IS_ERR(opendata))
1745 return PTR_ERR(opendata);
1746 rcu_read_lock();
1747 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1748 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1749 delegation_type = delegation->type;
1750 rcu_read_unlock();
1751 opendata->o_arg.u.delegation_type = delegation_type;
1752 status = nfs4_open_recover(opendata, state);
1753 nfs4_opendata_put(opendata);
1754 return status;
1755 }
1756
1757 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1758 {
1759 struct nfs_server *server = NFS_SERVER(state->inode);
1760 struct nfs4_exception exception = { };
1761 int err;
1762 do {
1763 err = _nfs4_do_open_reclaim(ctx, state);
1764 trace_nfs4_open_reclaim(ctx, 0, err);
1765 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
1766 continue;
1767 if (err != -NFS4ERR_DELAY)
1768 break;
1769 nfs4_handle_exception(server, err, &exception);
1770 } while (exception.retry);
1771 return err;
1772 }
1773
1774 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
1775 {
1776 struct nfs_open_context *ctx;
1777 int ret;
1778
1779 ctx = nfs4_state_find_open_context(state);
1780 if (IS_ERR(ctx))
1781 return -EAGAIN;
1782 ret = nfs4_do_open_reclaim(ctx, state);
1783 put_nfs_open_context(ctx);
1784 return ret;
1785 }
1786
1787 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
1788 {
1789 switch (err) {
1790 default:
1791 printk(KERN_ERR "NFS: %s: unhandled error "
1792 "%d.\n", __func__, err);
1793 case 0:
1794 case -ENOENT:
1795 case -EAGAIN:
1796 case -ESTALE:
1797 break;
1798 case -NFS4ERR_BADSESSION:
1799 case -NFS4ERR_BADSLOT:
1800 case -NFS4ERR_BAD_HIGH_SLOT:
1801 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1802 case -NFS4ERR_DEADSESSION:
1803 set_bit(NFS_DELEGATED_STATE, &state->flags);
1804 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
1805 return -EAGAIN;
1806 case -NFS4ERR_STALE_CLIENTID:
1807 case -NFS4ERR_STALE_STATEID:
1808 set_bit(NFS_DELEGATED_STATE, &state->flags);
1809 case -NFS4ERR_EXPIRED:
1810 /* Don't recall a delegation if it was lost */
1811 nfs4_schedule_lease_recovery(server->nfs_client);
1812 return -EAGAIN;
1813 case -NFS4ERR_MOVED:
1814 nfs4_schedule_migration_recovery(server);
1815 return -EAGAIN;
1816 case -NFS4ERR_LEASE_MOVED:
1817 nfs4_schedule_lease_moved_recovery(server->nfs_client);
1818 return -EAGAIN;
1819 case -NFS4ERR_DELEG_REVOKED:
1820 case -NFS4ERR_ADMIN_REVOKED:
1821 case -NFS4ERR_BAD_STATEID:
1822 case -NFS4ERR_OPENMODE:
1823 nfs_inode_find_state_and_recover(state->inode,
1824 stateid);
1825 nfs4_schedule_stateid_recovery(server, state);
1826 return -EAGAIN;
1827 case -NFS4ERR_DELAY:
1828 case -NFS4ERR_GRACE:
1829 set_bit(NFS_DELEGATED_STATE, &state->flags);
1830 ssleep(1);
1831 return -EAGAIN;
1832 case -ENOMEM:
1833 case -NFS4ERR_DENIED:
1834 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
1835 return 0;
1836 }
1837 return err;
1838 }
1839
1840 int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
1841 struct nfs4_state *state, const nfs4_stateid *stateid,
1842 fmode_t type)
1843 {
1844 struct nfs_server *server = NFS_SERVER(state->inode);
1845 struct nfs4_opendata *opendata;
1846 int err = 0;
1847
1848 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1849 NFS4_OPEN_CLAIM_DELEG_CUR_FH);
1850 if (IS_ERR(opendata))
1851 return PTR_ERR(opendata);
1852 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
1853 write_seqlock(&state->seqlock);
1854 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1855 write_sequnlock(&state->seqlock);
1856 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1857 switch (type & (FMODE_READ|FMODE_WRITE)) {
1858 case FMODE_READ|FMODE_WRITE:
1859 case FMODE_WRITE:
1860 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
1861 if (err)
1862 break;
1863 err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
1864 if (err)
1865 break;
1866 case FMODE_READ:
1867 err = nfs4_open_recover_helper(opendata, FMODE_READ);
1868 }
1869 nfs4_opendata_put(opendata);
1870 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
1871 }
1872
1873 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
1874 {
1875 struct nfs4_opendata *data = calldata;
1876
1877 nfs40_setup_sequence(data->o_arg.server->nfs_client->cl_slot_tbl,
1878 &data->c_arg.seq_args, &data->c_res.seq_res, task);
1879 }
1880
1881 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1882 {
1883 struct nfs4_opendata *data = calldata;
1884
1885 nfs40_sequence_done(task, &data->c_res.seq_res);
1886
1887 data->rpc_status = task->tk_status;
1888 if (data->rpc_status == 0) {
1889 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
1890 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1891 renew_lease(data->o_res.server, data->timestamp);
1892 data->rpc_done = 1;
1893 }
1894 }
1895
1896 static void nfs4_open_confirm_release(void *calldata)
1897 {
1898 struct nfs4_opendata *data = calldata;
1899 struct nfs4_state *state = NULL;
1900
1901 /* If this request hasn't been cancelled, do nothing */
1902 if (data->cancelled == 0)
1903 goto out_free;
1904 /* In case of error, no cleanup! */
1905 if (!data->rpc_done)
1906 goto out_free;
1907 state = nfs4_opendata_to_nfs4_state(data);
1908 if (!IS_ERR(state))
1909 nfs4_close_state(state, data->o_arg.fmode);
1910 out_free:
1911 nfs4_opendata_put(data);
1912 }
1913
1914 static const struct rpc_call_ops nfs4_open_confirm_ops = {
1915 .rpc_call_prepare = nfs4_open_confirm_prepare,
1916 .rpc_call_done = nfs4_open_confirm_done,
1917 .rpc_release = nfs4_open_confirm_release,
1918 };
1919
1920 /*
1921 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
1922 */
1923 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
1924 {
1925 struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
1926 struct rpc_task *task;
1927 struct rpc_message msg = {
1928 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
1929 .rpc_argp = &data->c_arg,
1930 .rpc_resp = &data->c_res,
1931 .rpc_cred = data->owner->so_cred,
1932 };
1933 struct rpc_task_setup task_setup_data = {
1934 .rpc_client = server->client,
1935 .rpc_message = &msg,
1936 .callback_ops = &nfs4_open_confirm_ops,
1937 .callback_data = data,
1938 .workqueue = nfsiod_workqueue,
1939 .flags = RPC_TASK_ASYNC,
1940 };
1941 int status;
1942
1943 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1);
1944 kref_get(&data->kref);
1945 data->rpc_done = 0;
1946 data->rpc_status = 0;
1947 data->timestamp = jiffies;
1948 if (data->is_recover)
1949 nfs4_set_sequence_privileged(&data->c_arg.seq_args);
1950 task = rpc_run_task(&task_setup_data);
1951 if (IS_ERR(task))
1952 return PTR_ERR(task);
1953 status = nfs4_wait_for_completion_rpc_task(task);
1954 if (status != 0) {
1955 data->cancelled = 1;
1956 smp_wmb();
1957 } else
1958 status = data->rpc_status;
1959 rpc_put_task(task);
1960 return status;
1961 }
1962
1963 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1964 {
1965 struct nfs4_opendata *data = calldata;
1966 struct nfs4_state_owner *sp = data->owner;
1967 struct nfs_client *clp = sp->so_server->nfs_client;
1968 enum open_claim_type4 claim = data->o_arg.claim;
1969
1970 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
1971 goto out_wait;
1972 /*
1973 * Check if we still need to send an OPEN call, or if we can use
1974 * a delegation instead.
1975 */
1976 if (data->state != NULL) {
1977 struct nfs_delegation *delegation;
1978
1979 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
1980 goto out_no_action;
1981 rcu_read_lock();
1982 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
1983 if (can_open_delegated(delegation, data->o_arg.fmode, claim))
1984 goto unlock_no_action;
1985 rcu_read_unlock();
1986 }
1987 /* Update client id. */
1988 data->o_arg.clientid = clp->cl_clientid;
1989 switch (claim) {
1990 default:
1991 break;
1992 case NFS4_OPEN_CLAIM_PREVIOUS:
1993 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1994 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1995 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
1996 case NFS4_OPEN_CLAIM_FH:
1997 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
1998 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
1999 }
2000 data->timestamp = jiffies;
2001 if (nfs4_setup_sequence(data->o_arg.server,
2002 &data->o_arg.seq_args,
2003 &data->o_res.seq_res,
2004 task) != 0)
2005 nfs_release_seqid(data->o_arg.seqid);
2006
2007 /* Set the create mode (note dependency on the session type) */
2008 data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
2009 if (data->o_arg.open_flags & O_EXCL) {
2010 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
2011 if (nfs4_has_persistent_session(clp))
2012 data->o_arg.createmode = NFS4_CREATE_GUARDED;
2013 else if (clp->cl_mvops->minor_version > 0)
2014 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
2015 }
2016 return;
2017 unlock_no_action:
2018 rcu_read_unlock();
2019 out_no_action:
2020 task->tk_action = NULL;
2021 out_wait:
2022 nfs4_sequence_done(task, &data->o_res.seq_res);
2023 }
2024
2025 static void nfs4_open_done(struct rpc_task *task, void *calldata)
2026 {
2027 struct nfs4_opendata *data = calldata;
2028
2029 data->rpc_status = task->tk_status;
2030
2031 if (!nfs4_sequence_done(task, &data->o_res.seq_res))
2032 return;
2033
2034 if (task->tk_status == 0) {
2035 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
2036 switch (data->o_res.f_attr->mode & S_IFMT) {
2037 case S_IFREG:
2038 break;
2039 case S_IFLNK:
2040 data->rpc_status = -ELOOP;
2041 break;
2042 case S_IFDIR:
2043 data->rpc_status = -EISDIR;
2044 break;
2045 default:
2046 data->rpc_status = -ENOTDIR;
2047 }
2048 }
2049 renew_lease(data->o_res.server, data->timestamp);
2050 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
2051 nfs_confirm_seqid(&data->owner->so_seqid, 0);
2052 }
2053 data->rpc_done = 1;
2054 }
2055
2056 static void nfs4_open_release(void *calldata)
2057 {
2058 struct nfs4_opendata *data = calldata;
2059 struct nfs4_state *state = NULL;
2060
2061 /* If this request hasn't been cancelled, do nothing */
2062 if (data->cancelled == 0)
2063 goto out_free;
2064 /* In case of error, no cleanup! */
2065 if (data->rpc_status != 0 || !data->rpc_done)
2066 goto out_free;
2067 /* In case we need an open_confirm, no cleanup! */
2068 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
2069 goto out_free;
2070 state = nfs4_opendata_to_nfs4_state(data);
2071 if (!IS_ERR(state))
2072 nfs4_close_state(state, data->o_arg.fmode);
2073 out_free:
2074 nfs4_opendata_put(data);
2075 }
2076
2077 static const struct rpc_call_ops nfs4_open_ops = {
2078 .rpc_call_prepare = nfs4_open_prepare,
2079 .rpc_call_done = nfs4_open_done,
2080 .rpc_release = nfs4_open_release,
2081 };
2082
2083 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
2084 {
2085 struct inode *dir = d_inode(data->dir);
2086 struct nfs_server *server = NFS_SERVER(dir);
2087 struct nfs_openargs *o_arg = &data->o_arg;
2088 struct nfs_openres *o_res = &data->o_res;
2089 struct rpc_task *task;
2090 struct rpc_message msg = {
2091 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
2092 .rpc_argp = o_arg,
2093 .rpc_resp = o_res,
2094 .rpc_cred = data->owner->so_cred,
2095 };
2096 struct rpc_task_setup task_setup_data = {
2097 .rpc_client = server->client,
2098 .rpc_message = &msg,
2099 .callback_ops = &nfs4_open_ops,
2100 .callback_data = data,
2101 .workqueue = nfsiod_workqueue,
2102 .flags = RPC_TASK_ASYNC,
2103 };
2104 int status;
2105
2106 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
2107 kref_get(&data->kref);
2108 data->rpc_done = 0;
2109 data->rpc_status = 0;
2110 data->cancelled = 0;
2111 data->is_recover = 0;
2112 if (isrecover) {
2113 nfs4_set_sequence_privileged(&o_arg->seq_args);
2114 data->is_recover = 1;
2115 }
2116 task = rpc_run_task(&task_setup_data);
2117 if (IS_ERR(task))
2118 return PTR_ERR(task);
2119 status = nfs4_wait_for_completion_rpc_task(task);
2120 if (status != 0) {
2121 data->cancelled = 1;
2122 smp_wmb();
2123 } else
2124 status = data->rpc_status;
2125 rpc_put_task(task);
2126
2127 return status;
2128 }
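/*
 * nfs4_run_open_task() follows the asynchronous RPC pattern used throughout
 * this file: take an extra kref on the opendata, fire the task with
 * RPC_TASK_ASYNC, then wait interruptibly. A sketch of the lifetime rules
 * (illustrative only, mirroring the code above):
 *
 *	kref_get(&data->kref);				// reference owned by the task
 *	task = rpc_run_task(&task_setup_data);
 *	status = nfs4_wait_for_completion_rpc_task(task);
 *	if (status != 0)				// interrupted by a signal
 *		data->cancelled = 1;			// rpc_release cleans up the state
 *
 * The smp_wmb() is intended to make the store to ->cancelled visible
 * before rpc_put_task() allows the release callback to run.
 */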
2129
2130 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
2131 {
2132 struct inode *dir = d_inode(data->dir);
2133 struct nfs_openres *o_res = &data->o_res;
2134 int status;
2135
2136 status = nfs4_run_open_task(data, 1);
2137 if (status != 0 || !data->rpc_done)
2138 return status;
2139
2140 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
2141
2142 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2143 status = _nfs4_proc_open_confirm(data);
2144 if (status != 0)
2145 return status;
2146 }
2147
2148 return status;
2149 }
2150
2151 /*
2152 * Additional permission checks in order to distinguish between an
2153 * open for read, and an open for execute. This works around the
2154 * fact that NFSv4 OPEN treats read and execute permissions as being
2155 * the same.
2156 * Note that in the non-execute case, we want to turn off permission
2157 * checking if we just created a new file (POSIX open() semantics).
2158 */
2159 static int nfs4_opendata_access(struct rpc_cred *cred,
2160 struct nfs4_opendata *opendata,
2161 struct nfs4_state *state, fmode_t fmode,
2162 int openflags)
2163 {
2164 struct nfs_access_entry cache;
2165 u32 mask;
2166
2167 /* access call failed or for some reason the server doesn't
2168 * support any access modes -- defer access call until later */
2169 if (opendata->o_res.access_supported == 0)
2170 return 0;
2171
2172 mask = 0;
2173 /*
2174 * Use openflags to check for exec, because fmode won't
2175 	 * always have FMODE_EXEC set when the file is opened for execute.
2176 */
2177 if (openflags & __FMODE_EXEC) {
2178 /* ONLY check for exec rights */
2179 mask = MAY_EXEC;
2180 } else if ((fmode & FMODE_READ) && !opendata->file_created)
2181 mask = MAY_READ;
2182
2183 cache.cred = cred;
2184 cache.jiffies = jiffies;
2185 nfs_access_set_mask(&cache, opendata->o_res.access_result);
2186 nfs_access_add_cache(state->inode, &cache);
2187
2188 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
2189 return 0;
2190
2191 /* even though OPEN succeeded, access is denied. Close the file */
2192 nfs4_close_state(state, fmode);
2193 return -EACCES;
2194 }
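/*
 * Example (illustrative): a plain open(O_RDONLY) of an existing file asks
 * for MAY_READ, so if the server's ACCESS result does not include read
 * permission the state is closed and -EACCES is returned. An open carrying
 * __FMODE_EXEC checks only MAY_EXEC, which is how an NFSv4 OPEN that
 * succeeded for "read" can still be refused for execute.
 */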
2195
2196 /*
2197 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
2198 */
2199 static int _nfs4_proc_open(struct nfs4_opendata *data)
2200 {
2201 struct inode *dir = d_inode(data->dir);
2202 struct nfs_server *server = NFS_SERVER(dir);
2203 struct nfs_openargs *o_arg = &data->o_arg;
2204 struct nfs_openres *o_res = &data->o_res;
2205 int status;
2206
2207 status = nfs4_run_open_task(data, 0);
2208 if (!data->rpc_done)
2209 return status;
2210 if (status != 0) {
2211 if (status == -NFS4ERR_BADNAME &&
2212 !(o_arg->open_flags & O_CREAT))
2213 return -ENOENT;
2214 return status;
2215 }
2216
2217 nfs_fattr_map_and_free_names(server, &data->f_attr);
2218
2219 if (o_arg->open_flags & O_CREAT) {
2220 update_changeattr(dir, &o_res->cinfo);
2221 if (o_arg->open_flags & O_EXCL)
2222 data->file_created = 1;
2223 else if (o_res->cinfo.before != o_res->cinfo.after)
2224 data->file_created = 1;
2225 }
2226 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
2227 server->caps &= ~NFS_CAP_POSIX_LOCK;
2228 	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2229 status = _nfs4_proc_open_confirm(data);
2230 if (status != 0)
2231 return status;
2232 }
2233 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
2234 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label);
2235 return 0;
2236 }
2237
2238 static int nfs4_recover_expired_lease(struct nfs_server *server)
2239 {
2240 return nfs4_client_recover_expired_lease(server->nfs_client);
2241 }
2242
2243 /*
2244 * OPEN_EXPIRED:
2245 * reclaim state on the server after a network partition.
2246 * Assumes caller holds the appropriate lock
2247 */
2248 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2249 {
2250 struct nfs4_opendata *opendata;
2251 int ret;
2252
2253 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2254 NFS4_OPEN_CLAIM_FH);
2255 if (IS_ERR(opendata))
2256 return PTR_ERR(opendata);
2257 ret = nfs4_open_recover(opendata, state);
2258 if (ret == -ESTALE)
2259 d_drop(ctx->dentry);
2260 nfs4_opendata_put(opendata);
2261 return ret;
2262 }
2263
2264 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2265 {
2266 struct nfs_server *server = NFS_SERVER(state->inode);
2267 struct nfs4_exception exception = { };
2268 int err;
2269
2270 do {
2271 err = _nfs4_open_expired(ctx, state);
2272 trace_nfs4_open_expired(ctx, 0, err);
2273 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2274 continue;
2275 switch (err) {
2276 default:
2277 goto out;
2278 case -NFS4ERR_GRACE:
2279 case -NFS4ERR_DELAY:
2280 nfs4_handle_exception(server, err, &exception);
2281 err = 0;
2282 }
2283 } while (exception.retry);
2284 out:
2285 return err;
2286 }
2287
2288 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2289 {
2290 struct nfs_open_context *ctx;
2291 int ret;
2292
2293 ctx = nfs4_state_find_open_context(state);
2294 if (IS_ERR(ctx))
2295 return -EAGAIN;
2296 ret = nfs4_do_open_expired(ctx, state);
2297 put_nfs_open_context(ctx);
2298 return ret;
2299 }
2300
2301 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state)
2302 {
2303 nfs_remove_bad_delegation(state->inode);
2304 write_seqlock(&state->seqlock);
2305 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2306 write_sequnlock(&state->seqlock);
2307 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2308 }
2309
2310 static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
2311 {
2312 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
2313 nfs_finish_clear_delegation_stateid(state);
2314 }
2315
2316 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2317 {
2318 /* NFSv4.0 doesn't allow for delegation recovery on open expire */
2319 nfs40_clear_delegation_stateid(state);
2320 return nfs4_open_expired(sp, state);
2321 }
2322
2323 #if defined(CONFIG_NFS_V4_1)
2324 static void nfs41_check_delegation_stateid(struct nfs4_state *state)
2325 {
2326 struct nfs_server *server = NFS_SERVER(state->inode);
2327 nfs4_stateid stateid;
2328 struct nfs_delegation *delegation;
2329 struct rpc_cred *cred;
2330 int status;
2331
2332 /* Get the delegation credential for use by test/free_stateid */
2333 rcu_read_lock();
2334 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2335 if (delegation == NULL) {
2336 rcu_read_unlock();
2337 return;
2338 }
2339
2340 nfs4_stateid_copy(&stateid, &delegation->stateid);
2341 cred = get_rpccred(delegation->cred);
2342 rcu_read_unlock();
2343 status = nfs41_test_stateid(server, &stateid, cred);
2344 trace_nfs4_test_delegation_stateid(state, NULL, status);
2345
2346 if (status != NFS_OK) {
2347 /* Free the stateid unless the server explicitly
2348 * informs us the stateid is unrecognized. */
2349 if (status != -NFS4ERR_BAD_STATEID)
2350 nfs41_free_stateid(server, &stateid, cred);
2351 nfs_finish_clear_delegation_stateid(state);
2352 }
2353
2354 put_rpccred(cred);
2355 }
2356
2357 /**
2358 * nfs41_check_open_stateid - possibly free an open stateid
2359 *
2360 * @state: NFSv4 state for an inode
2361 *
2362 * Returns NFS_OK if recovery for this stateid is now finished.
2363 * Otherwise a negative NFS4ERR value is returned.
2364 */
2365 static int nfs41_check_open_stateid(struct nfs4_state *state)
2366 {
2367 struct nfs_server *server = NFS_SERVER(state->inode);
2368 nfs4_stateid *stateid = &state->open_stateid;
2369 struct rpc_cred *cred = state->owner->so_cred;
2370 int status;
2371
2372 /* If a state reset has been done, test_stateid is unneeded */
2373 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) &&
2374 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) &&
2375 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0))
2376 return -NFS4ERR_BAD_STATEID;
2377
2378 status = nfs41_test_stateid(server, stateid, cred);
2379 trace_nfs4_test_open_stateid(state, NULL, status);
2380 if (status != NFS_OK) {
2381 /* Free the stateid unless the server explicitly
2382 * informs us the stateid is unrecognized. */
2383 if (status != -NFS4ERR_BAD_STATEID)
2384 nfs41_free_stateid(server, stateid, cred);
2385
2386 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2387 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2388 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2389 clear_bit(NFS_OPEN_STATE, &state->flags);
2390 }
2391 return status;
2392 }
2393
2394 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2395 {
2396 int status;
2397
2398 nfs41_check_delegation_stateid(state);
2399 status = nfs41_check_open_stateid(state);
2400 if (status != NFS_OK)
2401 status = nfs4_open_expired(sp, state);
2402 return status;
2403 }
2404 #endif
2405
2406 /*
2407  * On an EXCLUSIVE create, the server should send back a bitmask with FATTR4_*
2408  * fields corresponding to the attributes that were used to store the verifier.
2409  * Make sure we clobber those fields in the later setattr call.
2410 */
2411 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
2412 struct iattr *sattr, struct nfs4_label **label)
2413 {
2414 const u32 *attrset = opendata->o_res.attrset;
2415
2416 if ((attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
2417 !(sattr->ia_valid & ATTR_ATIME_SET))
2418 sattr->ia_valid |= ATTR_ATIME;
2419
2420 if ((attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
2421 !(sattr->ia_valid & ATTR_MTIME_SET))
2422 sattr->ia_valid |= ATTR_MTIME;
2423
2424 	/* Except for MODE, setting an attribute twice seems harmless. */
2425 if ((attrset[1] & FATTR4_WORD1_MODE))
2426 sattr->ia_valid &= ~ATTR_MODE;
2427
2428 if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL)
2429 *label = NULL;
2430 }
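/*
 * Example (illustrative): if the server reports that it stored the
 * EXCLUSIVE create verifier in time_access and time_modify, and the caller
 * did not ask for explicit timestamps, ATTR_ATIME and ATTR_MTIME are added
 * to sattr->ia_valid so the follow-up SETATTR overwrites the verifier with
 * sane timestamps.
 */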
2431
2432 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
2433 fmode_t fmode,
2434 int flags,
2435 struct nfs_open_context *ctx)
2436 {
2437 struct nfs4_state_owner *sp = opendata->owner;
2438 struct nfs_server *server = sp->so_server;
2439 struct dentry *dentry;
2440 struct nfs4_state *state;
2441 unsigned int seq;
2442 int ret;
2443
2444 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
2445
2446 ret = _nfs4_proc_open(opendata);
2447 if (ret != 0)
2448 goto out;
2449
2450 state = nfs4_opendata_to_nfs4_state(opendata);
2451 ret = PTR_ERR(state);
2452 if (IS_ERR(state))
2453 goto out;
2454 if (server->caps & NFS_CAP_POSIX_LOCK)
2455 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
2456
2457 dentry = opendata->dentry;
2458 if (d_really_is_negative(dentry)) {
2459 /* FIXME: Is this d_drop() ever needed? */
2460 d_drop(dentry);
2461 dentry = d_add_unique(dentry, igrab(state->inode));
2462 if (dentry == NULL) {
2463 dentry = opendata->dentry;
2464 } else if (dentry != ctx->dentry) {
2465 dput(ctx->dentry);
2466 ctx->dentry = dget(dentry);
2467 }
2468 nfs_set_verifier(dentry,
2469 nfs_save_change_attribute(d_inode(opendata->dir)));
2470 }
2471
2472 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags);
2473 if (ret != 0)
2474 goto out;
2475
2476 ctx->state = state;
2477 if (d_inode(dentry) == state->inode) {
2478 nfs_inode_attach_open_context(ctx);
2479 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
2480 nfs4_schedule_stateid_recovery(server, state);
2481 }
2482 out:
2483 return ret;
2484 }
2485
2486 /*
2487 * Returns a referenced nfs4_state
2488 */
2489 static int _nfs4_do_open(struct inode *dir,
2490 struct nfs_open_context *ctx,
2491 int flags,
2492 struct iattr *sattr,
2493 struct nfs4_label *label,
2494 int *opened)
2495 {
2496 struct nfs4_state_owner *sp;
2497 struct nfs4_state *state = NULL;
2498 struct nfs_server *server = NFS_SERVER(dir);
2499 struct nfs4_opendata *opendata;
2500 struct dentry *dentry = ctx->dentry;
2501 struct rpc_cred *cred = ctx->cred;
2502 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
2503 fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
2504 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
2505 struct nfs4_label *olabel = NULL;
2506 int status;
2507
2508 /* Protect against reboot recovery conflicts */
2509 status = -ENOMEM;
2510 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
2511 if (sp == NULL) {
2512 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
2513 goto out_err;
2514 }
2515 status = nfs4_recover_expired_lease(server);
2516 if (status != 0)
2517 goto err_put_state_owner;
2518 if (d_really_is_positive(dentry))
2519 nfs4_return_incompatible_delegation(d_inode(dentry), fmode);
2520 status = -ENOMEM;
2521 if (d_really_is_positive(dentry))
2522 claim = NFS4_OPEN_CLAIM_FH;
2523 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr,
2524 label, claim, GFP_KERNEL);
2525 if (opendata == NULL)
2526 goto err_put_state_owner;
2527
2528 if (label) {
2529 olabel = nfs4_label_alloc(server, GFP_KERNEL);
2530 if (IS_ERR(olabel)) {
2531 status = PTR_ERR(olabel);
2532 goto err_opendata_put;
2533 }
2534 }
2535
2536 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
2537 if (!opendata->f_attr.mdsthreshold) {
2538 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
2539 if (!opendata->f_attr.mdsthreshold)
2540 goto err_free_label;
2541 }
2542 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
2543 }
2544 if (d_really_is_positive(dentry))
2545 opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
2546
2547 status = _nfs4_open_and_get_state(opendata, fmode, flags, ctx);
2548 if (status != 0)
2549 goto err_free_label;
2550 state = ctx->state;
2551
2552 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
2553 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
2554 nfs4_exclusive_attrset(opendata, sattr, &label);
2555
2556 nfs_fattr_init(opendata->o_res.f_attr);
2557 status = nfs4_do_setattr(state->inode, cred,
2558 opendata->o_res.f_attr, sattr,
2559 state, label, olabel);
2560 if (status == 0) {
2561 nfs_setattr_update_inode(state->inode, sattr,
2562 opendata->o_res.f_attr);
2563 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
2564 }
2565 }
2566 if (opened && opendata->file_created)
2567 *opened |= FILE_CREATED;
2568
2569 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
2570 *ctx_th = opendata->f_attr.mdsthreshold;
2571 opendata->f_attr.mdsthreshold = NULL;
2572 }
2573
2574 nfs4_label_free(olabel);
2575
2576 nfs4_opendata_put(opendata);
2577 nfs4_put_state_owner(sp);
2578 return 0;
2579 err_free_label:
2580 nfs4_label_free(olabel);
2581 err_opendata_put:
2582 nfs4_opendata_put(opendata);
2583 err_put_state_owner:
2584 nfs4_put_state_owner(sp);
2585 out_err:
2586 return status;
2587 }
2588
2589
2590 static struct nfs4_state *nfs4_do_open(struct inode *dir,
2591 struct nfs_open_context *ctx,
2592 int flags,
2593 struct iattr *sattr,
2594 struct nfs4_label *label,
2595 int *opened)
2596 {
2597 struct nfs_server *server = NFS_SERVER(dir);
2598 struct nfs4_exception exception = { };
2599 struct nfs4_state *res;
2600 int status;
2601
2602 do {
2603 status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened);
2604 res = ctx->state;
2605 trace_nfs4_open_file(ctx, flags, status);
2606 if (status == 0)
2607 break;
2608 /* NOTE: BAD_SEQID means the server and client disagree about the
2609 * book-keeping w.r.t. state-changing operations
2610 * (OPEN/CLOSE/LOCK/LOCKU...)
2611 * It is actually a sign of a bug on the client or on the server.
2612 *
2613 * If we receive a BAD_SEQID error in the particular case of
2614 * doing an OPEN, we assume that nfs_increment_open_seqid() will
2615 * have unhashed the old state_owner for us, and that we can
2616 * therefore safely retry using a new one. We should still warn
2617 * the user though...
2618 */
2619 if (status == -NFS4ERR_BAD_SEQID) {
2620 pr_warn_ratelimited("NFS: v4 server %s "
2621 "returned a bad sequence-id error!\n",
2622 NFS_SERVER(dir)->nfs_client->cl_hostname);
2623 exception.retry = 1;
2624 continue;
2625 }
2626 /*
2627 * BAD_STATEID on OPEN means that the server cancelled our
2628 * state before it received the OPEN_CONFIRM.
2629 * Recover by retrying the request as per the discussion
2630 * on Page 181 of RFC3530.
2631 */
2632 if (status == -NFS4ERR_BAD_STATEID) {
2633 exception.retry = 1;
2634 continue;
2635 }
2636 if (status == -EAGAIN) {
2637 /* We must have found a delegation */
2638 exception.retry = 1;
2639 continue;
2640 }
2641 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
2642 continue;
2643 res = ERR_PTR(nfs4_handle_exception(server,
2644 status, &exception));
2645 } while (exception.retry);
2646 return res;
2647 }
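/*
 * nfs4_do_open() is one instance of the _nfs4_xxx/nfs4_xxx pairing used
 * throughout this file: the underscore variant issues the compound once,
 * and the wrapper loops on a struct nfs4_exception, letting
 * nfs4_handle_exception() decide whether the error is retryable. A minimal
 * sketch of the idiom (hypothetical helper name):
 *
 *	struct nfs4_exception exception = { };
 *	int err;
 *	do {
 *		err = _nfs4_proc_example(server, args);
 *		err = nfs4_handle_exception(server, err, &exception);
 *	} while (exception.retry);
 *	return err;
 */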
2648
2649 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2650 struct nfs_fattr *fattr, struct iattr *sattr,
2651 struct nfs4_state *state, struct nfs4_label *ilabel,
2652 struct nfs4_label *olabel)
2653 {
2654 struct nfs_server *server = NFS_SERVER(inode);
2655 struct nfs_setattrargs arg = {
2656 .fh = NFS_FH(inode),
2657 .iap = sattr,
2658 .server = server,
2659 .bitmask = server->attr_bitmask,
2660 .label = ilabel,
2661 };
2662 struct nfs_setattrres res = {
2663 .fattr = fattr,
2664 .label = olabel,
2665 .server = server,
2666 };
2667 struct rpc_message msg = {
2668 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
2669 .rpc_argp = &arg,
2670 .rpc_resp = &res,
2671 .rpc_cred = cred,
2672 };
2673 unsigned long timestamp = jiffies;
2674 fmode_t fmode;
2675 bool truncate;
2676 int status;
2677
2678 arg.bitmask = nfs4_bitmask(server, ilabel);
2679 if (ilabel)
2680 arg.bitmask = nfs4_bitmask(server, olabel);
2681
2682 nfs_fattr_init(fattr);
2683
2684 /* Servers should only apply open mode checks for file size changes */
2685 truncate = (sattr->ia_valid & ATTR_SIZE) ? true : false;
2686 fmode = truncate ? FMODE_WRITE : FMODE_READ;
2687
2688 if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) {
2689 /* Use that stateid */
2690 } else if (truncate && state != NULL) {
2691 struct nfs_lockowner lockowner = {
2692 .l_owner = current->files,
2693 .l_pid = current->tgid,
2694 };
2695 if (!nfs4_valid_open_stateid(state))
2696 return -EBADF;
2697 if (nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
2698 &lockowner) == -EIO)
2699 return -EBADF;
2700 } else
2701 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
2702
2703 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2704 if (status == 0 && state != NULL)
2705 renew_lease(server, timestamp);
2706 trace_nfs4_setattr(inode, &arg.stateid, status);
2707 return status;
2708 }
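/*
 * Stateid selection above, in order of preference (illustrative summary):
 * a delegation stateid when one covers the required mode, otherwise an
 * open or lock stateid for a size-changing SETATTR, and finally the zero
 * (anonymous) stateid for attribute-only changes.
 */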
2709
2710 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2711 struct nfs_fattr *fattr, struct iattr *sattr,
2712 struct nfs4_state *state, struct nfs4_label *ilabel,
2713 struct nfs4_label *olabel)
2714 {
2715 struct nfs_server *server = NFS_SERVER(inode);
2716 struct nfs4_exception exception = {
2717 .state = state,
2718 .inode = inode,
2719 };
2720 int err;
2721 do {
2722 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state, ilabel, olabel);
2723 switch (err) {
2724 case -NFS4ERR_OPENMODE:
2725 if (!(sattr->ia_valid & ATTR_SIZE)) {
2726 pr_warn_once("NFSv4: server %s is incorrectly "
2727 "applying open mode checks to "
2728 "a SETATTR that is not "
2729 "changing file size.\n",
2730 server->nfs_client->cl_hostname);
2731 }
2732 if (state && !(state->state & FMODE_WRITE)) {
2733 err = -EBADF;
2734 if (sattr->ia_valid & ATTR_OPEN)
2735 err = -EACCES;
2736 goto out;
2737 }
2738 }
2739 err = nfs4_handle_exception(server, err, &exception);
2740 } while (exception.retry);
2741 out:
2742 return err;
2743 }
2744
2745 static bool
2746 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
2747 {
2748 if (inode == NULL || !nfs_have_layout(inode))
2749 return false;
2750
2751 return pnfs_wait_on_layoutreturn(inode, task);
2752 }
2753
2754 struct nfs4_closedata {
2755 struct inode *inode;
2756 struct nfs4_state *state;
2757 struct nfs_closeargs arg;
2758 struct nfs_closeres res;
2759 struct nfs_fattr fattr;
2760 unsigned long timestamp;
2761 bool roc;
2762 u32 roc_barrier;
2763 };
2764
2765 static void nfs4_free_closedata(void *data)
2766 {
2767 struct nfs4_closedata *calldata = data;
2768 struct nfs4_state_owner *sp = calldata->state->owner;
2769 struct super_block *sb = calldata->state->inode->i_sb;
2770
2771 if (calldata->roc)
2772 pnfs_roc_release(calldata->state->inode);
2773 nfs4_put_open_state(calldata->state);
2774 nfs_free_seqid(calldata->arg.seqid);
2775 nfs4_put_state_owner(sp);
2776 nfs_sb_deactive(sb);
2777 kfree(calldata);
2778 }
2779
2780 static void nfs4_close_done(struct rpc_task *task, void *data)
2781 {
2782 struct nfs4_closedata *calldata = data;
2783 struct nfs4_state *state = calldata->state;
2784 struct nfs_server *server = NFS_SERVER(calldata->inode);
2785 nfs4_stateid *res_stateid = NULL;
2786
2787 dprintk("%s: begin!\n", __func__);
2788 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
2789 return;
2790 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
2791 	/* We are done with the inode, and in the process of freeing
2792 	 * the state_owner. Keep this structure around to process errors.
2793 */
2794 switch (task->tk_status) {
2795 case 0:
2796 res_stateid = &calldata->res.stateid;
2797 if (calldata->roc)
2798 pnfs_roc_set_barrier(state->inode,
2799 calldata->roc_barrier);
2800 renew_lease(server, calldata->timestamp);
2801 break;
2802 case -NFS4ERR_ADMIN_REVOKED:
2803 case -NFS4ERR_STALE_STATEID:
2804 case -NFS4ERR_OLD_STATEID:
2805 case -NFS4ERR_BAD_STATEID:
2806 case -NFS4ERR_EXPIRED:
2807 if (!nfs4_stateid_match(&calldata->arg.stateid,
2808 &state->open_stateid)) {
2809 rpc_restart_call_prepare(task);
2810 goto out_release;
2811 }
2812 if (calldata->arg.fmode == 0)
2813 break;
2814 default:
2815 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) {
2816 rpc_restart_call_prepare(task);
2817 goto out_release;
2818 }
2819 }
2820 nfs_clear_open_stateid(state, &calldata->arg.stateid,
2821 res_stateid, calldata->arg.fmode);
2822 out_release:
2823 nfs_release_seqid(calldata->arg.seqid);
2824 nfs_refresh_inode(calldata->inode, calldata->res.fattr);
2825 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
2826 }
2827
2828 static void nfs4_close_prepare(struct rpc_task *task, void *data)
2829 {
2830 struct nfs4_closedata *calldata = data;
2831 struct nfs4_state *state = calldata->state;
2832 struct inode *inode = calldata->inode;
2833 bool is_rdonly, is_wronly, is_rdwr;
2834 int call_close = 0;
2835
2836 dprintk("%s: begin!\n", __func__);
2837 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
2838 goto out_wait;
2839
2840 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
2841 spin_lock(&state->owner->so_lock);
2842 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
2843 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
2844 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
2845 nfs4_stateid_copy(&calldata->arg.stateid, &state->open_stateid);
2846 /* Calculate the change in open mode */
2847 calldata->arg.fmode = 0;
2848 if (state->n_rdwr == 0) {
2849 if (state->n_rdonly == 0)
2850 call_close |= is_rdonly;
2851 else if (is_rdonly)
2852 calldata->arg.fmode |= FMODE_READ;
2853 if (state->n_wronly == 0)
2854 call_close |= is_wronly;
2855 else if (is_wronly)
2856 calldata->arg.fmode |= FMODE_WRITE;
2857 } else if (is_rdwr)
2858 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
2859
2860 if (calldata->arg.fmode == 0)
2861 call_close |= is_rdwr;
2862
2863 if (!nfs4_valid_open_stateid(state))
2864 call_close = 0;
2865 spin_unlock(&state->owner->so_lock);
2866
2867 if (!call_close) {
2868 /* Note: exit _without_ calling nfs4_close_done */
2869 goto out_no_action;
2870 }
2871
2872 if (nfs4_wait_on_layoutreturn(inode, task)) {
2873 nfs_release_seqid(calldata->arg.seqid);
2874 goto out_wait;
2875 }
2876
2877 if (calldata->arg.fmode == 0)
2878 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
2879 if (calldata->roc)
2880 pnfs_roc_get_barrier(inode, &calldata->roc_barrier);
2881
2882 calldata->arg.share_access =
2883 nfs4_map_atomic_open_share(NFS_SERVER(inode),
2884 calldata->arg.fmode, 0);
2885
2886 nfs_fattr_init(calldata->res.fattr);
2887 calldata->timestamp = jiffies;
2888 if (nfs4_setup_sequence(NFS_SERVER(inode),
2889 &calldata->arg.seq_args,
2890 &calldata->res.seq_res,
2891 task) != 0)
2892 nfs_release_seqid(calldata->arg.seqid);
2893 dprintk("%s: done!\n", __func__);
2894 return;
2895 out_no_action:
2896 task->tk_action = NULL;
2897 out_wait:
2898 nfs4_sequence_done(task, &calldata->res.seq_res);
2899 }
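/*
 * Worked example for the mode calculation above (illustrative): with opens
 * for reading still outstanding (n_rdonly > 0) but no writers left,
 * FMODE_READ is retained in calldata->arg.fmode and the RPC goes out as
 * OPEN_DOWNGRADE; once all of the n_* counters reach zero, fmode ends up 0
 * and the procedure is switched back to CLOSE.
 */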
2900
2901 static const struct rpc_call_ops nfs4_close_ops = {
2902 .rpc_call_prepare = nfs4_close_prepare,
2903 .rpc_call_done = nfs4_close_done,
2904 .rpc_release = nfs4_free_closedata,
2905 };
2906
2907 static bool nfs4_roc(struct inode *inode)
2908 {
2909 if (!nfs_have_layout(inode))
2910 return false;
2911 return pnfs_roc(inode);
2912 }
2913
2914 /*
2915  * It is possible for data to be read from or written to a memory-mapped file
2916  * after the sys_close call (which hits the VFS layer as a flush).
2917  * This means that we can't safely send an NFSv4 CLOSE for a file until
2918  * the inode is cleared. This in turn means that we are not good
2919  * NFSv4 citizens - we do not tell the server to update the file's
2920  * share state even when we are done with one of the three share
2921  * stateids in the inode.
2922 *
2923 * NOTE: Caller must be holding the sp->so_owner semaphore!
2924 */
2925 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
2926 {
2927 struct nfs_server *server = NFS_SERVER(state->inode);
2928 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
2929 struct nfs4_closedata *calldata;
2930 struct nfs4_state_owner *sp = state->owner;
2931 struct rpc_task *task;
2932 struct rpc_message msg = {
2933 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
2934 .rpc_cred = state->owner->so_cred,
2935 };
2936 struct rpc_task_setup task_setup_data = {
2937 .rpc_client = server->client,
2938 .rpc_message = &msg,
2939 .callback_ops = &nfs4_close_ops,
2940 .workqueue = nfsiod_workqueue,
2941 .flags = RPC_TASK_ASYNC,
2942 };
2943 int status = -ENOMEM;
2944
2945 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
2946 &task_setup_data.rpc_client, &msg);
2947
2948 calldata = kzalloc(sizeof(*calldata), gfp_mask);
2949 if (calldata == NULL)
2950 goto out;
2951 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
2952 calldata->inode = state->inode;
2953 calldata->state = state;
2954 calldata->arg.fh = NFS_FH(state->inode);
2955 /* Serialization for the sequence id */
2956 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
2957 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
2958 if (IS_ERR(calldata->arg.seqid))
2959 goto out_free_calldata;
2960 calldata->arg.fmode = 0;
2961 calldata->arg.bitmask = server->cache_consistency_bitmask;
2962 calldata->res.fattr = &calldata->fattr;
2963 calldata->res.seqid = calldata->arg.seqid;
2964 calldata->res.server = server;
2965 calldata->roc = nfs4_roc(state->inode);
2966 nfs_sb_active(calldata->inode->i_sb);
2967
2968 msg.rpc_argp = &calldata->arg;
2969 msg.rpc_resp = &calldata->res;
2970 task_setup_data.callback_data = calldata;
2971 task = rpc_run_task(&task_setup_data);
2972 if (IS_ERR(task))
2973 return PTR_ERR(task);
2974 status = 0;
2975 if (wait)
2976 status = rpc_wait_for_completion_task(task);
2977 rpc_put_task(task);
2978 return status;
2979 out_free_calldata:
2980 kfree(calldata);
2981 out:
2982 nfs4_put_open_state(state);
2983 nfs4_put_state_owner(sp);
2984 return status;
2985 }
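/*
 * Typical callers reach nfs4_do_close() through nfs4_close_state() (async)
 * or nfs4_close_sync() (wait for the reply), for example when the last
 * nfs_open_context for an open mode is released. Either way the actual
 * CLOSE vs. OPEN_DOWNGRADE decision is deferred to nfs4_close_prepare(),
 * and all cleanup happens in nfs4_free_closedata() via the rpc_release
 * hook.
 */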
2986
2987 static struct inode *
2988 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
2989 int open_flags, struct iattr *attr, int *opened)
2990 {
2991 struct nfs4_state *state;
2992 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
2993
2994 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
2995
2996 /* Protect against concurrent sillydeletes */
2997 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
2998
2999 nfs4_label_release_security(label);
3000
3001 if (IS_ERR(state))
3002 return ERR_CAST(state);
3003 return state->inode;
3004 }
3005
3006 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
3007 {
3008 if (ctx->state == NULL)
3009 return;
3010 if (is_sync)
3011 nfs4_close_sync(ctx->state, ctx->mode);
3012 else
3013 nfs4_close_state(ctx->state, ctx->mode);
3014 }
3015
3016 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
3017 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
3018 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_SECURITY_LABEL - 1UL)
3019
3020 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3021 {
3022 u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion;
3023 struct nfs4_server_caps_arg args = {
3024 .fhandle = fhandle,
3025 .bitmask = bitmask,
3026 };
3027 struct nfs4_server_caps_res res = {};
3028 struct rpc_message msg = {
3029 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
3030 .rpc_argp = &args,
3031 .rpc_resp = &res,
3032 };
3033 int status;
3034
3035 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS |
3036 FATTR4_WORD0_FH_EXPIRE_TYPE |
3037 FATTR4_WORD0_LINK_SUPPORT |
3038 FATTR4_WORD0_SYMLINK_SUPPORT |
3039 FATTR4_WORD0_ACLSUPPORT;
3040 if (minorversion)
3041 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT;
3042
3043 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3044 if (status == 0) {
3045 /* Sanity check the server answers */
3046 switch (minorversion) {
3047 case 0:
3048 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
3049 res.attr_bitmask[2] = 0;
3050 break;
3051 case 1:
3052 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
3053 break;
3054 case 2:
3055 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
3056 }
3057 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
3058 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
3059 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
3060 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
3061 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
3062 NFS_CAP_CTIME|NFS_CAP_MTIME|
3063 NFS_CAP_SECURITY_LABEL);
3064 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
3065 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3066 server->caps |= NFS_CAP_ACLS;
3067 if (res.has_links != 0)
3068 server->caps |= NFS_CAP_HARDLINKS;
3069 if (res.has_symlinks != 0)
3070 server->caps |= NFS_CAP_SYMLINKS;
3071 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
3072 server->caps |= NFS_CAP_FILEID;
3073 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
3074 server->caps |= NFS_CAP_MODE;
3075 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
3076 server->caps |= NFS_CAP_NLINK;
3077 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
3078 server->caps |= NFS_CAP_OWNER;
3079 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
3080 server->caps |= NFS_CAP_OWNER_GROUP;
3081 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
3082 server->caps |= NFS_CAP_ATIME;
3083 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
3084 server->caps |= NFS_CAP_CTIME;
3085 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
3086 server->caps |= NFS_CAP_MTIME;
3087 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
3088 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
3089 server->caps |= NFS_CAP_SECURITY_LABEL;
3090 #endif
3091 memcpy(server->attr_bitmask_nl, res.attr_bitmask,
3092 sizeof(server->attr_bitmask));
3093 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
3094
3095 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
3096 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
3097 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
3098 server->cache_consistency_bitmask[2] = 0;
3099 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
3100 sizeof(server->exclcreat_bitmask));
3101 server->acl_bitmask = res.acl_bitmask;
3102 server->fh_expire_type = res.fh_expire_type;
3103 }
3104
3105 return status;
3106 }
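/*
 * Example (illustrative): a server whose supported-attribute bitmask sets
 * FATTR4_WORD1_MODE and FATTR4_WORD1_OWNER but clears
 * FATTR4_WORD1_TIME_ACCESS ends up with NFS_CAP_MODE and NFS_CAP_OWNER set
 * in server->caps and NFS_CAP_ATIME cleared, so later getattr/setattr code
 * knows not to rely on atime from this server.
 */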
3107
3108 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3109 {
3110 struct nfs4_exception exception = { };
3111 int err;
3112 do {
3113 err = nfs4_handle_exception(server,
3114 _nfs4_server_capabilities(server, fhandle),
3115 &exception);
3116 } while (exception.retry);
3117 return err;
3118 }
3119
3120 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
3121 struct nfs_fsinfo *info)
3122 {
3123 u32 bitmask[3];
3124 struct nfs4_lookup_root_arg args = {
3125 .bitmask = bitmask,
3126 };
3127 struct nfs4_lookup_res res = {
3128 .server = server,
3129 .fattr = info->fattr,
3130 .fh = fhandle,
3131 };
3132 struct rpc_message msg = {
3133 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
3134 .rpc_argp = &args,
3135 .rpc_resp = &res,
3136 };
3137
3138 bitmask[0] = nfs4_fattr_bitmap[0];
3139 bitmask[1] = nfs4_fattr_bitmap[1];
3140 /*
3141 	 * Exclude the security label here; it is processed by the getattr that follows
3142 */
3143 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
3144
3145 nfs_fattr_init(info->fattr);
3146 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3147 }
3148
3149 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
3150 struct nfs_fsinfo *info)
3151 {
3152 struct nfs4_exception exception = { };
3153 int err;
3154 do {
3155 err = _nfs4_lookup_root(server, fhandle, info);
3156 trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
3157 switch (err) {
3158 case 0:
3159 case -NFS4ERR_WRONGSEC:
3160 goto out;
3161 default:
3162 err = nfs4_handle_exception(server, err, &exception);
3163 }
3164 } while (exception.retry);
3165 out:
3166 return err;
3167 }
3168
3169 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3170 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
3171 {
3172 struct rpc_auth_create_args auth_args = {
3173 .pseudoflavor = flavor,
3174 };
3175 struct rpc_auth *auth;
3176 int ret;
3177
3178 auth = rpcauth_create(&auth_args, server->client);
3179 if (IS_ERR(auth)) {
3180 ret = -EACCES;
3181 goto out;
3182 }
3183 ret = nfs4_lookup_root(server, fhandle, info);
3184 out:
3185 return ret;
3186 }
3187
3188 /*
3189 * Retry pseudoroot lookup with various security flavors. We do this when:
3190 *
3191 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
3192 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation
3193 *
3194 * Returns zero on success, or a negative NFS4ERR value, or a
3195 * negative errno value.
3196 */
3197 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3198 struct nfs_fsinfo *info)
3199 {
3200 /* Per 3530bis 15.33.5 */
3201 static const rpc_authflavor_t flav_array[] = {
3202 RPC_AUTH_GSS_KRB5P,
3203 RPC_AUTH_GSS_KRB5I,
3204 RPC_AUTH_GSS_KRB5,
3205 RPC_AUTH_UNIX, /* courtesy */
3206 RPC_AUTH_NULL,
3207 };
3208 int status = -EPERM;
3209 size_t i;
3210
3211 if (server->auth_info.flavor_len > 0) {
3212 /* try each flavor specified by user */
3213 for (i = 0; i < server->auth_info.flavor_len; i++) {
3214 status = nfs4_lookup_root_sec(server, fhandle, info,
3215 server->auth_info.flavors[i]);
3216 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3217 continue;
3218 break;
3219 }
3220 } else {
3221 /* no flavors specified by user, try default list */
3222 for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
3223 status = nfs4_lookup_root_sec(server, fhandle, info,
3224 flav_array[i]);
3225 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3226 continue;
3227 break;
3228 }
3229 }
3230
3231 /*
3232  * -EACCES could mean that the user doesn't have correct permissions
3233 * to access the mount. It could also mean that we tried to mount
3234 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
3235 * existing mount programs don't handle -EACCES very well so it should
3236 * be mapped to -EPERM instead.
3237 */
3238 if (status == -EACCES)
3239 status = -EPERM;
3240 return status;
3241 }
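/*
 * Example (illustrative): with no sec= options given, the pseudoroot
 * lookup is retried as krb5p, then krb5i, then krb5, then AUTH_UNIX and
 * finally AUTH_NULL; the loop stops at the first flavor that returns
 * something other than NFS4ERR_WRONGSEC or EACCES, and a final EACCES is
 * reported to the mount program as EPERM.
 */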
3242
3243 static int nfs4_do_find_root_sec(struct nfs_server *server,
3244 struct nfs_fh *fhandle, struct nfs_fsinfo *info)
3245 {
3246 int mv = server->nfs_client->cl_minorversion;
3247 return nfs_v4_minor_ops[mv]->find_root_sec(server, fhandle, info);
3248 }
3249
3250 /**
3251 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
3252 * @server: initialized nfs_server handle
3253 * @fhandle: we fill in the pseudo-fs root file handle
3254 * @info: we fill in an FSINFO struct
3255 * @auth_probe: probe the auth flavours
3256 *
3257 * Returns zero on success, or a negative errno.
3258 */
3259 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
3260 struct nfs_fsinfo *info,
3261 bool auth_probe)
3262 {
3263 int status = 0;
3264
3265 if (!auth_probe)
3266 status = nfs4_lookup_root(server, fhandle, info);
3267
3268 	if (auth_probe || status == -NFS4ERR_WRONGSEC)
3269 status = nfs4_do_find_root_sec(server, fhandle, info);
3270
3271 if (status == 0)
3272 status = nfs4_server_capabilities(server, fhandle);
3273 if (status == 0)
3274 status = nfs4_do_fsinfo(server, fhandle, info);
3275
3276 return nfs4_map_errors(status);
3277 }
3278
3279 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
3280 struct nfs_fsinfo *info)
3281 {
3282 int error;
3283 struct nfs_fattr *fattr = info->fattr;
3284 struct nfs4_label *label = NULL;
3285
3286 error = nfs4_server_capabilities(server, mntfh);
3287 if (error < 0) {
3288 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
3289 return error;
3290 }
3291
3292 label = nfs4_label_alloc(server, GFP_KERNEL);
3293 if (IS_ERR(label))
3294 return PTR_ERR(label);
3295
3296 error = nfs4_proc_getattr(server, mntfh, fattr, label);
3297 if (error < 0) {
3298 dprintk("nfs4_get_root: getattr error = %d\n", -error);
3299 goto err_free_label;
3300 }
3301
3302 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
3303 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
3304 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
3305
3306 err_free_label:
3307 nfs4_label_free(label);
3308
3309 return error;
3310 }
3311
3312 /*
3313 * Get locations and (maybe) other attributes of a referral.
3314  * Note that we'll actually follow the referral later, when
3315  * we detect an fsid mismatch during inode revalidation.
3316 */
3317 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
3318 const struct qstr *name, struct nfs_fattr *fattr,
3319 struct nfs_fh *fhandle)
3320 {
3321 int status = -ENOMEM;
3322 struct page *page = NULL;
3323 struct nfs4_fs_locations *locations = NULL;
3324
3325 page = alloc_page(GFP_KERNEL);
3326 if (page == NULL)
3327 goto out;
3328 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
3329 if (locations == NULL)
3330 goto out;
3331
3332 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
3333 if (status != 0)
3334 goto out;
3335
3336 /*
3337 * If the fsid didn't change, this is a migration event, not a
3338 * referral. Cause us to drop into the exception handler, which
3339 * will kick off migration recovery.
3340 */
3341 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
3342 dprintk("%s: server did not return a different fsid for"
3343 " a referral at %s\n", __func__, name->name);
3344 status = -NFS4ERR_MOVED;
3345 goto out;
3346 }
3347 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
3348 nfs_fixup_referral_attributes(&locations->fattr);
3349
3350 /* replace the lookup nfs_fattr with the locations nfs_fattr */
3351 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
3352 memset(fhandle, 0, sizeof(struct nfs_fh));
3353 out:
3354 if (page)
3355 __free_page(page);
3356 kfree(locations);
3357 return status;
3358 }
3359
3360 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3361 struct nfs_fattr *fattr, struct nfs4_label *label)
3362 {
3363 struct nfs4_getattr_arg args = {
3364 .fh = fhandle,
3365 .bitmask = server->attr_bitmask,
3366 };
3367 struct nfs4_getattr_res res = {
3368 .fattr = fattr,
3369 .label = label,
3370 .server = server,
3371 };
3372 struct rpc_message msg = {
3373 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
3374 .rpc_argp = &args,
3375 .rpc_resp = &res,
3376 };
3377
3378 args.bitmask = nfs4_bitmask(server, label);
3379
3380 nfs_fattr_init(fattr);
3381 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3382 }
3383
3384 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3385 struct nfs_fattr *fattr, struct nfs4_label *label)
3386 {
3387 struct nfs4_exception exception = { };
3388 int err;
3389 do {
3390 err = _nfs4_proc_getattr(server, fhandle, fattr, label);
3391 trace_nfs4_getattr(server, fhandle, fattr, err);
3392 err = nfs4_handle_exception(server, err,
3393 &exception);
3394 } while (exception.retry);
3395 return err;
3396 }
3397
3398 /*
3399  * The file is not closed if it is opened due to a request to change
3400 * the size of the file. The open call will not be needed once the
3401 * VFS layer lookup-intents are implemented.
3402 *
3403 * Close is called when the inode is destroyed.
3404  * If we haven't opened the file for O_WRONLY, we
3405  * need to do so in the size_change case to obtain a stateid.
3406 *
3407 * Got race?
3408 * Because OPEN is always done by name in nfsv4, it is
3409 * possible that we opened a different file by the same
3410 * name. We can recognize this race condition, but we
3411 * can't do anything about it besides returning an error.
3412 *
3413 * This will be fixed with VFS changes (lookup-intent).
3414 */
3415 static int
3416 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
3417 struct iattr *sattr)
3418 {
3419 struct inode *inode = d_inode(dentry);
3420 struct rpc_cred *cred = NULL;
3421 struct nfs4_state *state = NULL;
3422 struct nfs4_label *label = NULL;
3423 int status;
3424
3425 if (pnfs_ld_layoutret_on_setattr(inode) &&
3426 sattr->ia_valid & ATTR_SIZE &&
3427 sattr->ia_size < i_size_read(inode))
3428 pnfs_commit_and_return_layout(inode);
3429
3430 nfs_fattr_init(fattr);
3431
3432 /* Deal with open(O_TRUNC) */
3433 if (sattr->ia_valid & ATTR_OPEN)
3434 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
3435
3436 /* Optimization: if the end result is no change, don't RPC */
3437 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
3438 return 0;
3439
3440 	/* Search for an existing open file to obtain a credential and open state */
3441 if (sattr->ia_valid & ATTR_FILE) {
3442 struct nfs_open_context *ctx;
3443
3444 ctx = nfs_file_open_context(sattr->ia_file);
3445 if (ctx) {
3446 cred = ctx->cred;
3447 state = ctx->state;
3448 }
3449 }
3450
3451 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
3452 if (IS_ERR(label))
3453 return PTR_ERR(label);
3454
3455 status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label);
3456 if (status == 0) {
3457 nfs_setattr_update_inode(inode, sattr, fattr);
3458 nfs_setsecurity(inode, fattr, label);
3459 }
3460 nfs4_label_free(label);
3461 return status;
3462 }
3463
3464 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
3465 const struct qstr *name, struct nfs_fh *fhandle,
3466 struct nfs_fattr *fattr, struct nfs4_label *label)
3467 {
3468 struct nfs_server *server = NFS_SERVER(dir);
3469 int status;
3470 struct nfs4_lookup_arg args = {
3471 .bitmask = server->attr_bitmask,
3472 .dir_fh = NFS_FH(dir),
3473 .name = name,
3474 };
3475 struct nfs4_lookup_res res = {
3476 .server = server,
3477 .fattr = fattr,
3478 .label = label,
3479 .fh = fhandle,
3480 };
3481 struct rpc_message msg = {
3482 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
3483 .rpc_argp = &args,
3484 .rpc_resp = &res,
3485 };
3486
3487 args.bitmask = nfs4_bitmask(server, label);
3488
3489 nfs_fattr_init(fattr);
3490
3491 dprintk("NFS call lookup %s\n", name->name);
3492 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
3493 dprintk("NFS reply lookup: %d\n", status);
3494 return status;
3495 }
3496
3497 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
3498 {
3499 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
3500 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
3501 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
3502 fattr->nlink = 2;
3503 }
3504
3505 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
3506 struct qstr *name, struct nfs_fh *fhandle,
3507 struct nfs_fattr *fattr, struct nfs4_label *label)
3508 {
3509 struct nfs4_exception exception = { };
3510 struct rpc_clnt *client = *clnt;
3511 int err;
3512 do {
3513 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr, label);
3514 trace_nfs4_lookup(dir, name, err);
3515 switch (err) {
3516 case -NFS4ERR_BADNAME:
3517 err = -ENOENT;
3518 goto out;
3519 case -NFS4ERR_MOVED:
3520 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
3521 if (err == -NFS4ERR_MOVED)
3522 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3523 goto out;
3524 case -NFS4ERR_WRONGSEC:
3525 err = -EPERM;
3526 if (client != *clnt)
3527 goto out;
3528 client = nfs4_negotiate_security(client, dir, name);
3529 if (IS_ERR(client))
3530 return PTR_ERR(client);
3531
3532 exception.retry = 1;
3533 break;
3534 default:
3535 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3536 }
3537 } while (exception.retry);
3538
3539 out:
3540 if (err == 0)
3541 *clnt = client;
3542 else if (client != *clnt)
3543 rpc_shutdown_client(client);
3544
3545 return err;
3546 }
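/*
 * The NFS4ERR_WRONGSEC branch above implements security negotiation for
 * LOOKUP: nfs4_negotiate_security() sets up a new rpc_clnt based on the
 * server's SECINFO reply, the lookup is retried on that client, and on
 * success the new client is handed back through *clnt so the caller can
 * keep using it (or it is shut down if the retry also failed).
 */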
3547
3548 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
3549 struct nfs_fh *fhandle, struct nfs_fattr *fattr,
3550 struct nfs4_label *label)
3551 {
3552 int status;
3553 struct rpc_clnt *client = NFS_CLIENT(dir);
3554
3555 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, label);
3556 if (client != NFS_CLIENT(dir)) {
3557 rpc_shutdown_client(client);
3558 nfs_fixup_secinfo_attributes(fattr);
3559 }
3560 return status;
3561 }
3562
3563 struct rpc_clnt *
3564 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
3565 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
3566 {
3567 struct rpc_clnt *client = NFS_CLIENT(dir);
3568 int status;
3569
3570 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL);
3571 if (status < 0)
3572 return ERR_PTR(status);
3573 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
3574 }
3575
3576 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3577 {
3578 struct nfs_server *server = NFS_SERVER(inode);
3579 struct nfs4_accessargs args = {
3580 .fh = NFS_FH(inode),
3581 .bitmask = server->cache_consistency_bitmask,
3582 };
3583 struct nfs4_accessres res = {
3584 .server = server,
3585 };
3586 struct rpc_message msg = {
3587 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
3588 .rpc_argp = &args,
3589 .rpc_resp = &res,
3590 .rpc_cred = entry->cred,
3591 };
3592 int mode = entry->mask;
3593 int status = 0;
3594
3595 /*
3596 * Determine which access bits we want to ask for...
3597 */
3598 if (mode & MAY_READ)
3599 args.access |= NFS4_ACCESS_READ;
3600 if (S_ISDIR(inode->i_mode)) {
3601 if (mode & MAY_WRITE)
3602 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
3603 if (mode & MAY_EXEC)
3604 args.access |= NFS4_ACCESS_LOOKUP;
3605 } else {
3606 if (mode & MAY_WRITE)
3607 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
3608 if (mode & MAY_EXEC)
3609 args.access |= NFS4_ACCESS_EXECUTE;
3610 }
3611
3612 res.fattr = nfs_alloc_fattr();
3613 if (res.fattr == NULL)
3614 return -ENOMEM;
3615
3616 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3617 if (!status) {
3618 nfs_access_set_mask(entry, res.access);
3619 nfs_refresh_inode(inode, res.fattr);
3620 }
3621 nfs_free_fattr(res.fattr);
3622 return status;
3623 }
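/*
 * Example (illustrative): an access(R_OK|X_OK) check on a directory asks
 * the server for NFS4_ACCESS_READ | NFS4_ACCESS_LOOKUP, while the same
 * check on a regular file asks for NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE;
 * the returned mask is copied into the access entry via
 * nfs_access_set_mask() and the attached post-op attributes refresh the
 * inode.
 */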
3624
3625 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3626 {
3627 struct nfs4_exception exception = { };
3628 int err;
3629 do {
3630 err = _nfs4_proc_access(inode, entry);
3631 trace_nfs4_access(inode, err);
3632 err = nfs4_handle_exception(NFS_SERVER(inode), err,
3633 &exception);
3634 } while (exception.retry);
3635 return err;
3636 }
3637
3638 /*
3639 * TODO: For the time being, we don't try to get any attributes
3640 * along with any of the zero-copy operations READ, READDIR,
3641 * READLINK, WRITE.
3642 *
3643 * In the case of the first three, we want to put the GETATTR
3644 * after the read-type operation -- this is because it is hard
3645 * to predict the length of a GETATTR response in v4, and thus
3646 * align the READ data correctly. This means that the GETATTR
3647 * may end up partially falling into the page cache, and we should
3648 * shift it into the 'tail' of the xdr_buf before processing.
3649 * To do this efficiently, we need to know the total length
3650 * of data received, which doesn't seem to be available outside
3651 * of the RPC layer.
3652 *
3653 * In the case of WRITE, we also want to put the GETATTR after
3654 * the operation -- in this case because we want to make sure
3655 * we get the post-operation mtime and size.
3656 *
3657 * Both of these changes to the XDR layer would in fact be quite
3658 * minor, but I decided to leave them for a subsequent patch.
3659 */
3660 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
3661 unsigned int pgbase, unsigned int pglen)
3662 {
3663 struct nfs4_readlink args = {
3664 .fh = NFS_FH(inode),
3665 .pgbase = pgbase,
3666 .pglen = pglen,
3667 .pages = &page,
3668 };
3669 struct nfs4_readlink_res res;
3670 struct rpc_message msg = {
3671 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
3672 .rpc_argp = &args,
3673 .rpc_resp = &res,
3674 };
3675
3676 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
3677 }
3678
3679 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
3680 unsigned int pgbase, unsigned int pglen)
3681 {
3682 struct nfs4_exception exception = { };
3683 int err;
3684 do {
3685 err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
3686 trace_nfs4_readlink(inode, err);
3687 err = nfs4_handle_exception(NFS_SERVER(inode), err,
3688 &exception);
3689 } while (exception.retry);
3690 return err;
3691 }
3692
3693 /*
3694 * This is just for mknod. open(O_CREAT) will always do ->open_context().
3695 */
3696 static int
3697 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
3698 int flags)
3699 {
3700 struct nfs4_label l, *ilabel = NULL;
3701 struct nfs_open_context *ctx;
3702 struct nfs4_state *state;
3703 int status = 0;
3704
3705 ctx = alloc_nfs_open_context(dentry, FMODE_READ);
3706 if (IS_ERR(ctx))
3707 return PTR_ERR(ctx);
3708
3709 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
3710
3711 sattr->ia_mode &= ~current_umask();
3712 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL);
3713 if (IS_ERR(state)) {
3714 status = PTR_ERR(state);
3715 goto out;
3716 }
3717 out:
3718 nfs4_label_release_security(ilabel);
3719 put_nfs_open_context(ctx);
3720 return status;
3721 }
3722
3723 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
3724 {
3725 struct nfs_server *server = NFS_SERVER(dir);
3726 struct nfs_removeargs args = {
3727 .fh = NFS_FH(dir),
3728 .name = *name,
3729 };
3730 struct nfs_removeres res = {
3731 .server = server,
3732 };
3733 struct rpc_message msg = {
3734 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
3735 .rpc_argp = &args,
3736 .rpc_resp = &res,
3737 };
3738 int status;
3739
3740 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
3741 if (status == 0)
3742 update_changeattr(dir, &res.cinfo);
3743 return status;
3744 }
3745
3746 static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
3747 {
3748 struct nfs4_exception exception = { };
3749 int err;
3750 do {
3751 err = _nfs4_proc_remove(dir, name);
3752 trace_nfs4_remove(dir, name, err);
3753 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3754 &exception);
3755 } while (exception.retry);
3756 return err;
3757 }
3758
3759 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
3760 {
3761 struct nfs_server *server = NFS_SERVER(dir);
3762 struct nfs_removeargs *args = msg->rpc_argp;
3763 struct nfs_removeres *res = msg->rpc_resp;
3764
3765 res->server = server;
3766 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
3767 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1);
3768
3769 nfs_fattr_init(res->dir_attr);
3770 }
3771
3772 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
3773 {
3774 nfs4_setup_sequence(NFS_SERVER(data->dir),
3775 &data->args.seq_args,
3776 &data->res.seq_res,
3777 task);
3778 }
3779
3780 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
3781 {
3782 struct nfs_unlinkdata *data = task->tk_calldata;
3783 struct nfs_removeres *res = &data->res;
3784
3785 if (!nfs4_sequence_done(task, &res->seq_res))
3786 return 0;
3787 if (nfs4_async_handle_error(task, res->server, NULL,
3788 &data->timeout) == -EAGAIN)
3789 return 0;
3790 update_changeattr(dir, &res->cinfo);
3791 return 1;
3792 }
3793
3794 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
3795 {
3796 struct nfs_server *server = NFS_SERVER(dir);
3797 struct nfs_renameargs *arg = msg->rpc_argp;
3798 struct nfs_renameres *res = msg->rpc_resp;
3799
3800 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
3801 res->server = server;
3802 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1);
3803 }
3804
3805 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
3806 {
3807 nfs4_setup_sequence(NFS_SERVER(data->old_dir),
3808 &data->args.seq_args,
3809 &data->res.seq_res,
3810 task);
3811 }
3812
3813 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
3814 struct inode *new_dir)
3815 {
3816 struct nfs_renamedata *data = task->tk_calldata;
3817 struct nfs_renameres *res = &data->res;
3818
3819 if (!nfs4_sequence_done(task, &res->seq_res))
3820 return 0;
3821 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
3822 return 0;
3823
3824 update_changeattr(old_dir, &res->old_cinfo);
3825 update_changeattr(new_dir, &res->new_cinfo);
3826 return 1;
3827 }
3828
3829 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3830 {
3831 struct nfs_server *server = NFS_SERVER(inode);
3832 struct nfs4_link_arg arg = {
3833 .fh = NFS_FH(inode),
3834 .dir_fh = NFS_FH(dir),
3835 .name = name,
3836 .bitmask = server->attr_bitmask,
3837 };
3838 struct nfs4_link_res res = {
3839 .server = server,
3840 .label = NULL,
3841 };
3842 struct rpc_message msg = {
3843 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
3844 .rpc_argp = &arg,
3845 .rpc_resp = &res,
3846 };
3847 int status = -ENOMEM;
3848
3849 res.fattr = nfs_alloc_fattr();
3850 if (res.fattr == NULL)
3851 goto out;
3852
3853 res.label = nfs4_label_alloc(server, GFP_KERNEL);
3854 if (IS_ERR(res.label)) {
3855 status = PTR_ERR(res.label);
3856 goto out;
3857 }
3858 arg.bitmask = nfs4_bitmask(server, res.label);
3859
3860 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3861 if (!status) {
3862 update_changeattr(dir, &res.cinfo);
3863 status = nfs_post_op_update_inode(inode, res.fattr);
3864 if (!status)
3865 nfs_setsecurity(inode, res.fattr, res.label);
3866 }
3867
3868
3869 nfs4_label_free(res.label);
3870
3871 out:
3872 nfs_free_fattr(res.fattr);
3873 return status;
3874 }
3875
3876 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3877 {
3878 struct nfs4_exception exception = { };
3879 int err;
3880 do {
3881 err = nfs4_handle_exception(NFS_SERVER(inode),
3882 _nfs4_proc_link(inode, dir, name),
3883 &exception);
3884 } while (exception.retry);
3885 return err;
3886 }
3887
3888 struct nfs4_createdata {
3889 struct rpc_message msg;
3890 struct nfs4_create_arg arg;
3891 struct nfs4_create_res res;
3892 struct nfs_fh fh;
3893 struct nfs_fattr fattr;
3894 struct nfs4_label *label;
3895 };
3896
3897 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
3898 struct qstr *name, struct iattr *sattr, u32 ftype)
3899 {
3900 struct nfs4_createdata *data;
3901
3902 data = kzalloc(sizeof(*data), GFP_KERNEL);
3903 if (data != NULL) {
3904 struct nfs_server *server = NFS_SERVER(dir);
3905
3906 data->label = nfs4_label_alloc(server, GFP_KERNEL);
3907 if (IS_ERR(data->label))
3908 goto out_free;
3909
3910 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
3911 data->msg.rpc_argp = &data->arg;
3912 data->msg.rpc_resp = &data->res;
3913 data->arg.dir_fh = NFS_FH(dir);
3914 data->arg.server = server;
3915 data->arg.name = name;
3916 data->arg.attrs = sattr;
3917 data->arg.ftype = ftype;
3918 data->arg.bitmask = nfs4_bitmask(server, data->label);
3919 data->res.server = server;
3920 data->res.fh = &data->fh;
3921 data->res.fattr = &data->fattr;
3922 data->res.label = data->label;
3923 nfs_fattr_init(data->res.fattr);
3924 }
3925 return data;
3926 out_free:
3927 kfree(data);
3928 return NULL;
3929 }
3930
3931 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
3932 {
3933 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
3934 &data->arg.seq_args, &data->res.seq_res, 1);
3935 if (status == 0) {
3936 update_changeattr(dir, &data->res.dir_cinfo);
3937 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label);
3938 }
3939 return status;
3940 }
3941
3942 static void nfs4_free_createdata(struct nfs4_createdata *data)
3943 {
3944 nfs4_label_free(data->label);
3945 kfree(data);
3946 }
3947
3948 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3949 struct page *page, unsigned int len, struct iattr *sattr,
3950 struct nfs4_label *label)
3951 {
3952 struct nfs4_createdata *data;
3953 int status = -ENAMETOOLONG;
3954
3955 if (len > NFS4_MAXPATHLEN)
3956 goto out;
3957
3958 status = -ENOMEM;
3959 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
3960 if (data == NULL)
3961 goto out;
3962
3963 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
3964 data->arg.u.symlink.pages = &page;
3965 data->arg.u.symlink.len = len;
3966 data->arg.label = label;
3967
3968 status = nfs4_do_create(dir, dentry, data);
3969
3970 nfs4_free_createdata(data);
3971 out:
3972 return status;
3973 }
3974
3975 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3976 struct page *page, unsigned int len, struct iattr *sattr)
3977 {
3978 struct nfs4_exception exception = { };
3979 struct nfs4_label l, *label = NULL;
3980 int err;
3981
3982 label = nfs4_label_init_security(dir, dentry, sattr, &l);
3983
3984 do {
3985 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label);
3986 trace_nfs4_symlink(dir, &dentry->d_name, err);
3987 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3988 &exception);
3989 } while (exception.retry);
3990
3991 nfs4_label_release_security(label);
3992 return err;
3993 }
3994
3995 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3996 struct iattr *sattr, struct nfs4_label *label)
3997 {
3998 struct nfs4_createdata *data;
3999 int status = -ENOMEM;
4000
4001 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
4002 if (data == NULL)
4003 goto out;
4004
4005 data->arg.label = label;
4006 status = nfs4_do_create(dir, dentry, data);
4007
4008 nfs4_free_createdata(data);
4009 out:
4010 return status;
4011 }
4012
4013 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
4014 struct iattr *sattr)
4015 {
4016 struct nfs4_exception exception = { };
4017 struct nfs4_label l, *label = NULL;
4018 int err;
4019
4020 label = nfs4_label_init_security(dir, dentry, sattr, &l);
4021
4022 sattr->ia_mode &= ~current_umask();
4023 do {
4024 err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
4025 trace_nfs4_mkdir(dir, &dentry->d_name, err);
4026 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4027 &exception);
4028 } while (exception.retry);
4029 nfs4_label_release_security(label);
4030
4031 return err;
4032 }
4033
4034 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
4035 u64 cookie, struct page **pages, unsigned int count, int plus)
4036 {
4037 struct inode *dir = d_inode(dentry);
4038 struct nfs4_readdir_arg args = {
4039 .fh = NFS_FH(dir),
4040 .pages = pages,
4041 .pgbase = 0,
4042 .count = count,
4043 .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask,
4044 .plus = plus,
4045 };
4046 struct nfs4_readdir_res res;
4047 struct rpc_message msg = {
4048 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
4049 .rpc_argp = &args,
4050 .rpc_resp = &res,
4051 .rpc_cred = cred,
4052 };
4053 int status;
4054
4055 dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__,
4056 dentry,
4057 (unsigned long long)cookie);
4058 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
4059 res.pgbase = args.pgbase;
4060 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
4061 if (status >= 0) {
4062 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
4063 status += args.pgbase;
4064 }
4065
4066 nfs_invalidate_atime(dir);
4067
4068 dprintk("%s: returns %d\n", __func__, status);
4069 return status;
4070 }
4071
4072 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
4073 u64 cookie, struct page **pages, unsigned int count, int plus)
4074 {
4075 struct nfs4_exception exception = { };
4076 int err;
4077 do {
4078 err = _nfs4_proc_readdir(dentry, cred, cookie,
4079 pages, count, plus);
4080 trace_nfs4_readdir(d_inode(dentry), err);
4081 err = nfs4_handle_exception(NFS_SERVER(d_inode(dentry)), err,
4082 &exception);
4083 } while (exception.retry);
4084 return err;
4085 }
4086
4087 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
4088 struct iattr *sattr, struct nfs4_label *label, dev_t rdev)
4089 {
4090 struct nfs4_createdata *data;
4091 int mode = sattr->ia_mode;
4092 int status = -ENOMEM;
4093
4094 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
4095 if (data == NULL)
4096 goto out;
4097
4098 if (S_ISFIFO(mode))
4099 data->arg.ftype = NF4FIFO;
4100 else if (S_ISBLK(mode)) {
4101 data->arg.ftype = NF4BLK;
4102 data->arg.u.device.specdata1 = MAJOR(rdev);
4103 data->arg.u.device.specdata2 = MINOR(rdev);
4104 }
4105 else if (S_ISCHR(mode)) {
4106 data->arg.ftype = NF4CHR;
4107 data->arg.u.device.specdata1 = MAJOR(rdev);
4108 data->arg.u.device.specdata2 = MINOR(rdev);
4109 } else if (!S_ISSOCK(mode)) {
4110 status = -EINVAL;
4111 goto out_free;
4112 }
4113
4114 data->arg.label = label;
4115 status = nfs4_do_create(dir, dentry, data);
4116 out_free:
4117 nfs4_free_createdata(data);
4118 out:
4119 return status;
4120 }
4121
4122 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
4123 struct iattr *sattr, dev_t rdev)
4124 {
4125 struct nfs4_exception exception = { };
4126 struct nfs4_label l, *label = NULL;
4127 int err;
4128
4129 label = nfs4_label_init_security(dir, dentry, sattr, &l);
4130
4131 sattr->ia_mode &= ~current_umask();
4132 do {
4133 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
4134 trace_nfs4_mknod(dir, &dentry->d_name, err);
4135 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4136 &exception);
4137 } while (exception.retry);
4138
4139 nfs4_label_release_security(label);
4140
4141 return err;
4142 }
4143
4144 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
4145 struct nfs_fsstat *fsstat)
4146 {
4147 struct nfs4_statfs_arg args = {
4148 .fh = fhandle,
4149 .bitmask = server->attr_bitmask,
4150 };
4151 struct nfs4_statfs_res res = {
4152 .fsstat = fsstat,
4153 };
4154 struct rpc_message msg = {
4155 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
4156 .rpc_argp = &args,
4157 .rpc_resp = &res,
4158 };
4159
4160 nfs_fattr_init(fsstat->fattr);
4161 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4162 }
4163
4164 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
4165 {
4166 struct nfs4_exception exception = { };
4167 int err;
4168 do {
4169 err = nfs4_handle_exception(server,
4170 _nfs4_proc_statfs(server, fhandle, fsstat),
4171 &exception);
4172 } while (exception.retry);
4173 return err;
4174 }
4175
4176 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
4177 struct nfs_fsinfo *fsinfo)
4178 {
4179 struct nfs4_fsinfo_arg args = {
4180 .fh = fhandle,
4181 .bitmask = server->attr_bitmask,
4182 };
4183 struct nfs4_fsinfo_res res = {
4184 .fsinfo = fsinfo,
4185 };
4186 struct rpc_message msg = {
4187 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
4188 .rpc_argp = &args,
4189 .rpc_resp = &res,
4190 };
4191
4192 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4193 }
4194
4195 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4196 {
4197 struct nfs4_exception exception = { };
4198 unsigned long now = jiffies;
4199 int err;
4200
4201 do {
4202 err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
4203 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
4204 if (err == 0) {
4205 struct nfs_client *clp = server->nfs_client;
4206
4207 spin_lock(&clp->cl_lock);
4208 clp->cl_lease_time = fsinfo->lease_time * HZ;
4209 clp->cl_last_renewal = now;
4210 spin_unlock(&clp->cl_lock);
4211 break;
4212 }
4213 err = nfs4_handle_exception(server, err, &exception);
4214 } while (exception.retry);
4215 return err;
4216 }
4217
4218 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4219 {
4220 int error;
4221
4222 nfs_fattr_init(fsinfo->fattr);
4223 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
4224 if (error == 0) {
4225 /* block layout checks this! */
4226 server->pnfs_blksize = fsinfo->blksize;
4227 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype);
4228 }
4229
4230 return error;
4231 }
4232
4233 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4234 struct nfs_pathconf *pathconf)
4235 {
4236 struct nfs4_pathconf_arg args = {
4237 .fh = fhandle,
4238 .bitmask = server->attr_bitmask,
4239 };
4240 struct nfs4_pathconf_res res = {
4241 .pathconf = pathconf,
4242 };
4243 struct rpc_message msg = {
4244 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
4245 .rpc_argp = &args,
4246 .rpc_resp = &res,
4247 };
4248
4249 /* None of the pathconf attributes are mandatory to implement */
4250 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
4251 memset(pathconf, 0, sizeof(*pathconf));
4252 return 0;
4253 }
4254
4255 nfs_fattr_init(pathconf->fattr);
4256 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4257 }
4258
4259 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4260 struct nfs_pathconf *pathconf)
4261 {
4262 struct nfs4_exception exception = { };
4263 int err;
4264
4265 do {
4266 err = nfs4_handle_exception(server,
4267 _nfs4_proc_pathconf(server, fhandle, pathconf),
4268 &exception);
4269 } while (exception.retry);
4270 return err;
4271 }
4272
4273 int nfs4_set_rw_stateid(nfs4_stateid *stateid,
4274 const struct nfs_open_context *ctx,
4275 const struct nfs_lock_context *l_ctx,
4276 fmode_t fmode)
4277 {
4278 const struct nfs_lockowner *lockowner = NULL;
4279
4280 if (l_ctx != NULL)
4281 lockowner = &l_ctx->lockowner;
4282 return nfs4_select_rw_stateid(stateid, ctx->state, fmode, lockowner);
4283 }
4284 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
4285
4286 static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
4287 const struct nfs_open_context *ctx,
4288 const struct nfs_lock_context *l_ctx,
4289 fmode_t fmode)
4290 {
4291 nfs4_stateid current_stateid;
4292
4293 /* If the current stateid represents a lost lock, then exit */
4294 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
4295 return true;
4296 return nfs4_stateid_match(stateid, &current_stateid);
4297 }
4298
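/*
 * Did the request fail because the stateid it carried has been
 * invalidated on the server (revoked, stale, expired or superseded)?
 */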
4299 static bool nfs4_error_stateid_expired(int err)
4300 {
4301 switch (err) {
4302 case -NFS4ERR_DELEG_REVOKED:
4303 case -NFS4ERR_ADMIN_REVOKED:
4304 case -NFS4ERR_BAD_STATEID:
4305 case -NFS4ERR_STALE_STATEID:
4306 case -NFS4ERR_OLD_STATEID:
4307 case -NFS4ERR_OPENMODE:
4308 case -NFS4ERR_EXPIRED:
4309 return true;
4310 }
4311 return false;
4312 }
4313
4314 void __nfs4_read_done_cb(struct nfs_pgio_header *hdr)
4315 {
4316 nfs_invalidate_atime(hdr->inode);
4317 }
4318
4319 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
4320 {
4321 struct nfs_server *server = NFS_SERVER(hdr->inode);
4322
4323 trace_nfs4_read(hdr, task->tk_status);
4324 if (nfs4_async_handle_error(task, server,
4325 hdr->args.context->state,
4326 NULL) == -EAGAIN) {
4327 rpc_restart_call_prepare(task);
4328 return -EAGAIN;
4329 }
4330
4331 __nfs4_read_done_cb(hdr);
4332 if (task->tk_status > 0)
4333 renew_lease(server, hdr->timestamp);
4334 return 0;
4335 }
4336
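/*
 * If the READ failed with a stateid error and the stateid we sent is no
 * longer the current one for this open/lock context, restart the RPC so
 * that it is re-issued with the up-to-date stateid.
 */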
4337 static bool nfs4_read_stateid_changed(struct rpc_task *task,
4338 struct nfs_pgio_args *args)
4339 {
4340
4341 if (!nfs4_error_stateid_expired(task->tk_status) ||
4342 nfs4_stateid_is_current(&args->stateid,
4343 args->context,
4344 args->lock_context,
4345 FMODE_READ))
4346 return false;
4347 rpc_restart_call_prepare(task);
4348 return true;
4349 }
4350
4351 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4352 {
4353
4354 dprintk("--> %s\n", __func__);
4355
4356 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4357 return -EAGAIN;
4358 if (nfs4_read_stateid_changed(task, &hdr->args))
4359 return -EAGAIN;
4360 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4361 nfs4_read_done_cb(task, hdr);
4362 }
4363
4364 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
4365 struct rpc_message *msg)
4366 {
4367 hdr->timestamp = jiffies;
4368 hdr->pgio_done_cb = nfs4_read_done_cb;
4369 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
4370 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0);
4371 }
4372
4373 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
4374 struct nfs_pgio_header *hdr)
4375 {
4376 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode),
4377 &hdr->args.seq_args,
4378 &hdr->res.seq_res,
4379 task))
4380 return 0;
4381 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
4382 hdr->args.lock_context,
4383 hdr->rw_ops->rw_mode) == -EIO)
4384 return -EIO;
4385 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags)))
4386 return -EIO;
4387 return 0;
4388 }
4389
4390 static int nfs4_write_done_cb(struct rpc_task *task,
4391 struct nfs_pgio_header *hdr)
4392 {
4393 struct inode *inode = hdr->inode;
4394
4395 trace_nfs4_write(hdr, task->tk_status);
4396 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
4397 hdr->args.context->state,
4398 NULL) == -EAGAIN) {
4399 rpc_restart_call_prepare(task);
4400 return -EAGAIN;
4401 }
4402 if (task->tk_status >= 0) {
4403 renew_lease(NFS_SERVER(inode), hdr->timestamp);
4404 nfs_writeback_update_inode(hdr);
4405 }
4406 return 0;
4407 }
4408
4409 static bool nfs4_write_stateid_changed(struct rpc_task *task,
4410 struct nfs_pgio_args *args)
4411 {
4412
4413 if (!nfs4_error_stateid_expired(task->tk_status) ||
4414 nfs4_stateid_is_current(&args->stateid,
4415 args->context,
4416 args->lock_context,
4417 FMODE_WRITE))
4418 return false;
4419 rpc_restart_call_prepare(task);
4420 return true;
4421 }
4422
4423 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4424 {
4425 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4426 return -EAGAIN;
4427 if (nfs4_write_stateid_changed(task, &hdr->args))
4428 return -EAGAIN;
4429 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4430 nfs4_write_done_cb(task, hdr);
4431 }
4432
4433 static
4434 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
4435 {
4436 /* Don't request attributes for pNFS or O_DIRECT writes */
4437 if (hdr->ds_clp != NULL || hdr->dreq != NULL)
4438 return false;
4439 /* Otherwise, request attributes if and only if we don't hold
4440 * a delegation
4441 */
4442 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
4443 }
4444
4445 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
4446 struct rpc_message *msg)
4447 {
4448 struct nfs_server *server = NFS_SERVER(hdr->inode);
4449
4450 if (!nfs4_write_need_cache_consistency_data(hdr)) {
4451 hdr->args.bitmask = NULL;
4452 hdr->res.fattr = NULL;
4453 } else
4454 hdr->args.bitmask = server->cache_consistency_bitmask;
4455
4456 if (!hdr->pgio_done_cb)
4457 hdr->pgio_done_cb = nfs4_write_done_cb;
4458 hdr->res.server = server;
4459 hdr->timestamp = jiffies;
4460
4461 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
4462 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1);
4463 }
4464
4465 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
4466 {
4467 nfs4_setup_sequence(NFS_SERVER(data->inode),
4468 &data->args.seq_args,
4469 &data->res.seq_res,
4470 task);
4471 }
4472
4473 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
4474 {
4475 struct inode *inode = data->inode;
4476
4477 trace_nfs4_commit(data, task->tk_status);
4478 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
4479 NULL, NULL) == -EAGAIN) {
4480 rpc_restart_call_prepare(task);
4481 return -EAGAIN;
4482 }
4483 return 0;
4484 }
4485
4486 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
4487 {
4488 if (!nfs4_sequence_done(task, &data->res.seq_res))
4489 return -EAGAIN;
4490 return data->commit_done_cb(task, data);
4491 }
4492
4493 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
4494 {
4495 struct nfs_server *server = NFS_SERVER(data->inode);
4496
4497 if (data->commit_done_cb == NULL)
4498 data->commit_done_cb = nfs4_commit_done_cb;
4499 data->res.server = server;
4500 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
4501 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
4502 }
4503
4504 struct nfs4_renewdata {
4505 struct nfs_client *client;
4506 unsigned long timestamp;
4507 };
4508
4509 /*
4510 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
4511 * standalone procedure for queueing an asynchronous RENEW.
4512 */
4513 static void nfs4_renew_release(void *calldata)
4514 {
4515 struct nfs4_renewdata *data = calldata;
4516 struct nfs_client *clp = data->client;
4517
4518 if (atomic_read(&clp->cl_count) > 1)
4519 nfs4_schedule_state_renewal(clp);
4520 nfs_put_client(clp);
4521 kfree(data);
4522 }
4523
4524 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
4525 {
4526 struct nfs4_renewdata *data = calldata;
4527 struct nfs_client *clp = data->client;
4528 unsigned long timestamp = data->timestamp;
4529
4530 trace_nfs4_renew_async(clp, task->tk_status);
4531 switch (task->tk_status) {
4532 case 0:
4533 break;
4534 case -NFS4ERR_LEASE_MOVED:
4535 nfs4_schedule_lease_moved_recovery(clp);
4536 break;
4537 default:
4538 /* Unless we're shutting down, schedule state recovery! */
4539 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
4540 return;
4541 		if (task->tk_status != -NFS4ERR_CB_PATH_DOWN) {
4542 nfs4_schedule_lease_recovery(clp);
4543 return;
4544 }
4545 nfs4_schedule_path_down_recovery(clp);
4546 }
4547 do_renew_lease(clp, timestamp);
4548 }
4549
4550 static const struct rpc_call_ops nfs4_renew_ops = {
4551 .rpc_call_done = nfs4_renew_done,
4552 .rpc_release = nfs4_renew_release,
4553 };
4554
4555 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
4556 {
4557 struct rpc_message msg = {
4558 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
4559 .rpc_argp = clp,
4560 .rpc_cred = cred,
4561 };
4562 struct nfs4_renewdata *data;
4563
4564 if (renew_flags == 0)
4565 return 0;
4566 if (!atomic_inc_not_zero(&clp->cl_count))
4567 return -EIO;
4568 data = kmalloc(sizeof(*data), GFP_NOFS);
4569 if (data == NULL)
4570 return -ENOMEM;
4571 data->client = clp;
4572 data->timestamp = jiffies;
4573 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
4574 &nfs4_renew_ops, data);
4575 }
4576
4577 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
4578 {
4579 struct rpc_message msg = {
4580 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
4581 .rpc_argp = clp,
4582 .rpc_cred = cred,
4583 };
4584 unsigned long now = jiffies;
4585 int status;
4586
4587 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4588 if (status < 0)
4589 return status;
4590 do_renew_lease(clp, now);
4591 return 0;
4592 }
4593
4594 static inline int nfs4_server_supports_acls(struct nfs_server *server)
4595 {
4596 return server->caps & NFS_CAP_ACLS;
4597 }
4598
4599 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
4600  * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
4601 * the stack.
4602 */
4603 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
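/*
 * Worked example (assumed values, not taken from this file): with
 * XATTR_SIZE_MAX == 65536 and 4K pages, NFS4ACL_MAXPAGES works out to
 * 65536 / 4096 = 16, so the on-stack array of page pointers used by the
 * ACL routines below costs roughly 16 * sizeof(struct page *) = 128
 * bytes on a 64-bit build.
 */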
4604
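/*
 * Copy @buflen bytes from @buf into freshly allocated pages, recording
 * them in @pages.  Returns the number of pages filled, or -ENOMEM (after
 * releasing any pages already allocated) if an allocation fails.
 */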
4605 static int buf_to_pages_noslab(const void *buf, size_t buflen,
4606 struct page **pages)
4607 {
4608 struct page *newpage, **spages;
4609 int rc = 0;
4610 size_t len;
4611 spages = pages;
4612
4613 do {
4614 len = min_t(size_t, PAGE_SIZE, buflen);
4615 newpage = alloc_page(GFP_KERNEL);
4616
4617 if (newpage == NULL)
4618 goto unwind;
4619 memcpy(page_address(newpage), buf, len);
4620 buf += len;
4621 buflen -= len;
4622 *pages++ = newpage;
4623 rc++;
4624 } while (buflen != 0);
4625
4626 return rc;
4627
4628 unwind:
4629 	for (; rc > 0; rc--)
4630 __free_page(spages[rc-1]);
4631 return -ENOMEM;
4632 }
4633
4634 struct nfs4_cached_acl {
4635 int cached;
4636 size_t len;
4637 char data[0];
4638 };
4639
4640 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
4641 {
4642 struct nfs_inode *nfsi = NFS_I(inode);
4643
4644 spin_lock(&inode->i_lock);
4645 kfree(nfsi->nfs4_acl);
4646 nfsi->nfs4_acl = acl;
4647 spin_unlock(&inode->i_lock);
4648 }
4649
4650 static void nfs4_zap_acl_attr(struct inode *inode)
4651 {
4652 nfs4_set_cached_acl(inode, NULL);
4653 }
4654
4655 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
4656 {
4657 struct nfs_inode *nfsi = NFS_I(inode);
4658 struct nfs4_cached_acl *acl;
4659 int ret = -ENOENT;
4660
4661 spin_lock(&inode->i_lock);
4662 acl = nfsi->nfs4_acl;
4663 if (acl == NULL)
4664 goto out;
4665 if (buf == NULL) /* user is just asking for length */
4666 goto out_len;
4667 if (acl->cached == 0)
4668 goto out;
4669 ret = -ERANGE; /* see getxattr(2) man page */
4670 if (acl->len > buflen)
4671 goto out;
4672 memcpy(buf, acl->data, acl->len);
4673 out_len:
4674 ret = acl->len;
4675 out:
4676 spin_unlock(&inode->i_lock);
4677 return ret;
4678 }
4679
4680 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
4681 {
4682 struct nfs4_cached_acl *acl;
4683 size_t buflen = sizeof(*acl) + acl_len;
4684
4685 if (buflen <= PAGE_SIZE) {
4686 acl = kmalloc(buflen, GFP_KERNEL);
4687 if (acl == NULL)
4688 goto out;
4689 acl->cached = 1;
4690 _copy_from_pages(acl->data, pages, pgbase, acl_len);
4691 } else {
4692 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
4693 if (acl == NULL)
4694 goto out;
4695 acl->cached = 0;
4696 }
4697 acl->len = acl_len;
4698 out:
4699 nfs4_set_cached_acl(inode, acl);
4700 }
4701
4702 /*
4703 * The getxattr API returns the required buffer length when called with a
4704 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
4705 * the required buf. On a NULL buf, we send a page of data to the server
4706 * guessing that the ACL request can be serviced by a page. If so, we cache
4707 * up to the page of ACL data, and the 2nd call to getxattr is serviced by
4708 * the cache. If not so, we throw away the page, and cache the required
4709 * length. The next getxattr call will then produce another round trip to
4710 * the server, this time with the input buf of the required size.
4711 */
4712 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
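/*
 * Minimal userspace sketch of the two-call pattern described above
 * (illustrative only, error handling omitted; "system.nfs4_acl" is the
 * xattr name under which the NFSv4 ACL is exposed):
 *
 *	ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	char *aclbuf = malloc(len);
 *	getxattr(path, "system.nfs4_acl", aclbuf, len);
 *
 * When the ACL fits in one page, the first call populates the cache and
 * the second call is answered from it without another round trip.
 */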
4713 {
4714 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
4715 struct nfs_getaclargs args = {
4716 .fh = NFS_FH(inode),
4717 .acl_pages = pages,
4718 .acl_len = buflen,
4719 };
4720 struct nfs_getaclres res = {
4721 .acl_len = buflen,
4722 };
4723 struct rpc_message msg = {
4724 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
4725 .rpc_argp = &args,
4726 .rpc_resp = &res,
4727 };
4728 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4729 int ret = -ENOMEM, i;
4730
4731 /* As long as we're doing a round trip to the server anyway,
4732 * let's be prepared for a page of acl data. */
4733 if (npages == 0)
4734 npages = 1;
4735 if (npages > ARRAY_SIZE(pages))
4736 return -ERANGE;
4737
4738 for (i = 0; i < npages; i++) {
4739 pages[i] = alloc_page(GFP_KERNEL);
4740 if (!pages[i])
4741 goto out_free;
4742 }
4743
4744 /* for decoding across pages */
4745 res.acl_scratch = alloc_page(GFP_KERNEL);
4746 if (!res.acl_scratch)
4747 goto out_free;
4748
4749 args.acl_len = npages * PAGE_SIZE;
4750
4751 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
4752 __func__, buf, buflen, npages, args.acl_len);
4753 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
4754 &msg, &args.seq_args, &res.seq_res, 0);
4755 if (ret)
4756 goto out_free;
4757
4758 /* Handle the case where the passed-in buffer is too short */
4759 if (res.acl_flags & NFS4_ACL_TRUNC) {
4760 /* Did the user only issue a request for the acl length? */
4761 if (buf == NULL)
4762 goto out_ok;
4763 ret = -ERANGE;
4764 goto out_free;
4765 }
4766 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
4767 if (buf) {
4768 if (res.acl_len > buflen) {
4769 ret = -ERANGE;
4770 goto out_free;
4771 }
4772 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
4773 }
4774 out_ok:
4775 ret = res.acl_len;
4776 out_free:
4777 for (i = 0; i < npages; i++)
4778 if (pages[i])
4779 __free_page(pages[i]);
4780 if (res.acl_scratch)
4781 __free_page(res.acl_scratch);
4782 return ret;
4783 }
4784
4785 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4786 {
4787 struct nfs4_exception exception = { };
4788 ssize_t ret;
4789 do {
4790 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
4791 trace_nfs4_get_acl(inode, ret);
4792 if (ret >= 0)
4793 break;
4794 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
4795 } while (exception.retry);
4796 return ret;
4797 }
4798
4799 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
4800 {
4801 struct nfs_server *server = NFS_SERVER(inode);
4802 int ret;
4803
4804 if (!nfs4_server_supports_acls(server))
4805 return -EOPNOTSUPP;
4806 ret = nfs_revalidate_inode(server, inode);
4807 if (ret < 0)
4808 return ret;
4809 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
4810 nfs_zap_acl_cache(inode);
4811 ret = nfs4_read_cached_acl(inode, buf, buflen);
4812 if (ret != -ENOENT)
4813 /* -ENOENT is returned if there is no ACL or if there is an ACL
4814 * but no cached acl data, just the acl length */
4815 return ret;
4816 return nfs4_get_acl_uncached(inode, buf, buflen);
4817 }
4818
4819 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4820 {
4821 struct nfs_server *server = NFS_SERVER(inode);
4822 struct page *pages[NFS4ACL_MAXPAGES];
4823 struct nfs_setaclargs arg = {
4824 .fh = NFS_FH(inode),
4825 .acl_pages = pages,
4826 .acl_len = buflen,
4827 };
4828 struct nfs_setaclres res;
4829 struct rpc_message msg = {
4830 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
4831 .rpc_argp = &arg,
4832 .rpc_resp = &res,
4833 };
4834 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4835 int ret, i;
4836
4837 if (!nfs4_server_supports_acls(server))
4838 return -EOPNOTSUPP;
4839 if (npages > ARRAY_SIZE(pages))
4840 return -ERANGE;
4841 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages);
4842 if (i < 0)
4843 return i;
4844 nfs4_inode_return_delegation(inode);
4845 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4846
4847 /*
4848 * Free each page after tx, so the only ref left is
4849 * held by the network stack
4850 */
4851 for (; i > 0; i--)
4852 put_page(pages[i-1]);
4853
4854 /*
4855 	 * Acl update can result in inode attribute update,
4856 	 * so mark the attribute cache invalid.
4857 */
4858 spin_lock(&inode->i_lock);
4859 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
4860 spin_unlock(&inode->i_lock);
4861 nfs_access_zap_cache(inode);
4862 nfs_zap_acl_cache(inode);
4863 return ret;
4864 }
4865
4866 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4867 {
4868 struct nfs4_exception exception = { };
4869 int err;
4870 do {
4871 err = __nfs4_proc_set_acl(inode, buf, buflen);
4872 trace_nfs4_set_acl(inode, err);
4873 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4874 &exception);
4875 } while (exception.retry);
4876 return err;
4877 }
4878
4879 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
4880 static int _nfs4_get_security_label(struct inode *inode, void *buf,
4881 size_t buflen)
4882 {
4883 struct nfs_server *server = NFS_SERVER(inode);
4884 struct nfs_fattr fattr;
4885 struct nfs4_label label = {0, 0, buflen, buf};
4886
4887 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
4888 struct nfs4_getattr_arg arg = {
4889 .fh = NFS_FH(inode),
4890 .bitmask = bitmask,
4891 };
4892 struct nfs4_getattr_res res = {
4893 .fattr = &fattr,
4894 .label = &label,
4895 .server = server,
4896 };
4897 struct rpc_message msg = {
4898 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
4899 .rpc_argp = &arg,
4900 .rpc_resp = &res,
4901 };
4902 int ret;
4903
4904 nfs_fattr_init(&fattr);
4905
4906 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
4907 if (ret)
4908 return ret;
4909 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
4910 return -ENOENT;
4911 if (buflen < label.len)
4912 return -ERANGE;
4913 	return label.len;
4914 }
4915
4916 static int nfs4_get_security_label(struct inode *inode, void *buf,
4917 size_t buflen)
4918 {
4919 struct nfs4_exception exception = { };
4920 int err;
4921
4922 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
4923 return -EOPNOTSUPP;
4924
4925 do {
4926 err = _nfs4_get_security_label(inode, buf, buflen);
4927 trace_nfs4_get_security_label(inode, err);
4928 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4929 &exception);
4930 } while (exception.retry);
4931 return err;
4932 }
4933
4934 static int _nfs4_do_set_security_label(struct inode *inode,
4935 struct nfs4_label *ilabel,
4936 struct nfs_fattr *fattr,
4937 struct nfs4_label *olabel)
4938 {
4939
4940 struct iattr sattr = {0};
4941 struct nfs_server *server = NFS_SERVER(inode);
4942 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
4943 struct nfs_setattrargs arg = {
4944 .fh = NFS_FH(inode),
4945 .iap = &sattr,
4946 .server = server,
4947 .bitmask = bitmask,
4948 .label = ilabel,
4949 };
4950 struct nfs_setattrres res = {
4951 .fattr = fattr,
4952 .label = olabel,
4953 .server = server,
4954 };
4955 struct rpc_message msg = {
4956 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
4957 .rpc_argp = &arg,
4958 .rpc_resp = &res,
4959 };
4960 int status;
4961
4962 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
4963
4964 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4965 if (status)
4966 dprintk("%s failed: %d\n", __func__, status);
4967
4968 return status;
4969 }
4970
4971 static int nfs4_do_set_security_label(struct inode *inode,
4972 struct nfs4_label *ilabel,
4973 struct nfs_fattr *fattr,
4974 struct nfs4_label *olabel)
4975 {
4976 struct nfs4_exception exception = { };
4977 int err;
4978
4979 do {
4980 err = _nfs4_do_set_security_label(inode, ilabel,
4981 fattr, olabel);
4982 trace_nfs4_set_security_label(inode, err);
4983 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4984 &exception);
4985 } while (exception.retry);
4986 return err;
4987 }
4988
4989 static int
4990 nfs4_set_security_label(struct dentry *dentry, const void *buf, size_t buflen)
4991 {
4992 struct nfs4_label ilabel, *olabel = NULL;
4993 struct nfs_fattr fattr;
4994 struct rpc_cred *cred;
4995 struct inode *inode = d_inode(dentry);
4996 int status;
4997
4998 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
4999 return -EOPNOTSUPP;
5000
5001 nfs_fattr_init(&fattr);
5002
5003 ilabel.pi = 0;
5004 ilabel.lfs = 0;
5005 ilabel.label = (char *)buf;
5006 ilabel.len = buflen;
5007
5008 cred = rpc_lookup_cred();
5009 if (IS_ERR(cred))
5010 return PTR_ERR(cred);
5011
5012 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
5013 if (IS_ERR(olabel)) {
5014 		status = PTR_ERR(olabel);
5015 goto out;
5016 }
5017
5018 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel);
5019 if (status == 0)
5020 nfs_setsecurity(inode, &fattr, olabel);
5021
5022 nfs4_label_free(olabel);
5023 out:
5024 put_rpccred(cred);
5025 return status;
5026 }
5027 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */
5028
5029
5030 static void nfs4_init_boot_verifier(const struct nfs_client *clp,
5031 nfs4_verifier *bootverf)
5032 {
5033 __be32 verf[2];
5034
5035 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
5036 /* An impossible timestamp guarantees this value
5037 * will never match a generated boot time. */
5038 verf[0] = 0;
5039 verf[1] = cpu_to_be32(NSEC_PER_SEC + 1);
5040 } else {
5041 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
5042 verf[0] = cpu_to_be32(nn->boot_time.tv_sec);
5043 verf[1] = cpu_to_be32(nn->boot_time.tv_nsec);
5044 }
5045 memcpy(bootverf->data, verf, sizeof(bootverf->data));
5046 }
5047
5048 static int
5049 nfs4_init_nonuniform_client_string(struct nfs_client *clp)
5050 {
5051 int result;
5052 size_t len;
5053 char *str;
5054
5055 if (clp->cl_owner_id != NULL)
5056 return 0;
5057
5058 rcu_read_lock();
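	/* 14 == strlen("Linux NFSv4.0 "), the prefix used by scnprintf() below */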
5059 len = 14 + strlen(clp->cl_ipaddr) + 1 +
5060 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
5061 1 +
5062 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)) +
5063 1;
5064 rcu_read_unlock();
5065
5066 if (len > NFS4_OPAQUE_LIMIT + 1)
5067 return -EINVAL;
5068
5069 /*
5070 * Since this string is allocated at mount time, and held until the
5071 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5072 * about a memory-reclaim deadlock.
5073 */
5074 str = kmalloc(len, GFP_KERNEL);
5075 if (!str)
5076 return -ENOMEM;
5077
5078 rcu_read_lock();
5079 result = scnprintf(str, len, "Linux NFSv4.0 %s/%s %s",
5080 clp->cl_ipaddr,
5081 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
5082 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO));
5083 rcu_read_unlock();
5084
5085 clp->cl_owner_id = str;
5086 return 0;
5087 }
5088
5089 static int
5090 nfs4_init_uniquifier_client_string(struct nfs_client *clp)
5091 {
5092 int result;
5093 size_t len;
5094 char *str;
5095
5096 len = 10 + 10 + 1 + 10 + 1 +
5097 strlen(nfs4_client_id_uniquifier) + 1 +
5098 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5099
5100 if (len > NFS4_OPAQUE_LIMIT + 1)
5101 return -EINVAL;
5102
5103 /*
5104 * Since this string is allocated at mount time, and held until the
5105 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5106 * about a memory-reclaim deadlock.
5107 */
5108 str = kmalloc(len, GFP_KERNEL);
5109 if (!str)
5110 return -ENOMEM;
5111
5112 result = scnprintf(str, len, "Linux NFSv%u.%u %s/%s",
5113 clp->rpc_ops->version, clp->cl_minorversion,
5114 nfs4_client_id_uniquifier,
5115 clp->cl_rpcclient->cl_nodename);
5116 clp->cl_owner_id = str;
5117 return 0;
5118 }
5119
5120 static int
5121 nfs4_init_uniform_client_string(struct nfs_client *clp)
5122 {
5123 int result;
5124 size_t len;
5125 char *str;
5126
5127 if (clp->cl_owner_id != NULL)
5128 return 0;
5129
5130 if (nfs4_client_id_uniquifier[0] != '\0')
5131 return nfs4_init_uniquifier_client_string(clp);
5132
5133 len = 10 + 10 + 1 + 10 + 1 +
5134 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5135
5136 if (len > NFS4_OPAQUE_LIMIT + 1)
5137 return -EINVAL;
5138
5139 /*
5140 * Since this string is allocated at mount time, and held until the
5141 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5142 * about a memory-reclaim deadlock.
5143 */
5144 str = kmalloc(len, GFP_KERNEL);
5145 if (!str)
5146 return -ENOMEM;
5147
5148 result = scnprintf(str, len, "Linux NFSv%u.%u %s",
5149 clp->rpc_ops->version, clp->cl_minorversion,
5150 clp->cl_rpcclient->cl_nodename);
5151 clp->cl_owner_id = str;
5152 return 0;
5153 }
5154
5155 /*
5156 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
5157 * services. Advertise one based on the address family of the
5158 * clientaddr.
5159 */
5160 static unsigned int
5161 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
5162 {
5163 if (strchr(clp->cl_ipaddr, ':') != NULL)
5164 return scnprintf(buf, len, "tcp6");
5165 else
5166 return scnprintf(buf, len, "tcp");
5167 }
5168
5169 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
5170 {
5171 struct nfs4_setclientid *sc = calldata;
5172
5173 if (task->tk_status == 0)
5174 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
5175 }
5176
5177 static const struct rpc_call_ops nfs4_setclientid_ops = {
5178 .rpc_call_done = nfs4_setclientid_done,
5179 };
5180
5181 /**
5182 * nfs4_proc_setclientid - Negotiate client ID
5183 * @clp: state data structure
5184 * @program: RPC program for NFSv4 callback service
5185 * @port: IP port number for NFS4 callback service
5186 * @cred: RPC credential to use for this call
5187 * @res: where to place the result
5188 *
5189 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5190 */
5191 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
5192 unsigned short port, struct rpc_cred *cred,
5193 struct nfs4_setclientid_res *res)
5194 {
5195 nfs4_verifier sc_verifier;
5196 struct nfs4_setclientid setclientid = {
5197 .sc_verifier = &sc_verifier,
5198 .sc_prog = program,
5199 .sc_clnt = clp,
5200 };
5201 struct rpc_message msg = {
5202 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
5203 .rpc_argp = &setclientid,
5204 .rpc_resp = res,
5205 .rpc_cred = cred,
5206 };
5207 struct rpc_task *task;
5208 struct rpc_task_setup task_setup_data = {
5209 .rpc_client = clp->cl_rpcclient,
5210 .rpc_message = &msg,
5211 .callback_ops = &nfs4_setclientid_ops,
5212 .callback_data = &setclientid,
5213 .flags = RPC_TASK_TIMEOUT,
5214 };
5215 int status;
5216
5217 /* nfs_client_id4 */
5218 nfs4_init_boot_verifier(clp, &sc_verifier);
5219
5220 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
5221 status = nfs4_init_uniform_client_string(clp);
5222 else
5223 status = nfs4_init_nonuniform_client_string(clp);
5224
5225 if (status)
5226 goto out;
5227
5228 /* cb_client4 */
5229 setclientid.sc_netid_len =
5230 nfs4_init_callback_netid(clp,
5231 setclientid.sc_netid,
5232 sizeof(setclientid.sc_netid));
5233 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
5234 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
5235 clp->cl_ipaddr, port >> 8, port & 255);
5236
5237 dprintk("NFS call setclientid auth=%s, '%s'\n",
5238 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5239 clp->cl_owner_id);
5240 task = rpc_run_task(&task_setup_data);
5241 if (IS_ERR(task)) {
5242 status = PTR_ERR(task);
5243 goto out;
5244 }
5245 status = task->tk_status;
5246 if (setclientid.sc_cred) {
5247 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
5248 put_rpccred(setclientid.sc_cred);
5249 }
5250 rpc_put_task(task);
5251 out:
5252 trace_nfs4_setclientid(clp, status);
5253 dprintk("NFS reply setclientid: %d\n", status);
5254 return status;
5255 }
5256
5257 /**
5258 * nfs4_proc_setclientid_confirm - Confirm client ID
5259 * @clp: state data structure
5260  * @arg: result of a previous SETCLIENTID
5261 * @cred: RPC credential to use for this call
5262 *
5263 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5264 */
5265 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
5266 struct nfs4_setclientid_res *arg,
5267 struct rpc_cred *cred)
5268 {
5269 struct rpc_message msg = {
5270 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
5271 .rpc_argp = arg,
5272 .rpc_cred = cred,
5273 };
5274 int status;
5275
5276 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
5277 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5278 clp->cl_clientid);
5279 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5280 trace_nfs4_setclientid_confirm(clp, status);
5281 dprintk("NFS reply setclientid_confirm: %d\n", status);
5282 return status;
5283 }
5284
5285 struct nfs4_delegreturndata {
5286 struct nfs4_delegreturnargs args;
5287 struct nfs4_delegreturnres res;
5288 struct nfs_fh fh;
5289 nfs4_stateid stateid;
5290 unsigned long timestamp;
5291 struct nfs_fattr fattr;
5292 int rpc_status;
5293 struct inode *inode;
5294 bool roc;
5295 u32 roc_barrier;
5296 };
5297
5298 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
5299 {
5300 struct nfs4_delegreturndata *data = calldata;
5301
5302 if (!nfs4_sequence_done(task, &data->res.seq_res))
5303 return;
5304
5305 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
5306 switch (task->tk_status) {
5307 case 0:
5308 renew_lease(data->res.server, data->timestamp);
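		/* Fall through */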
5309 case -NFS4ERR_ADMIN_REVOKED:
5310 case -NFS4ERR_DELEG_REVOKED:
5311 case -NFS4ERR_BAD_STATEID:
5312 case -NFS4ERR_OLD_STATEID:
5313 case -NFS4ERR_STALE_STATEID:
5314 case -NFS4ERR_EXPIRED:
5315 task->tk_status = 0;
5316 if (data->roc)
5317 pnfs_roc_set_barrier(data->inode, data->roc_barrier);
5318 break;
5319 default:
5320 if (nfs4_async_handle_error(task, data->res.server,
5321 NULL, NULL) == -EAGAIN) {
5322 rpc_restart_call_prepare(task);
5323 return;
5324 }
5325 }
5326 data->rpc_status = task->tk_status;
5327 }
5328
5329 static void nfs4_delegreturn_release(void *calldata)
5330 {
5331 struct nfs4_delegreturndata *data = calldata;
5332 struct inode *inode = data->inode;
5333
5334 if (inode) {
5335 if (data->roc)
5336 pnfs_roc_release(inode);
5337 nfs_iput_and_deactive(inode);
5338 }
5339 kfree(calldata);
5340 }
5341
5342 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
5343 {
5344 struct nfs4_delegreturndata *d_data;
5345
5346 d_data = (struct nfs4_delegreturndata *)data;
5347
5348 if (nfs4_wait_on_layoutreturn(d_data->inode, task))
5349 return;
5350
5351 if (d_data->roc)
5352 pnfs_roc_get_barrier(d_data->inode, &d_data->roc_barrier);
5353
5354 nfs4_setup_sequence(d_data->res.server,
5355 &d_data->args.seq_args,
5356 &d_data->res.seq_res,
5357 task);
5358 }
5359
5360 static const struct rpc_call_ops nfs4_delegreturn_ops = {
5361 .rpc_call_prepare = nfs4_delegreturn_prepare,
5362 .rpc_call_done = nfs4_delegreturn_done,
5363 .rpc_release = nfs4_delegreturn_release,
5364 };
5365
5366 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5367 {
5368 struct nfs4_delegreturndata *data;
5369 struct nfs_server *server = NFS_SERVER(inode);
5370 struct rpc_task *task;
5371 struct rpc_message msg = {
5372 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
5373 .rpc_cred = cred,
5374 };
5375 struct rpc_task_setup task_setup_data = {
5376 .rpc_client = server->client,
5377 .rpc_message = &msg,
5378 .callback_ops = &nfs4_delegreturn_ops,
5379 .flags = RPC_TASK_ASYNC,
5380 };
5381 int status = 0;
5382
5383 data = kzalloc(sizeof(*data), GFP_NOFS);
5384 if (data == NULL)
5385 return -ENOMEM;
5386 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
5387 data->args.fhandle = &data->fh;
5388 data->args.stateid = &data->stateid;
5389 data->args.bitmask = server->cache_consistency_bitmask;
5390 nfs_copy_fh(&data->fh, NFS_FH(inode));
5391 nfs4_stateid_copy(&data->stateid, stateid);
5392 data->res.fattr = &data->fattr;
5393 data->res.server = server;
5394 nfs_fattr_init(data->res.fattr);
5395 data->timestamp = jiffies;
5396 data->rpc_status = 0;
5397 data->inode = nfs_igrab_and_active(inode);
5398 if (data->inode)
5399 data->roc = nfs4_roc(inode);
5400
5401 task_setup_data.callback_data = data;
5402 msg.rpc_argp = &data->args;
5403 msg.rpc_resp = &data->res;
5404 task = rpc_run_task(&task_setup_data);
5405 if (IS_ERR(task))
5406 return PTR_ERR(task);
5407 if (!issync)
5408 goto out;
5409 status = nfs4_wait_for_completion_rpc_task(task);
5410 if (status != 0)
5411 goto out;
5412 status = data->rpc_status;
5413 if (status == 0)
5414 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
5415 else
5416 nfs_refresh_inode(inode, &data->fattr);
5417 out:
5418 rpc_put_task(task);
5419 return status;
5420 }
5421
5422 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5423 {
5424 struct nfs_server *server = NFS_SERVER(inode);
5425 struct nfs4_exception exception = { };
5426 int err;
5427 do {
5428 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
5429 trace_nfs4_delegreturn(inode, stateid, err);
5430 switch (err) {
5431 case -NFS4ERR_STALE_STATEID:
5432 case -NFS4ERR_EXPIRED:
5433 case 0:
5434 return 0;
5435 }
5436 err = nfs4_handle_exception(server, err, &exception);
5437 } while (exception.retry);
5438 return err;
5439 }
5440
5441 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
5442 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
5443
5444 /*
5445 * sleep, with exponential backoff, and retry the LOCK operation.
5446 */
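/*
 * For example, a caller that starts from NFS4_LOCK_MINTIMEOUT sleeps for
 * 1s, 2s, 4s, 8s and then 16s on successive retries, after which each
 * further wait is clamped to NFS4_LOCK_MAXTIMEOUT (30s).
 */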
5447 static unsigned long
5448 nfs4_set_lock_task_retry(unsigned long timeout)
5449 {
5450 freezable_schedule_timeout_killable_unsafe(timeout);
5451 timeout <<= 1;
5452 if (timeout > NFS4_LOCK_MAXTIMEOUT)
5453 return NFS4_LOCK_MAXTIMEOUT;
5454 return timeout;
5455 }
5456
5457 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5458 {
5459 struct inode *inode = state->inode;
5460 struct nfs_server *server = NFS_SERVER(inode);
5461 struct nfs_client *clp = server->nfs_client;
5462 struct nfs_lockt_args arg = {
5463 .fh = NFS_FH(inode),
5464 .fl = request,
5465 };
5466 struct nfs_lockt_res res = {
5467 .denied = request,
5468 };
5469 struct rpc_message msg = {
5470 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
5471 .rpc_argp = &arg,
5472 .rpc_resp = &res,
5473 .rpc_cred = state->owner->so_cred,
5474 };
5475 struct nfs4_lock_state *lsp;
5476 int status;
5477
5478 arg.lock_owner.clientid = clp->cl_clientid;
5479 status = nfs4_set_lock_state(state, request);
5480 if (status != 0)
5481 goto out;
5482 lsp = request->fl_u.nfs4_fl.owner;
5483 arg.lock_owner.id = lsp->ls_seqid.owner_id;
5484 arg.lock_owner.s_dev = server->s_dev;
5485 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
5486 switch (status) {
5487 case 0:
5488 request->fl_type = F_UNLCK;
5489 break;
5490 case -NFS4ERR_DENIED:
5491 status = 0;
5492 }
5493 request->fl_ops->fl_release_private(request);
5494 request->fl_ops = NULL;
5495 out:
5496 return status;
5497 }
5498
5499 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5500 {
5501 struct nfs4_exception exception = { };
5502 int err;
5503
5504 do {
5505 err = _nfs4_proc_getlk(state, cmd, request);
5506 trace_nfs4_get_lock(request, state, cmd, err);
5507 err = nfs4_handle_exception(NFS_SERVER(state->inode), err,
5508 &exception);
5509 } while (exception.retry);
5510 return err;
5511 }
5512
5513 static int do_vfs_lock(struct inode *inode, struct file_lock *fl)
5514 {
5515 return locks_lock_inode_wait(inode, fl);
5516 }
5517
5518 struct nfs4_unlockdata {
5519 struct nfs_locku_args arg;
5520 struct nfs_locku_res res;
5521 struct nfs4_lock_state *lsp;
5522 struct nfs_open_context *ctx;
5523 struct file_lock fl;
5524 struct nfs_server *server;
5525 unsigned long timestamp;
5526 };
5527
5528 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
5529 struct nfs_open_context *ctx,
5530 struct nfs4_lock_state *lsp,
5531 struct nfs_seqid *seqid)
5532 {
5533 struct nfs4_unlockdata *p;
5534 struct inode *inode = lsp->ls_state->inode;
5535
5536 p = kzalloc(sizeof(*p), GFP_NOFS);
5537 if (p == NULL)
5538 return NULL;
5539 p->arg.fh = NFS_FH(inode);
5540 p->arg.fl = &p->fl;
5541 p->arg.seqid = seqid;
5542 p->res.seqid = seqid;
5543 p->lsp = lsp;
5544 atomic_inc(&lsp->ls_count);
5545 /* Ensure we don't close file until we're done freeing locks! */
5546 p->ctx = get_nfs_open_context(ctx);
5547 memcpy(&p->fl, fl, sizeof(p->fl));
5548 p->server = NFS_SERVER(inode);
5549 return p;
5550 }
5551
5552 static void nfs4_locku_release_calldata(void *data)
5553 {
5554 struct nfs4_unlockdata *calldata = data;
5555 nfs_free_seqid(calldata->arg.seqid);
5556 nfs4_put_lock_state(calldata->lsp);
5557 put_nfs_open_context(calldata->ctx);
5558 kfree(calldata);
5559 }
5560
5561 static void nfs4_locku_done(struct rpc_task *task, void *data)
5562 {
5563 struct nfs4_unlockdata *calldata = data;
5564
5565 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
5566 return;
5567 switch (task->tk_status) {
5568 case 0:
5569 renew_lease(calldata->server, calldata->timestamp);
5570 do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl);
5571 if (nfs4_update_lock_stateid(calldata->lsp,
5572 &calldata->res.stateid))
5573 break;
5574 case -NFS4ERR_BAD_STATEID:
5575 case -NFS4ERR_OLD_STATEID:
5576 case -NFS4ERR_STALE_STATEID:
5577 case -NFS4ERR_EXPIRED:
5578 if (!nfs4_stateid_match(&calldata->arg.stateid,
5579 &calldata->lsp->ls_stateid))
5580 rpc_restart_call_prepare(task);
5581 break;
5582 default:
5583 if (nfs4_async_handle_error(task, calldata->server,
5584 NULL, NULL) == -EAGAIN)
5585 rpc_restart_call_prepare(task);
5586 }
5587 nfs_release_seqid(calldata->arg.seqid);
5588 }
5589
5590 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
5591 {
5592 struct nfs4_unlockdata *calldata = data;
5593
5594 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
5595 goto out_wait;
5596 nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid);
5597 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
5598 /* Note: exit _without_ running nfs4_locku_done */
5599 goto out_no_action;
5600 }
5601 calldata->timestamp = jiffies;
5602 if (nfs4_setup_sequence(calldata->server,
5603 &calldata->arg.seq_args,
5604 &calldata->res.seq_res,
5605 task) != 0)
5606 nfs_release_seqid(calldata->arg.seqid);
5607 return;
5608 out_no_action:
5609 task->tk_action = NULL;
5610 out_wait:
5611 nfs4_sequence_done(task, &calldata->res.seq_res);
5612 }
5613
5614 static const struct rpc_call_ops nfs4_locku_ops = {
5615 .rpc_call_prepare = nfs4_locku_prepare,
5616 .rpc_call_done = nfs4_locku_done,
5617 .rpc_release = nfs4_locku_release_calldata,
5618 };
5619
5620 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
5621 struct nfs_open_context *ctx,
5622 struct nfs4_lock_state *lsp,
5623 struct nfs_seqid *seqid)
5624 {
5625 struct nfs4_unlockdata *data;
5626 struct rpc_message msg = {
5627 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
5628 .rpc_cred = ctx->cred,
5629 };
5630 struct rpc_task_setup task_setup_data = {
5631 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
5632 .rpc_message = &msg,
5633 .callback_ops = &nfs4_locku_ops,
5634 .workqueue = nfsiod_workqueue,
5635 .flags = RPC_TASK_ASYNC,
5636 };
5637
5638 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
5639 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
5640
5641 /* Ensure this is an unlock - when canceling a lock, the
5642 * canceled lock is passed in, and it won't be an unlock.
5643 */
5644 fl->fl_type = F_UNLCK;
5645
5646 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
5647 if (data == NULL) {
5648 nfs_free_seqid(seqid);
5649 return ERR_PTR(-ENOMEM);
5650 }
5651
5652 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
5653 msg.rpc_argp = &data->arg;
5654 msg.rpc_resp = &data->res;
5655 task_setup_data.callback_data = data;
5656 return rpc_run_task(&task_setup_data);
5657 }
5658
5659 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
5660 {
5661 struct inode *inode = state->inode;
5662 struct nfs4_state_owner *sp = state->owner;
5663 struct nfs_inode *nfsi = NFS_I(inode);
5664 struct nfs_seqid *seqid;
5665 struct nfs4_lock_state *lsp;
5666 struct rpc_task *task;
5667 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
5668 int status = 0;
5669 unsigned char fl_flags = request->fl_flags;
5670
5671 status = nfs4_set_lock_state(state, request);
5672 /* Unlock _before_ we do the RPC call */
5673 request->fl_flags |= FL_EXISTS;
5674 /* Exclude nfs_delegation_claim_locks() */
5675 mutex_lock(&sp->so_delegreturn_mutex);
5676 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
5677 down_read(&nfsi->rwsem);
5678 if (do_vfs_lock(inode, request) == -ENOENT) {
5679 up_read(&nfsi->rwsem);
5680 mutex_unlock(&sp->so_delegreturn_mutex);
5681 goto out;
5682 }
5683 up_read(&nfsi->rwsem);
5684 mutex_unlock(&sp->so_delegreturn_mutex);
5685 if (status != 0)
5686 goto out;
5687 /* Is this a delegated lock? */
5688 lsp = request->fl_u.nfs4_fl.owner;
5689 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
5690 goto out;
5691 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
5692 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
5693 status = -ENOMEM;
5694 if (IS_ERR(seqid))
5695 goto out;
5696 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
5697 status = PTR_ERR(task);
5698 if (IS_ERR(task))
5699 goto out;
5700 status = nfs4_wait_for_completion_rpc_task(task);
5701 rpc_put_task(task);
5702 out:
5703 request->fl_flags = fl_flags;
5704 trace_nfs4_unlock(request, state, F_SETLK, status);
5705 return status;
5706 }
5707
5708 struct nfs4_lockdata {
5709 struct nfs_lock_args arg;
5710 struct nfs_lock_res res;
5711 struct nfs4_lock_state *lsp;
5712 struct nfs_open_context *ctx;
5713 struct file_lock fl;
5714 unsigned long timestamp;
5715 int rpc_status;
5716 int cancelled;
5717 struct nfs_server *server;
5718 };
5719
5720 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
5721 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
5722 gfp_t gfp_mask)
5723 {
5724 struct nfs4_lockdata *p;
5725 struct inode *inode = lsp->ls_state->inode;
5726 struct nfs_server *server = NFS_SERVER(inode);
5727 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
5728
5729 p = kzalloc(sizeof(*p), gfp_mask);
5730 if (p == NULL)
5731 return NULL;
5732
5733 p->arg.fh = NFS_FH(inode);
5734 p->arg.fl = &p->fl;
5735 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
5736 if (IS_ERR(p->arg.open_seqid))
5737 goto out_free;
5738 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
5739 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
5740 if (IS_ERR(p->arg.lock_seqid))
5741 goto out_free_seqid;
5742 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
5743 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
5744 p->arg.lock_owner.s_dev = server->s_dev;
5745 p->res.lock_seqid = p->arg.lock_seqid;
5746 p->lsp = lsp;
5747 p->server = server;
5748 atomic_inc(&lsp->ls_count);
5749 p->ctx = get_nfs_open_context(ctx);
5750 get_file(fl->fl_file);
5751 memcpy(&p->fl, fl, sizeof(p->fl));
5752 return p;
5753 out_free_seqid:
5754 nfs_free_seqid(p->arg.open_seqid);
5755 out_free:
5756 kfree(p);
5757 return NULL;
5758 }
5759
5760 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
5761 {
5762 struct nfs4_lockdata *data = calldata;
5763 struct nfs4_state *state = data->lsp->ls_state;
5764
5765 dprintk("%s: begin!\n", __func__);
5766 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
5767 goto out_wait;
5768 /* Do we need to do an open_to_lock_owner? */
5769 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
5770 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
5771 goto out_release_lock_seqid;
5772 }
5773 nfs4_stateid_copy(&data->arg.open_stateid,
5774 &state->open_stateid);
5775 data->arg.new_lock_owner = 1;
5776 data->res.open_seqid = data->arg.open_seqid;
5777 } else {
5778 data->arg.new_lock_owner = 0;
5779 nfs4_stateid_copy(&data->arg.lock_stateid,
5780 &data->lsp->ls_stateid);
5781 }
5782 if (!nfs4_valid_open_stateid(state)) {
5783 data->rpc_status = -EBADF;
5784 task->tk_action = NULL;
5785 goto out_release_open_seqid;
5786 }
5787 data->timestamp = jiffies;
5788 if (nfs4_setup_sequence(data->server,
5789 &data->arg.seq_args,
5790 &data->res.seq_res,
5791 task) == 0)
5792 return;
5793 out_release_open_seqid:
5794 nfs_release_seqid(data->arg.open_seqid);
5795 out_release_lock_seqid:
5796 nfs_release_seqid(data->arg.lock_seqid);
5797 out_wait:
5798 nfs4_sequence_done(task, &data->res.seq_res);
5799 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
5800 }
5801
5802 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
5803 {
5804 struct nfs4_lockdata *data = calldata;
5805 struct nfs4_lock_state *lsp = data->lsp;
5806
5807 dprintk("%s: begin!\n", __func__);
5808
5809 if (!nfs4_sequence_done(task, &data->res.seq_res))
5810 return;
5811
5812 data->rpc_status = task->tk_status;
5813 switch (task->tk_status) {
5814 case 0:
5815 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
5816 data->timestamp);
5817 if (data->arg.new_lock) {
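			/* Apply the granted lock locally via the VFS without sleeping */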
5818 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
5819 if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) {
5820 rpc_restart_call_prepare(task);
5821 break;
5822 }
5823 }
5824 if (data->arg.new_lock_owner != 0) {
5825 nfs_confirm_seqid(&lsp->ls_seqid, 0);
5826 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
5827 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
5828 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
5829 rpc_restart_call_prepare(task);
5830 break;
5831 case -NFS4ERR_BAD_STATEID:
5832 case -NFS4ERR_OLD_STATEID:
5833 case -NFS4ERR_STALE_STATEID:
5834 case -NFS4ERR_EXPIRED:
5835 if (data->arg.new_lock_owner != 0) {
5836 if (!nfs4_stateid_match(&data->arg.open_stateid,
5837 &lsp->ls_state->open_stateid))
5838 rpc_restart_call_prepare(task);
5839 } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
5840 &lsp->ls_stateid))
5841 rpc_restart_call_prepare(task);
5842 }
5843 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
5844 }
5845
5846 static void nfs4_lock_release(void *calldata)
5847 {
5848 struct nfs4_lockdata *data = calldata;
5849
5850 dprintk("%s: begin!\n", __func__);
5851 nfs_free_seqid(data->arg.open_seqid);
5852 if (data->cancelled != 0) {
5853 struct rpc_task *task;
5854 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
5855 data->arg.lock_seqid);
5856 if (!IS_ERR(task))
5857 rpc_put_task_async(task);
5858 dprintk("%s: cancelling lock!\n", __func__);
5859 } else
5860 nfs_free_seqid(data->arg.lock_seqid);
5861 nfs4_put_lock_state(data->lsp);
5862 put_nfs_open_context(data->ctx);
5863 fput(data->fl.fl_file);
5864 kfree(data);
5865 dprintk("%s: done!\n", __func__);
5866 }
5867
5868 static const struct rpc_call_ops nfs4_lock_ops = {
5869 .rpc_call_prepare = nfs4_lock_prepare,
5870 .rpc_call_done = nfs4_lock_done,
5871 .rpc_release = nfs4_lock_release,
5872 };
5873
5874 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
5875 {
5876 switch (error) {
5877 case -NFS4ERR_ADMIN_REVOKED:
5878 case -NFS4ERR_BAD_STATEID:
5879 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
5880 if (new_lock_owner != 0 ||
5881 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
5882 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
5883 break;
5884 case -NFS4ERR_STALE_STATEID:
5885 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
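		/* Fall through */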
5886 case -NFS4ERR_EXPIRED:
5887 nfs4_schedule_lease_recovery(server->nfs_client);
5888 	}
5889 }
5890
5891 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
5892 {
5893 struct nfs4_lockdata *data;
5894 struct rpc_task *task;
5895 struct rpc_message msg = {
5896 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
5897 .rpc_cred = state->owner->so_cred,
5898 };
5899 struct rpc_task_setup task_setup_data = {
5900 .rpc_client = NFS_CLIENT(state->inode),
5901 .rpc_message = &msg,
5902 .callback_ops = &nfs4_lock_ops,
5903 .workqueue = nfsiod_workqueue,
5904 .flags = RPC_TASK_ASYNC,
5905 };
5906 int ret;
5907
5908 dprintk("%s: begin!\n", __func__);
5909 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
5910 fl->fl_u.nfs4_fl.owner,
5911 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
5912 if (data == NULL)
5913 return -ENOMEM;
5914 if (IS_SETLKW(cmd))
5915 data->arg.block = 1;
5916 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
5917 msg.rpc_argp = &data->arg;
5918 msg.rpc_resp = &data->res;
5919 task_setup_data.callback_data = data;
5920 if (recovery_type > NFS_LOCK_NEW) {
5921 if (recovery_type == NFS_LOCK_RECLAIM)
5922 data->arg.reclaim = NFS_LOCK_RECLAIM;
5923 nfs4_set_sequence_privileged(&data->arg.seq_args);
5924 } else
5925 data->arg.new_lock = 1;
5926 task = rpc_run_task(&task_setup_data);
5927 if (IS_ERR(task))
5928 return PTR_ERR(task);
5929 ret = nfs4_wait_for_completion_rpc_task(task);
5930 if (ret == 0) {
5931 ret = data->rpc_status;
5932 if (ret)
5933 nfs4_handle_setlk_error(data->server, data->lsp,
5934 data->arg.new_lock_owner, ret);
5935 } else
5936 data->cancelled = 1;
5937 	trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
5938 	rpc_put_task(task);
5939 	dprintk("%s: done, ret = %d!\n", __func__, ret);
5940 return ret;
5941 }
5942
5943 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
5944 {
5945 struct nfs_server *server = NFS_SERVER(state->inode);
5946 struct nfs4_exception exception = {
5947 .inode = state->inode,
5948 };
5949 int err;
5950
5951 do {
5952 /* Cache the lock if possible... */
5953 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
5954 return 0;
5955 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
5956 if (err != -NFS4ERR_DELAY)
5957 break;
5958 nfs4_handle_exception(server, err, &exception);
5959 } while (exception.retry);
5960 return err;
5961 }
5962
5963 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
5964 {
5965 struct nfs_server *server = NFS_SERVER(state->inode);
5966 struct nfs4_exception exception = {
5967 .inode = state->inode,
5968 };
5969 int err;
5970
5971 err = nfs4_set_lock_state(state, request);
5972 if (err != 0)
5973 return err;
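	/* Unless lock recovery is enabled, simply mark the lock as lost */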
5974 if (!recover_lost_locks) {
5975 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
5976 return 0;
5977 }
5978 do {
5979 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
5980 return 0;
5981 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
5982 switch (err) {
5983 default:
5984 goto out;
5985 case -NFS4ERR_GRACE:
5986 case -NFS4ERR_DELAY:
5987 nfs4_handle_exception(server, err, &exception);
5988 err = 0;
5989 }
5990 } while (exception.retry);
5991 out:
5992 return err;
5993 }
5994
5995 #if defined(CONFIG_NFS_V4_1)
5996 /**
5997 * nfs41_check_expired_locks - possibly free a lock stateid
5998 *
5999 * @state: NFSv4 state for an inode
6000 *
6001 * Returns NFS_OK if recovery for this stateid is now finished.
6002 * Otherwise a negative NFS4ERR value is returned.
6003 */
6004 static int nfs41_check_expired_locks(struct nfs4_state *state)
6005 {
6006 int status, ret = -NFS4ERR_BAD_STATEID;
6007 struct nfs4_lock_state *lsp;
6008 struct nfs_server *server = NFS_SERVER(state->inode);
6009
6010 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
6011 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
6012 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
6013
6014 status = nfs41_test_stateid(server,
6015 &lsp->ls_stateid,
6016 cred);
6017 trace_nfs4_test_lock_stateid(state, lsp, status);
6018 if (status != NFS_OK) {
6019 /* Free the stateid unless the server
6020 * informs us the stateid is unrecognized. */
6021 if (status != -NFS4ERR_BAD_STATEID)
6022 nfs41_free_stateid(server,
6023 &lsp->ls_stateid,
6024 cred);
6025 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
6026 ret = status;
6027 }
6028 }
6029 	}
6030
6031 return ret;
6032 }
6033
6034 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
6035 {
6036 int status = NFS_OK;
6037
6038 if (test_bit(LK_STATE_IN_USE, &state->flags))
6039 status = nfs41_check_expired_locks(state);
6040 if (status != NFS_OK)
6041 status = nfs4_lock_expired(state, request);
6042 return status;
6043 }
6044 #endif
6045
6046 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6047 {
6048 struct nfs_inode *nfsi = NFS_I(state->inode);
6049 unsigned char fl_flags = request->fl_flags;
6050 int status = -ENOLCK;
6051
6052 if ((fl_flags & FL_POSIX) &&
6053 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
6054 goto out;
6055 /* Is this a delegated open? */
6056 status = nfs4_set_lock_state(state, request);
6057 if (status != 0)
6058 goto out;
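	/* Test locally for conflicting locks (FL_ACCESS) before contacting the server */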
6059 request->fl_flags |= FL_ACCESS;
6060 status = do_vfs_lock(state->inode, request);
6061 if (status < 0)
6062 goto out;
6063 down_read(&nfsi->rwsem);
6064 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
6065 /* Yes: cache locks! */
6066 /* ...but avoid races with delegation recall... */
6067 request->fl_flags = fl_flags & ~FL_SLEEP;
6068 status = do_vfs_lock(state->inode, request);
6069 up_read(&nfsi->rwsem);
6070 goto out;
6071 }
6072 up_read(&nfsi->rwsem);
6073 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
6074 out:
6075 request->fl_flags = fl_flags;
6076 return status;
6077 }
6078
6079 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6080 {
6081 struct nfs4_exception exception = {
6082 .state = state,
6083 .inode = state->inode,
6084 };
6085 int err;
6086
6087 do {
6088 err = _nfs4_proc_setlk(state, cmd, request);
6089 if (err == -NFS4ERR_DENIED)
6090 err = -EAGAIN;
6091 err = nfs4_handle_exception(NFS_SERVER(state->inode),
6092 err, &exception);
6093 } while (exception.retry);
6094 return err;
6095 }
6096
6097 static int
6098 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
6099 {
6100 struct nfs_open_context *ctx;
6101 struct nfs4_state *state;
6102 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
6103 int status;
6104
6105 /* verify open state */
6106 ctx = nfs_file_open_context(filp);
6107 state = ctx->state;
6108
6109 if (request->fl_start < 0 || request->fl_end < 0)
6110 return -EINVAL;
6111
6112 if (IS_GETLK(cmd)) {
6113 if (state != NULL)
6114 return nfs4_proc_getlk(state, F_GETLK, request);
6115 return 0;
6116 }
6117
6118 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
6119 return -EINVAL;
6120
6121 if (request->fl_type == F_UNLCK) {
6122 if (state != NULL)
6123 return nfs4_proc_unlck(state, cmd, request);
6124 return 0;
6125 }
6126
6127 if (state == NULL)
6128 return -ENOLCK;
6129 /*
6130 * Don't rely on the VFS having checked the file open mode,
6131 * since it won't do this for flock() locks.
6132 */
6133 switch (request->fl_type) {
6134 case F_RDLCK:
6135 if (!(filp->f_mode & FMODE_READ))
6136 return -EBADF;
6137 break;
6138 case F_WRLCK:
6139 if (!(filp->f_mode & FMODE_WRITE))
6140 return -EBADF;
6141 }
6142
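	/* For blocking requests, poll the server until the lock is granted or we are signalled */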
6143 do {
6144 status = nfs4_proc_setlk(state, cmd, request);
6145 if ((status != -EAGAIN) || IS_SETLK(cmd))
6146 break;
6147 timeout = nfs4_set_lock_task_retry(timeout);
6148 status = -ERESTARTSYS;
6149 if (signalled())
6150 break;
6151 } while(status < 0);
6152 return status;
6153 }
6154
6155 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
6156 {
6157 struct nfs_server *server = NFS_SERVER(state->inode);
6158 int err;
6159
6160 err = nfs4_set_lock_state(state, fl);
6161 if (err != 0)
6162 return err;
6163 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
6164 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
6165 }
6166
6167 struct nfs_release_lockowner_data {
6168 struct nfs4_lock_state *lsp;
6169 struct nfs_server *server;
6170 struct nfs_release_lockowner_args args;
6171 struct nfs_release_lockowner_res res;
6172 unsigned long timestamp;
6173 };
6174
6175 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
6176 {
6177 struct nfs_release_lockowner_data *data = calldata;
6178 struct nfs_server *server = data->server;
6179 nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
6180 &data->args.seq_args, &data->res.seq_res, task);
6181 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6182 data->timestamp = jiffies;
6183 }
6184
6185 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
6186 {
6187 struct nfs_release_lockowner_data *data = calldata;
6188 struct nfs_server *server = data->server;
6189
6190 nfs40_sequence_done(task, &data->res.seq_res);
6191
6192 switch (task->tk_status) {
6193 case 0:
6194 renew_lease(server, data->timestamp);
6195 break;
6196 case -NFS4ERR_STALE_CLIENTID:
6197 case -NFS4ERR_EXPIRED:
6198 nfs4_schedule_lease_recovery(server->nfs_client);
6199 break;
6200 case -NFS4ERR_LEASE_MOVED:
6201 case -NFS4ERR_DELAY:
6202 if (nfs4_async_handle_error(task, server,
6203 NULL, NULL) == -EAGAIN)
6204 rpc_restart_call_prepare(task);
6205 }
6206 }
6207
6208 static void nfs4_release_lockowner_release(void *calldata)
6209 {
6210 struct nfs_release_lockowner_data *data = calldata;
6211 nfs4_free_lock_state(data->server, data->lsp);
6212 kfree(calldata);
6213 }
6214
6215 static const struct rpc_call_ops nfs4_release_lockowner_ops = {
6216 .rpc_call_prepare = nfs4_release_lockowner_prepare,
6217 .rpc_call_done = nfs4_release_lockowner_done,
6218 .rpc_release = nfs4_release_lockowner_release,
6219 };
6220
6221 static void
6222 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
6223 {
6224 struct nfs_release_lockowner_data *data;
6225 struct rpc_message msg = {
6226 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
6227 };
6228
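	/* RELEASE_LOCKOWNER is an NFSv4.0-only operation */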
6229 if (server->nfs_client->cl_mvops->minor_version != 0)
6230 return;
6231
6232 data = kmalloc(sizeof(*data), GFP_NOFS);
6233 if (!data)
6234 return;
6235 data->lsp = lsp;
6236 data->server = server;
6237 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6238 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
6239 data->args.lock_owner.s_dev = server->s_dev;
6240
6241 msg.rpc_argp = &data->args;
6242 msg.rpc_resp = &data->res;
6243 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
6244 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
6245 }
6246
6247 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
6248
6249 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
6250 struct dentry *dentry, const char *key,
6251 const void *buf, size_t buflen,
6252 int flags)
6253 {
6254 if (strcmp(key, "") != 0)
6255 return -EINVAL;
6256
6257 return nfs4_proc_set_acl(d_inode(dentry), buf, buflen);
6258 }
6259
6260 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
6261 struct dentry *dentry, const char *key,
6262 void *buf, size_t buflen)
6263 {
6264 if (strcmp(key, "") != 0)
6265 return -EINVAL;
6266
6267 return nfs4_proc_get_acl(d_inode(dentry), buf, buflen);
6268 }
6269
6270 static size_t nfs4_xattr_list_nfs4_acl(const struct xattr_handler *handler,
6271 struct dentry *dentry, char *list,
6272 size_t list_len, const char *name,
6273 size_t name_len)
6274 {
6275 size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
6276
6277 if (!nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry))))
6278 return 0;
6279
6280 if (list && len <= list_len)
6281 memcpy(list, XATTR_NAME_NFSV4_ACL, len);
6282 return len;
6283 }
6284
6285 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
6286 static inline int nfs4_server_supports_labels(struct nfs_server *server)
6287 {
6288 return server->caps & NFS_CAP_SECURITY_LABEL;
6289 }
6290
6291 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
6292 struct dentry *dentry, const char *key,
6293 const void *buf, size_t buflen,
6294 int flags)
6295 {
6296 if (security_ismaclabel(key))
6297 return nfs4_set_security_label(dentry, buf, buflen);
6298
6299 return -EOPNOTSUPP;
6300 }
6301
6302 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler,
6303 struct dentry *dentry, const char *key,
6304 void *buf, size_t buflen)
6305 {
6306 if (security_ismaclabel(key))
6307 return nfs4_get_security_label(d_inode(dentry), buf, buflen);
6308 return -EOPNOTSUPP;
6309 }
6310
6311 static size_t nfs4_xattr_list_nfs4_label(const struct xattr_handler *handler,
6312 struct dentry *dentry, char *list,
6313 size_t list_len, const char *name,
6314 size_t name_len)
6315 {
6316 size_t len = 0;
6317
6318 if (nfs_server_capable(d_inode(dentry), NFS_CAP_SECURITY_LABEL)) {
6319 len = security_inode_listsecurity(d_inode(dentry), NULL, 0);
6320 if (list && len <= list_len)
6321 security_inode_listsecurity(d_inode(dentry), list, len);
6322 }
6323 return len;
6324 }
6325
6326 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = {
6327 .prefix = XATTR_SECURITY_PREFIX,
6328 .list = nfs4_xattr_list_nfs4_label,
6329 .get = nfs4_xattr_get_nfs4_label,
6330 .set = nfs4_xattr_set_nfs4_label,
6331 };
6332 #endif
6333
6334
6335 /*
6336 * nfs_fhget will use either the mounted_on_fileid or the fileid
6337 */
6338 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
6339 {
6340 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
6341 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
6342 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
6343 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
6344 return;
6345
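	/* Fabricate directory attributes for the absent (referral) filesystem */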
6346 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
6347 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
6348 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
6349 fattr->nlink = 2;
6350 }
6351
6352 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6353 const struct qstr *name,
6354 struct nfs4_fs_locations *fs_locations,
6355 struct page *page)
6356 {
6357 struct nfs_server *server = NFS_SERVER(dir);
6358 u32 bitmask[3] = {
6359 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6360 };
6361 struct nfs4_fs_locations_arg args = {
6362 .dir_fh = NFS_FH(dir),
6363 .name = name,
6364 .page = page,
6365 .bitmask = bitmask,
6366 };
6367 struct nfs4_fs_locations_res res = {
6368 .fs_locations = fs_locations,
6369 };
6370 struct rpc_message msg = {
6371 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6372 .rpc_argp = &args,
6373 .rpc_resp = &res,
6374 };
6375 int status;
6376
6377 dprintk("%s: start\n", __func__);
6378
6379 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
6380 * is not supported */
6381 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
6382 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
6383 else
6384 bitmask[0] |= FATTR4_WORD0_FILEID;
6385
6386 nfs_fattr_init(&fs_locations->fattr);
6387 fs_locations->server = server;
6388 fs_locations->nlocations = 0;
6389 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
6390 dprintk("%s: returned status = %d\n", __func__, status);
6391 return status;
6392 }
6393
6394 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6395 const struct qstr *name,
6396 struct nfs4_fs_locations *fs_locations,
6397 struct page *page)
6398 {
6399 struct nfs4_exception exception = { };
6400 int err;
6401 do {
6402 err = _nfs4_proc_fs_locations(client, dir, name,
6403 fs_locations, page);
6404 trace_nfs4_get_fs_locations(dir, name, err);
6405 err = nfs4_handle_exception(NFS_SERVER(dir), err,
6406 &exception);
6407 } while (exception.retry);
6408 return err;
6409 }
6410
6411 /*
6412 * This operation also signals the server that this client is
6413 * performing migration recovery. The server can stop returning
6414 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is
6415 * appended to this compound to identify the client ID which is
6416 * performing recovery.
6417 */
6418 static int _nfs40_proc_get_locations(struct inode *inode,
6419 struct nfs4_fs_locations *locations,
6420 struct page *page, struct rpc_cred *cred)
6421 {
6422 struct nfs_server *server = NFS_SERVER(inode);
6423 struct rpc_clnt *clnt = server->client;
6424 u32 bitmask[2] = {
6425 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6426 };
6427 struct nfs4_fs_locations_arg args = {
6428 .clientid = server->nfs_client->cl_clientid,
6429 .fh = NFS_FH(inode),
6430 .page = page,
6431 .bitmask = bitmask,
6432 .migration = 1, /* skip LOOKUP */
6433 .renew = 1, /* append RENEW */
6434 };
6435 struct nfs4_fs_locations_res res = {
6436 .fs_locations = locations,
6437 .migration = 1,
6438 .renew = 1,
6439 };
6440 struct rpc_message msg = {
6441 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6442 .rpc_argp = &args,
6443 .rpc_resp = &res,
6444 .rpc_cred = cred,
6445 };
6446 unsigned long now = jiffies;
6447 int status;
6448
6449 nfs_fattr_init(&locations->fattr);
6450 locations->server = server;
6451 locations->nlocations = 0;
6452
6453 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6454 nfs4_set_sequence_privileged(&args.seq_args);
6455 status = nfs4_call_sync_sequence(clnt, server, &msg,
6456 &args.seq_args, &res.seq_res);
6457 if (status)
6458 return status;
6459
6460 renew_lease(server, now);
6461 return 0;
6462 }
6463
6464 #ifdef CONFIG_NFS_V4_1
6465
6466 /*
6467 * This operation also signals the server that this client is
6468 * performing migration recovery. The server can stop asserting
6469 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID
6470 * performing this operation is identified in the SEQUENCE
6471 * operation in this compound.
6472 *
6473 * When the client supports GETATTR(fs_locations_info), it can
6474 * be plumbed in here.
6475 */
6476 static int _nfs41_proc_get_locations(struct inode *inode,
6477 struct nfs4_fs_locations *locations,
6478 struct page *page, struct rpc_cred *cred)
6479 {
6480 struct nfs_server *server = NFS_SERVER(inode);
6481 struct rpc_clnt *clnt = server->client;
6482 u32 bitmask[2] = {
6483 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6484 };
6485 struct nfs4_fs_locations_arg args = {
6486 .fh = NFS_FH(inode),
6487 .page = page,
6488 .bitmask = bitmask,
6489 .migration = 1, /* skip LOOKUP */
6490 };
6491 struct nfs4_fs_locations_res res = {
6492 .fs_locations = locations,
6493 .migration = 1,
6494 };
6495 struct rpc_message msg = {
6496 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6497 .rpc_argp = &args,
6498 .rpc_resp = &res,
6499 .rpc_cred = cred,
6500 };
6501 int status;
6502
6503 nfs_fattr_init(&locations->fattr);
6504 locations->server = server;
6505 locations->nlocations = 0;
6506
6507 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6508 nfs4_set_sequence_privileged(&args.seq_args);
6509 status = nfs4_call_sync_sequence(clnt, server, &msg,
6510 &args.seq_args, &res.seq_res);
6511 if (status == NFS4_OK &&
6512 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
6513 status = -NFS4ERR_LEASE_MOVED;
6514 return status;
6515 }
6516
6517 #endif /* CONFIG_NFS_V4_1 */
6518
6519 /**
6520 * nfs4_proc_get_locations - discover locations for a migrated FSID
6521 * @inode: inode on FSID that is migrating
6522 * @locations: result of query
6523 * @page: buffer
6524 * @cred: credential to use for this operation
6525 *
6526 * Returns NFS4_OK on success, a negative NFS4ERR status code if the
6527 * operation failed, or a negative errno if a local error occurred.
6528 *
6529 * On success, "locations" is filled in, but if the server has
6530 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
6531 * asserted.
6532 *
6533 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases
6534 * from this client that require migration recovery.
6535 */
6536 int nfs4_proc_get_locations(struct inode *inode,
6537 struct nfs4_fs_locations *locations,
6538 struct page *page, struct rpc_cred *cred)
6539 {
6540 struct nfs_server *server = NFS_SERVER(inode);
6541 struct nfs_client *clp = server->nfs_client;
6542 const struct nfs4_mig_recovery_ops *ops =
6543 clp->cl_mvops->mig_recovery_ops;
6544 struct nfs4_exception exception = { };
6545 int status;
6546
6547 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
6548 (unsigned long long)server->fsid.major,
6549 (unsigned long long)server->fsid.minor,
6550 clp->cl_hostname);
6551 nfs_display_fhandle(NFS_FH(inode), __func__);
6552
6553 do {
6554 status = ops->get_locations(inode, locations, page, cred);
6555 if (status != -NFS4ERR_DELAY)
6556 break;
6557 nfs4_handle_exception(server, status, &exception);
6558 } while (exception.retry);
6559 return status;
6560 }
6561
6562 /*
6563 * This operation also signals the server that this client is
6564 * performing "lease moved" recovery. The server can stop
6565 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation
6566 * is appended to this compound to identify the client ID which is
6567 * performing recovery.
6568 */
6569 static int _nfs40_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6570 {
6571 struct nfs_server *server = NFS_SERVER(inode);
6572 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
6573 struct rpc_clnt *clnt = server->client;
6574 struct nfs4_fsid_present_arg args = {
6575 .fh = NFS_FH(inode),
6576 .clientid = clp->cl_clientid,
6577 .renew = 1, /* append RENEW */
6578 };
6579 struct nfs4_fsid_present_res res = {
6580 .renew = 1,
6581 };
6582 struct rpc_message msg = {
6583 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
6584 .rpc_argp = &args,
6585 .rpc_resp = &res,
6586 .rpc_cred = cred,
6587 };
6588 unsigned long now = jiffies;
6589 int status;
6590
6591 res.fh = nfs_alloc_fhandle();
6592 if (res.fh == NULL)
6593 return -ENOMEM;
6594
6595 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6596 nfs4_set_sequence_privileged(&args.seq_args);
6597 status = nfs4_call_sync_sequence(clnt, server, &msg,
6598 &args.seq_args, &res.seq_res);
6599 nfs_free_fhandle(res.fh);
6600 if (status)
6601 return status;
6602
6603 do_renew_lease(clp, now);
6604 return 0;
6605 }
6606
6607 #ifdef CONFIG_NFS_V4_1
6608
6609 /*
6610 * This operation also signals the server that this client is
6611 * performing "lease moved" recovery. The server can stop asserting
6612 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing
6613 * this operation is identified in the SEQUENCE operation in this
6614 * compound.
6615 */
6616 static int _nfs41_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6617 {
6618 struct nfs_server *server = NFS_SERVER(inode);
6619 struct rpc_clnt *clnt = server->client;
6620 struct nfs4_fsid_present_arg args = {
6621 .fh = NFS_FH(inode),
6622 };
6623 struct nfs4_fsid_present_res res = {
6624 };
6625 struct rpc_message msg = {
6626 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
6627 .rpc_argp = &args,
6628 .rpc_resp = &res,
6629 .rpc_cred = cred,
6630 };
6631 int status;
6632
6633 res.fh = nfs_alloc_fhandle();
6634 if (res.fh == NULL)
6635 return -ENOMEM;
6636
6637 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6638 nfs4_set_sequence_privileged(&args.seq_args);
6639 status = nfs4_call_sync_sequence(clnt, server, &msg,
6640 &args.seq_args, &res.seq_res);
6641 nfs_free_fhandle(res.fh);
6642 if (status == NFS4_OK &&
6643 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
6644 status = -NFS4ERR_LEASE_MOVED;
6645 return status;
6646 }
6647
6648 #endif /* CONFIG_NFS_V4_1 */
6649
6650 /**
6651 * nfs4_proc_fsid_present - Is this FSID present or absent on server?
6652 * @inode: inode on FSID to check
6653 * @cred: credential to use for this operation
6654 *
6655 * Server indicates whether the FSID is present, moved, or not
6656 * recognized. This operation is necessary to clear a LEASE_MOVED
6657 * condition for this client ID.
6658 *
6659 * Returns NFS4_OK if the FSID is present on this server,
6660 * -NFS4ERR_MOVED if the FSID is no longer present, a negative
6661 * NFS4ERR code if some error occurred on the server, or a
6662 * negative errno if a local failure occurred.
6663 */
6664 int nfs4_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6665 {
6666 struct nfs_server *server = NFS_SERVER(inode);
6667 struct nfs_client *clp = server->nfs_client;
6668 const struct nfs4_mig_recovery_ops *ops =
6669 clp->cl_mvops->mig_recovery_ops;
6670 struct nfs4_exception exception = { };
6671 int status;
6672
6673 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
6674 (unsigned long long)server->fsid.major,
6675 (unsigned long long)server->fsid.minor,
6676 clp->cl_hostname);
6677 nfs_display_fhandle(NFS_FH(inode), __func__);
6678
6679 do {
6680 status = ops->fsid_present(inode, cred);
6681 if (status != -NFS4ERR_DELAY)
6682 break;
6683 nfs4_handle_exception(server, status, &exception);
6684 } while (exception.retry);
6685 return status;
6686 }
6687
6688 /**
6689  * If 'use_integrity' is true and the state management nfs_client
6690 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
6691 * and the machine credential as per RFC3530bis and RFC5661 Security
6692 * Considerations sections. Otherwise, just use the user cred with the
6693 * filesystem's rpc_client.
6694 */
6695 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
6696 {
6697 int status;
6698 struct nfs4_secinfo_arg args = {
6699 .dir_fh = NFS_FH(dir),
6700 .name = name,
6701 };
6702 struct nfs4_secinfo_res res = {
6703 .flavors = flavors,
6704 };
6705 struct rpc_message msg = {
6706 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
6707 .rpc_argp = &args,
6708 .rpc_resp = &res,
6709 };
6710 struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
6711 struct rpc_cred *cred = NULL;
6712
6713 if (use_integrity) {
6714 clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient;
6715 cred = nfs4_get_clid_cred(NFS_SERVER(dir)->nfs_client);
6716 msg.rpc_cred = cred;
6717 }
6718
6719 dprintk("NFS call secinfo %s\n", name->name);
6720
6721 nfs4_state_protect(NFS_SERVER(dir)->nfs_client,
6722 NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
6723
6724 status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args,
6725 &res.seq_res, 0);
6726 dprintk("NFS reply secinfo: %d\n", status);
6727
6728 if (cred)
6729 put_rpccred(cred);
6730
6731 return status;
6732 }
6733
6734 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
6735 struct nfs4_secinfo_flavors *flavors)
6736 {
6737 struct nfs4_exception exception = { };
6738 int err;
6739 do {
6740 err = -NFS4ERR_WRONGSEC;
6741
6742 /* try to use integrity protection with machine cred */
6743 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client))
6744 err = _nfs4_proc_secinfo(dir, name, flavors, true);
6745
6746 /*
6747 		 * If we are unable to use integrity protection, or if SECINFO
6748 		 * with integrity protection returns NFS4ERR_WRONGSEC (which is
6749 		 * disallowed by the spec, but seen from deployed servers), use
6750 		 * the current filesystem's rpc_client and the user cred.
6751 */
6752 if (err == -NFS4ERR_WRONGSEC)
6753 err = _nfs4_proc_secinfo(dir, name, flavors, false);
6754
6755 trace_nfs4_secinfo(dir, name, err);
6756 err = nfs4_handle_exception(NFS_SERVER(dir), err,
6757 &exception);
6758 } while (exception.retry);
6759 return err;
6760 }
6761
6762 #ifdef CONFIG_NFS_V4_1
6763 /*
6764  * Check the exchange flags returned by the server: fail if any invalid flags
6765  * are set, if both the PNFS and NON_PNFS flags are set, or if none of the
6766  * NON_PNFS, PNFS, or DS flags is set.
6767 */
6768 static int nfs4_check_cl_exchange_flags(u32 flags)
6769 {
6770 if (flags & ~EXCHGID4_FLAG_MASK_R)
6771 goto out_inval;
6772 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
6773 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
6774 goto out_inval;
6775 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
6776 goto out_inval;
6777 return NFS_OK;
6778 out_inval:
6779 return -NFS4ERR_INVAL;
6780 }
6781
6782 static bool
6783 nfs41_same_server_scope(struct nfs41_server_scope *a,
6784 struct nfs41_server_scope *b)
6785 {
6786 if (a->server_scope_sz == b->server_scope_sz &&
6787 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
6788 return true;
6789
6790 return false;
6791 }
6792
6793 /*
6794 * nfs4_proc_bind_conn_to_session()
6795 *
6796 * The 4.1 client currently uses the same TCP connection for the
6797 * fore and backchannel.
6798 */
6799 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
6800 {
6801 int status;
6802 struct nfs41_bind_conn_to_session_args args = {
6803 .client = clp,
6804 .dir = NFS4_CDFC4_FORE_OR_BOTH,
6805 };
6806 struct nfs41_bind_conn_to_session_res res;
6807 struct rpc_message msg = {
6808 .rpc_proc =
6809 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
6810 .rpc_argp = &args,
6811 .rpc_resp = &res,
6812 .rpc_cred = cred,
6813 };
6814
6815 dprintk("--> %s\n", __func__);
6816
6817 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
6818 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
6819 args.dir = NFS4_CDFC4_FORE;
6820
6821 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
6822 trace_nfs4_bind_conn_to_session(clp, status);
6823 if (status == 0) {
6824 if (memcmp(res.sessionid.data,
6825 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
6826 dprintk("NFS: %s: Session ID mismatch\n", __func__);
6827 status = -EIO;
6828 goto out;
6829 }
6830 if ((res.dir & args.dir) != res.dir || res.dir == 0) {
6831 dprintk("NFS: %s: Unexpected direction from server\n",
6832 __func__);
6833 status = -EIO;
6834 goto out;
6835 }
6836 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
6837 dprintk("NFS: %s: Server returned RDMA mode = true\n",
6838 __func__);
6839 status = -EIO;
6840 goto out;
6841 }
6842 }
6843 out:
6844 dprintk("<-- %s status= %d\n", __func__, status);
6845 return status;
6846 }
6847
6848 /*
6849  * Minimum set of SP4_MACH_CRED operations (from RFC 5661) in the enforce map,
6850  * plus operations we'd like to see in the allow map to enable certain features.
6851 */
6852 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = {
6853 .how = SP4_MACH_CRED,
6854 .enforce.u.words = {
6855 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
6856 1 << (OP_EXCHANGE_ID - 32) |
6857 1 << (OP_CREATE_SESSION - 32) |
6858 1 << (OP_DESTROY_SESSION - 32) |
6859 1 << (OP_DESTROY_CLIENTID - 32)
6860 },
6861 .allow.u.words = {
6862 [0] = 1 << (OP_CLOSE) |
6863 1 << (OP_LOCKU) |
6864 1 << (OP_COMMIT),
6865 [1] = 1 << (OP_SECINFO - 32) |
6866 1 << (OP_SECINFO_NO_NAME - 32) |
6867 1 << (OP_TEST_STATEID - 32) |
6868 1 << (OP_FREE_STATEID - 32) |
6869 1 << (OP_WRITE - 32)
6870 }
6871 };
6872
6873 /*
6874 * Select the state protection mode for client `clp' given the server results
6875 * from exchange_id in `sp'.
6876 *
6877 * Returns 0 on success, negative errno otherwise.
6878 */
6879 static int nfs4_sp4_select_mode(struct nfs_client *clp,
6880 struct nfs41_state_protection *sp)
6881 {
6882 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = {
6883 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
6884 1 << (OP_EXCHANGE_ID - 32) |
6885 1 << (OP_CREATE_SESSION - 32) |
6886 1 << (OP_DESTROY_SESSION - 32) |
6887 1 << (OP_DESTROY_CLIENTID - 32)
6888 };
6889 unsigned int i;
6890
6891 if (sp->how == SP4_MACH_CRED) {
6892 /* Print state protect result */
6893 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n");
6894 for (i = 0; i <= LAST_NFS4_OP; i++) {
6895 if (test_bit(i, sp->enforce.u.longs))
6896 dfprintk(MOUNT, " enforce op %d\n", i);
6897 if (test_bit(i, sp->allow.u.longs))
6898 dfprintk(MOUNT, " allow op %d\n", i);
6899 }
6900
6901 		/* make sure nothing is on the enforce list that isn't supported */
6902 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) {
6903 if (sp->enforce.u.words[i] & ~supported_enforce[i]) {
6904 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
6905 return -EINVAL;
6906 }
6907 }
6908
6909 /*
6910 * Minimal mode - state operations are allowed to use machine
6911 * credential. Note this already happens by default, so the
6912 * client doesn't have to do anything more than the negotiation.
6913 *
6914 * NOTE: we don't care if EXCHANGE_ID is in the list -
6915 * we're already using the machine cred for exchange_id
6916 * and will never use a different cred.
6917 */
6918 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) &&
6919 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) &&
6920 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) &&
6921 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) {
6922 dfprintk(MOUNT, "sp4_mach_cred:\n");
6923 dfprintk(MOUNT, " minimal mode enabled\n");
6924 set_bit(NFS_SP4_MACH_CRED_MINIMAL, &clp->cl_sp4_flags);
6925 } else {
6926 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
6927 return -EINVAL;
6928 }
6929
6930 if (test_bit(OP_CLOSE, sp->allow.u.longs) &&
6931 test_bit(OP_LOCKU, sp->allow.u.longs)) {
6932 dfprintk(MOUNT, " cleanup mode enabled\n");
6933 set_bit(NFS_SP4_MACH_CRED_CLEANUP, &clp->cl_sp4_flags);
6934 }
6935
6936 if (test_bit(OP_SECINFO, sp->allow.u.longs) &&
6937 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) {
6938 dfprintk(MOUNT, " secinfo mode enabled\n");
6939 set_bit(NFS_SP4_MACH_CRED_SECINFO, &clp->cl_sp4_flags);
6940 }
6941
6942 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) &&
6943 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) {
6944 dfprintk(MOUNT, " stateid mode enabled\n");
6945 set_bit(NFS_SP4_MACH_CRED_STATEID, &clp->cl_sp4_flags);
6946 }
6947
6948 if (test_bit(OP_WRITE, sp->allow.u.longs)) {
6949 dfprintk(MOUNT, " write mode enabled\n");
6950 set_bit(NFS_SP4_MACH_CRED_WRITE, &clp->cl_sp4_flags);
6951 }
6952
6953 if (test_bit(OP_COMMIT, sp->allow.u.longs)) {
6954 dfprintk(MOUNT, " commit mode enabled\n");
6955 set_bit(NFS_SP4_MACH_CRED_COMMIT, &clp->cl_sp4_flags);
6956 }
6957 }
6958
6959 return 0;
6960 }
6961
6962 /*
6963 * _nfs4_proc_exchange_id()
6964 *
6965 * Wrapper for EXCHANGE_ID operation.
6966 */
6967 static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
6968 u32 sp4_how)
6969 {
6970 nfs4_verifier verifier;
6971 struct nfs41_exchange_id_args args = {
6972 .verifier = &verifier,
6973 .client = clp,
6974 #ifdef CONFIG_NFS_V4_1_MIGRATION
6975 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
6976 EXCHGID4_FLAG_BIND_PRINC_STATEID |
6977 EXCHGID4_FLAG_SUPP_MOVED_MIGR,
6978 #else
6979 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
6980 EXCHGID4_FLAG_BIND_PRINC_STATEID,
6981 #endif
6982 };
6983 struct nfs41_exchange_id_res res = {
6984 0
6985 };
6986 int status;
6987 struct rpc_message msg = {
6988 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
6989 .rpc_argp = &args,
6990 .rpc_resp = &res,
6991 .rpc_cred = cred,
6992 };
6993
6994 nfs4_init_boot_verifier(clp, &verifier);
6995
6996 status = nfs4_init_uniform_client_string(clp);
6997 if (status)
6998 goto out;
6999
7000 dprintk("NFS call exchange_id auth=%s, '%s'\n",
7001 clp->cl_rpcclient->cl_auth->au_ops->au_name,
7002 clp->cl_owner_id);
7003
7004 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
7005 GFP_NOFS);
7006 if (unlikely(res.server_owner == NULL)) {
7007 status = -ENOMEM;
7008 goto out;
7009 }
7010
7011 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
7012 GFP_NOFS);
7013 if (unlikely(res.server_scope == NULL)) {
7014 status = -ENOMEM;
7015 goto out_server_owner;
7016 }
7017
7018 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
7019 if (unlikely(res.impl_id == NULL)) {
7020 status = -ENOMEM;
7021 goto out_server_scope;
7022 }
7023
7024 switch (sp4_how) {
7025 case SP4_NONE:
7026 args.state_protect.how = SP4_NONE;
7027 break;
7028
7029 case SP4_MACH_CRED:
7030 args.state_protect = nfs4_sp4_mach_cred_request;
7031 break;
7032
7033 default:
7034 /* unsupported! */
7035 WARN_ON_ONCE(1);
7036 status = -EINVAL;
7037 goto out_impl_id;
7038 }
7039
7040 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7041 trace_nfs4_exchange_id(clp, status);
7042 if (status == 0)
7043 status = nfs4_check_cl_exchange_flags(res.flags);
7044
7045 if (status == 0)
7046 status = nfs4_sp4_select_mode(clp, &res.state_protect);
7047
7048 if (status == 0) {
7049 clp->cl_clientid = res.clientid;
7050 clp->cl_exchange_flags = res.flags;
7051 /* Client ID is not confirmed */
7052 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) {
7053 clear_bit(NFS4_SESSION_ESTABLISHED,
7054 &clp->cl_session->session_state);
7055 clp->cl_seqid = res.seqid;
7056 }
7057
7058 kfree(clp->cl_serverowner);
7059 clp->cl_serverowner = res.server_owner;
7060 res.server_owner = NULL;
7061
7062 /* use the most recent implementation id */
7063 kfree(clp->cl_implid);
7064 clp->cl_implid = res.impl_id;
7065 res.impl_id = NULL;
7066
7067 if (clp->cl_serverscope != NULL &&
7068 !nfs41_same_server_scope(clp->cl_serverscope,
7069 res.server_scope)) {
7070 dprintk("%s: server_scope mismatch detected\n",
7071 __func__);
7072 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
7073 kfree(clp->cl_serverscope);
7074 clp->cl_serverscope = NULL;
7075 }
7076
7077 if (clp->cl_serverscope == NULL) {
7078 clp->cl_serverscope = res.server_scope;
7079 res.server_scope = NULL;
7080 }
7081 }
7082
7083 out_impl_id:
7084 kfree(res.impl_id);
7085 out_server_scope:
7086 kfree(res.server_scope);
7087 out_server_owner:
7088 kfree(res.server_owner);
7089 out:
7090 if (clp->cl_implid != NULL)
7091 dprintk("NFS reply exchange_id: Server Implementation ID: "
7092 "domain: %s, name: %s, date: %llu,%u\n",
7093 clp->cl_implid->domain, clp->cl_implid->name,
7094 clp->cl_implid->date.seconds,
7095 clp->cl_implid->date.nseconds);
7096 dprintk("NFS reply exchange_id: %d\n", status);
7097 return status;
7098 }
7099
7100 /*
7101 * nfs4_proc_exchange_id()
7102 *
7103 * Returns zero, a negative errno, or a negative NFS4ERR status code.
7104 *
7105 * Since the clientid has expired, all compounds using sessions
7106 * associated with the stale clientid will be returning
7107 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
7108 * be in some phase of session reset.
7109 *
7110 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
7111 */
7112 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
7113 {
7114 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
7115 int status;
7116
7117 /* try SP4_MACH_CRED if krb5i/p */
7118 if (authflavor == RPC_AUTH_GSS_KRB5I ||
7119 authflavor == RPC_AUTH_GSS_KRB5P) {
7120 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
7121 if (!status)
7122 return 0;
7123 }
7124
7125 /* try SP4_NONE */
7126 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
7127 }
7128
7129 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
7130 struct rpc_cred *cred)
7131 {
7132 struct rpc_message msg = {
7133 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
7134 .rpc_argp = clp,
7135 .rpc_cred = cred,
7136 };
7137 int status;
7138
7139 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7140 trace_nfs4_destroy_clientid(clp, status);
7141 if (status)
7142 dprintk("NFS: Got error %d from the server %s on "
7143 "DESTROY_CLIENTID.", status, clp->cl_hostname);
7144 return status;
7145 }
7146
7147 static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
7148 struct rpc_cred *cred)
7149 {
7150 unsigned int loop;
7151 int ret;
7152
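	/* Retry a bounded number of times while the server is busy or asks us to back off */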
7153 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
7154 ret = _nfs4_proc_destroy_clientid(clp, cred);
7155 switch (ret) {
7156 case -NFS4ERR_DELAY:
7157 case -NFS4ERR_CLIENTID_BUSY:
7158 ssleep(1);
7159 break;
7160 default:
7161 return ret;
7162 }
7163 }
7164 return 0;
7165 }
7166
7167 int nfs4_destroy_clientid(struct nfs_client *clp)
7168 {
7169 struct rpc_cred *cred;
7170 int ret = 0;
7171
7172 if (clp->cl_mvops->minor_version < 1)
7173 goto out;
7174 if (clp->cl_exchange_flags == 0)
7175 goto out;
7176 if (clp->cl_preserve_clid)
7177 goto out;
7178 cred = nfs4_get_clid_cred(clp);
7179 ret = nfs4_proc_destroy_clientid(clp, cred);
7180 if (cred)
7181 put_rpccred(cred);
7182 switch (ret) {
7183 case 0:
7184 case -NFS4ERR_STALE_CLIENTID:
7185 clp->cl_exchange_flags = 0;
7186 }
7187 out:
7188 return ret;
7189 }
7190
7191 struct nfs4_get_lease_time_data {
7192 struct nfs4_get_lease_time_args *args;
7193 struct nfs4_get_lease_time_res *res;
7194 struct nfs_client *clp;
7195 };
7196
7197 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
7198 void *calldata)
7199 {
7200 struct nfs4_get_lease_time_data *data =
7201 (struct nfs4_get_lease_time_data *)calldata;
7202
7203 dprintk("--> %s\n", __func__);
7204 	/* Just set up the sequence; do not trigger session recovery,
7205 	 * since we're invoked from within one */
7206 nfs41_setup_sequence(data->clp->cl_session,
7207 &data->args->la_seq_args,
7208 &data->res->lr_seq_res,
7209 task);
7210 dprintk("<-- %s\n", __func__);
7211 }
7212
7213 /*
7214 * Called from nfs4_state_manager thread for session setup, so don't recover
7215 * from sequence operation or clientid errors.
7216 */
7217 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
7218 {
7219 struct nfs4_get_lease_time_data *data =
7220 (struct nfs4_get_lease_time_data *)calldata;
7221
7222 dprintk("--> %s\n", __func__);
7223 if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
7224 return;
7225 switch (task->tk_status) {
7226 case -NFS4ERR_DELAY:
7227 case -NFS4ERR_GRACE:
7228 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
7229 rpc_delay(task, NFS4_POLL_RETRY_MIN);
7230 task->tk_status = 0;
7231 /* fall through */
7232 case -NFS4ERR_RETRY_UNCACHED_REP:
7233 rpc_restart_call_prepare(task);
7234 return;
7235 }
7236 dprintk("<-- %s\n", __func__);
7237 }
7238
7239 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
7240 .rpc_call_prepare = nfs4_get_lease_time_prepare,
7241 .rpc_call_done = nfs4_get_lease_time_done,
7242 };
7243
7244 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
7245 {
7246 struct rpc_task *task;
7247 struct nfs4_get_lease_time_args args;
7248 struct nfs4_get_lease_time_res res = {
7249 .lr_fsinfo = fsinfo,
7250 };
7251 struct nfs4_get_lease_time_data data = {
7252 .args = &args,
7253 .res = &res,
7254 .clp = clp,
7255 };
7256 struct rpc_message msg = {
7257 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
7258 .rpc_argp = &args,
7259 .rpc_resp = &res,
7260 };
7261 struct rpc_task_setup task_setup = {
7262 .rpc_client = clp->cl_rpcclient,
7263 .rpc_message = &msg,
7264 .callback_ops = &nfs4_get_lease_time_ops,
7265 .callback_data = &data,
7266 .flags = RPC_TASK_TIMEOUT,
7267 };
7268 int status;
7269
7270 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
7271 nfs4_set_sequence_privileged(&args.la_seq_args);
7272 dprintk("--> %s\n", __func__);
7273 task = rpc_run_task(&task_setup);
7274
7275 if (IS_ERR(task))
7276 status = PTR_ERR(task);
7277 else {
7278 status = task->tk_status;
7279 rpc_put_task(task);
7280 }
7281 dprintk("<-- %s return %d\n", __func__, status);
7282
7283 return status;
7284 }
7285
7286 /*
7287  * Initialize the values to be used by the client in CREATE_SESSION.
7288 * If nfs4_init_session set the fore channel request and response sizes,
7289 * use them.
7290 *
7291 * Set the back channel max_resp_sz_cached to zero to force the client to
7292 * always set csa_cachethis to FALSE because the current implementation
7293 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
7294 */
7295 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
7296 {
7297 unsigned int max_rqst_sz, max_resp_sz;
7298
7299 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
7300 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
7301
7302 /* Fore channel attributes */
7303 args->fc_attrs.max_rqst_sz = max_rqst_sz;
7304 args->fc_attrs.max_resp_sz = max_resp_sz;
7305 args->fc_attrs.max_ops = NFS4_MAX_OPS;
7306 args->fc_attrs.max_reqs = max_session_slots;
7307
7308 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
7309 "max_ops=%u max_reqs=%u\n",
7310 __func__,
7311 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
7312 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
7313
7314 /* Back channel attributes */
7315 args->bc_attrs.max_rqst_sz = PAGE_SIZE;
7316 args->bc_attrs.max_resp_sz = PAGE_SIZE;
7317 args->bc_attrs.max_resp_sz_cached = 0;
7318 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
7319 args->bc_attrs.max_reqs = 1;
7320
7321 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
7322 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
7323 __func__,
7324 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
7325 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
7326 args->bc_attrs.max_reqs);
7327 }
7328
7329 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
7330 struct nfs41_create_session_res *res)
7331 {
7332 struct nfs4_channel_attrs *sent = &args->fc_attrs;
7333 struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
7334
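	/* The server may not grant a larger maximum reply size than we offered to accept */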
7335 if (rcvd->max_resp_sz > sent->max_resp_sz)
7336 return -EINVAL;
7337 /*
7338 * Our requested max_ops is the minimum we need; we're not
7339 * prepared to break up compounds into smaller pieces than that.
7340 * So, no point even trying to continue if the server won't
7341 * cooperate:
7342 */
7343 if (rcvd->max_ops < sent->max_ops)
7344 return -EINVAL;
7345 if (rcvd->max_reqs == 0)
7346 return -EINVAL;
7347 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
7348 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
7349 return 0;
7350 }
7351
7352 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
7353 struct nfs41_create_session_res *res)
7354 {
7355 struct nfs4_channel_attrs *sent = &args->bc_attrs;
7356 struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
7357
7358 if (!(res->flags & SESSION4_BACK_CHAN))
7359 goto out;
7360 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
7361 return -EINVAL;
7362 if (rcvd->max_resp_sz < sent->max_resp_sz)
7363 return -EINVAL;
7364 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
7365 return -EINVAL;
7366 /* These would render the backchannel useless: */
7367 if (rcvd->max_ops != sent->max_ops)
7368 return -EINVAL;
7369 if (rcvd->max_reqs != sent->max_reqs)
7370 return -EINVAL;
7371 out:
7372 return 0;
7373 }
7374
7375 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
7376 struct nfs41_create_session_res *res)
7377 {
7378 int ret;
7379
7380 ret = nfs4_verify_fore_channel_attrs(args, res);
7381 if (ret)
7382 return ret;
7383 return nfs4_verify_back_channel_attrs(args, res);
7384 }
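The two verification helpers above encode the CREATE_SESSION negotiation rules: the server may shrink the fore-channel response size and slot count (the slot count is clamped to NFS4_MAX_SLOT_TABLE but must stay non-zero), it must grant at least the requested max_ops, and the back-channel attributes must come back essentially unchanged. Below is a minimal user-space sketch of the fore-channel checks only; struct chan_attrs, MAX_SLOT_TABLE and the numeric values are illustrative stand-ins, not the kernel's types or constants.

/*
 * Minimal user-space sketch of the fore-channel negotiation rules above.
 * "struct chan_attrs" is a hypothetical stand-in for the kernel's
 * struct nfs4_channel_attrs; the constants are examples only.
 */
#include <stdio.h>

#define MAX_SLOT_TABLE 1024	/* assumed cap, mirroring NFS4_MAX_SLOT_TABLE */

struct chan_attrs {
	unsigned int max_resp_sz;
	unsigned int max_ops;
	unsigned int max_reqs;
};

/* Return 0 if the server's reply is acceptable, -1 otherwise. */
static int verify_fore_channel(const struct chan_attrs *sent,
			       struct chan_attrs *rcvd)
{
	if (rcvd->max_resp_sz > sent->max_resp_sz)	/* server may not grow our buffers */
		return -1;
	if (rcvd->max_ops < sent->max_ops)		/* we asked for the minimum we can use */
		return -1;
	if (rcvd->max_reqs == 0)			/* zero session slots is useless */
		return -1;
	if (rcvd->max_reqs > MAX_SLOT_TABLE)		/* clamp to what the client supports */
		rcvd->max_reqs = MAX_SLOT_TABLE;
	return 0;
}

int main(void)
{
	struct chan_attrs sent = { .max_resp_sz = 1049620, .max_ops = 8, .max_reqs = 64 };
	struct chan_attrs rcvd = { .max_resp_sz = 65536, .max_ops = 8, .max_reqs = 2048 };

	printf("negotiation %s, max_reqs clamped to %u\n",
	       verify_fore_channel(&sent, &rcvd) ? "rejected" : "accepted",
	       rcvd.max_reqs);
	return 0;
}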
7385
7386 static void nfs4_update_session(struct nfs4_session *session,
7387 struct nfs41_create_session_res *res)
7388 {
7389 nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
7390 /* Mark client id and session as being confirmed */
7391 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
7392 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
7393 session->flags = res->flags;
7394 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
7395 if (res->flags & SESSION4_BACK_CHAN)
7396 memcpy(&session->bc_attrs, &res->bc_attrs,
7397 sizeof(session->bc_attrs));
7398 }
7399
7400 static int _nfs4_proc_create_session(struct nfs_client *clp,
7401 struct rpc_cred *cred)
7402 {
7403 struct nfs4_session *session = clp->cl_session;
7404 struct nfs41_create_session_args args = {
7405 .client = clp,
7406 .clientid = clp->cl_clientid,
7407 .seqid = clp->cl_seqid,
7408 .cb_program = NFS4_CALLBACK,
7409 };
7410 struct nfs41_create_session_res res;
7411
7412 struct rpc_message msg = {
7413 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
7414 .rpc_argp = &args,
7415 .rpc_resp = &res,
7416 .rpc_cred = cred,
7417 };
7418 int status;
7419
7420 nfs4_init_channel_attrs(&args);
7421 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
7422
7423 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7424 trace_nfs4_create_session(clp, status);
7425
7426 if (!status) {
7427 /* Verify the session's negotiated channel_attrs values */
7428 status = nfs4_verify_channel_attrs(&args, &res);
7429 /* Increment the clientid slot sequence id */
7430 if (clp->cl_seqid == res.seqid)
7431 clp->cl_seqid++;
7432 if (status)
7433 goto out;
7434 nfs4_update_session(session, &res);
7435 }
7436 out:
7437 return status;
7438 }
7439
7440 /*
7441 * Issues a CREATE_SESSION operation to the server.
7442 * It is the responsibility of the caller to verify the session is
7443 * expired before calling this routine.
7444 */
7445 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
7446 {
7447 int status;
7448 unsigned *ptr;
7449 struct nfs4_session *session = clp->cl_session;
7450
7451 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
7452
7453 status = _nfs4_proc_create_session(clp, cred);
7454 if (status)
7455 goto out;
7456
7457 /* Init or reset the session slot tables */
7458 status = nfs4_setup_session_slot_tables(session);
7459 dprintk("slot table setup returned %d\n", status);
7460 if (status)
7461 goto out;
7462
7463 ptr = (unsigned *)&session->sess_id.data[0];
7464 	dprintk("%s clp->cl_seqid %d sessionid %u:%u:%u:%u\n", __func__,
7465 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
7466 out:
7467 dprintk("<-- %s\n", __func__);
7468 return status;
7469 }
7470
7471 /*
7472 * Issue the over-the-wire RPC DESTROY_SESSION.
7473 * The caller must serialize access to this routine.
7474 */
7475 int nfs4_proc_destroy_session(struct nfs4_session *session,
7476 struct rpc_cred *cred)
7477 {
7478 struct rpc_message msg = {
7479 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
7480 .rpc_argp = session,
7481 .rpc_cred = cred,
7482 };
7483 int status = 0;
7484
7485 dprintk("--> nfs4_proc_destroy_session\n");
7486
7487 /* session is still being setup */
7488 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
7489 return 0;
7490
7491 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7492 trace_nfs4_destroy_session(session->clp, status);
7493
7494 if (status)
7495 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
7496 "Session has been destroyed regardless...\n", status);
7497
7498 dprintk("<-- nfs4_proc_destroy_session\n");
7499 return status;
7500 }
7501
7502 /*
7503 * Renew the cl_session lease.
7504 */
7505 struct nfs4_sequence_data {
7506 struct nfs_client *clp;
7507 struct nfs4_sequence_args args;
7508 struct nfs4_sequence_res res;
7509 };
7510
7511 static void nfs41_sequence_release(void *data)
7512 {
7513 struct nfs4_sequence_data *calldata = data;
7514 struct nfs_client *clp = calldata->clp;
7515
7516 if (atomic_read(&clp->cl_count) > 1)
7517 nfs4_schedule_state_renewal(clp);
7518 nfs_put_client(clp);
7519 kfree(calldata);
7520 }
7521
7522 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
7523 {
7524 switch(task->tk_status) {
7525 case -NFS4ERR_DELAY:
7526 rpc_delay(task, NFS4_POLL_RETRY_MAX);
7527 return -EAGAIN;
7528 default:
7529 nfs4_schedule_lease_recovery(clp);
7530 }
7531 return 0;
7532 }
7533
7534 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
7535 {
7536 struct nfs4_sequence_data *calldata = data;
7537 struct nfs_client *clp = calldata->clp;
7538
7539 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
7540 return;
7541
7542 trace_nfs4_sequence(clp, task->tk_status);
7543 if (task->tk_status < 0) {
7544 dprintk("%s ERROR %d\n", __func__, task->tk_status);
7545 if (atomic_read(&clp->cl_count) == 1)
7546 goto out;
7547
7548 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
7549 rpc_restart_call_prepare(task);
7550 return;
7551 }
7552 }
7553 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
7554 out:
7555 dprintk("<-- %s\n", __func__);
7556 }
7557
7558 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
7559 {
7560 struct nfs4_sequence_data *calldata = data;
7561 struct nfs_client *clp = calldata->clp;
7562 struct nfs4_sequence_args *args;
7563 struct nfs4_sequence_res *res;
7564
7565 args = task->tk_msg.rpc_argp;
7566 res = task->tk_msg.rpc_resp;
7567
7568 nfs41_setup_sequence(clp->cl_session, args, res, task);
7569 }
7570
7571 static const struct rpc_call_ops nfs41_sequence_ops = {
7572 .rpc_call_done = nfs41_sequence_call_done,
7573 .rpc_call_prepare = nfs41_sequence_prepare,
7574 .rpc_release = nfs41_sequence_release,
7575 };
7576
7577 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
7578 struct rpc_cred *cred,
7579 bool is_privileged)
7580 {
7581 struct nfs4_sequence_data *calldata;
7582 struct rpc_message msg = {
7583 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
7584 .rpc_cred = cred,
7585 };
7586 struct rpc_task_setup task_setup_data = {
7587 .rpc_client = clp->cl_rpcclient,
7588 .rpc_message = &msg,
7589 .callback_ops = &nfs41_sequence_ops,
7590 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
7591 };
7592
7593 if (!atomic_inc_not_zero(&clp->cl_count))
7594 return ERR_PTR(-EIO);
7595 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7596 if (calldata == NULL) {
7597 nfs_put_client(clp);
7598 return ERR_PTR(-ENOMEM);
7599 }
7600 nfs4_init_sequence(&calldata->args, &calldata->res, 0);
7601 if (is_privileged)
7602 nfs4_set_sequence_privileged(&calldata->args);
7603 msg.rpc_argp = &calldata->args;
7604 msg.rpc_resp = &calldata->res;
7605 calldata->clp = clp;
7606 task_setup_data.callback_data = calldata;
7607
7608 return rpc_run_task(&task_setup_data);
7609 }
7610
7611 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
7612 {
7613 struct rpc_task *task;
7614 int ret = 0;
7615
7616 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
7617 return -EAGAIN;
7618 task = _nfs41_proc_sequence(clp, cred, false);
7619 if (IS_ERR(task))
7620 ret = PTR_ERR(task);
7621 else
7622 rpc_put_task_async(task);
7623 dprintk("<-- %s status=%d\n", __func__, ret);
7624 return ret;
7625 }
7626
7627 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
7628 {
7629 struct rpc_task *task;
7630 int ret;
7631
7632 task = _nfs41_proc_sequence(clp, cred, true);
7633 if (IS_ERR(task)) {
7634 ret = PTR_ERR(task);
7635 goto out;
7636 }
7637 ret = rpc_wait_for_completion_task(task);
7638 if (!ret)
7639 ret = task->tk_status;
7640 rpc_put_task(task);
7641 out:
7642 dprintk("<-- %s status=%d\n", __func__, ret);
7643 return ret;
7644 }
7645
7646 struct nfs4_reclaim_complete_data {
7647 struct nfs_client *clp;
7648 struct nfs41_reclaim_complete_args arg;
7649 struct nfs41_reclaim_complete_res res;
7650 };
7651
7652 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
7653 {
7654 struct nfs4_reclaim_complete_data *calldata = data;
7655
7656 nfs41_setup_sequence(calldata->clp->cl_session,
7657 &calldata->arg.seq_args,
7658 &calldata->res.seq_res,
7659 task);
7660 }
7661
7662 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
7663 {
7664 switch(task->tk_status) {
7665 case 0:
7666 case -NFS4ERR_COMPLETE_ALREADY:
7667 case -NFS4ERR_WRONG_CRED: /* What to do here? */
7668 break;
7669 case -NFS4ERR_DELAY:
7670 rpc_delay(task, NFS4_POLL_RETRY_MAX);
7671 /* fall through */
7672 case -NFS4ERR_RETRY_UNCACHED_REP:
7673 return -EAGAIN;
7674 default:
7675 nfs4_schedule_lease_recovery(clp);
7676 }
7677 return 0;
7678 }
7679
7680 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
7681 {
7682 struct nfs4_reclaim_complete_data *calldata = data;
7683 struct nfs_client *clp = calldata->clp;
7684 struct nfs4_sequence_res *res = &calldata->res.seq_res;
7685
7686 dprintk("--> %s\n", __func__);
7687 if (!nfs41_sequence_done(task, res))
7688 return;
7689
7690 trace_nfs4_reclaim_complete(clp, task->tk_status);
7691 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
7692 rpc_restart_call_prepare(task);
7693 return;
7694 }
7695 dprintk("<-- %s\n", __func__);
7696 }
7697
7698 static void nfs4_free_reclaim_complete_data(void *data)
7699 {
7700 struct nfs4_reclaim_complete_data *calldata = data;
7701
7702 kfree(calldata);
7703 }
7704
7705 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
7706 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
7707 .rpc_call_done = nfs4_reclaim_complete_done,
7708 .rpc_release = nfs4_free_reclaim_complete_data,
7709 };
7710
7711 /*
7712 * Issue a global reclaim complete.
7713 */
7714 static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
7715 struct rpc_cred *cred)
7716 {
7717 struct nfs4_reclaim_complete_data *calldata;
7718 struct rpc_task *task;
7719 struct rpc_message msg = {
7720 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
7721 .rpc_cred = cred,
7722 };
7723 struct rpc_task_setup task_setup_data = {
7724 .rpc_client = clp->cl_rpcclient,
7725 .rpc_message = &msg,
7726 .callback_ops = &nfs4_reclaim_complete_call_ops,
7727 .flags = RPC_TASK_ASYNC,
7728 };
7729 int status = -ENOMEM;
7730
7731 dprintk("--> %s\n", __func__);
7732 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7733 if (calldata == NULL)
7734 goto out;
7735 calldata->clp = clp;
7736 calldata->arg.one_fs = 0;
7737
7738 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
7739 nfs4_set_sequence_privileged(&calldata->arg.seq_args);
7740 msg.rpc_argp = &calldata->arg;
7741 msg.rpc_resp = &calldata->res;
7742 task_setup_data.callback_data = calldata;
7743 task = rpc_run_task(&task_setup_data);
7744 if (IS_ERR(task)) {
7745 status = PTR_ERR(task);
7746 goto out;
7747 }
7748 status = nfs4_wait_for_completion_rpc_task(task);
7749 if (status == 0)
7750 status = task->tk_status;
7751 rpc_put_task(task);
7752 return 0;
7753 out:
7754 dprintk("<-- %s status=%d\n", __func__, status);
7755 return status;
7756 }
7757
7758 static void
7759 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
7760 {
7761 struct nfs4_layoutget *lgp = calldata;
7762 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
7763 struct nfs4_session *session = nfs4_get_session(server);
7764
7765 dprintk("--> %s\n", __func__);
7766 	/* Note there is a race here, where a CB_LAYOUTRECALL can come in
7767 * right now covering the LAYOUTGET we are about to send.
7768 * However, that is not so catastrophic, and there seems
7769 * to be no way to prevent it completely.
7770 */
7771 if (nfs41_setup_sequence(session, &lgp->args.seq_args,
7772 &lgp->res.seq_res, task))
7773 return;
7774 if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
7775 NFS_I(lgp->args.inode)->layout,
7776 &lgp->args.range,
7777 lgp->args.ctx->state)) {
7778 rpc_exit(task, NFS4_OK);
7779 }
7780 }
7781
7782 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
7783 {
7784 struct nfs4_layoutget *lgp = calldata;
7785 struct inode *inode = lgp->args.inode;
7786 struct nfs_server *server = NFS_SERVER(inode);
7787 struct pnfs_layout_hdr *lo;
7788 struct nfs4_state *state = NULL;
7789 unsigned long timeo, now, giveup;
7790
7791 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
7792
7793 if (!nfs41_sequence_done(task, &lgp->res.seq_res))
7794 goto out;
7795
7796 switch (task->tk_status) {
7797 case 0:
7798 goto out;
7799 /*
7800 	 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of at
7801 	 * least lgp->args.minlength bytes when minlength != 0 (see RFC5661 section 18.43.3).
7802 */
7803 case -NFS4ERR_BADLAYOUT:
7804 goto out_overflow;
7805 /*
7806 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
7807 * (or clients) writing to the same RAID stripe except when
7808 * the minlength argument is 0 (see RFC5661 section 18.43.3).
7809 */
7810 case -NFS4ERR_LAYOUTTRYLATER:
7811 if (lgp->args.minlength == 0)
7812 goto out_overflow;
7813 /*
7814 	 * NFS4ERR_RECALLCONFLICT means we conflict with ourselves (we must
7815 	 * recall the existing layout before getting a new one).
7816 */
7817 case -NFS4ERR_RECALLCONFLICT:
7818 timeo = rpc_get_timeout(task->tk_client);
7819 giveup = lgp->args.timestamp + timeo;
7820 now = jiffies;
7821 if (time_after(giveup, now)) {
7822 unsigned long delay;
7823
7824 /* Delay for:
7825 			 * - Not less than NFS4_POLL_RETRY_MIN.
7826 			 * - One last time, one jiffy before we give up.
7827 			 * - Exponential backoff (time_now minus start_attempt).
7828 */
7829 delay = max_t(unsigned long, NFS4_POLL_RETRY_MIN,
7830 min((giveup - now - 1),
7831 now - lgp->args.timestamp));
7832
7833 dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n",
7834 __func__, delay);
7835 rpc_delay(task, delay);
7836 /* Do not call nfs4_async_handle_error() */
7837 goto out_restart;
7838 }
7839 break;
7840 case -NFS4ERR_EXPIRED:
7841 case -NFS4ERR_BAD_STATEID:
7842 spin_lock(&inode->i_lock);
7843 if (nfs4_stateid_match(&lgp->args.stateid,
7844 &lgp->args.ctx->state->stateid)) {
7845 spin_unlock(&inode->i_lock);
7846 /* If the open stateid was bad, then recover it. */
7847 state = lgp->args.ctx->state;
7848 break;
7849 }
7850 lo = NFS_I(inode)->layout;
7851 if (lo && nfs4_stateid_match(&lgp->args.stateid,
7852 &lo->plh_stateid)) {
7853 LIST_HEAD(head);
7854
7855 /*
7856 * Mark the bad layout state as invalid, then retry
7857 * with the current stateid.
7858 */
7859 set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
7860 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
7861 spin_unlock(&inode->i_lock);
7862 pnfs_free_lseg_list(&head);
7863 } else
7864 spin_unlock(&inode->i_lock);
7865 goto out_restart;
7866 }
7867 if (nfs4_async_handle_error(task, server, state, &lgp->timeout) == -EAGAIN)
7868 goto out_restart;
7869 out:
7870 dprintk("<-- %s\n", __func__);
7871 return;
7872 out_restart:
7873 task->tk_status = 0;
7874 rpc_restart_call_prepare(task);
7875 return;
7876 out_overflow:
7877 task->tk_status = -EOVERFLOW;
7878 goto out;
7879 }
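For NFS4ERR_RECALLCONFLICT the done handler above retries with a bounded backoff: the delay is never shorter than NFS4_POLL_RETRY_MIN, grows with the time already spent on this LAYOUTGET (now - lgp->args.timestamp), and is capped so the task never sleeps past one jiffy before the RPC timeout would expire. As a worked example, assuming HZ=100 (so NFS4_POLL_RETRY_MIN is 10 jiffies), a 600-jiffy RPC timeout and an attempt started at jiffy 1000: an error seen at jiffy 1050 gives giveup = 1600 and delay = max(10, min(1600 - 1050 - 1, 1050 - 1000)) = 50 jiffies. The stand-alone sketch below redoes that arithmetic; the constants are assumptions, not values read from a live client.

/*
 * Stand-alone sketch of the NFS4ERR_RECALLCONFLICT backoff above.
 * HZ, the timeout and the timestamps are assumed example values; the
 * kernel takes them from rpc_get_timeout() and lgp->args.timestamp.
 * Only meaningful while "now" is still before "giveup", which the
 * caller checks with time_after().
 */
#include <stdio.h>

#define HZ			100
#define NFS4_POLL_RETRY_MIN	(HZ / 10)

static unsigned long recallconflict_delay(unsigned long timestamp,
					  unsigned long now,
					  unsigned long timeo)
{
	unsigned long giveup = timestamp + timeo;
	unsigned long latest = giveup - now - 1;	/* don't sleep past the deadline */
	unsigned long elapsed = now - timestamp;	/* crude exponential backoff */
	unsigned long delay = latest < elapsed ? latest : elapsed;

	return delay > NFS4_POLL_RETRY_MIN ? delay : NFS4_POLL_RETRY_MIN;
}

int main(void)
{
	/* Attempt started at jiffy 1000, error seen at 1050, 600-jiffy timeout. */
	printf("delay = %lu jiffies\n", recallconflict_delay(1000, 1050, 600));
	return 0;
}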
7880
7881 static size_t max_response_pages(struct nfs_server *server)
7882 {
7883 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
7884 return nfs_page_array_len(0, max_resp_sz);
7885 }
7886
7887 static void nfs4_free_pages(struct page **pages, size_t size)
7888 {
7889 int i;
7890
7891 if (!pages)
7892 return;
7893
7894 for (i = 0; i < size; i++) {
7895 if (!pages[i])
7896 break;
7897 __free_page(pages[i]);
7898 }
7899 kfree(pages);
7900 }
7901
7902 static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
7903 {
7904 struct page **pages;
7905 int i;
7906
7907 pages = kcalloc(size, sizeof(struct page *), gfp_flags);
7908 if (!pages) {
7909 dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
7910 return NULL;
7911 }
7912
7913 for (i = 0; i < size; i++) {
7914 pages[i] = alloc_page(gfp_flags);
7915 if (!pages[i]) {
7916 dprintk("%s: failed to allocate page\n", __func__);
7917 nfs4_free_pages(pages, size);
7918 return NULL;
7919 }
7920 }
7921
7922 return pages;
7923 }
7924
7925 static void nfs4_layoutget_release(void *calldata)
7926 {
7927 struct nfs4_layoutget *lgp = calldata;
7928 struct inode *inode = lgp->args.inode;
7929 struct nfs_server *server = NFS_SERVER(inode);
7930 size_t max_pages = max_response_pages(server);
7931
7932 dprintk("--> %s\n", __func__);
7933 nfs4_free_pages(lgp->args.layout.pages, max_pages);
7934 pnfs_put_layout_hdr(NFS_I(inode)->layout);
7935 put_nfs_open_context(lgp->args.ctx);
7936 kfree(calldata);
7937 dprintk("<-- %s\n", __func__);
7938 }
7939
7940 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
7941 .rpc_call_prepare = nfs4_layoutget_prepare,
7942 .rpc_call_done = nfs4_layoutget_done,
7943 .rpc_release = nfs4_layoutget_release,
7944 };
7945
7946 struct pnfs_layout_segment *
7947 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
7948 {
7949 struct inode *inode = lgp->args.inode;
7950 struct nfs_server *server = NFS_SERVER(inode);
7951 size_t max_pages = max_response_pages(server);
7952 struct rpc_task *task;
7953 struct rpc_message msg = {
7954 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
7955 .rpc_argp = &lgp->args,
7956 .rpc_resp = &lgp->res,
7957 .rpc_cred = lgp->cred,
7958 };
7959 struct rpc_task_setup task_setup_data = {
7960 .rpc_client = server->client,
7961 .rpc_message = &msg,
7962 .callback_ops = &nfs4_layoutget_call_ops,
7963 .callback_data = lgp,
7964 .flags = RPC_TASK_ASYNC,
7965 };
7966 struct pnfs_layout_segment *lseg = NULL;
7967 int status = 0;
7968
7969 dprintk("--> %s\n", __func__);
7970
7971 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
7972 pnfs_get_layout_hdr(NFS_I(inode)->layout);
7973
7974 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
7975 if (!lgp->args.layout.pages) {
7976 nfs4_layoutget_release(lgp);
7977 return ERR_PTR(-ENOMEM);
7978 }
7979 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
7980 lgp->args.timestamp = jiffies;
7981
7982 lgp->res.layoutp = &lgp->args.layout;
7983 lgp->res.seq_res.sr_slot = NULL;
7984 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
7985
7986 task = rpc_run_task(&task_setup_data);
7987 if (IS_ERR(task))
7988 return ERR_CAST(task);
7989 status = nfs4_wait_for_completion_rpc_task(task);
7990 if (status == 0)
7991 status = task->tk_status;
7992 trace_nfs4_layoutget(lgp->args.ctx,
7993 &lgp->args.range,
7994 &lgp->res.range,
7995 &lgp->res.stateid,
7996 status);
7997 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
7998 if (status == 0 && lgp->res.layoutp->len)
7999 lseg = pnfs_layout_process(lgp);
8000 rpc_put_task(task);
8001 dprintk("<-- %s status=%d\n", __func__, status);
8002 if (status)
8003 return ERR_PTR(status);
8004 return lseg;
8005 }
8006
8007 static void
8008 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
8009 {
8010 struct nfs4_layoutreturn *lrp = calldata;
8011
8012 dprintk("--> %s\n", __func__);
8013 nfs41_setup_sequence(lrp->clp->cl_session,
8014 &lrp->args.seq_args,
8015 &lrp->res.seq_res,
8016 task);
8017 }
8018
8019 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
8020 {
8021 struct nfs4_layoutreturn *lrp = calldata;
8022 struct nfs_server *server;
8023
8024 dprintk("--> %s\n", __func__);
8025
8026 if (!nfs41_sequence_done(task, &lrp->res.seq_res))
8027 return;
8028
8029 server = NFS_SERVER(lrp->args.inode);
8030 switch (task->tk_status) {
8031 default:
8032 task->tk_status = 0;
8033 case 0:
8034 break;
8035 case -NFS4ERR_DELAY:
8036 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
8037 break;
8038 rpc_restart_call_prepare(task);
8039 return;
8040 }
8041 dprintk("<-- %s\n", __func__);
8042 }
8043
8044 static void nfs4_layoutreturn_release(void *calldata)
8045 {
8046 struct nfs4_layoutreturn *lrp = calldata;
8047 struct pnfs_layout_hdr *lo = lrp->args.layout;
8048 LIST_HEAD(freeme);
8049
8050 dprintk("--> %s\n", __func__);
8051 spin_lock(&lo->plh_inode->i_lock);
8052 if (lrp->res.lrs_present)
8053 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
8054 pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range);
8055 pnfs_clear_layoutreturn_waitbit(lo);
8056 lo->plh_block_lgets--;
8057 spin_unlock(&lo->plh_inode->i_lock);
8058 pnfs_free_lseg_list(&freeme);
8059 pnfs_put_layout_hdr(lrp->args.layout);
8060 nfs_iput_and_deactive(lrp->inode);
8061 kfree(calldata);
8062 dprintk("<-- %s\n", __func__);
8063 }
8064
8065 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
8066 .rpc_call_prepare = nfs4_layoutreturn_prepare,
8067 .rpc_call_done = nfs4_layoutreturn_done,
8068 .rpc_release = nfs4_layoutreturn_release,
8069 };
8070
8071 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
8072 {
8073 struct rpc_task *task;
8074 struct rpc_message msg = {
8075 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
8076 .rpc_argp = &lrp->args,
8077 .rpc_resp = &lrp->res,
8078 .rpc_cred = lrp->cred,
8079 };
8080 struct rpc_task_setup task_setup_data = {
8081 .rpc_client = NFS_SERVER(lrp->args.inode)->client,
8082 .rpc_message = &msg,
8083 .callback_ops = &nfs4_layoutreturn_call_ops,
8084 .callback_data = lrp,
8085 };
8086 int status = 0;
8087
8088 dprintk("--> %s\n", __func__);
8089 if (!sync) {
8090 lrp->inode = nfs_igrab_and_active(lrp->args.inode);
8091 if (!lrp->inode) {
8092 nfs4_layoutreturn_release(lrp);
8093 return -EAGAIN;
8094 }
8095 task_setup_data.flags |= RPC_TASK_ASYNC;
8096 }
8097 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
8098 task = rpc_run_task(&task_setup_data);
8099 if (IS_ERR(task))
8100 return PTR_ERR(task);
8101 if (sync)
8102 status = task->tk_status;
8103 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status);
8104 dprintk("<-- %s status=%d\n", __func__, status);
8105 rpc_put_task(task);
8106 return status;
8107 }
8108
8109 static int
8110 _nfs4_proc_getdeviceinfo(struct nfs_server *server,
8111 struct pnfs_device *pdev,
8112 struct rpc_cred *cred)
8113 {
8114 struct nfs4_getdeviceinfo_args args = {
8115 .pdev = pdev,
8116 .notify_types = NOTIFY_DEVICEID4_CHANGE |
8117 NOTIFY_DEVICEID4_DELETE,
8118 };
8119 struct nfs4_getdeviceinfo_res res = {
8120 .pdev = pdev,
8121 };
8122 struct rpc_message msg = {
8123 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
8124 .rpc_argp = &args,
8125 .rpc_resp = &res,
8126 .rpc_cred = cred,
8127 };
8128 int status;
8129
8130 dprintk("--> %s\n", __func__);
8131 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
8132 if (res.notification & ~args.notify_types)
8133 dprintk("%s: unsupported notification\n", __func__);
8134 if (res.notification != args.notify_types)
8135 pdev->nocache = 1;
8136
8137 dprintk("<-- %s status=%d\n", __func__, status);
8138
8139 return status;
8140 }
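The notification handling just above is a plain bitmask comparison: the client asks the server for device-ID change and delete notifications, warns when the server advertises a bit it never requested, and marks the device as uncacheable whenever the granted set is not exactly the requested set. A small user-space sketch of the same check follows; the two bit values are the notify_deviceid_type4 bit positions, everything else is illustrative.

/*
 * User-space sketch of the GETDEVICEINFO notification check above.
 * "nocache" mirrors the pdev->nocache flag set by the kernel; the
 * server reply below is an invented example.
 */
#include <stdio.h>

#define NOTIFY_DEVICEID4_CHANGE	(1 << 1)
#define NOTIFY_DEVICEID4_DELETE	(1 << 2)

int main(void)
{
	unsigned int requested = NOTIFY_DEVICEID4_CHANGE | NOTIFY_DEVICEID4_DELETE;
	unsigned int granted = NOTIFY_DEVICEID4_DELETE;	/* example server reply */
	int nocache = 0;

	if (granted & ~requested)
		printf("server offered an unsupported notification type\n");
	if (granted != requested)
		nocache = 1;	/* notifications incomplete: don't cache the device */

	printf("nocache = %d\n", nocache);
	return 0;
}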
8141
8142 int nfs4_proc_getdeviceinfo(struct nfs_server *server,
8143 struct pnfs_device *pdev,
8144 struct rpc_cred *cred)
8145 {
8146 struct nfs4_exception exception = { };
8147 int err;
8148
8149 do {
8150 err = nfs4_handle_exception(server,
8151 _nfs4_proc_getdeviceinfo(server, pdev, cred),
8152 &exception);
8153 } while (exception.retry);
8154 return err;
8155 }
8156 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
8157
8158 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
8159 {
8160 struct nfs4_layoutcommit_data *data = calldata;
8161 struct nfs_server *server = NFS_SERVER(data->args.inode);
8162 struct nfs4_session *session = nfs4_get_session(server);
8163
8164 nfs41_setup_sequence(session,
8165 &data->args.seq_args,
8166 &data->res.seq_res,
8167 task);
8168 }
8169
8170 static void
8171 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
8172 {
8173 struct nfs4_layoutcommit_data *data = calldata;
8174 struct nfs_server *server = NFS_SERVER(data->args.inode);
8175
8176 if (!nfs41_sequence_done(task, &data->res.seq_res))
8177 return;
8178
8179 switch (task->tk_status) { /* Just ignore these failures */
8180 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
8181 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
8182 case -NFS4ERR_BADLAYOUT: /* no layout */
8183 	case -NFS4ERR_GRACE:		/* loca_reclaim always false */
8184 task->tk_status = 0;
8185 case 0:
8186 break;
8187 default:
8188 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
8189 rpc_restart_call_prepare(task);
8190 return;
8191 }
8192 }
8193 }
8194
8195 static void nfs4_layoutcommit_release(void *calldata)
8196 {
8197 struct nfs4_layoutcommit_data *data = calldata;
8198
8199 pnfs_cleanup_layoutcommit(data);
8200 nfs_post_op_update_inode_force_wcc(data->args.inode,
8201 data->res.fattr);
8202 put_rpccred(data->cred);
8203 nfs_iput_and_deactive(data->inode);
8204 kfree(data);
8205 }
8206
8207 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
8208 .rpc_call_prepare = nfs4_layoutcommit_prepare,
8209 .rpc_call_done = nfs4_layoutcommit_done,
8210 .rpc_release = nfs4_layoutcommit_release,
8211 };
8212
8213 int
8214 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
8215 {
8216 struct rpc_message msg = {
8217 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
8218 .rpc_argp = &data->args,
8219 .rpc_resp = &data->res,
8220 .rpc_cred = data->cred,
8221 };
8222 struct rpc_task_setup task_setup_data = {
8223 .task = &data->task,
8224 .rpc_client = NFS_CLIENT(data->args.inode),
8225 .rpc_message = &msg,
8226 .callback_ops = &nfs4_layoutcommit_ops,
8227 .callback_data = data,
8228 };
8229 struct rpc_task *task;
8230 int status = 0;
8231
8232 dprintk("NFS: initiating layoutcommit call. sync %d "
8233 "lbw: %llu inode %lu\n", sync,
8234 data->args.lastbytewritten,
8235 data->args.inode->i_ino);
8236
8237 if (!sync) {
8238 data->inode = nfs_igrab_and_active(data->args.inode);
8239 if (data->inode == NULL) {
8240 nfs4_layoutcommit_release(data);
8241 return -EAGAIN;
8242 }
8243 task_setup_data.flags = RPC_TASK_ASYNC;
8244 }
8245 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
8246 task = rpc_run_task(&task_setup_data);
8247 if (IS_ERR(task))
8248 return PTR_ERR(task);
8249 if (sync)
8250 status = task->tk_status;
8251 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status);
8252 dprintk("%s: status %d\n", __func__, status);
8253 rpc_put_task(task);
8254 return status;
8255 }
8256
8257 /**
8258  * Use the state management nfs_client cl_rpcclient, which uses krb5i (if
8259 * possible) as per RFC3530bis and RFC5661 Security Considerations sections
8260 */
8261 static int
8262 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8263 struct nfs_fsinfo *info,
8264 struct nfs4_secinfo_flavors *flavors, bool use_integrity)
8265 {
8266 struct nfs41_secinfo_no_name_args args = {
8267 .style = SECINFO_STYLE_CURRENT_FH,
8268 };
8269 struct nfs4_secinfo_res res = {
8270 .flavors = flavors,
8271 };
8272 struct rpc_message msg = {
8273 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
8274 .rpc_argp = &args,
8275 .rpc_resp = &res,
8276 };
8277 struct rpc_clnt *clnt = server->client;
8278 struct rpc_cred *cred = NULL;
8279 int status;
8280
8281 if (use_integrity) {
8282 clnt = server->nfs_client->cl_rpcclient;
8283 cred = nfs4_get_clid_cred(server->nfs_client);
8284 msg.rpc_cred = cred;
8285 }
8286
8287 dprintk("--> %s\n", __func__);
8288 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
8289 &res.seq_res, 0);
8290 dprintk("<-- %s status=%d\n", __func__, status);
8291
8292 if (cred)
8293 put_rpccred(cred);
8294
8295 return status;
8296 }
8297
8298 static int
8299 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8300 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
8301 {
8302 struct nfs4_exception exception = { };
8303 int err;
8304 do {
8305 /* first try using integrity protection */
8306 err = -NFS4ERR_WRONGSEC;
8307
8308 /* try to use integrity protection with machine cred */
8309 if (_nfs4_is_integrity_protected(server->nfs_client))
8310 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
8311 flavors, true);
8312
8313 /*
8314 * if unable to use integrity protection, or SECINFO with
8315 * integrity protection returns NFS4ERR_WRONGSEC (which is
8316 * disallowed by spec, but exists in deployed servers) use
8317 * the current filesystem's rpc_client and the user cred.
8318 */
8319 if (err == -NFS4ERR_WRONGSEC)
8320 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
8321 flavors, false);
8322
8323 switch (err) {
8324 case 0:
8325 case -NFS4ERR_WRONGSEC:
8326 case -ENOTSUPP:
8327 goto out;
8328 default:
8329 err = nfs4_handle_exception(server, err, &exception);
8330 }
8331 } while (exception.retry);
8332 out:
8333 return err;
8334 }
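The retry loop above issues SECINFO_NO_NAME at most twice per pass: first over the lease-management rpc_client with the machine credential (krb5i integrity protection, when the client has it), then, if integrity protection is unavailable or the server answers NFS4ERR_WRONGSEC anyway, over the filesystem's own rpc_client with the user's credential. The sketch below shows only that fallback shape; have_integrity() and secinfo_no_name() are hypothetical helpers, not kernel functions.

/*
 * Shape of the integrity-first SECINFO_NO_NAME fallback above.
 * have_integrity() and secinfo_no_name() are made-up helpers; only
 * the control flow mirrors the kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define NFS4ERR_WRONGSEC 10016			/* RFC 5661 error number */

static bool have_integrity(void)
{
	return false;				/* example: no krb5i available */
}

static int secinfo_no_name(bool use_integrity)
{
	/* Pretend the server only answers the non-integrity query. */
	return use_integrity ? -NFS4ERR_WRONGSEC : 0;
}

int main(void)
{
	int err = -NFS4ERR_WRONGSEC;

	if (have_integrity())
		err = secinfo_no_name(true);	/* machine cred over krb5i */
	if (err == -NFS4ERR_WRONGSEC)
		err = secinfo_no_name(false);	/* user cred, filesystem client */

	printf("secinfo_no_name -> %d\n", err);
	return 0;
}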
8335
8336 static int
8337 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
8338 struct nfs_fsinfo *info)
8339 {
8340 int err;
8341 struct page *page;
8342 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
8343 struct nfs4_secinfo_flavors *flavors;
8344 struct nfs4_secinfo4 *secinfo;
8345 int i;
8346
8347 page = alloc_page(GFP_KERNEL);
8348 if (!page) {
8349 err = -ENOMEM;
8350 goto out;
8351 }
8352
8353 flavors = page_address(page);
8354 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
8355
8356 /*
8357 * Fall back on "guess and check" method if
8358 * the server doesn't support SECINFO_NO_NAME
8359 */
8360 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
8361 err = nfs4_find_root_sec(server, fhandle, info);
8362 goto out_freepage;
8363 }
8364 if (err)
8365 goto out_freepage;
8366
8367 for (i = 0; i < flavors->num_flavors; i++) {
8368 secinfo = &flavors->flavors[i];
8369
8370 switch (secinfo->flavor) {
8371 case RPC_AUTH_NULL:
8372 case RPC_AUTH_UNIX:
8373 case RPC_AUTH_GSS:
8374 flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
8375 &secinfo->flavor_info);
8376 break;
8377 default:
8378 flavor = RPC_AUTH_MAXFLAVOR;
8379 break;
8380 }
8381
8382 if (!nfs_auth_info_match(&server->auth_info, flavor))
8383 flavor = RPC_AUTH_MAXFLAVOR;
8384
8385 if (flavor != RPC_AUTH_MAXFLAVOR) {
8386 err = nfs4_lookup_root_sec(server, fhandle,
8387 info, flavor);
8388 if (!err)
8389 break;
8390 }
8391 }
8392
8393 if (flavor == RPC_AUTH_MAXFLAVOR)
8394 err = -EPERM;
8395
8396 out_freepage:
8397 put_page(page);
8398 if (err == -EACCES)
8399 return -EPERM;
8400 out:
8401 return err;
8402 }
8403
8404 static int _nfs41_test_stateid(struct nfs_server *server,
8405 nfs4_stateid *stateid,
8406 struct rpc_cred *cred)
8407 {
8408 int status;
8409 struct nfs41_test_stateid_args args = {
8410 .stateid = stateid,
8411 };
8412 struct nfs41_test_stateid_res res;
8413 struct rpc_message msg = {
8414 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
8415 .rpc_argp = &args,
8416 .rpc_resp = &res,
8417 .rpc_cred = cred,
8418 };
8419 struct rpc_clnt *rpc_client = server->client;
8420
8421 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
8422 &rpc_client, &msg);
8423
8424 dprintk("NFS call test_stateid %p\n", stateid);
8425 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
8426 nfs4_set_sequence_privileged(&args.seq_args);
8427 status = nfs4_call_sync_sequence(rpc_client, server, &msg,
8428 &args.seq_args, &res.seq_res);
8429 if (status != NFS_OK) {
8430 dprintk("NFS reply test_stateid: failed, %d\n", status);
8431 return status;
8432 }
8433 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
8434 return -res.status;
8435 }
8436
8437 /**
8438 * nfs41_test_stateid - perform a TEST_STATEID operation
8439 *
8440 * @server: server / transport on which to perform the operation
8441 * @stateid: state ID to test
8442 * @cred: credential
8443 *
8444 * Returns NFS_OK if the server recognizes that "stateid" is valid.
8445 * Otherwise a negative NFS4ERR value is returned if the operation
8446 * failed or the state ID is not currently valid.
8447 */
8448 static int nfs41_test_stateid(struct nfs_server *server,
8449 nfs4_stateid *stateid,
8450 struct rpc_cred *cred)
8451 {
8452 struct nfs4_exception exception = { };
8453 int err;
8454 do {
8455 err = _nfs41_test_stateid(server, stateid, cred);
8456 if (err != -NFS4ERR_DELAY)
8457 break;
8458 nfs4_handle_exception(server, err, &exception);
8459 } while (exception.retry);
8460 return err;
8461 }
8462
8463 struct nfs_free_stateid_data {
8464 struct nfs_server *server;
8465 struct nfs41_free_stateid_args args;
8466 struct nfs41_free_stateid_res res;
8467 };
8468
8469 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
8470 {
8471 struct nfs_free_stateid_data *data = calldata;
8472 nfs41_setup_sequence(nfs4_get_session(data->server),
8473 &data->args.seq_args,
8474 &data->res.seq_res,
8475 task);
8476 }
8477
8478 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
8479 {
8480 struct nfs_free_stateid_data *data = calldata;
8481
8482 nfs41_sequence_done(task, &data->res.seq_res);
8483
8484 switch (task->tk_status) {
8485 case -NFS4ERR_DELAY:
8486 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
8487 rpc_restart_call_prepare(task);
8488 }
8489 }
8490
8491 static void nfs41_free_stateid_release(void *calldata)
8492 {
8493 kfree(calldata);
8494 }
8495
8496 static const struct rpc_call_ops nfs41_free_stateid_ops = {
8497 .rpc_call_prepare = nfs41_free_stateid_prepare,
8498 .rpc_call_done = nfs41_free_stateid_done,
8499 .rpc_release = nfs41_free_stateid_release,
8500 };
8501
8502 static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server,
8503 nfs4_stateid *stateid,
8504 struct rpc_cred *cred,
8505 bool privileged)
8506 {
8507 struct rpc_message msg = {
8508 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
8509 .rpc_cred = cred,
8510 };
8511 struct rpc_task_setup task_setup = {
8512 .rpc_client = server->client,
8513 .rpc_message = &msg,
8514 .callback_ops = &nfs41_free_stateid_ops,
8515 .flags = RPC_TASK_ASYNC,
8516 };
8517 struct nfs_free_stateid_data *data;
8518
8519 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
8520 &task_setup.rpc_client, &msg);
8521
8522 dprintk("NFS call free_stateid %p\n", stateid);
8523 data = kmalloc(sizeof(*data), GFP_NOFS);
8524 if (!data)
8525 return ERR_PTR(-ENOMEM);
8526 data->server = server;
8527 nfs4_stateid_copy(&data->args.stateid, stateid);
8528
8529 task_setup.callback_data = data;
8530
8531 msg.rpc_argp = &data->args;
8532 msg.rpc_resp = &data->res;
8533 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
8534 if (privileged)
8535 nfs4_set_sequence_privileged(&data->args.seq_args);
8536
8537 return rpc_run_task(&task_setup);
8538 }
8539
8540 /**
8541 * nfs41_free_stateid - perform a FREE_STATEID operation
8542 *
8543 * @server: server / transport on which to perform the operation
8544 * @stateid: state ID to release
8545 * @cred: credential
8546 *
8547 * Returns NFS_OK if the server freed "stateid". Otherwise a
8548 * negative NFS4ERR value is returned.
8549 */
8550 static int nfs41_free_stateid(struct nfs_server *server,
8551 nfs4_stateid *stateid,
8552 struct rpc_cred *cred)
8553 {
8554 struct rpc_task *task;
8555 int ret;
8556
8557 task = _nfs41_free_stateid(server, stateid, cred, true);
8558 if (IS_ERR(task))
8559 return PTR_ERR(task);
8560 ret = rpc_wait_for_completion_task(task);
8561 if (!ret)
8562 ret = task->tk_status;
8563 rpc_put_task(task);
8564 return ret;
8565 }
8566
8567 static void
8568 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
8569 {
8570 struct rpc_task *task;
8571 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
8572
8573 task = _nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
8574 nfs4_free_lock_state(server, lsp);
8575 if (IS_ERR(task))
8576 return;
8577 rpc_put_task(task);
8578 }
8579
8580 static bool nfs41_match_stateid(const nfs4_stateid *s1,
8581 const nfs4_stateid *s2)
8582 {
8583 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
8584 return false;
8585
8586 if (s1->seqid == s2->seqid)
8587 return true;
8588 if (s1->seqid == 0 || s2->seqid == 0)
8589 return true;
8590
8591 return false;
8592 }
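nfs41_match_stateid above treats the 12-byte "other" field as the identity of the state and the 32-bit seqid as its version: two stateids match when "other" is identical and either the seqids are equal or one of them is zero, the zero seqid acting as the "any/current version" wildcard of NFSv4.1. A compact user-space sketch of the same rule, with a simplified stand-in for nfs4_stateid:

/*
 * Sketch of the NFSv4.1 stateid comparison above. "struct stateid" is
 * a simplified stand-in for nfs4_stateid (seqid + 12 opaque bytes);
 * the example values are invented.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct stateid {
	unsigned int seqid;
	unsigned char other[12];
};

static bool match_stateid(const struct stateid *s1, const struct stateid *s2)
{
	if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
		return false;				/* different state entirely */
	return s1->seqid == s2->seqid ||		/* same version, or ...     */
	       s1->seqid == 0 || s2->seqid == 0;	/* ... wildcard "current"   */
}

int main(void)
{
	struct stateid a = { .seqid = 3, .other = "openstate01" };
	struct stateid b = { .seqid = 0, .other = "openstate01" };

	printf("match: %s\n", match_stateid(&a, &b) ? "yes" : "no");
	return 0;
}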
8593
8594 #endif /* CONFIG_NFS_V4_1 */
8595
8596 static bool nfs4_match_stateid(const nfs4_stateid *s1,
8597 const nfs4_stateid *s2)
8598 {
8599 return nfs4_stateid_match(s1, s2);
8600 }
8601
8602
8603 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
8604 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
8605 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
8606 .recover_open = nfs4_open_reclaim,
8607 .recover_lock = nfs4_lock_reclaim,
8608 .establish_clid = nfs4_init_clientid,
8609 .detect_trunking = nfs40_discover_server_trunking,
8610 };
8611
8612 #if defined(CONFIG_NFS_V4_1)
8613 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
8614 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
8615 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
8616 .recover_open = nfs4_open_reclaim,
8617 .recover_lock = nfs4_lock_reclaim,
8618 .establish_clid = nfs41_init_clientid,
8619 .reclaim_complete = nfs41_proc_reclaim_complete,
8620 .detect_trunking = nfs41_discover_server_trunking,
8621 };
8622 #endif /* CONFIG_NFS_V4_1 */
8623
8624 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
8625 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
8626 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
8627 .recover_open = nfs40_open_expired,
8628 .recover_lock = nfs4_lock_expired,
8629 .establish_clid = nfs4_init_clientid,
8630 };
8631
8632 #if defined(CONFIG_NFS_V4_1)
8633 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
8634 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
8635 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
8636 .recover_open = nfs41_open_expired,
8637 .recover_lock = nfs41_lock_expired,
8638 .establish_clid = nfs41_init_clientid,
8639 };
8640 #endif /* CONFIG_NFS_V4_1 */
8641
8642 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
8643 .sched_state_renewal = nfs4_proc_async_renew,
8644 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
8645 .renew_lease = nfs4_proc_renew,
8646 };
8647
8648 #if defined(CONFIG_NFS_V4_1)
8649 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
8650 .sched_state_renewal = nfs41_proc_async_sequence,
8651 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
8652 .renew_lease = nfs4_proc_sequence,
8653 };
8654 #endif
8655
8656 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
8657 .get_locations = _nfs40_proc_get_locations,
8658 .fsid_present = _nfs40_proc_fsid_present,
8659 };
8660
8661 #if defined(CONFIG_NFS_V4_1)
8662 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
8663 .get_locations = _nfs41_proc_get_locations,
8664 .fsid_present = _nfs41_proc_fsid_present,
8665 };
8666 #endif /* CONFIG_NFS_V4_1 */
8667
8668 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
8669 .minor_version = 0,
8670 .init_caps = NFS_CAP_READDIRPLUS
8671 | NFS_CAP_ATOMIC_OPEN
8672 | NFS_CAP_POSIX_LOCK,
8673 .init_client = nfs40_init_client,
8674 .shutdown_client = nfs40_shutdown_client,
8675 .match_stateid = nfs4_match_stateid,
8676 .find_root_sec = nfs4_find_root_sec,
8677 .free_lock_state = nfs4_release_lockowner,
8678 .alloc_seqid = nfs_alloc_seqid,
8679 .call_sync_ops = &nfs40_call_sync_ops,
8680 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
8681 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
8682 .state_renewal_ops = &nfs40_state_renewal_ops,
8683 .mig_recovery_ops = &nfs40_mig_recovery_ops,
8684 };
8685
8686 #if defined(CONFIG_NFS_V4_1)
8687 static struct nfs_seqid *
8688 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
8689 {
8690 return NULL;
8691 }
8692
8693 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
8694 .minor_version = 1,
8695 .init_caps = NFS_CAP_READDIRPLUS
8696 | NFS_CAP_ATOMIC_OPEN
8697 | NFS_CAP_POSIX_LOCK
8698 | NFS_CAP_STATEID_NFSV41
8699 | NFS_CAP_ATOMIC_OPEN_V1,
8700 .init_client = nfs41_init_client,
8701 .shutdown_client = nfs41_shutdown_client,
8702 .match_stateid = nfs41_match_stateid,
8703 .find_root_sec = nfs41_find_root_sec,
8704 .free_lock_state = nfs41_free_lock_state,
8705 .alloc_seqid = nfs_alloc_no_seqid,
8706 .call_sync_ops = &nfs41_call_sync_ops,
8707 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
8708 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
8709 .state_renewal_ops = &nfs41_state_renewal_ops,
8710 .mig_recovery_ops = &nfs41_mig_recovery_ops,
8711 };
8712 #endif
8713
8714 #if defined(CONFIG_NFS_V4_2)
8715 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
8716 .minor_version = 2,
8717 .init_caps = NFS_CAP_READDIRPLUS
8718 | NFS_CAP_ATOMIC_OPEN
8719 | NFS_CAP_POSIX_LOCK
8720 | NFS_CAP_STATEID_NFSV41
8721 | NFS_CAP_ATOMIC_OPEN_V1
8722 | NFS_CAP_ALLOCATE
8723 | NFS_CAP_DEALLOCATE
8724 | NFS_CAP_SEEK
8725 | NFS_CAP_LAYOUTSTATS
8726 | NFS_CAP_CLONE,
8727 .init_client = nfs41_init_client,
8728 .shutdown_client = nfs41_shutdown_client,
8729 .match_stateid = nfs41_match_stateid,
8730 .find_root_sec = nfs41_find_root_sec,
8731 .free_lock_state = nfs41_free_lock_state,
8732 .call_sync_ops = &nfs41_call_sync_ops,
8733 .alloc_seqid = nfs_alloc_no_seqid,
8734 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
8735 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
8736 .state_renewal_ops = &nfs41_state_renewal_ops,
8737 .mig_recovery_ops = &nfs41_mig_recovery_ops,
8738 };
8739 #endif
8740
8741 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
8742 [0] = &nfs_v4_0_minor_ops,
8743 #if defined(CONFIG_NFS_V4_1)
8744 [1] = &nfs_v4_1_minor_ops,
8745 #endif
8746 #if defined(CONFIG_NFS_V4_2)
8747 [2] = &nfs_v4_2_minor_ops,
8748 #endif
8749 };
8750
8751 static const struct inode_operations nfs4_dir_inode_operations = {
8752 .create = nfs_create,
8753 .lookup = nfs_lookup,
8754 .atomic_open = nfs_atomic_open,
8755 .link = nfs_link,
8756 .unlink = nfs_unlink,
8757 .symlink = nfs_symlink,
8758 .mkdir = nfs_mkdir,
8759 .rmdir = nfs_rmdir,
8760 .mknod = nfs_mknod,
8761 .rename = nfs_rename,
8762 .permission = nfs_permission,
8763 .getattr = nfs_getattr,
8764 .setattr = nfs_setattr,
8765 .getxattr = generic_getxattr,
8766 .setxattr = generic_setxattr,
8767 .listxattr = generic_listxattr,
8768 .removexattr = generic_removexattr,
8769 };
8770
8771 static const struct inode_operations nfs4_file_inode_operations = {
8772 .permission = nfs_permission,
8773 .getattr = nfs_getattr,
8774 .setattr = nfs_setattr,
8775 .getxattr = generic_getxattr,
8776 .setxattr = generic_setxattr,
8777 .listxattr = generic_listxattr,
8778 .removexattr = generic_removexattr,
8779 };
8780
8781 const struct nfs_rpc_ops nfs_v4_clientops = {
8782 .version = 4, /* protocol version */
8783 .dentry_ops = &nfs4_dentry_operations,
8784 .dir_inode_ops = &nfs4_dir_inode_operations,
8785 .file_inode_ops = &nfs4_file_inode_operations,
8786 .file_ops = &nfs4_file_operations,
8787 .getroot = nfs4_proc_get_root,
8788 .submount = nfs4_submount,
8789 .try_mount = nfs4_try_mount,
8790 .getattr = nfs4_proc_getattr,
8791 .setattr = nfs4_proc_setattr,
8792 .lookup = nfs4_proc_lookup,
8793 .access = nfs4_proc_access,
8794 .readlink = nfs4_proc_readlink,
8795 .create = nfs4_proc_create,
8796 .remove = nfs4_proc_remove,
8797 .unlink_setup = nfs4_proc_unlink_setup,
8798 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
8799 .unlink_done = nfs4_proc_unlink_done,
8800 .rename_setup = nfs4_proc_rename_setup,
8801 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
8802 .rename_done = nfs4_proc_rename_done,
8803 .link = nfs4_proc_link,
8804 .symlink = nfs4_proc_symlink,
8805 .mkdir = nfs4_proc_mkdir,
8806 .rmdir = nfs4_proc_remove,
8807 .readdir = nfs4_proc_readdir,
8808 .mknod = nfs4_proc_mknod,
8809 .statfs = nfs4_proc_statfs,
8810 .fsinfo = nfs4_proc_fsinfo,
8811 .pathconf = nfs4_proc_pathconf,
8812 .set_capabilities = nfs4_server_capabilities,
8813 .decode_dirent = nfs4_decode_dirent,
8814 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
8815 .read_setup = nfs4_proc_read_setup,
8816 .read_done = nfs4_read_done,
8817 .write_setup = nfs4_proc_write_setup,
8818 .write_done = nfs4_write_done,
8819 .commit_setup = nfs4_proc_commit_setup,
8820 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
8821 .commit_done = nfs4_commit_done,
8822 .lock = nfs4_proc_lock,
8823 .clear_acl_cache = nfs4_zap_acl_attr,
8824 .close_context = nfs4_close_context,
8825 .open_context = nfs4_atomic_open,
8826 .have_delegation = nfs4_have_delegation,
8827 .return_delegation = nfs4_inode_return_delegation,
8828 .alloc_client = nfs4_alloc_client,
8829 .init_client = nfs4_init_client,
8830 .free_client = nfs4_free_client,
8831 .create_server = nfs4_create_server,
8832 .clone_server = nfs_clone_server,
8833 };
8834
8835 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
8836 .prefix = XATTR_NAME_NFSV4_ACL,
8837 .list = nfs4_xattr_list_nfs4_acl,
8838 .get = nfs4_xattr_get_nfs4_acl,
8839 .set = nfs4_xattr_set_nfs4_acl,
8840 };
8841
8842 const struct xattr_handler *nfs4_xattr_handlers[] = {
8843 &nfs4_xattr_nfs4_acl_handler,
8844 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
8845 &nfs4_xattr_nfs4_label_handler,
8846 #endif
8847 NULL
8848 };
8849
8850 /*
8851 * Local variables:
8852 * c-basic-offset: 8
8853 * End:
8854 */