1 /*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/file.h>
42 #include <linux/string.h>
43 #include <linux/ratelimit.h>
44 #include <linux/printk.h>
45 #include <linux/slab.h>
46 #include <linux/sunrpc/clnt.h>
47 #include <linux/nfs.h>
48 #include <linux/nfs4.h>
49 #include <linux/nfs_fs.h>
50 #include <linux/nfs_page.h>
51 #include <linux/nfs_mount.h>
52 #include <linux/namei.h>
53 #include <linux/mount.h>
54 #include <linux/module.h>
55 #include <linux/xattr.h>
56 #include <linux/utsname.h>
57 #include <linux/freezer.h>
58
59 #include "nfs4_fs.h"
60 #include "delegation.h"
61 #include "internal.h"
62 #include "iostat.h"
63 #include "callback.h"
64 #include "pnfs.h"
65 #include "netns.h"
66 #include "nfs4idmap.h"
67 #include "nfs4session.h"
68 #include "fscache.h"
69
70 #include "nfs4trace.h"
71
72 #define NFSDBG_FACILITY NFSDBG_PROC
73
74 #define NFS4_POLL_RETRY_MIN (HZ/10)
75 #define NFS4_POLL_RETRY_MAX (15*HZ)
76
77 struct nfs4_opendata;
78 static int _nfs4_proc_open(struct nfs4_opendata *data);
79 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
80 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
81 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
82 static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *label);
83 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label);
84 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
85 struct nfs_fattr *fattr, struct iattr *sattr,
86 struct nfs4_state *state, struct nfs4_label *ilabel,
87 struct nfs4_label *olabel);
88 #ifdef CONFIG_NFS_V4_1
89 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
90 struct rpc_cred *);
91 static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
92 struct rpc_cred *);
93 #endif
94
95 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
96 static inline struct nfs4_label *
97 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
98 struct iattr *sattr, struct nfs4_label *label)
99 {
100 int err;
101
102 if (label == NULL)
103 return NULL;
104
105 if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
106 return NULL;
107
108 err = security_dentry_init_security(dentry, sattr->ia_mode,
109 &dentry->d_name, (void **)&label->label, &label->len);
110 if (err == 0)
111 return label;
112
113 return NULL;
114 }
115 static inline void
116 nfs4_label_release_security(struct nfs4_label *label)
117 {
118 if (label)
119 security_release_secctx(label->label, label->len);
120 }
121 static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
122 {
123 if (label)
124 return server->attr_bitmask;
125
126 return server->attr_bitmask_nl;
127 }
128 #else
129 static inline struct nfs4_label *
130 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
131 struct iattr *sattr, struct nfs4_label *l)
132 { return NULL; }
133 static inline void
134 nfs4_label_release_security(struct nfs4_label *label)
135 { return; }
136 static inline u32 *
137 nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
138 { return server->attr_bitmask; }
139 #endif
140
141 /* Prevent leaks of NFSv4 errors into userland */
142 static int nfs4_map_errors(int err)
143 {
144 if (err >= -1000)
145 return err;
146 switch (err) {
147 case -NFS4ERR_RESOURCE:
148 case -NFS4ERR_LAYOUTTRYLATER:
149 case -NFS4ERR_RECALLCONFLICT:
150 return -EREMOTEIO;
151 case -NFS4ERR_WRONGSEC:
152 case -NFS4ERR_WRONG_CRED:
153 return -EPERM;
154 case -NFS4ERR_BADOWNER:
155 case -NFS4ERR_BADNAME:
156 return -EINVAL;
157 case -NFS4ERR_SHARE_DENIED:
158 return -EACCES;
159 case -NFS4ERR_MINOR_VERS_MISMATCH:
160 return -EPROTONOSUPPORT;
161 case -NFS4ERR_FILE_OPEN:
162 return -EBUSY;
163 default:
164 dprintk("%s could not handle NFSv4 error %d\n",
165 __func__, -err);
166 break;
167 }
168 return -EIO;
169 }
170
171 /*
172 * This is our standard bitmap for GETATTR requests.
173 */
174 const u32 nfs4_fattr_bitmap[3] = {
175 FATTR4_WORD0_TYPE
176 | FATTR4_WORD0_CHANGE
177 | FATTR4_WORD0_SIZE
178 | FATTR4_WORD0_FSID
179 | FATTR4_WORD0_FILEID,
180 FATTR4_WORD1_MODE
181 | FATTR4_WORD1_NUMLINKS
182 | FATTR4_WORD1_OWNER
183 | FATTR4_WORD1_OWNER_GROUP
184 | FATTR4_WORD1_RAWDEV
185 | FATTR4_WORD1_SPACE_USED
186 | FATTR4_WORD1_TIME_ACCESS
187 | FATTR4_WORD1_TIME_METADATA
188 | FATTR4_WORD1_TIME_MODIFY
189 | FATTR4_WORD1_MOUNTED_ON_FILEID,
190 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
191 FATTR4_WORD2_SECURITY_LABEL
192 #endif
193 };
194
195 static const u32 nfs4_pnfs_open_bitmap[3] = {
196 FATTR4_WORD0_TYPE
197 | FATTR4_WORD0_CHANGE
198 | FATTR4_WORD0_SIZE
199 | FATTR4_WORD0_FSID
200 | FATTR4_WORD0_FILEID,
201 FATTR4_WORD1_MODE
202 | FATTR4_WORD1_NUMLINKS
203 | FATTR4_WORD1_OWNER
204 | FATTR4_WORD1_OWNER_GROUP
205 | FATTR4_WORD1_RAWDEV
206 | FATTR4_WORD1_SPACE_USED
207 | FATTR4_WORD1_TIME_ACCESS
208 | FATTR4_WORD1_TIME_METADATA
209 | FATTR4_WORD1_TIME_MODIFY,
210 FATTR4_WORD2_MDSTHRESHOLD
211 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
212 | FATTR4_WORD2_SECURITY_LABEL
213 #endif
214 };
215
216 static const u32 nfs4_open_noattr_bitmap[3] = {
217 FATTR4_WORD0_TYPE
218 | FATTR4_WORD0_CHANGE
219 | FATTR4_WORD0_FILEID,
220 };
221
222 const u32 nfs4_statfs_bitmap[3] = {
223 FATTR4_WORD0_FILES_AVAIL
224 | FATTR4_WORD0_FILES_FREE
225 | FATTR4_WORD0_FILES_TOTAL,
226 FATTR4_WORD1_SPACE_AVAIL
227 | FATTR4_WORD1_SPACE_FREE
228 | FATTR4_WORD1_SPACE_TOTAL
229 };
230
231 const u32 nfs4_pathconf_bitmap[3] = {
232 FATTR4_WORD0_MAXLINK
233 | FATTR4_WORD0_MAXNAME,
234 0
235 };
236
237 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
238 | FATTR4_WORD0_MAXREAD
239 | FATTR4_WORD0_MAXWRITE
240 | FATTR4_WORD0_LEASE_TIME,
241 FATTR4_WORD1_TIME_DELTA
242 | FATTR4_WORD1_FS_LAYOUT_TYPES,
243 FATTR4_WORD2_LAYOUT_BLKSIZE
244 | FATTR4_WORD2_CLONE_BLKSIZE
245 };
246
247 const u32 nfs4_fs_locations_bitmap[3] = {
248 FATTR4_WORD0_TYPE
249 | FATTR4_WORD0_CHANGE
250 | FATTR4_WORD0_SIZE
251 | FATTR4_WORD0_FSID
252 | FATTR4_WORD0_FILEID
253 | FATTR4_WORD0_FS_LOCATIONS,
254 FATTR4_WORD1_MODE
255 | FATTR4_WORD1_NUMLINKS
256 | FATTR4_WORD1_OWNER
257 | FATTR4_WORD1_OWNER_GROUP
258 | FATTR4_WORD1_RAWDEV
259 | FATTR4_WORD1_SPACE_USED
260 | FATTR4_WORD1_TIME_ACCESS
261 | FATTR4_WORD1_TIME_METADATA
262 | FATTR4_WORD1_TIME_MODIFY
263 | FATTR4_WORD1_MOUNTED_ON_FILEID,
264 };
265
266 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
267 struct nfs4_readdir_arg *readdir)
268 {
269 __be32 *start, *p;
270
271 if (cookie > 2) {
272 readdir->cookie = cookie;
273 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
274 return;
275 }
276
277 readdir->cookie = 0;
278 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
279 if (cookie == 2)
280 return;
281
282 /*
283 * NFSv4 servers do not return entries for '.' and '..'.
284 * Therefore, we fake these entries here. We let '.'
285 * have cookie 0 and '..' have cookie 1. Note that
286 * when talking to the server, we always send cookie 0
287 * instead of 1 or 2.
288 */
289 start = p = kmap_atomic(*readdir->pages);
290
291 if (cookie == 0) {
292 *p++ = xdr_one; /* next */
293 *p++ = xdr_zero; /* cookie, first word */
294 *p++ = xdr_one; /* cookie, second word */
295 *p++ = xdr_one; /* entry len */
296 memcpy(p, ".\0\0\0", 4); /* entry */
297 p++;
298 *p++ = xdr_one; /* bitmap length */
299 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
300 *p++ = htonl(8); /* attribute buffer length */
301 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
302 }
303
304 *p++ = xdr_one; /* next */
305 *p++ = xdr_zero; /* cookie, first word */
306 *p++ = xdr_two; /* cookie, second word */
307 *p++ = xdr_two; /* entry len */
308 memcpy(p, "..\0\0", 4); /* entry */
309 p++;
310 *p++ = xdr_one; /* bitmap length */
311 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
312 *p++ = htonl(8); /* attribute buffer length */
313 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
314
315 readdir->pgbase = (char *)p - (char *)start;
316 readdir->count -= readdir->pgbase;
317 kunmap_atomic(start);
318 }
319
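/*
 * Return the next retry delay and double *timeout for exponential backoff,
 * clamped to [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX]. A NULL timeout
 * always yields the maximum delay.
 */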
320 static long nfs4_update_delay(long *timeout)
321 {
322 long ret;
323 if (!timeout)
324 return NFS4_POLL_RETRY_MAX;
325 if (*timeout <= 0)
326 *timeout = NFS4_POLL_RETRY_MIN;
327 if (*timeout > NFS4_POLL_RETRY_MAX)
328 *timeout = NFS4_POLL_RETRY_MAX;
329 ret = *timeout;
330 *timeout <<= 1;
331 return ret;
332 }
333
334 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
335 {
336 int res = 0;
337
338 might_sleep();
339
340 freezable_schedule_timeout_killable_unsafe(
341 nfs4_update_delay(timeout));
342 if (fatal_signal_pending(current))
343 res = -ERESTARTSYS;
344 return res;
345 }
346
347 /* Common NFSv4 error classifier shared by the synchronous and asynchronous
348 * error handlers: maps the error and sets exception->delay, ->recovering
349 * and ->retry so the caller knows how to proceed. */
350 static int nfs4_do_handle_exception(struct nfs_server *server,
351 int errorcode, struct nfs4_exception *exception)
352 {
353 struct nfs_client *clp = server->nfs_client;
354 struct nfs4_state *state = exception->state;
355 struct inode *inode = exception->inode;
356 int ret = errorcode;
357
358 exception->delay = 0;
359 exception->recovering = 0;
360 exception->retry = 0;
361 switch(errorcode) {
362 case 0:
363 return 0;
364 case -NFS4ERR_OPENMODE:
365 case -NFS4ERR_DELEG_REVOKED:
366 case -NFS4ERR_ADMIN_REVOKED:
367 case -NFS4ERR_BAD_STATEID:
368 if (inode && nfs_async_inode_return_delegation(inode,
369 NULL) == 0)
370 goto wait_on_recovery;
371 if (state == NULL)
372 break;
373 ret = nfs4_schedule_stateid_recovery(server, state);
374 if (ret < 0)
375 break;
376 goto wait_on_recovery;
377 case -NFS4ERR_EXPIRED:
378 if (state != NULL) {
379 ret = nfs4_schedule_stateid_recovery(server, state);
380 if (ret < 0)
381 break;
382 }
383 case -NFS4ERR_STALE_STATEID:
384 case -NFS4ERR_STALE_CLIENTID:
385 nfs4_schedule_lease_recovery(clp);
386 goto wait_on_recovery;
387 case -NFS4ERR_MOVED:
388 ret = nfs4_schedule_migration_recovery(server);
389 if (ret < 0)
390 break;
391 goto wait_on_recovery;
392 case -NFS4ERR_LEASE_MOVED:
393 nfs4_schedule_lease_moved_recovery(clp);
394 goto wait_on_recovery;
395 #if defined(CONFIG_NFS_V4_1)
396 case -NFS4ERR_BADSESSION:
397 case -NFS4ERR_BADSLOT:
398 case -NFS4ERR_BAD_HIGH_SLOT:
399 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
400 case -NFS4ERR_DEADSESSION:
401 case -NFS4ERR_SEQ_FALSE_RETRY:
402 case -NFS4ERR_SEQ_MISORDERED:
403 dprintk("%s ERROR: %d Reset session\n", __func__,
404 errorcode);
405 nfs4_schedule_session_recovery(clp->cl_session, errorcode);
406 goto wait_on_recovery;
407 #endif /* defined(CONFIG_NFS_V4_1) */
408 case -NFS4ERR_FILE_OPEN:
409 if (exception->timeout > HZ) {
410 /* We have retried a decent amount, time to
411 * fail
412 */
413 ret = -EBUSY;
414 break;
415 }
416 case -NFS4ERR_DELAY:
417 nfs_inc_server_stats(server, NFSIOS_DELAY);
418 case -NFS4ERR_GRACE:
419 exception->delay = 1;
420 return 0;
421
422 case -NFS4ERR_RETRY_UNCACHED_REP:
423 case -NFS4ERR_OLD_STATEID:
424 exception->retry = 1;
425 break;
426 case -NFS4ERR_BADOWNER:
427 /* The following works around a Linux server bug! */
428 case -NFS4ERR_BADNAME:
429 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
430 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
431 exception->retry = 1;
432 printk(KERN_WARNING "NFS: v4 server %s "
433 "does not accept raw "
434 "uid/gids. "
435 "Reenabling the idmapper.\n",
436 server->nfs_client->cl_hostname);
437 }
438 }
439 /* We failed to handle the error */
440 return nfs4_map_errors(ret);
441 wait_on_recovery:
442 exception->recovering = 1;
443 return 0;
444 }
445
446 /* This is the error handling routine for processes that are allowed
447 * to sleep.
448 */
449 int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
450 {
451 struct nfs_client *clp = server->nfs_client;
452 int ret;
453
454 ret = nfs4_do_handle_exception(server, errorcode, exception);
455 if (exception->delay) {
456 ret = nfs4_delay(server->client, &exception->timeout);
457 goto out_retry;
458 }
459 if (exception->recovering) {
460 ret = nfs4_wait_clnt_recover(clp);
461 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
462 return -EIO;
463 goto out_retry;
464 }
465 return ret;
466 out_retry:
467 if (ret == 0)
468 exception->retry = 1;
469 return ret;
470 }
471
472 static int
473 nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
474 int errorcode, struct nfs4_exception *exception)
475 {
476 struct nfs_client *clp = server->nfs_client;
477 int ret;
478
479 ret = nfs4_do_handle_exception(server, errorcode, exception);
480 if (exception->delay) {
481 rpc_delay(task, nfs4_update_delay(&exception->timeout));
482 goto out_retry;
483 }
484 if (exception->recovering) {
485 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
486 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
487 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
488 goto out_retry;
489 }
490 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
491 ret = -EIO;
492 return ret;
493 out_retry:
494 if (ret == 0)
495 exception->retry = 1;
496 return ret;
497 }
498
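/*
 * Error handler for asynchronous RPC callbacks: classifies the task status,
 * schedules any delay or state recovery that is needed, and returns -EAGAIN
 * when the caller should restart the call once recovery completes.
 */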
499 static int
500 nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
501 struct nfs4_state *state, long *timeout)
502 {
503 struct nfs4_exception exception = {
504 .state = state,
505 };
506
507 if (task->tk_status >= 0)
508 return 0;
509 if (timeout)
510 exception.timeout = *timeout;
511 task->tk_status = nfs4_async_handle_exception(task, server,
512 task->tk_status,
513 &exception);
514 if (exception.delay && timeout)
515 *timeout = exception.timeout;
516 if (exception.retry)
517 return -EAGAIN;
518 return 0;
519 }
520
521 /*
522 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
523 * or 'false' otherwise.
524 */
525 static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
526 {
527 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
528
529 if (flavor == RPC_AUTH_GSS_KRB5I ||
530 flavor == RPC_AUTH_GSS_KRB5P)
531 return true;
532
533 return false;
534 }
535
536 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
537 {
538 spin_lock(&clp->cl_lock);
539 if (time_before(clp->cl_last_renewal,timestamp))
540 clp->cl_last_renewal = timestamp;
541 spin_unlock(&clp->cl_lock);
542 }
543
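/*
 * NFSv4.0 lease renewal: when a session is in use (v4.1+) the lease is
 * renewed by the SEQUENCE op in nfs41_sequence_done(), so only record the
 * renewal here for session-less clients.
 */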
544 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
545 {
546 struct nfs_client *clp = server->nfs_client;
547
548 if (!nfs4_has_session(clp))
549 do_renew_lease(clp, timestamp);
550 }
551
552 struct nfs4_call_sync_data {
553 const struct nfs_server *seq_server;
554 struct nfs4_sequence_args *seq_args;
555 struct nfs4_sequence_res *seq_res;
556 };
557
558 void nfs4_init_sequence(struct nfs4_sequence_args *args,
559 struct nfs4_sequence_res *res, int cache_reply)
560 {
561 args->sa_slot = NULL;
562 args->sa_cache_this = cache_reply;
563 args->sa_privileged = 0;
564
565 res->sr_slot = NULL;
566 }
567
568 static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
569 {
570 args->sa_privileged = 1;
571 }
572
573 int nfs40_setup_sequence(struct nfs4_slot_table *tbl,
574 struct nfs4_sequence_args *args,
575 struct nfs4_sequence_res *res,
576 struct rpc_task *task)
577 {
578 struct nfs4_slot *slot;
579
580 /* slot already allocated? */
581 if (res->sr_slot != NULL)
582 goto out_start;
583
584 spin_lock(&tbl->slot_tbl_lock);
585 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
586 goto out_sleep;
587
588 slot = nfs4_alloc_slot(tbl);
589 if (IS_ERR(slot)) {
590 if (slot == ERR_PTR(-ENOMEM))
591 task->tk_timeout = HZ >> 2;
592 goto out_sleep;
593 }
594 spin_unlock(&tbl->slot_tbl_lock);
595
596 args->sa_slot = slot;
597 res->sr_slot = slot;
598
599 out_start:
600 rpc_call_start(task);
601 return 0;
602
603 out_sleep:
604 if (args->sa_privileged)
605 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
606 NULL, RPC_PRIORITY_PRIVILEGED);
607 else
608 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
609 spin_unlock(&tbl->slot_tbl_lock);
610 return -EAGAIN;
611 }
612 EXPORT_SYMBOL_GPL(nfs40_setup_sequence);
613
614 static int nfs40_sequence_done(struct rpc_task *task,
615 struct nfs4_sequence_res *res)
616 {
617 struct nfs4_slot *slot = res->sr_slot;
618 struct nfs4_slot_table *tbl;
619
620 if (slot == NULL)
621 goto out;
622
623 tbl = slot->table;
624 spin_lock(&tbl->slot_tbl_lock);
625 if (!nfs41_wake_and_assign_slot(tbl, slot))
626 nfs4_free_slot(tbl, slot);
627 spin_unlock(&tbl->slot_tbl_lock);
628
629 res->sr_slot = NULL;
630 out:
631 return 1;
632 }
633
634 #if defined(CONFIG_NFS_V4_1)
635
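/*
 * Return the slot to the session slot table, or hand it straight to a
 * waiting task. Once the table has drained completely, notify the server
 * if the last transmitted highest_used_slotid still exceeded its target,
 * so that it can shrink the slot table.
 */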
636 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
637 {
638 struct nfs4_session *session;
639 struct nfs4_slot_table *tbl;
640 struct nfs4_slot *slot = res->sr_slot;
641 bool send_new_highest_used_slotid = false;
642
643 tbl = slot->table;
644 session = tbl->session;
645
646 spin_lock(&tbl->slot_tbl_lock);
647 /* Be nice to the server: try to ensure that the last transmitted
648 * value for highest_used_slotid <= target_highest_slotid
649 */
650 if (tbl->highest_used_slotid > tbl->target_highest_slotid)
651 send_new_highest_used_slotid = true;
652
653 if (nfs41_wake_and_assign_slot(tbl, slot)) {
654 send_new_highest_used_slotid = false;
655 goto out_unlock;
656 }
657 nfs4_free_slot(tbl, slot);
658
659 if (tbl->highest_used_slotid != NFS4_NO_SLOT)
660 send_new_highest_used_slotid = false;
661 out_unlock:
662 spin_unlock(&tbl->slot_tbl_lock);
663 res->sr_slot = NULL;
664 if (send_new_highest_used_slotid)
665 nfs41_notify_server(session->clp);
666 }
667
668 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
669 {
670 struct nfs4_session *session;
671 struct nfs4_slot *slot = res->sr_slot;
672 struct nfs_client *clp;
673 bool interrupted = false;
674 int ret = 1;
675
676 if (slot == NULL)
677 goto out_noaction;
678 /* don't increment the sequence number if the task wasn't sent */
679 if (!RPC_WAS_SENT(task))
680 goto out;
681
682 session = slot->table->session;
683
684 if (slot->interrupted) {
685 slot->interrupted = 0;
686 interrupted = true;
687 }
688
689 trace_nfs4_sequence_done(session, res);
690 /* Check the SEQUENCE operation status */
691 switch (res->sr_status) {
692 case 0:
693 /* Update the slot's sequence and clientid lease timer */
694 ++slot->seq_nr;
695 clp = session->clp;
696 do_renew_lease(clp, res->sr_timestamp);
697 /* Check sequence flags */
698 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
699 nfs41_update_target_slotid(slot->table, slot, res);
700 break;
701 case 1:
702 /*
703 * sr_status remains 1 if an RPC level error occurred.
704 * The server may or may not have processed the sequence
705 * operation.
706 * Mark the slot as having hosted an interrupted RPC call.
707 */
708 slot->interrupted = 1;
709 goto out;
710 case -NFS4ERR_DELAY:
711 /* The server detected a resend of the RPC call and
712 * returned NFS4ERR_DELAY as per Section 2.10.6.2
713 * of RFC5661.
714 */
715 dprintk("%s: slot=%u seq=%u: Operation in progress\n",
716 __func__,
717 slot->slot_nr,
718 slot->seq_nr);
719 goto out_retry;
720 case -NFS4ERR_BADSLOT:
721 /*
722 * The slot id we used was probably retired. Try again
723 * using a different slot id.
724 */
725 goto retry_nowait;
726 case -NFS4ERR_SEQ_MISORDERED:
727 /*
728 * Was the last operation on this sequence interrupted?
729 * If so, retry after bumping the sequence number.
730 */
731 if (interrupted) {
732 ++slot->seq_nr;
733 goto retry_nowait;
734 }
735 /*
736 * Could this slot have been previously retired?
737 * If so, then the server may be expecting seq_nr = 1!
738 */
739 if (slot->seq_nr != 1) {
740 slot->seq_nr = 1;
741 goto retry_nowait;
742 }
743 break;
744 case -NFS4ERR_SEQ_FALSE_RETRY:
745 ++slot->seq_nr;
746 goto retry_nowait;
747 default:
748 /* Just update the slot sequence no. */
749 ++slot->seq_nr;
750 }
751 out:
752 /* The session may be reset by one of the error handlers. */
753 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
754 nfs41_sequence_free_slot(res);
755 out_noaction:
756 return ret;
757 retry_nowait:
758 if (rpc_restart_call_prepare(task)) {
759 task->tk_status = 0;
760 ret = 0;
761 }
762 goto out;
763 out_retry:
764 if (!rpc_restart_call(task))
765 goto out;
766 rpc_delay(task, NFS4_POLL_RETRY_MAX);
767 return 0;
768 }
769 EXPORT_SYMBOL_GPL(nfs41_sequence_done);
770
771 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
772 {
773 if (res->sr_slot == NULL)
774 return 1;
775 if (!res->sr_slot->table->session)
776 return nfs40_sequence_done(task, res);
777 return nfs41_sequence_done(task, res);
778 }
779 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
780
781 int nfs41_setup_sequence(struct nfs4_session *session,
782 struct nfs4_sequence_args *args,
783 struct nfs4_sequence_res *res,
784 struct rpc_task *task)
785 {
786 struct nfs4_slot *slot;
787 struct nfs4_slot_table *tbl;
788
789 dprintk("--> %s\n", __func__);
790 /* slot already allocated? */
791 if (res->sr_slot != NULL)
792 goto out_success;
793
794 tbl = &session->fc_slot_table;
795
796 task->tk_timeout = 0;
797
798 spin_lock(&tbl->slot_tbl_lock);
799 if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) &&
800 !args->sa_privileged) {
801 /* The state manager will wait until the slot table is empty */
802 dprintk("%s session is draining\n", __func__);
803 goto out_sleep;
804 }
805
806 slot = nfs4_alloc_slot(tbl);
807 if (IS_ERR(slot)) {
808 /* If out of memory, try again in 1/4 second */
809 if (slot == ERR_PTR(-ENOMEM))
810 task->tk_timeout = HZ >> 2;
811 dprintk("<-- %s: no free slots\n", __func__);
812 goto out_sleep;
813 }
814 spin_unlock(&tbl->slot_tbl_lock);
815
816 args->sa_slot = slot;
817
818 dprintk("<-- %s slotid=%u seqid=%u\n", __func__,
819 slot->slot_nr, slot->seq_nr);
820
821 res->sr_slot = slot;
822 res->sr_timestamp = jiffies;
823 res->sr_status_flags = 0;
824 /*
825 * sr_status is only set in decode_sequence, and so will remain
826 * set to 1 if an rpc level failure occurs.
827 */
828 res->sr_status = 1;
829 trace_nfs4_setup_sequence(session, args);
830 out_success:
831 rpc_call_start(task);
832 return 0;
833 out_sleep:
834 /* Privileged tasks are queued with top priority */
835 if (args->sa_privileged)
836 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
837 NULL, RPC_PRIORITY_PRIVILEGED);
838 else
839 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
840 spin_unlock(&tbl->slot_tbl_lock);
841 return -EAGAIN;
842 }
843 EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
844
845 static int nfs4_setup_sequence(const struct nfs_server *server,
846 struct nfs4_sequence_args *args,
847 struct nfs4_sequence_res *res,
848 struct rpc_task *task)
849 {
850 struct nfs4_session *session = nfs4_get_session(server);
851 int ret = 0;
852
853 if (!session)
854 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
855 args, res, task);
856
857 dprintk("--> %s clp %p session %p sr_slot %u\n",
858 __func__, session->clp, session, res->sr_slot ?
859 res->sr_slot->slot_nr : NFS4_NO_SLOT);
860
861 ret = nfs41_setup_sequence(session, args, res, task);
862
863 dprintk("<-- %s status=%d\n", __func__, ret);
864 return ret;
865 }
866
867 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
868 {
869 struct nfs4_call_sync_data *data = calldata;
870 struct nfs4_session *session = nfs4_get_session(data->seq_server);
871
872 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
873
874 nfs41_setup_sequence(session, data->seq_args, data->seq_res, task);
875 }
876
877 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
878 {
879 struct nfs4_call_sync_data *data = calldata;
880
881 nfs41_sequence_done(task, data->seq_res);
882 }
883
884 static const struct rpc_call_ops nfs41_call_sync_ops = {
885 .rpc_call_prepare = nfs41_call_sync_prepare,
886 .rpc_call_done = nfs41_call_sync_done,
887 };
888
889 #else /* !CONFIG_NFS_V4_1 */
890
891 static int nfs4_setup_sequence(const struct nfs_server *server,
892 struct nfs4_sequence_args *args,
893 struct nfs4_sequence_res *res,
894 struct rpc_task *task)
895 {
896 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
897 args, res, task);
898 }
899
900 int nfs4_sequence_done(struct rpc_task *task,
901 struct nfs4_sequence_res *res)
902 {
903 return nfs40_sequence_done(task, res);
904 }
905 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
906
907 #endif /* !CONFIG_NFS_V4_1 */
908
909 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
910 {
911 struct nfs4_call_sync_data *data = calldata;
912 nfs4_setup_sequence(data->seq_server,
913 data->seq_args, data->seq_res, task);
914 }
915
916 static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
917 {
918 struct nfs4_call_sync_data *data = calldata;
919 nfs4_sequence_done(task, data->seq_res);
920 }
921
922 static const struct rpc_call_ops nfs40_call_sync_ops = {
923 .rpc_call_prepare = nfs40_call_sync_prepare,
924 .rpc_call_done = nfs40_call_sync_done,
925 };
926
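/*
 * Run a synchronous NFSv4 call through the RPC task machinery so that the
 * minor version's call_sync_ops wrap it with SEQUENCE setup and completion
 * handling; the task runs to completion because RPC_TASK_ASYNC is not set.
 */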
927 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
928 struct nfs_server *server,
929 struct rpc_message *msg,
930 struct nfs4_sequence_args *args,
931 struct nfs4_sequence_res *res)
932 {
933 int ret;
934 struct rpc_task *task;
935 struct nfs_client *clp = server->nfs_client;
936 struct nfs4_call_sync_data data = {
937 .seq_server = server,
938 .seq_args = args,
939 .seq_res = res,
940 };
941 struct rpc_task_setup task_setup = {
942 .rpc_client = clnt,
943 .rpc_message = msg,
944 .callback_ops = clp->cl_mvops->call_sync_ops,
945 .callback_data = &data
946 };
947
948 task = rpc_run_task(&task_setup);
949 if (IS_ERR(task))
950 ret = PTR_ERR(task);
951 else {
952 ret = task->tk_status;
953 rpc_put_task(task);
954 }
955 return ret;
956 }
957
958 int nfs4_call_sync(struct rpc_clnt *clnt,
959 struct nfs_server *server,
960 struct rpc_message *msg,
961 struct nfs4_sequence_args *args,
962 struct nfs4_sequence_res *res,
963 int cache_reply)
964 {
965 nfs4_init_sequence(args, res, cache_reply);
966 return nfs4_call_sync_sequence(clnt, server, msg, args, res);
967 }
968
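/*
 * Apply the change_info4 returned by a directory-modifying operation:
 * invalidate cached attributes and readdir data, and force a lookup
 * revalidation if the change was not atomic or an intermediate change
 * was missed.
 */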
969 static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
970 {
971 struct nfs_inode *nfsi = NFS_I(dir);
972
973 spin_lock(&dir->i_lock);
974 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
975 if (!cinfo->atomic || cinfo->before != dir->i_version)
976 nfs_force_lookup_revalidate(dir);
977 dir->i_version = cinfo->after;
978 nfsi->attr_gencount = nfs_inc_attr_generation_counter();
979 nfs_fscache_invalidate(dir);
980 spin_unlock(&dir->i_lock);
981 }
982
983 struct nfs4_opendata {
984 struct kref kref;
985 struct nfs_openargs o_arg;
986 struct nfs_openres o_res;
987 struct nfs_open_confirmargs c_arg;
988 struct nfs_open_confirmres c_res;
989 struct nfs4_string owner_name;
990 struct nfs4_string group_name;
991 struct nfs4_label *a_label;
992 struct nfs_fattr f_attr;
993 struct nfs4_label *f_label;
994 struct dentry *dir;
995 struct dentry *dentry;
996 struct nfs4_state_owner *owner;
997 struct nfs4_state *state;
998 struct iattr attrs;
999 unsigned long timestamp;
1000 unsigned int rpc_done : 1;
1001 unsigned int file_created : 1;
1002 unsigned int is_recover : 1;
1003 int rpc_status;
1004 int cancelled;
1005 };
1006
1007 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
1008 int err, struct nfs4_exception *exception)
1009 {
1010 if (err != -EINVAL)
1011 return false;
1012 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1013 return false;
1014 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
1015 exception->retry = 1;
1016 return true;
1017 }
1018
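/*
 * Translate the VFS open mode and flags into NFSv4 share_access bits.
 * On servers with v4.1 atomic-open support, also request that no
 * delegation be handed out for O_DIRECT opens.
 */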
1019 static u32
1020 nfs4_map_atomic_open_share(struct nfs_server *server,
1021 fmode_t fmode, int openflags)
1022 {
1023 u32 res = 0;
1024
1025 switch (fmode & (FMODE_READ | FMODE_WRITE)) {
1026 case FMODE_READ:
1027 res = NFS4_SHARE_ACCESS_READ;
1028 break;
1029 case FMODE_WRITE:
1030 res = NFS4_SHARE_ACCESS_WRITE;
1031 break;
1032 case FMODE_READ|FMODE_WRITE:
1033 res = NFS4_SHARE_ACCESS_BOTH;
1034 }
1035 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1036 goto out;
1037 /* Want no delegation if we're using O_DIRECT */
1038 if (openflags & O_DIRECT)
1039 res |= NFS4_SHARE_WANT_NO_DELEG;
1040 out:
1041 return res;
1042 }
1043
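/*
 * Downgrade NFSv4.1-only claim types to their NFSv4.0 equivalents when
 * the server does not advertise NFS_CAP_ATOMIC_OPEN_V1.
 */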
1044 static enum open_claim_type4
1045 nfs4_map_atomic_open_claim(struct nfs_server *server,
1046 enum open_claim_type4 claim)
1047 {
1048 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
1049 return claim;
1050 switch (claim) {
1051 default:
1052 return claim;
1053 case NFS4_OPEN_CLAIM_FH:
1054 return NFS4_OPEN_CLAIM_NULL;
1055 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1056 return NFS4_OPEN_CLAIM_DELEGATE_CUR;
1057 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1058 return NFS4_OPEN_CLAIM_DELEGATE_PREV;
1059 }
1060 }
1061
1062 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
1063 {
1064 p->o_res.f_attr = &p->f_attr;
1065 p->o_res.f_label = p->f_label;
1066 p->o_res.seqid = p->o_arg.seqid;
1067 p->c_res.seqid = p->c_arg.seqid;
1068 p->o_res.server = p->o_arg.server;
1069 p->o_res.access_request = p->o_arg.access;
1070 nfs_fattr_init(&p->f_attr);
1071 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
1072 }
1073
1074 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1075 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
1076 const struct iattr *attrs,
1077 struct nfs4_label *label,
1078 enum open_claim_type4 claim,
1079 gfp_t gfp_mask)
1080 {
1081 struct dentry *parent = dget_parent(dentry);
1082 struct inode *dir = d_inode(parent);
1083 struct nfs_server *server = NFS_SERVER(dir);
1084 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
1085 struct nfs4_opendata *p;
1086
1087 p = kzalloc(sizeof(*p), gfp_mask);
1088 if (p == NULL)
1089 goto err;
1090
1091 p->f_label = nfs4_label_alloc(server, gfp_mask);
1092 if (IS_ERR(p->f_label))
1093 goto err_free_p;
1094
1095 p->a_label = nfs4_label_alloc(server, gfp_mask);
1096 if (IS_ERR(p->a_label))
1097 goto err_free_f;
1098
1099 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1100 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1101 if (IS_ERR(p->o_arg.seqid))
1102 goto err_free_label;
1103 nfs_sb_active(dentry->d_sb);
1104 p->dentry = dget(dentry);
1105 p->dir = parent;
1106 p->owner = sp;
1107 atomic_inc(&sp->so_count);
1108 p->o_arg.open_flags = flags;
1109 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
1110 p->o_arg.share_access = nfs4_map_atomic_open_share(server,
1111 fmode, flags);
1112 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
1113 * will return permission denied for all bits until close */
1114 if (!(flags & O_EXCL)) {
1115 /* ask server to check for all possible rights as results
1116 * are cached */
1117 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
1118 NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
1119 }
1120 p->o_arg.clientid = server->nfs_client->cl_clientid;
1121 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
1122 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
1123 p->o_arg.name = &dentry->d_name;
1124 p->o_arg.server = server;
1125 p->o_arg.bitmask = nfs4_bitmask(server, label);
1126 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1127 p->o_arg.label = nfs4_label_copy(p->a_label, label);
1128 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1129 switch (p->o_arg.claim) {
1130 case NFS4_OPEN_CLAIM_NULL:
1131 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1132 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1133 p->o_arg.fh = NFS_FH(dir);
1134 break;
1135 case NFS4_OPEN_CLAIM_PREVIOUS:
1136 case NFS4_OPEN_CLAIM_FH:
1137 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1138 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1139 p->o_arg.fh = NFS_FH(d_inode(dentry));
1140 }
1141 if (attrs != NULL && attrs->ia_valid != 0) {
1142 __u32 verf[2];
1143
1144 p->o_arg.u.attrs = &p->attrs;
1145 memcpy(&p->attrs, attrs, sizeof(p->attrs));
1146
1147 verf[0] = jiffies;
1148 verf[1] = current->pid;
1149 memcpy(p->o_arg.u.verifier.data, verf,
1150 sizeof(p->o_arg.u.verifier.data));
1151 }
1152 p->c_arg.fh = &p->o_res.fh;
1153 p->c_arg.stateid = &p->o_res.stateid;
1154 p->c_arg.seqid = p->o_arg.seqid;
1155 nfs4_init_opendata_res(p);
1156 kref_init(&p->kref);
1157 return p;
1158
1159 err_free_label:
1160 nfs4_label_free(p->a_label);
1161 err_free_f:
1162 nfs4_label_free(p->f_label);
1163 err_free_p:
1164 kfree(p);
1165 err:
1166 dput(parent);
1167 return NULL;
1168 }
1169
1170 static void nfs4_opendata_free(struct kref *kref)
1171 {
1172 struct nfs4_opendata *p = container_of(kref,
1173 struct nfs4_opendata, kref);
1174 struct super_block *sb = p->dentry->d_sb;
1175
1176 nfs_free_seqid(p->o_arg.seqid);
1177 if (p->state != NULL)
1178 nfs4_put_open_state(p->state);
1179 nfs4_put_state_owner(p->owner);
1180
1181 nfs4_label_free(p->a_label);
1182 nfs4_label_free(p->f_label);
1183
1184 dput(p->dir);
1185 dput(p->dentry);
1186 nfs_sb_deactive(sb);
1187 nfs_fattr_free_names(&p->f_attr);
1188 kfree(p->f_attr.mdsthreshold);
1189 kfree(p);
1190 }
1191
1192 static void nfs4_opendata_put(struct nfs4_opendata *p)
1193 {
1194 if (p != NULL)
1195 kref_put(&p->kref, nfs4_opendata_free);
1196 }
1197
1198 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
1199 {
1200 int ret;
1201
1202 ret = rpc_wait_for_completion_task(task);
1203 return ret;
1204 }
1205
1206 static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
1207 fmode_t fmode)
1208 {
1209 switch(fmode & (FMODE_READ|FMODE_WRITE)) {
1210 case FMODE_READ|FMODE_WRITE:
1211 return state->n_rdwr != 0;
1212 case FMODE_WRITE:
1213 return state->n_wronly != 0;
1214 case FMODE_READ:
1215 return state->n_rdonly != 0;
1216 }
1217 WARN_ON_ONCE(1);
1218 return false;
1219 }
1220
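/*
 * Return non-zero if an existing open stateid already covers the requested
 * mode, so that a new OPEN RPC can be avoided. Never true for O_EXCL or
 * O_TRUNC opens.
 */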
1221 static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
1222 {
1223 int ret = 0;
1224
1225 if (open_mode & (O_EXCL|O_TRUNC))
1226 goto out;
1227 switch (mode & (FMODE_READ|FMODE_WRITE)) {
1228 case FMODE_READ:
1229 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
1230 && state->n_rdonly != 0;
1231 break;
1232 case FMODE_WRITE:
1233 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
1234 && state->n_wronly != 0;
1235 break;
1236 case FMODE_READ|FMODE_WRITE:
1237 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
1238 && state->n_rdwr != 0;
1239 }
1240 out:
1241 return ret;
1242 }
1243
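/*
 * Return 1 if the cached delegation covers the requested open mode and is
 * usable for this claim type, in which case the open can be satisfied
 * locally; the delegation is marked as referenced so it is not treated
 * as unused.
 */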
1244 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
1245 enum open_claim_type4 claim)
1246 {
1247 if (delegation == NULL)
1248 return 0;
1249 if ((delegation->type & fmode) != fmode)
1250 return 0;
1251 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
1252 return 0;
1253 switch (claim) {
1254 case NFS4_OPEN_CLAIM_NULL:
1255 case NFS4_OPEN_CLAIM_FH:
1256 break;
1257 case NFS4_OPEN_CLAIM_PREVIOUS:
1258 if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
1259 break;
1260 default:
1261 return 0;
1262 }
1263 nfs_mark_delegation_referenced(delegation);
1264 return 1;
1265 }
1266
1267 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
1268 {
1269 switch (fmode) {
1270 case FMODE_WRITE:
1271 state->n_wronly++;
1272 break;
1273 case FMODE_READ:
1274 state->n_rdonly++;
1275 break;
1276 case FMODE_READ|FMODE_WRITE:
1277 state->n_rdwr++;
1278 }
1279 nfs4_state_set_mode_locked(state, state->state | fmode);
1280 }
1281
1282 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
1283 {
1284 struct nfs_client *clp = state->owner->so_server->nfs_client;
1285 bool need_recover = false;
1286
1287 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
1288 need_recover = true;
1289 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
1290 need_recover = true;
1291 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
1292 need_recover = true;
1293 if (need_recover)
1294 nfs4_state_mark_reclaim_nograce(clp, state);
1295 }
1296
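/*
 * Decide whether the incoming stateid should replace state->open_stateid:
 * yes on first use of the state, when the stateid's "other" field changes,
 * or when its seqid is newer than the cached copy.
 */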
1297 static bool nfs_need_update_open_stateid(struct nfs4_state *state,
1298 nfs4_stateid *stateid)
1299 {
1300 if (test_and_set_bit(NFS_OPEN_STATE, &state->flags) == 0)
1301 return true;
1302 if (!nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1303 nfs_test_and_clear_all_open_stateid(state);
1304 return true;
1305 }
1306 if (nfs4_stateid_is_newer(stateid, &state->open_stateid))
1307 return true;
1308 return false;
1309 }
1310
1311 static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1312 {
1313 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
1314 return;
1315 if (state->n_wronly)
1316 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1317 if (state->n_rdonly)
1318 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1319 if (state->n_rdwr)
1320 set_bit(NFS_O_RDWR_STATE, &state->flags);
1321 set_bit(NFS_OPEN_STATE, &state->flags);
1322 }
1323
1324 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1325 nfs4_stateid *arg_stateid,
1326 nfs4_stateid *stateid, fmode_t fmode)
1327 {
1328 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1329 switch (fmode & (FMODE_READ|FMODE_WRITE)) {
1330 case FMODE_WRITE:
1331 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1332 break;
1333 case FMODE_READ:
1334 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1335 break;
1336 case 0:
1337 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1338 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1339 clear_bit(NFS_OPEN_STATE, &state->flags);
1340 }
1341 if (stateid == NULL)
1342 return;
1343 /* Handle races with OPEN */
1344 if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) ||
1345 (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
1346 !nfs4_stateid_is_newer(stateid, &state->open_stateid))) {
1347 nfs_resync_open_stateid_locked(state);
1348 return;
1349 }
1350 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1351 nfs4_stateid_copy(&state->stateid, stateid);
1352 nfs4_stateid_copy(&state->open_stateid, stateid);
1353 }
1354
1355 static void nfs_clear_open_stateid(struct nfs4_state *state,
1356 nfs4_stateid *arg_stateid,
1357 nfs4_stateid *stateid, fmode_t fmode)
1358 {
1359 write_seqlock(&state->seqlock);
1360 nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode);
1361 write_sequnlock(&state->seqlock);
1362 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1363 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1364 }
1365
1366 static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
1367 {
1368 switch (fmode) {
1369 case FMODE_READ:
1370 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1371 break;
1372 case FMODE_WRITE:
1373 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1374 break;
1375 case FMODE_READ|FMODE_WRITE:
1376 set_bit(NFS_O_RDWR_STATE, &state->flags);
1377 }
1378 if (!nfs_need_update_open_stateid(state, stateid))
1379 return;
1380 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1381 nfs4_stateid_copy(&state->stateid, stateid);
1382 nfs4_stateid_copy(&state->open_stateid, stateid);
1383 }
1384
1385 static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
1386 {
1387 /*
1388 * Protect the call to nfs4_state_set_mode_locked and
1389 * serialise the stateid update
1390 */
1391 spin_lock(&state->owner->so_lock);
1392 write_seqlock(&state->seqlock);
1393 if (deleg_stateid != NULL) {
1394 nfs4_stateid_copy(&state->stateid, deleg_stateid);
1395 set_bit(NFS_DELEGATED_STATE, &state->flags);
1396 }
1397 if (open_stateid != NULL)
1398 nfs_set_open_stateid_locked(state, open_stateid, fmode);
1399 write_sequnlock(&state->seqlock);
1400 update_open_stateflags(state, fmode);
1401 spin_unlock(&state->owner->so_lock);
1402 }
1403
1404 static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
1405 {
1406 struct nfs_inode *nfsi = NFS_I(state->inode);
1407 struct nfs_delegation *deleg_cur;
1408 int ret = 0;
1409
1410 fmode &= (FMODE_READ|FMODE_WRITE);
1411
1412 rcu_read_lock();
1413 deleg_cur = rcu_dereference(nfsi->delegation);
1414 if (deleg_cur == NULL)
1415 goto no_delegation;
1416
1417 spin_lock(&deleg_cur->lock);
1418 if (rcu_dereference(nfsi->delegation) != deleg_cur ||
1419 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
1420 (deleg_cur->type & fmode) != fmode)
1421 goto no_delegation_unlock;
1422
1423 if (delegation == NULL)
1424 delegation = &deleg_cur->stateid;
1425 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
1426 goto no_delegation_unlock;
1427
1428 nfs_mark_delegation_referenced(deleg_cur);
1429 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
1430 ret = 1;
1431 no_delegation_unlock:
1432 spin_unlock(&deleg_cur->lock);
1433 no_delegation:
1434 rcu_read_unlock();
1435
1436 if (!ret && open_stateid != NULL) {
1437 __update_open_stateid(state, open_stateid, NULL, fmode);
1438 ret = 1;
1439 }
1440 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1441 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1442
1443 return ret;
1444 }
1445
1446 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
1447 const nfs4_stateid *stateid)
1448 {
1449 struct nfs4_state *state = lsp->ls_state;
1450 bool ret = false;
1451
1452 spin_lock(&state->state_lock);
1453 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
1454 goto out_noupdate;
1455 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
1456 goto out_noupdate;
1457 nfs4_stateid_copy(&lsp->ls_stateid, stateid);
1458 ret = true;
1459 out_noupdate:
1460 spin_unlock(&state->state_lock);
1461 return ret;
1462 }
1463
1464 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1465 {
1466 struct nfs_delegation *delegation;
1467
1468 rcu_read_lock();
1469 delegation = rcu_dereference(NFS_I(inode)->delegation);
1470 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1471 rcu_read_unlock();
1472 return;
1473 }
1474 rcu_read_unlock();
1475 nfs4_inode_return_delegation(inode);
1476 }
1477
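/*
 * Try to satisfy an open without sending an OPEN RPC: reuse the existing
 * open stateid if it already covers the requested mode, otherwise fall
 * back to a matching delegation. Returns ERR_PTR(-EAGAIN) when a real
 * OPEN call is required.
 */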
1478 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1479 {
1480 struct nfs4_state *state = opendata->state;
1481 struct nfs_inode *nfsi = NFS_I(state->inode);
1482 struct nfs_delegation *delegation;
1483 int open_mode = opendata->o_arg.open_flags;
1484 fmode_t fmode = opendata->o_arg.fmode;
1485 enum open_claim_type4 claim = opendata->o_arg.claim;
1486 nfs4_stateid stateid;
1487 int ret = -EAGAIN;
1488
1489 for (;;) {
1490 spin_lock(&state->owner->so_lock);
1491 if (can_open_cached(state, fmode, open_mode)) {
1492 update_open_stateflags(state, fmode);
1493 spin_unlock(&state->owner->so_lock);
1494 goto out_return_state;
1495 }
1496 spin_unlock(&state->owner->so_lock);
1497 rcu_read_lock();
1498 delegation = rcu_dereference(nfsi->delegation);
1499 if (!can_open_delegated(delegation, fmode, claim)) {
1500 rcu_read_unlock();
1501 break;
1502 }
1503 /* Save the delegation */
1504 nfs4_stateid_copy(&stateid, &delegation->stateid);
1505 rcu_read_unlock();
1506 nfs_release_seqid(opendata->o_arg.seqid);
1507 if (!opendata->is_recover) {
1508 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1509 if (ret != 0)
1510 goto out;
1511 }
1512 ret = -EAGAIN;
1513
1514 /* Try to update the stateid using the delegation */
1515 if (update_open_stateid(state, NULL, &stateid, fmode))
1516 goto out_return_state;
1517 }
1518 out:
1519 return ERR_PTR(ret);
1520 out_return_state:
1521 atomic_inc(&state->count);
1522 return state;
1523 }
1524
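/*
 * Record a delegation returned in the OPEN reply, choosing between setting
 * a new delegation and reclaiming one after a server reboot. Complain if a
 * broken server returns a delegation for a CLAIM_DELEGATE_CUR style open.
 */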
1525 static void
1526 nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1527 {
1528 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
1529 struct nfs_delegation *delegation;
1530 int delegation_flags = 0;
1531
1532 rcu_read_lock();
1533 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1534 if (delegation)
1535 delegation_flags = delegation->flags;
1536 rcu_read_unlock();
1537 switch (data->o_arg.claim) {
1538 default:
1539 break;
1540 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1541 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1542 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1543 "returning a delegation for "
1544 "OPEN(CLAIM_DELEGATE_CUR)\n",
1545 clp->cl_hostname);
1546 return;
1547 }
1548 if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1549 nfs_inode_set_delegation(state->inode,
1550 data->owner->so_cred,
1551 &data->o_res);
1552 else
1553 nfs_inode_reclaim_delegation(state->inode,
1554 data->owner->so_cred,
1555 &data->o_res);
1556 }
1557
1558 /*
1559 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
1560 * and update the nfs4_state.
1561 */
1562 static struct nfs4_state *
1563 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
1564 {
1565 struct inode *inode = data->state->inode;
1566 struct nfs4_state *state = data->state;
1567 int ret;
1568
1569 if (!data->rpc_done) {
1570 if (data->rpc_status) {
1571 ret = data->rpc_status;
1572 goto err;
1573 }
1574 /* cached opens have already been processed */
1575 goto update;
1576 }
1577
1578 ret = nfs_refresh_inode(inode, &data->f_attr);
1579 if (ret)
1580 goto err;
1581
1582 if (data->o_res.delegation_type != 0)
1583 nfs4_opendata_check_deleg(data, state);
1584 update:
1585 update_open_stateid(state, &data->o_res.stateid, NULL,
1586 data->o_arg.fmode);
1587 atomic_inc(&state->count);
1588
1589 return state;
1590 err:
1591 return ERR_PTR(ret);
1592
1593 }
1594
1595 static struct nfs4_state *
1596 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1597 {
1598 struct inode *inode;
1599 struct nfs4_state *state = NULL;
1600 int ret;
1601
1602 if (!data->rpc_done) {
1603 state = nfs4_try_open_cached(data);
1604 trace_nfs4_cached_open(data->state);
1605 goto out;
1606 }
1607
1608 ret = -EAGAIN;
1609 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1610 goto err;
1611 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr, data->f_label);
1612 ret = PTR_ERR(inode);
1613 if (IS_ERR(inode))
1614 goto err;
1615 ret = -ENOMEM;
1616 state = nfs4_get_open_state(inode, data->owner);
1617 if (state == NULL)
1618 goto err_put_inode;
1619 if (data->o_res.delegation_type != 0)
1620 nfs4_opendata_check_deleg(data, state);
1621 update_open_stateid(state, &data->o_res.stateid, NULL,
1622 data->o_arg.fmode);
1623 iput(inode);
1624 out:
1625 nfs_release_seqid(data->o_arg.seqid);
1626 return state;
1627 err_put_inode:
1628 iput(inode);
1629 err:
1630 return ERR_PTR(ret);
1631 }
1632
1633 static struct nfs4_state *
1634 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1635 {
1636 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
1637 return _nfs4_opendata_reclaim_to_nfs4_state(data);
1638 return _nfs4_opendata_to_nfs4_state(data);
1639 }
1640
1641 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1642 {
1643 struct nfs_inode *nfsi = NFS_I(state->inode);
1644 struct nfs_open_context *ctx;
1645
1646 spin_lock(&state->inode->i_lock);
1647 list_for_each_entry(ctx, &nfsi->open_files, list) {
1648 if (ctx->state != state)
1649 continue;
1650 get_nfs_open_context(ctx);
1651 spin_unlock(&state->inode->i_lock);
1652 return ctx;
1653 }
1654 spin_unlock(&state->inode->i_lock);
1655 return ERR_PTR(-ENOENT);
1656 }
1657
1658 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
1659 struct nfs4_state *state, enum open_claim_type4 claim)
1660 {
1661 struct nfs4_opendata *opendata;
1662
1663 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
1664 NULL, NULL, claim, GFP_NOFS);
1665 if (opendata == NULL)
1666 return ERR_PTR(-ENOMEM);
1667 opendata->state = state;
1668 atomic_inc(&state->count);
1669 return opendata;
1670 }
1671
1672 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
1673 fmode_t fmode)
1674 {
1675 struct nfs4_state *newstate;
1676 int ret;
1677
1678 if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
1679 return 0;
1680 opendata->o_arg.open_flags = 0;
1681 opendata->o_arg.fmode = fmode;
1682 opendata->o_arg.share_access = nfs4_map_atomic_open_share(
1683 NFS_SB(opendata->dentry->d_sb),
1684 fmode, 0);
1685 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1686 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1687 nfs4_init_opendata_res(opendata);
1688 ret = _nfs4_recover_proc_open(opendata);
1689 if (ret != 0)
1690 return ret;
1691 newstate = nfs4_opendata_to_nfs4_state(opendata);
1692 if (IS_ERR(newstate))
1693 return PTR_ERR(newstate);
1694 if (newstate != opendata->state)
1695 ret = -ESTALE;
1696 nfs4_close_state(newstate, fmode);
1697 return ret;
1698 }
1699
1700 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1701 {
1702 int ret;
1703
1704 /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
1705 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1706 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1707 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1708 /* memory barrier prior to reading state->n_* */
1709 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1710 clear_bit(NFS_OPEN_STATE, &state->flags);
1711 smp_rmb();
1712 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
1713 if (ret != 0)
1714 return ret;
1715 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
1716 if (ret != 0)
1717 return ret;
1718 ret = nfs4_open_recover_helper(opendata, FMODE_READ);
1719 if (ret != 0)
1720 return ret;
1721 /*
1722 * We may have performed cached opens for all three recoveries.
1723 * Check if we need to update the current stateid.
1724 */
1725 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1726 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
1727 write_seqlock(&state->seqlock);
1728 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1729 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1730 write_sequnlock(&state->seqlock);
1731 }
1732 return 0;
1733 }
1734
1735 /*
1736 * OPEN_RECLAIM:
1737 * reclaim state on the server after a reboot.
1738 */
1739 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1740 {
1741 struct nfs_delegation *delegation;
1742 struct nfs4_opendata *opendata;
1743 fmode_t delegation_type = 0;
1744 int status;
1745
1746 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1747 NFS4_OPEN_CLAIM_PREVIOUS);
1748 if (IS_ERR(opendata))
1749 return PTR_ERR(opendata);
1750 rcu_read_lock();
1751 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1752 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1753 delegation_type = delegation->type;
1754 rcu_read_unlock();
1755 opendata->o_arg.u.delegation_type = delegation_type;
1756 status = nfs4_open_recover(opendata, state);
1757 nfs4_opendata_put(opendata);
1758 return status;
1759 }
1760
1761 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1762 {
1763 struct nfs_server *server = NFS_SERVER(state->inode);
1764 struct nfs4_exception exception = { };
1765 int err;
1766 do {
1767 err = _nfs4_do_open_reclaim(ctx, state);
1768 trace_nfs4_open_reclaim(ctx, 0, err);
1769 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
1770 continue;
1771 if (err != -NFS4ERR_DELAY)
1772 break;
1773 nfs4_handle_exception(server, err, &exception);
1774 } while (exception.retry);
1775 return err;
1776 }
1777
1778 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
1779 {
1780 struct nfs_open_context *ctx;
1781 int ret;
1782
1783 ctx = nfs4_state_find_open_context(state);
1784 if (IS_ERR(ctx))
1785 return -EAGAIN;
1786 ret = nfs4_do_open_reclaim(ctx, state);
1787 put_nfs_open_context(ctx);
1788 return ret;
1789 }
1790
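/*
 * Map the result of a delegation-recall open onto a recovery action:
 * schedule session, lease, migration or stateid recovery as needed and
 * return -EAGAIN when the recall should be retried.
 */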
1791 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
1792 {
1793 switch (err) {
1794 default:
1795 printk(KERN_ERR "NFS: %s: unhandled error "
1796 "%d.\n", __func__, err);
1797 case 0:
1798 case -ENOENT:
1799 case -EAGAIN:
1800 case -ESTALE:
1801 break;
1802 case -NFS4ERR_BADSESSION:
1803 case -NFS4ERR_BADSLOT:
1804 case -NFS4ERR_BAD_HIGH_SLOT:
1805 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1806 case -NFS4ERR_DEADSESSION:
1807 set_bit(NFS_DELEGATED_STATE, &state->flags);
1808 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
1809 return -EAGAIN;
1810 case -NFS4ERR_STALE_CLIENTID:
1811 case -NFS4ERR_STALE_STATEID:
1812 set_bit(NFS_DELEGATED_STATE, &state->flags);
1813 case -NFS4ERR_EXPIRED:
1814 /* Don't recall a delegation if it was lost */
1815 nfs4_schedule_lease_recovery(server->nfs_client);
1816 return -EAGAIN;
1817 case -NFS4ERR_MOVED:
1818 nfs4_schedule_migration_recovery(server);
1819 return -EAGAIN;
1820 case -NFS4ERR_LEASE_MOVED:
1821 nfs4_schedule_lease_moved_recovery(server->nfs_client);
1822 return -EAGAIN;
1823 case -NFS4ERR_DELEG_REVOKED:
1824 case -NFS4ERR_ADMIN_REVOKED:
1825 case -NFS4ERR_BAD_STATEID:
1826 case -NFS4ERR_OPENMODE:
1827 nfs_inode_find_state_and_recover(state->inode,
1828 stateid);
1829 nfs4_schedule_stateid_recovery(server, state);
1830 return -EAGAIN;
1831 case -NFS4ERR_DELAY:
1832 case -NFS4ERR_GRACE:
1833 set_bit(NFS_DELEGATED_STATE, &state->flags);
1834 ssleep(1);
1835 return -EAGAIN;
1836 case -ENOMEM:
1837 case -NFS4ERR_DENIED:
1838 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
1839 return 0;
1840 }
1841 return err;
1842 }
1843
1844 int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
1845 struct nfs4_state *state, const nfs4_stateid *stateid,
1846 fmode_t type)
1847 {
1848 struct nfs_server *server = NFS_SERVER(state->inode);
1849 struct nfs4_opendata *opendata;
1850 int err = 0;
1851
1852 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1853 NFS4_OPEN_CLAIM_DELEG_CUR_FH);
1854 if (IS_ERR(opendata))
1855 return PTR_ERR(opendata);
1856 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
1857 write_seqlock(&state->seqlock);
1858 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1859 write_sequnlock(&state->seqlock);
1860 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1861 switch (type & (FMODE_READ|FMODE_WRITE)) {
1862 case FMODE_READ|FMODE_WRITE:
1863 case FMODE_WRITE:
1864 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
1865 if (err)
1866 break;
1867 err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
1868 if (err)
1869 break;
1870 case FMODE_READ:
1871 err = nfs4_open_recover_helper(opendata, FMODE_READ);
1872 }
1873 nfs4_opendata_put(opendata);
1874 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
1875 }
1876
1877 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
1878 {
1879 struct nfs4_opendata *data = calldata;
1880
1881 nfs40_setup_sequence(data->o_arg.server->nfs_client->cl_slot_tbl,
1882 &data->c_arg.seq_args, &data->c_res.seq_res, task);
1883 }
1884
1885 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1886 {
1887 struct nfs4_opendata *data = calldata;
1888
1889 nfs40_sequence_done(task, &data->c_res.seq_res);
1890
1891 data->rpc_status = task->tk_status;
1892 if (data->rpc_status == 0) {
1893 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
1894 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1895 renew_lease(data->o_res.server, data->timestamp);
1896 data->rpc_done = 1;
1897 }
1898 }
1899
1900 static void nfs4_open_confirm_release(void *calldata)
1901 {
1902 struct nfs4_opendata *data = calldata;
1903 struct nfs4_state *state = NULL;
1904
1905 /* If this request hasn't been cancelled, do nothing */
1906 if (data->cancelled == 0)
1907 goto out_free;
1908 /* In case of error, no cleanup! */
1909 if (!data->rpc_done)
1910 goto out_free;
1911 state = nfs4_opendata_to_nfs4_state(data);
1912 if (!IS_ERR(state))
1913 nfs4_close_state(state, data->o_arg.fmode);
1914 out_free:
1915 nfs4_opendata_put(data);
1916 }
1917
1918 static const struct rpc_call_ops nfs4_open_confirm_ops = {
1919 .rpc_call_prepare = nfs4_open_confirm_prepare,
1920 .rpc_call_done = nfs4_open_confirm_done,
1921 .rpc_release = nfs4_open_confirm_release,
1922 };
1923
1924 /*
1925 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
1926 */
1927 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
1928 {
1929 struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
1930 struct rpc_task *task;
1931 struct rpc_message msg = {
1932 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
1933 .rpc_argp = &data->c_arg,
1934 .rpc_resp = &data->c_res,
1935 .rpc_cred = data->owner->so_cred,
1936 };
1937 struct rpc_task_setup task_setup_data = {
1938 .rpc_client = server->client,
1939 .rpc_message = &msg,
1940 .callback_ops = &nfs4_open_confirm_ops,
1941 .callback_data = data,
1942 .workqueue = nfsiod_workqueue,
1943 .flags = RPC_TASK_ASYNC,
1944 };
1945 int status;
1946
1947 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1);
1948 kref_get(&data->kref);
1949 data->rpc_done = 0;
1950 data->rpc_status = 0;
1951 data->timestamp = jiffies;
1952 if (data->is_recover)
1953 nfs4_set_sequence_privileged(&data->c_arg.seq_args);
1954 task = rpc_run_task(&task_setup_data);
1955 if (IS_ERR(task))
1956 return PTR_ERR(task);
1957 status = nfs4_wait_for_completion_rpc_task(task);
1958 if (status != 0) {
1959 data->cancelled = 1;
1960 smp_wmb();
1961 } else
1962 status = data->rpc_status;
1963 rpc_put_task(task);
1964 return status;
1965 }
1966
1967 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1968 {
1969 struct nfs4_opendata *data = calldata;
1970 struct nfs4_state_owner *sp = data->owner;
1971 struct nfs_client *clp = sp->so_server->nfs_client;
1972 enum open_claim_type4 claim = data->o_arg.claim;
1973
1974 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
1975 goto out_wait;
1976 /*
1977 * Check if we still need to send an OPEN call, or if we can use
1978 * a delegation instead.
1979 */
1980 if (data->state != NULL) {
1981 struct nfs_delegation *delegation;
1982
1983 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
1984 goto out_no_action;
1985 rcu_read_lock();
1986 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
1987 if (can_open_delegated(delegation, data->o_arg.fmode, claim))
1988 goto unlock_no_action;
1989 rcu_read_unlock();
1990 }
1991 /* Update client id. */
1992 data->o_arg.clientid = clp->cl_clientid;
1993 switch (claim) {
1994 default:
1995 break;
1996 case NFS4_OPEN_CLAIM_PREVIOUS:
1997 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1998 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1999 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
2000 case NFS4_OPEN_CLAIM_FH:
2001 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
2002 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
2003 }
2004 data->timestamp = jiffies;
2005 if (nfs4_setup_sequence(data->o_arg.server,
2006 &data->o_arg.seq_args,
2007 &data->o_res.seq_res,
2008 task) != 0)
2009 nfs_release_seqid(data->o_arg.seqid);
2010
2011 /* Set the create mode (note dependency on the session type) */
2012 data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
2013 if (data->o_arg.open_flags & O_EXCL) {
2014 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
2015 if (nfs4_has_persistent_session(clp))
2016 data->o_arg.createmode = NFS4_CREATE_GUARDED;
2017 else if (clp->cl_mvops->minor_version > 0)
2018 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
2019 }
2020 return;
2021 unlock_no_action:
2022 trace_nfs4_cached_open(data->state);
2023 rcu_read_unlock();
2024 out_no_action:
2025 task->tk_action = NULL;
2026 out_wait:
2027 nfs4_sequence_done(task, &data->o_res.seq_res);
2028 }
2029
2030 static void nfs4_open_done(struct rpc_task *task, void *calldata)
2031 {
2032 struct nfs4_opendata *data = calldata;
2033
2034 data->rpc_status = task->tk_status;
2035
2036 if (!nfs4_sequence_done(task, &data->o_res.seq_res))
2037 return;
2038
2039 if (task->tk_status == 0) {
2040 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
2041 switch (data->o_res.f_attr->mode & S_IFMT) {
2042 case S_IFREG:
2043 break;
2044 case S_IFLNK:
2045 data->rpc_status = -ELOOP;
2046 break;
2047 case S_IFDIR:
2048 data->rpc_status = -EISDIR;
2049 break;
2050 default:
2051 data->rpc_status = -ENOTDIR;
2052 }
2053 }
2054 renew_lease(data->o_res.server, data->timestamp);
2055 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
2056 nfs_confirm_seqid(&data->owner->so_seqid, 0);
2057 }
2058 data->rpc_done = 1;
2059 }
2060
2061 static void nfs4_open_release(void *calldata)
2062 {
2063 struct nfs4_opendata *data = calldata;
2064 struct nfs4_state *state = NULL;
2065
2066 /* If this request hasn't been cancelled, do nothing */
2067 if (data->cancelled == 0)
2068 goto out_free;
2069 /* In case of error, no cleanup! */
2070 if (data->rpc_status != 0 || !data->rpc_done)
2071 goto out_free;
2072 /* In case we need an open_confirm, no cleanup! */
2073 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
2074 goto out_free;
2075 state = nfs4_opendata_to_nfs4_state(data);
2076 if (!IS_ERR(state))
2077 nfs4_close_state(state, data->o_arg.fmode);
2078 out_free:
2079 nfs4_opendata_put(data);
2080 }
2081
2082 static const struct rpc_call_ops nfs4_open_ops = {
2083 .rpc_call_prepare = nfs4_open_prepare,
2084 .rpc_call_done = nfs4_open_done,
2085 .rpc_release = nfs4_open_release,
2086 };
2087
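/*
 * Run the OPEN compound as an asynchronous RPC task and wait for it to
 * complete. If the wait is interrupted, mark the request as cancelled so
 * that nfs4_open_release() can clean up any state the server created.
 */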
2088 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
2089 {
2090 struct inode *dir = d_inode(data->dir);
2091 struct nfs_server *server = NFS_SERVER(dir);
2092 struct nfs_openargs *o_arg = &data->o_arg;
2093 struct nfs_openres *o_res = &data->o_res;
2094 struct rpc_task *task;
2095 struct rpc_message msg = {
2096 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
2097 .rpc_argp = o_arg,
2098 .rpc_resp = o_res,
2099 .rpc_cred = data->owner->so_cred,
2100 };
2101 struct rpc_task_setup task_setup_data = {
2102 .rpc_client = server->client,
2103 .rpc_message = &msg,
2104 .callback_ops = &nfs4_open_ops,
2105 .callback_data = data,
2106 .workqueue = nfsiod_workqueue,
2107 .flags = RPC_TASK_ASYNC,
2108 };
2109 int status;
2110
2111 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
2112 kref_get(&data->kref);
2113 data->rpc_done = 0;
2114 data->rpc_status = 0;
2115 data->cancelled = 0;
2116 data->is_recover = 0;
2117 if (isrecover) {
2118 nfs4_set_sequence_privileged(&o_arg->seq_args);
2119 data->is_recover = 1;
2120 }
2121 task = rpc_run_task(&task_setup_data);
2122 if (IS_ERR(task))
2123 return PTR_ERR(task);
2124 status = nfs4_wait_for_completion_rpc_task(task);
2125 if (status != 0) {
2126 data->cancelled = 1;
2127 smp_wmb();
2128 } else
2129 status = data->rpc_status;
2130 rpc_put_task(task);
2131
2132 return status;
2133 }
2134
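/*
 * Recovery variant of _nfs4_proc_open(): the OPEN is sent on a
 * privileged sequence slot and is followed by OPEN_CONFIRM whenever the
 * server requests confirmation.
 */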
2135 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
2136 {
2137 struct inode *dir = d_inode(data->dir);
2138 struct nfs_openres *o_res = &data->o_res;
2139 int status;
2140
2141 status = nfs4_run_open_task(data, 1);
2142 if (status != 0 || !data->rpc_done)
2143 return status;
2144
2145 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
2146
2147 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2148 status = _nfs4_proc_open_confirm(data);
2149 if (status != 0)
2150 return status;
2151 }
2152
2153 return status;
2154 }
2155
2156 /*
2157 * Additional permission checks in order to distinguish between an
2158 * open for read, and an open for execute. This works around the
2159 * fact that NFSv4 OPEN treats read and execute permissions as being
2160 * the same.
2161 * Note that in the non-execute case, we want to turn off permission
2162 * checking if we just created a new file (POSIX open() semantics).
2163 */
2164 static int nfs4_opendata_access(struct rpc_cred *cred,
2165 struct nfs4_opendata *opendata,
2166 struct nfs4_state *state, fmode_t fmode,
2167 int openflags)
2168 {
2169 struct nfs_access_entry cache;
2170 u32 mask;
2171
2172 /* access call failed or for some reason the server doesn't
2173 * support any access modes -- defer access call until later */
2174 if (opendata->o_res.access_supported == 0)
2175 return 0;
2176
2177 mask = 0;
2178 /*
2179 * Use openflags to check for exec, because fmode won't
2180 	 * always have FMODE_EXEC set when the file is opened for exec.
2181 */
2182 if (openflags & __FMODE_EXEC) {
2183 /* ONLY check for exec rights */
2184 mask = MAY_EXEC;
2185 } else if ((fmode & FMODE_READ) && !opendata->file_created)
2186 mask = MAY_READ;
2187
2188 cache.cred = cred;
2189 cache.jiffies = jiffies;
2190 nfs_access_set_mask(&cache, opendata->o_res.access_result);
2191 nfs_access_add_cache(state->inode, &cache);
2192
2193 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
2194 return 0;
2195
2196 /* even though OPEN succeeded, access is denied. Close the file */
2197 nfs4_close_state(state, fmode);
2198 return -EACCES;
2199 }
2200
2201 /*
2202 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
2203 */
2204 static int _nfs4_proc_open(struct nfs4_opendata *data)
2205 {
2206 struct inode *dir = d_inode(data->dir);
2207 struct nfs_server *server = NFS_SERVER(dir);
2208 struct nfs_openargs *o_arg = &data->o_arg;
2209 struct nfs_openres *o_res = &data->o_res;
2210 int status;
2211
2212 status = nfs4_run_open_task(data, 0);
2213 if (!data->rpc_done)
2214 return status;
2215 if (status != 0) {
2216 if (status == -NFS4ERR_BADNAME &&
2217 !(o_arg->open_flags & O_CREAT))
2218 return -ENOENT;
2219 return status;
2220 }
2221
2222 nfs_fattr_map_and_free_names(server, &data->f_attr);
2223
2224 if (o_arg->open_flags & O_CREAT) {
2225 update_changeattr(dir, &o_res->cinfo);
2226 if (o_arg->open_flags & O_EXCL)
2227 data->file_created = 1;
2228 else if (o_res->cinfo.before != o_res->cinfo.after)
2229 data->file_created = 1;
2230 }
2231 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
2232 server->caps &= ~NFS_CAP_POSIX_LOCK;
2233 	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2234 status = _nfs4_proc_open_confirm(data);
2235 if (status != 0)
2236 return status;
2237 }
2238 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
2239 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label);
2240 return 0;
2241 }
2242
2243 static int nfs4_recover_expired_lease(struct nfs_server *server)
2244 {
2245 return nfs4_client_recover_expired_lease(server->nfs_client);
2246 }
2247
2248 /*
2249 * OPEN_EXPIRED:
2250 * reclaim state on the server after a network partition.
2251 * Assumes caller holds the appropriate lock
2252 */
2253 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2254 {
2255 struct nfs4_opendata *opendata;
2256 int ret;
2257
2258 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2259 NFS4_OPEN_CLAIM_FH);
2260 if (IS_ERR(opendata))
2261 return PTR_ERR(opendata);
2262 ret = nfs4_open_recover(opendata, state);
2263 if (ret == -ESTALE)
2264 d_drop(ctx->dentry);
2265 nfs4_opendata_put(opendata);
2266 return ret;
2267 }
2268
2269 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2270 {
2271 struct nfs_server *server = NFS_SERVER(state->inode);
2272 struct nfs4_exception exception = { };
2273 int err;
2274
2275 do {
2276 err = _nfs4_open_expired(ctx, state);
2277 trace_nfs4_open_expired(ctx, 0, err);
2278 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2279 continue;
2280 switch (err) {
2281 default:
2282 goto out;
2283 case -NFS4ERR_GRACE:
2284 case -NFS4ERR_DELAY:
2285 nfs4_handle_exception(server, err, &exception);
2286 err = 0;
2287 }
2288 } while (exception.retry);
2289 out:
2290 return err;
2291 }
2292
2293 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2294 {
2295 struct nfs_open_context *ctx;
2296 int ret;
2297
2298 ctx = nfs4_state_find_open_context(state);
2299 if (IS_ERR(ctx))
2300 return -EAGAIN;
2301 ret = nfs4_do_open_expired(ctx, state);
2302 put_nfs_open_context(ctx);
2303 return ret;
2304 }
2305
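/*
 * The delegation stateid is no longer usable: discard the delegation
 * and fall back to the open stateid.
 */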
2306 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state)
2307 {
2308 nfs_remove_bad_delegation(state->inode);
2309 write_seqlock(&state->seqlock);
2310 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2311 write_sequnlock(&state->seqlock);
2312 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2313 }
2314
2315 static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
2316 {
2317 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
2318 nfs_finish_clear_delegation_stateid(state);
2319 }
2320
2321 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2322 {
2323 /* NFSv4.0 doesn't allow for delegation recovery on open expire */
2324 nfs40_clear_delegation_stateid(state);
2325 return nfs4_open_expired(sp, state);
2326 }
2327
2328 #if defined(CONFIG_NFS_V4_1)
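/*
 * Use TEST_STATEID to check whether the delegation stateid held for this
 * inode is still valid; if it is not, free it on the server (unless the
 * server says it is already unknown) and discard the delegation.
 */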
2329 static void nfs41_check_delegation_stateid(struct nfs4_state *state)
2330 {
2331 struct nfs_server *server = NFS_SERVER(state->inode);
2332 nfs4_stateid stateid;
2333 struct nfs_delegation *delegation;
2334 struct rpc_cred *cred;
2335 int status;
2336
2337 /* Get the delegation credential for use by test/free_stateid */
2338 rcu_read_lock();
2339 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2340 if (delegation == NULL) {
2341 rcu_read_unlock();
2342 return;
2343 }
2344
2345 nfs4_stateid_copy(&stateid, &delegation->stateid);
2346 cred = get_rpccred(delegation->cred);
2347 rcu_read_unlock();
2348 status = nfs41_test_stateid(server, &stateid, cred);
2349 trace_nfs4_test_delegation_stateid(state, NULL, status);
2350
2351 if (status != NFS_OK) {
2352 /* Free the stateid unless the server explicitly
2353 * informs us the stateid is unrecognized. */
2354 if (status != -NFS4ERR_BAD_STATEID)
2355 nfs41_free_stateid(server, &stateid, cred);
2356 nfs_finish_clear_delegation_stateid(state);
2357 }
2358
2359 put_rpccred(cred);
2360 }
2361
2362 /**
2363 * nfs41_check_open_stateid - possibly free an open stateid
2364 *
2365 * @state: NFSv4 state for an inode
2366 *
2367 * Returns NFS_OK if recovery for this stateid is now finished.
2368 * Otherwise a negative NFS4ERR value is returned.
2369 */
2370 static int nfs41_check_open_stateid(struct nfs4_state *state)
2371 {
2372 struct nfs_server *server = NFS_SERVER(state->inode);
2373 nfs4_stateid *stateid = &state->open_stateid;
2374 struct rpc_cred *cred = state->owner->so_cred;
2375 int status;
2376
2377 /* If a state reset has been done, test_stateid is unneeded */
2378 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) &&
2379 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) &&
2380 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0))
2381 return -NFS4ERR_BAD_STATEID;
2382
2383 status = nfs41_test_stateid(server, stateid, cred);
2384 trace_nfs4_test_open_stateid(state, NULL, status);
2385 if (status != NFS_OK) {
2386 /* Free the stateid unless the server explicitly
2387 * informs us the stateid is unrecognized. */
2388 if (status != -NFS4ERR_BAD_STATEID)
2389 nfs41_free_stateid(server, stateid, cred);
2390
2391 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2392 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2393 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2394 clear_bit(NFS_OPEN_STATE, &state->flags);
2395 }
2396 return status;
2397 }
2398
2399 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2400 {
2401 int status;
2402
2403 nfs41_check_delegation_stateid(state);
2404 status = nfs41_check_open_stateid(state);
2405 if (status != NFS_OK)
2406 status = nfs4_open_expired(sp, state);
2407 return status;
2408 }
2409 #endif
2410
2411 /*
2412  * On an EXCLUSIVE create, the server should send back a bitmask with the
2413  * FATTR4_* fields corresponding to the attributes that were used to store
2414  * the verifier. Make sure we clobber those fields in the later SETATTR call.
2415 */
2416 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
2417 struct iattr *sattr, struct nfs4_label **label)
2418 {
2419 const u32 *attrset = opendata->o_res.attrset;
2420
2421 if ((attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
2422 !(sattr->ia_valid & ATTR_ATIME_SET))
2423 sattr->ia_valid |= ATTR_ATIME;
2424
2425 if ((attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
2426 !(sattr->ia_valid & ATTR_MTIME_SET))
2427 sattr->ia_valid |= ATTR_MTIME;
2428
2429 	/* Except for MODE, setting an attribute twice is harmless. */
2430 if ((attrset[1] & FATTR4_WORD1_MODE))
2431 sattr->ia_valid &= ~ATTR_MODE;
2432
2433 if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL)
2434 *label = NULL;
2435 }
2436
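/*
 * Issue the OPEN call, turn the result into a referenced nfs4_state,
 * attach it to the dentry and open context, and perform the deferred
 * access check. If state reclaim raced with the open, stateid recovery
 * is scheduled.
 */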
2437 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
2438 fmode_t fmode,
2439 int flags,
2440 struct nfs_open_context *ctx)
2441 {
2442 struct nfs4_state_owner *sp = opendata->owner;
2443 struct nfs_server *server = sp->so_server;
2444 struct dentry *dentry;
2445 struct nfs4_state *state;
2446 unsigned int seq;
2447 int ret;
2448
2449 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
2450
2451 ret = _nfs4_proc_open(opendata);
2452 if (ret != 0)
2453 goto out;
2454
2455 state = nfs4_opendata_to_nfs4_state(opendata);
2456 ret = PTR_ERR(state);
2457 if (IS_ERR(state))
2458 goto out;
2459 if (server->caps & NFS_CAP_POSIX_LOCK)
2460 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
2461
2462 dentry = opendata->dentry;
2463 if (d_really_is_negative(dentry)) {
2464 struct dentry *alias;
2465 d_drop(dentry);
2466 alias = d_exact_alias(dentry, state->inode);
2467 if (!alias)
2468 alias = d_splice_alias(igrab(state->inode), dentry);
2469 /* d_splice_alias() can't fail here - it's a non-directory */
2470 if (alias) {
2471 dput(ctx->dentry);
2472 ctx->dentry = dentry = alias;
2473 }
2474 nfs_set_verifier(dentry,
2475 nfs_save_change_attribute(d_inode(opendata->dir)));
2476 }
2477
2478 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags);
2479 if (ret != 0)
2480 goto out;
2481
2482 ctx->state = state;
2483 if (d_inode(dentry) == state->inode) {
2484 nfs_inode_attach_open_context(ctx);
2485 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
2486 nfs4_schedule_stateid_recovery(server, state);
2487 }
2488 out:
2489 return ret;
2490 }
2491
2492 /*
2493 * Returns a referenced nfs4_state
2494 */
2495 static int _nfs4_do_open(struct inode *dir,
2496 struct nfs_open_context *ctx,
2497 int flags,
2498 struct iattr *sattr,
2499 struct nfs4_label *label,
2500 int *opened)
2501 {
2502 struct nfs4_state_owner *sp;
2503 struct nfs4_state *state = NULL;
2504 struct nfs_server *server = NFS_SERVER(dir);
2505 struct nfs4_opendata *opendata;
2506 struct dentry *dentry = ctx->dentry;
2507 struct rpc_cred *cred = ctx->cred;
2508 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
2509 fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
2510 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
2511 struct nfs4_label *olabel = NULL;
2512 int status;
2513
2514 /* Protect against reboot recovery conflicts */
2515 status = -ENOMEM;
2516 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
2517 if (sp == NULL) {
2518 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
2519 goto out_err;
2520 }
2521 status = nfs4_recover_expired_lease(server);
2522 if (status != 0)
2523 goto err_put_state_owner;
2524 if (d_really_is_positive(dentry))
2525 nfs4_return_incompatible_delegation(d_inode(dentry), fmode);
2526 status = -ENOMEM;
2527 if (d_really_is_positive(dentry))
2528 claim = NFS4_OPEN_CLAIM_FH;
2529 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr,
2530 label, claim, GFP_KERNEL);
2531 if (opendata == NULL)
2532 goto err_put_state_owner;
2533
2534 if (label) {
2535 olabel = nfs4_label_alloc(server, GFP_KERNEL);
2536 if (IS_ERR(olabel)) {
2537 status = PTR_ERR(olabel);
2538 goto err_opendata_put;
2539 }
2540 }
2541
2542 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
2543 if (!opendata->f_attr.mdsthreshold) {
2544 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
2545 if (!opendata->f_attr.mdsthreshold)
2546 goto err_free_label;
2547 }
2548 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
2549 }
2550 if (d_really_is_positive(dentry))
2551 opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
2552
2553 status = _nfs4_open_and_get_state(opendata, fmode, flags, ctx);
2554 if (status != 0)
2555 goto err_free_label;
2556 state = ctx->state;
2557
2558 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
2559 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
2560 nfs4_exclusive_attrset(opendata, sattr, &label);
2561
2562 nfs_fattr_init(opendata->o_res.f_attr);
2563 status = nfs4_do_setattr(state->inode, cred,
2564 opendata->o_res.f_attr, sattr,
2565 state, label, olabel);
2566 if (status == 0) {
2567 nfs_setattr_update_inode(state->inode, sattr,
2568 opendata->o_res.f_attr);
2569 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
2570 }
2571 }
2572 if (opened && opendata->file_created)
2573 *opened |= FILE_CREATED;
2574
2575 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
2576 *ctx_th = opendata->f_attr.mdsthreshold;
2577 opendata->f_attr.mdsthreshold = NULL;
2578 }
2579
2580 nfs4_label_free(olabel);
2581
2582 nfs4_opendata_put(opendata);
2583 nfs4_put_state_owner(sp);
2584 return 0;
2585 err_free_label:
2586 nfs4_label_free(olabel);
2587 err_opendata_put:
2588 nfs4_opendata_put(opendata);
2589 err_put_state_owner:
2590 nfs4_put_state_owner(sp);
2591 out_err:
2592 return status;
2593 }
2594
2595
2596 static struct nfs4_state *nfs4_do_open(struct inode *dir,
2597 struct nfs_open_context *ctx,
2598 int flags,
2599 struct iattr *sattr,
2600 struct nfs4_label *label,
2601 int *opened)
2602 {
2603 struct nfs_server *server = NFS_SERVER(dir);
2604 struct nfs4_exception exception = { };
2605 struct nfs4_state *res;
2606 int status;
2607
2608 do {
2609 status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened);
2610 res = ctx->state;
2611 trace_nfs4_open_file(ctx, flags, status);
2612 if (status == 0)
2613 break;
2614 /* NOTE: BAD_SEQID means the server and client disagree about the
2615 * book-keeping w.r.t. state-changing operations
2616 * (OPEN/CLOSE/LOCK/LOCKU...)
2617 * It is actually a sign of a bug on the client or on the server.
2618 *
2619 * If we receive a BAD_SEQID error in the particular case of
2620 * doing an OPEN, we assume that nfs_increment_open_seqid() will
2621 * have unhashed the old state_owner for us, and that we can
2622 * therefore safely retry using a new one. We should still warn
2623 * the user though...
2624 */
2625 if (status == -NFS4ERR_BAD_SEQID) {
2626 			pr_warn_ratelimited("NFS: v4 server %s "
2627 					"returned a bad sequence-id error!\n",
2628 NFS_SERVER(dir)->nfs_client->cl_hostname);
2629 exception.retry = 1;
2630 continue;
2631 }
2632 /*
2633 * BAD_STATEID on OPEN means that the server cancelled our
2634 * state before it received the OPEN_CONFIRM.
2635 * Recover by retrying the request as per the discussion
2636 * on Page 181 of RFC3530.
2637 */
2638 if (status == -NFS4ERR_BAD_STATEID) {
2639 exception.retry = 1;
2640 continue;
2641 }
2642 if (status == -EAGAIN) {
2643 /* We must have found a delegation */
2644 exception.retry = 1;
2645 continue;
2646 }
2647 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
2648 continue;
2649 res = ERR_PTR(nfs4_handle_exception(server,
2650 status, &exception));
2651 } while (exception.retry);
2652 return res;
2653 }
2654
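/*
 * Issue the SETATTR compound. A delegation stateid is preferred; for
 * size changes we fall back to a valid open/lock stateid, and for
 * everything else the zero stateid is used.
 */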
2655 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2656 struct nfs_fattr *fattr, struct iattr *sattr,
2657 struct nfs4_state *state, struct nfs4_label *ilabel,
2658 struct nfs4_label *olabel)
2659 {
2660 struct nfs_server *server = NFS_SERVER(inode);
2661 struct nfs_setattrargs arg = {
2662 .fh = NFS_FH(inode),
2663 .iap = sattr,
2664 .server = server,
2665 .bitmask = server->attr_bitmask,
2666 .label = ilabel,
2667 };
2668 struct nfs_setattrres res = {
2669 .fattr = fattr,
2670 .label = olabel,
2671 .server = server,
2672 };
2673 struct rpc_message msg = {
2674 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
2675 .rpc_argp = &arg,
2676 .rpc_resp = &res,
2677 .rpc_cred = cred,
2678 };
2679 unsigned long timestamp = jiffies;
2680 fmode_t fmode;
2681 bool truncate;
2682 int status;
2683
2684 arg.bitmask = nfs4_bitmask(server, ilabel);
2685 if (ilabel)
2686 arg.bitmask = nfs4_bitmask(server, olabel);
2687
2688 nfs_fattr_init(fattr);
2689
2690 /* Servers should only apply open mode checks for file size changes */
2691 truncate = (sattr->ia_valid & ATTR_SIZE) ? true : false;
2692 fmode = truncate ? FMODE_WRITE : FMODE_READ;
2693
2694 if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) {
2695 /* Use that stateid */
2696 } else if (truncate && state != NULL) {
2697 struct nfs_lockowner lockowner = {
2698 .l_owner = current->files,
2699 .l_pid = current->tgid,
2700 };
2701 if (!nfs4_valid_open_stateid(state))
2702 return -EBADF;
2703 if (nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
2704 &lockowner) == -EIO)
2705 return -EBADF;
2706 } else
2707 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
2708
2709 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2710 if (status == 0 && state != NULL)
2711 renew_lease(server, timestamp);
2712 trace_nfs4_setattr(inode, &arg.stateid, status);
2713 return status;
2714 }
2715
2716 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2717 struct nfs_fattr *fattr, struct iattr *sattr,
2718 struct nfs4_state *state, struct nfs4_label *ilabel,
2719 struct nfs4_label *olabel)
2720 {
2721 struct nfs_server *server = NFS_SERVER(inode);
2722 struct nfs4_exception exception = {
2723 .state = state,
2724 .inode = inode,
2725 };
2726 int err;
2727 do {
2728 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state, ilabel, olabel);
2729 switch (err) {
2730 case -NFS4ERR_OPENMODE:
2731 if (!(sattr->ia_valid & ATTR_SIZE)) {
2732 pr_warn_once("NFSv4: server %s is incorrectly "
2733 "applying open mode checks to "
2734 "a SETATTR that is not "
2735 "changing file size.\n",
2736 server->nfs_client->cl_hostname);
2737 }
2738 if (state && !(state->state & FMODE_WRITE)) {
2739 err = -EBADF;
2740 if (sattr->ia_valid & ATTR_OPEN)
2741 err = -EACCES;
2742 goto out;
2743 }
2744 }
2745 err = nfs4_handle_exception(server, err, &exception);
2746 } while (exception.retry);
2747 out:
2748 return err;
2749 }
2750
2751 static bool
2752 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
2753 {
2754 if (inode == NULL || !nfs_have_layout(inode))
2755 return false;
2756
2757 return pnfs_wait_on_layoutreturn(inode, task);
2758 }
2759
2760 struct nfs4_closedata {
2761 struct inode *inode;
2762 struct nfs4_state *state;
2763 struct nfs_closeargs arg;
2764 struct nfs_closeres res;
2765 struct nfs_fattr fattr;
2766 unsigned long timestamp;
2767 bool roc;
2768 u32 roc_barrier;
2769 };
2770
2771 static void nfs4_free_closedata(void *data)
2772 {
2773 struct nfs4_closedata *calldata = data;
2774 struct nfs4_state_owner *sp = calldata->state->owner;
2775 struct super_block *sb = calldata->state->inode->i_sb;
2776
2777 if (calldata->roc)
2778 pnfs_roc_release(calldata->state->inode);
2779 nfs4_put_open_state(calldata->state);
2780 nfs_free_seqid(calldata->arg.seqid);
2781 nfs4_put_state_owner(sp);
2782 nfs_sb_deactive(sb);
2783 kfree(calldata);
2784 }
2785
2786 static void nfs4_close_done(struct rpc_task *task, void *data)
2787 {
2788 struct nfs4_closedata *calldata = data;
2789 struct nfs4_state *state = calldata->state;
2790 struct nfs_server *server = NFS_SERVER(calldata->inode);
2791 nfs4_stateid *res_stateid = NULL;
2792
2793 dprintk("%s: begin!\n", __func__);
2794 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
2795 return;
2796 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
2797         /* We are done with the inode, and in the process of freeing
2798 	 * the state_owner. We keep this around so that we can process errors.
2799 */
2800 switch (task->tk_status) {
2801 case 0:
2802 res_stateid = &calldata->res.stateid;
2803 if (calldata->roc)
2804 pnfs_roc_set_barrier(state->inode,
2805 calldata->roc_barrier);
2806 renew_lease(server, calldata->timestamp);
2807 break;
2808 case -NFS4ERR_ADMIN_REVOKED:
2809 case -NFS4ERR_STALE_STATEID:
2810 case -NFS4ERR_OLD_STATEID:
2811 case -NFS4ERR_BAD_STATEID:
2812 case -NFS4ERR_EXPIRED:
2813 if (!nfs4_stateid_match(&calldata->arg.stateid,
2814 &state->open_stateid)) {
2815 rpc_restart_call_prepare(task);
2816 goto out_release;
2817 }
2818 if (calldata->arg.fmode == 0)
2819 break;
2820 default:
2821 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) {
2822 rpc_restart_call_prepare(task);
2823 goto out_release;
2824 }
2825 }
2826 nfs_clear_open_stateid(state, &calldata->arg.stateid,
2827 res_stateid, calldata->arg.fmode);
2828 out_release:
2829 nfs_release_seqid(calldata->arg.seqid);
2830 nfs_refresh_inode(calldata->inode, calldata->res.fattr);
2831 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
2832 }
2833
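/*
 * Work out which share access modes should remain once this context is
 * closed, and choose between OPEN_DOWNGRADE and CLOSE accordingly. If
 * nothing needs to change on the server, the RPC is skipped entirely.
 */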
2834 static void nfs4_close_prepare(struct rpc_task *task, void *data)
2835 {
2836 struct nfs4_closedata *calldata = data;
2837 struct nfs4_state *state = calldata->state;
2838 struct inode *inode = calldata->inode;
2839 bool is_rdonly, is_wronly, is_rdwr;
2840 int call_close = 0;
2841
2842 dprintk("%s: begin!\n", __func__);
2843 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
2844 goto out_wait;
2845
2846 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
2847 spin_lock(&state->owner->so_lock);
2848 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
2849 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
2850 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
2851 nfs4_stateid_copy(&calldata->arg.stateid, &state->open_stateid);
2852 /* Calculate the change in open mode */
2853 calldata->arg.fmode = 0;
2854 if (state->n_rdwr == 0) {
2855 if (state->n_rdonly == 0)
2856 call_close |= is_rdonly;
2857 else if (is_rdonly)
2858 calldata->arg.fmode |= FMODE_READ;
2859 if (state->n_wronly == 0)
2860 call_close |= is_wronly;
2861 else if (is_wronly)
2862 calldata->arg.fmode |= FMODE_WRITE;
2863 } else if (is_rdwr)
2864 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
2865
2866 if (calldata->arg.fmode == 0)
2867 call_close |= is_rdwr;
2868
2869 if (!nfs4_valid_open_stateid(state))
2870 call_close = 0;
2871 spin_unlock(&state->owner->so_lock);
2872
2873 if (!call_close) {
2874 /* Note: exit _without_ calling nfs4_close_done */
2875 goto out_no_action;
2876 }
2877
2878 if (nfs4_wait_on_layoutreturn(inode, task)) {
2879 nfs_release_seqid(calldata->arg.seqid);
2880 goto out_wait;
2881 }
2882
2883 if (calldata->arg.fmode == 0)
2884 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
2885 if (calldata->roc)
2886 pnfs_roc_get_barrier(inode, &calldata->roc_barrier);
2887
2888 calldata->arg.share_access =
2889 nfs4_map_atomic_open_share(NFS_SERVER(inode),
2890 calldata->arg.fmode, 0);
2891
2892 nfs_fattr_init(calldata->res.fattr);
2893 calldata->timestamp = jiffies;
2894 if (nfs4_setup_sequence(NFS_SERVER(inode),
2895 &calldata->arg.seq_args,
2896 &calldata->res.seq_res,
2897 task) != 0)
2898 nfs_release_seqid(calldata->arg.seqid);
2899 dprintk("%s: done!\n", __func__);
2900 return;
2901 out_no_action:
2902 task->tk_action = NULL;
2903 out_wait:
2904 nfs4_sequence_done(task, &calldata->res.seq_res);
2905 }
2906
2907 static const struct rpc_call_ops nfs4_close_ops = {
2908 .rpc_call_prepare = nfs4_close_prepare,
2909 .rpc_call_done = nfs4_close_done,
2910 .rpc_release = nfs4_free_closedata,
2911 };
2912
2913 static bool nfs4_roc(struct inode *inode)
2914 {
2915 if (!nfs_have_layout(inode))
2916 return false;
2917 return pnfs_roc(inode);
2918 }
2919
2920 /*
2921 * It is possible for data to be read/written from a mem-mapped file
2922 * after the sys_close call (which hits the vfs layer as a flush).
2923 * This means that we can't safely call nfsv4 close on a file until
2924 * the inode is cleared. This in turn means that we are not good
2925  * NFSv4 citizens - we do not tell the server to update the file's
2926  * share state even when we are done with one of the three share
2927  * stateids in the inode.
2928 *
2929 * NOTE: Caller must be holding the sp->so_owner semaphore!
2930 */
2931 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
2932 {
2933 struct nfs_server *server = NFS_SERVER(state->inode);
2934 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
2935 struct nfs4_closedata *calldata;
2936 struct nfs4_state_owner *sp = state->owner;
2937 struct rpc_task *task;
2938 struct rpc_message msg = {
2939 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
2940 .rpc_cred = state->owner->so_cred,
2941 };
2942 struct rpc_task_setup task_setup_data = {
2943 .rpc_client = server->client,
2944 .rpc_message = &msg,
2945 .callback_ops = &nfs4_close_ops,
2946 .workqueue = nfsiod_workqueue,
2947 .flags = RPC_TASK_ASYNC,
2948 };
2949 int status = -ENOMEM;
2950
2951 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
2952 &task_setup_data.rpc_client, &msg);
2953
2954 calldata = kzalloc(sizeof(*calldata), gfp_mask);
2955 if (calldata == NULL)
2956 goto out;
2957 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
2958 calldata->inode = state->inode;
2959 calldata->state = state;
2960 calldata->arg.fh = NFS_FH(state->inode);
2961 /* Serialization for the sequence id */
2962 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
2963 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
2964 if (IS_ERR(calldata->arg.seqid))
2965 goto out_free_calldata;
2966 calldata->arg.fmode = 0;
2967 calldata->arg.bitmask = server->cache_consistency_bitmask;
2968 calldata->res.fattr = &calldata->fattr;
2969 calldata->res.seqid = calldata->arg.seqid;
2970 calldata->res.server = server;
2971 calldata->roc = nfs4_roc(state->inode);
2972 nfs_sb_active(calldata->inode->i_sb);
2973
2974 msg.rpc_argp = &calldata->arg;
2975 msg.rpc_resp = &calldata->res;
2976 task_setup_data.callback_data = calldata;
2977 task = rpc_run_task(&task_setup_data);
2978 if (IS_ERR(task))
2979 return PTR_ERR(task);
2980 status = 0;
2981 if (wait)
2982 status = rpc_wait_for_completion_task(task);
2983 rpc_put_task(task);
2984 return status;
2985 out_free_calldata:
2986 kfree(calldata);
2987 out:
2988 nfs4_put_open_state(state);
2989 nfs4_put_state_owner(sp);
2990 return status;
2991 }
2992
2993 static struct inode *
2994 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
2995 int open_flags, struct iattr *attr, int *opened)
2996 {
2997 struct nfs4_state *state;
2998 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
2999
3000 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
3001
3002 /* Protect against concurrent sillydeletes */
3003 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
3004
3005 nfs4_label_release_security(label);
3006
3007 if (IS_ERR(state))
3008 return ERR_CAST(state);
3009 return state->inode;
3010 }
3011
3012 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
3013 {
3014 if (ctx->state == NULL)
3015 return;
3016 if (is_sync)
3017 nfs4_close_sync(ctx->state, ctx->mode);
3018 else
3019 nfs4_close_state(ctx->state, ctx->mode);
3020 }
3021
3022 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
3023 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
3024 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_SECURITY_LABEL - 1UL)
3025
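/*
 * Query the attributes the server supports for this filesystem and
 * translate them into NFS_CAP_* flags and the various attribute
 * bitmasks cached in the nfs_server.
 */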
3026 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3027 {
3028 u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion;
3029 struct nfs4_server_caps_arg args = {
3030 .fhandle = fhandle,
3031 .bitmask = bitmask,
3032 };
3033 struct nfs4_server_caps_res res = {};
3034 struct rpc_message msg = {
3035 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
3036 .rpc_argp = &args,
3037 .rpc_resp = &res,
3038 };
3039 int status;
3040
3041 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS |
3042 FATTR4_WORD0_FH_EXPIRE_TYPE |
3043 FATTR4_WORD0_LINK_SUPPORT |
3044 FATTR4_WORD0_SYMLINK_SUPPORT |
3045 FATTR4_WORD0_ACLSUPPORT;
3046 if (minorversion)
3047 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT;
3048
3049 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3050 if (status == 0) {
3051 /* Sanity check the server answers */
3052 switch (minorversion) {
3053 case 0:
3054 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
3055 res.attr_bitmask[2] = 0;
3056 break;
3057 case 1:
3058 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
3059 break;
3060 case 2:
3061 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
3062 }
3063 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
3064 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
3065 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
3066 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
3067 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
3068 NFS_CAP_CTIME|NFS_CAP_MTIME|
3069 NFS_CAP_SECURITY_LABEL);
3070 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
3071 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3072 server->caps |= NFS_CAP_ACLS;
3073 if (res.has_links != 0)
3074 server->caps |= NFS_CAP_HARDLINKS;
3075 if (res.has_symlinks != 0)
3076 server->caps |= NFS_CAP_SYMLINKS;
3077 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
3078 server->caps |= NFS_CAP_FILEID;
3079 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
3080 server->caps |= NFS_CAP_MODE;
3081 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
3082 server->caps |= NFS_CAP_NLINK;
3083 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
3084 server->caps |= NFS_CAP_OWNER;
3085 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
3086 server->caps |= NFS_CAP_OWNER_GROUP;
3087 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
3088 server->caps |= NFS_CAP_ATIME;
3089 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
3090 server->caps |= NFS_CAP_CTIME;
3091 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
3092 server->caps |= NFS_CAP_MTIME;
3093 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
3094 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
3095 server->caps |= NFS_CAP_SECURITY_LABEL;
3096 #endif
3097 memcpy(server->attr_bitmask_nl, res.attr_bitmask,
3098 sizeof(server->attr_bitmask));
3099 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
3100
3101 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
3102 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
3103 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
3104 server->cache_consistency_bitmask[2] = 0;
3105 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
3106 sizeof(server->exclcreat_bitmask));
3107 server->acl_bitmask = res.acl_bitmask;
3108 server->fh_expire_type = res.fh_expire_type;
3109 }
3110
3111 return status;
3112 }
3113
3114 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3115 {
3116 struct nfs4_exception exception = { };
3117 int err;
3118 do {
3119 err = nfs4_handle_exception(server,
3120 _nfs4_server_capabilities(server, fhandle),
3121 &exception);
3122 } while (exception.retry);
3123 return err;
3124 }
3125
3126 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
3127 struct nfs_fsinfo *info)
3128 {
3129 u32 bitmask[3];
3130 struct nfs4_lookup_root_arg args = {
3131 .bitmask = bitmask,
3132 };
3133 struct nfs4_lookup_res res = {
3134 .server = server,
3135 .fattr = info->fattr,
3136 .fh = fhandle,
3137 };
3138 struct rpc_message msg = {
3139 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
3140 .rpc_argp = &args,
3141 .rpc_resp = &res,
3142 };
3143
3144 bitmask[0] = nfs4_fattr_bitmap[0];
3145 bitmask[1] = nfs4_fattr_bitmap[1];
3146 /*
3147 	 * The security label is processed by a later getfattr, so don't
3148 	 * request it here.
3148 */
3149 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
3150
3151 nfs_fattr_init(info->fattr);
3152 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3153 }
3154
3155 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
3156 struct nfs_fsinfo *info)
3157 {
3158 struct nfs4_exception exception = { };
3159 int err;
3160 do {
3161 err = _nfs4_lookup_root(server, fhandle, info);
3162 trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
3163 switch (err) {
3164 case 0:
3165 case -NFS4ERR_WRONGSEC:
3166 goto out;
3167 default:
3168 err = nfs4_handle_exception(server, err, &exception);
3169 }
3170 } while (exception.retry);
3171 out:
3172 return err;
3173 }
3174
3175 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3176 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
3177 {
3178 struct rpc_auth_create_args auth_args = {
3179 .pseudoflavor = flavor,
3180 };
3181 struct rpc_auth *auth;
3182 int ret;
3183
3184 auth = rpcauth_create(&auth_args, server->client);
3185 if (IS_ERR(auth)) {
3186 ret = -EACCES;
3187 goto out;
3188 }
3189 ret = nfs4_lookup_root(server, fhandle, info);
3190 out:
3191 return ret;
3192 }
3193
3194 /*
3195 * Retry pseudoroot lookup with various security flavors. We do this when:
3196 *
3197 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
3198 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation
3199 *
3200 * Returns zero on success, or a negative NFS4ERR value, or a
3201 * negative errno value.
3202 */
3203 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3204 struct nfs_fsinfo *info)
3205 {
3206 /* Per 3530bis 15.33.5 */
3207 static const rpc_authflavor_t flav_array[] = {
3208 RPC_AUTH_GSS_KRB5P,
3209 RPC_AUTH_GSS_KRB5I,
3210 RPC_AUTH_GSS_KRB5,
3211 RPC_AUTH_UNIX, /* courtesy */
3212 RPC_AUTH_NULL,
3213 };
3214 int status = -EPERM;
3215 size_t i;
3216
3217 if (server->auth_info.flavor_len > 0) {
3218 /* try each flavor specified by user */
3219 for (i = 0; i < server->auth_info.flavor_len; i++) {
3220 status = nfs4_lookup_root_sec(server, fhandle, info,
3221 server->auth_info.flavors[i]);
3222 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3223 continue;
3224 break;
3225 }
3226 } else {
3227 /* no flavors specified by user, try default list */
3228 for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
3229 status = nfs4_lookup_root_sec(server, fhandle, info,
3230 flav_array[i]);
3231 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3232 continue;
3233 break;
3234 }
3235 }
3236
3237 /*
3238 	 * -EACCES could mean that the user doesn't have the correct permissions
3239 * to access the mount. It could also mean that we tried to mount
3240 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
3241 * existing mount programs don't handle -EACCES very well so it should
3242 * be mapped to -EPERM instead.
3243 */
3244 if (status == -EACCES)
3245 status = -EPERM;
3246 return status;
3247 }
3248
3249 static int nfs4_do_find_root_sec(struct nfs_server *server,
3250 struct nfs_fh *fhandle, struct nfs_fsinfo *info)
3251 {
3252 int mv = server->nfs_client->cl_minorversion;
3253 return nfs_v4_minor_ops[mv]->find_root_sec(server, fhandle, info);
3254 }
3255
3256 /**
3257 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
3258 * @server: initialized nfs_server handle
3259 * @fhandle: we fill in the pseudo-fs root file handle
3260 * @info: we fill in an FSINFO struct
3261 * @auth_probe: probe the auth flavours
3262 *
3263 * Returns zero on success, or a negative errno.
3264 */
3265 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
3266 struct nfs_fsinfo *info,
3267 bool auth_probe)
3268 {
3269 int status = 0;
3270
3271 if (!auth_probe)
3272 status = nfs4_lookup_root(server, fhandle, info);
3273
3274 	if (auth_probe || status == -NFS4ERR_WRONGSEC)
3275 status = nfs4_do_find_root_sec(server, fhandle, info);
3276
3277 if (status == 0)
3278 status = nfs4_server_capabilities(server, fhandle);
3279 if (status == 0)
3280 status = nfs4_do_fsinfo(server, fhandle, info);
3281
3282 return nfs4_map_errors(status);
3283 }
3284
3285 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
3286 struct nfs_fsinfo *info)
3287 {
3288 int error;
3289 struct nfs_fattr *fattr = info->fattr;
3290 struct nfs4_label *label = NULL;
3291
3292 error = nfs4_server_capabilities(server, mntfh);
3293 if (error < 0) {
3294 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
3295 return error;
3296 }
3297
3298 label = nfs4_label_alloc(server, GFP_KERNEL);
3299 if (IS_ERR(label))
3300 return PTR_ERR(label);
3301
3302 error = nfs4_proc_getattr(server, mntfh, fattr, label);
3303 if (error < 0) {
3304 dprintk("nfs4_get_root: getattr error = %d\n", -error);
3305 goto err_free_label;
3306 }
3307
3308 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
3309 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
3310 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
3311
3312 err_free_label:
3313 nfs4_label_free(label);
3314
3315 return error;
3316 }
3317
3318 /*
3319 * Get locations and (maybe) other attributes of a referral.
3320 * Note that we'll actually follow the referral later when
3321 * we detect fsid mismatch in inode revalidation
3322 */
3323 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
3324 const struct qstr *name, struct nfs_fattr *fattr,
3325 struct nfs_fh *fhandle)
3326 {
3327 int status = -ENOMEM;
3328 struct page *page = NULL;
3329 struct nfs4_fs_locations *locations = NULL;
3330
3331 page = alloc_page(GFP_KERNEL);
3332 if (page == NULL)
3333 goto out;
3334 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
3335 if (locations == NULL)
3336 goto out;
3337
3338 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
3339 if (status != 0)
3340 goto out;
3341
3342 /*
3343 * If the fsid didn't change, this is a migration event, not a
3344 * referral. Cause us to drop into the exception handler, which
3345 * will kick off migration recovery.
3346 */
3347 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
3348 dprintk("%s: server did not return a different fsid for"
3349 " a referral at %s\n", __func__, name->name);
3350 status = -NFS4ERR_MOVED;
3351 goto out;
3352 }
3353 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
3354 nfs_fixup_referral_attributes(&locations->fattr);
3355
3356 /* replace the lookup nfs_fattr with the locations nfs_fattr */
3357 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
3358 memset(fhandle, 0, sizeof(struct nfs_fh));
3359 out:
3360 if (page)
3361 __free_page(page);
3362 kfree(locations);
3363 return status;
3364 }
3365
3366 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3367 struct nfs_fattr *fattr, struct nfs4_label *label)
3368 {
3369 struct nfs4_getattr_arg args = {
3370 .fh = fhandle,
3371 .bitmask = server->attr_bitmask,
3372 };
3373 struct nfs4_getattr_res res = {
3374 .fattr = fattr,
3375 .label = label,
3376 .server = server,
3377 };
3378 struct rpc_message msg = {
3379 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
3380 .rpc_argp = &args,
3381 .rpc_resp = &res,
3382 };
3383
3384 args.bitmask = nfs4_bitmask(server, label);
3385
3386 nfs_fattr_init(fattr);
3387 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3388 }
3389
3390 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3391 struct nfs_fattr *fattr, struct nfs4_label *label)
3392 {
3393 struct nfs4_exception exception = { };
3394 int err;
3395 do {
3396 err = _nfs4_proc_getattr(server, fhandle, fattr, label);
3397 trace_nfs4_getattr(server, fhandle, fattr, err);
3398 err = nfs4_handle_exception(server, err,
3399 &exception);
3400 } while (exception.retry);
3401 return err;
3402 }
3403
3404 /*
3405  * The file is not closed if it is opened due to a request to change
3406 * the size of the file. The open call will not be needed once the
3407 * VFS layer lookup-intents are implemented.
3408 *
3409 * Close is called when the inode is destroyed.
3410 * If we haven't opened the file for O_WRONLY, we
3411 * need to in the size_change case to obtain a stateid.
3412 *
3413 * Got race?
3414 * Because OPEN is always done by name in nfsv4, it is
3415 * possible that we opened a different file by the same
3416 * name. We can recognize this race condition, but we
3417 * can't do anything about it besides returning an error.
3418 *
3419 * This will be fixed with VFS changes (lookup-intent).
3420 */
3421 static int
3422 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
3423 struct iattr *sattr)
3424 {
3425 struct inode *inode = d_inode(dentry);
3426 struct rpc_cred *cred = NULL;
3427 struct nfs4_state *state = NULL;
3428 struct nfs4_label *label = NULL;
3429 int status;
3430
3431 if (pnfs_ld_layoutret_on_setattr(inode) &&
3432 sattr->ia_valid & ATTR_SIZE &&
3433 sattr->ia_size < i_size_read(inode))
3434 pnfs_commit_and_return_layout(inode);
3435
3436 nfs_fattr_init(fattr);
3437
3438 /* Deal with open(O_TRUNC) */
3439 if (sattr->ia_valid & ATTR_OPEN)
3440 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
3441
3442 /* Optimization: if the end result is no change, don't RPC */
3443 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
3444 return 0;
3445
3446 /* Search for an existing open(O_WRITE) file */
3447 if (sattr->ia_valid & ATTR_FILE) {
3448 struct nfs_open_context *ctx;
3449
3450 ctx = nfs_file_open_context(sattr->ia_file);
3451 if (ctx) {
3452 cred = ctx->cred;
3453 state = ctx->state;
3454 }
3455 }
3456
3457 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
3458 if (IS_ERR(label))
3459 return PTR_ERR(label);
3460
3461 status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label);
3462 if (status == 0) {
3463 nfs_setattr_update_inode(inode, sattr, fattr);
3464 nfs_setsecurity(inode, fattr, label);
3465 }
3466 nfs4_label_free(label);
3467 return status;
3468 }
3469
3470 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
3471 const struct qstr *name, struct nfs_fh *fhandle,
3472 struct nfs_fattr *fattr, struct nfs4_label *label)
3473 {
3474 struct nfs_server *server = NFS_SERVER(dir);
3475 int status;
3476 struct nfs4_lookup_arg args = {
3477 .bitmask = server->attr_bitmask,
3478 .dir_fh = NFS_FH(dir),
3479 .name = name,
3480 };
3481 struct nfs4_lookup_res res = {
3482 .server = server,
3483 .fattr = fattr,
3484 .label = label,
3485 .fh = fhandle,
3486 };
3487 struct rpc_message msg = {
3488 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
3489 .rpc_argp = &args,
3490 .rpc_resp = &res,
3491 };
3492
3493 args.bitmask = nfs4_bitmask(server, label);
3494
3495 nfs_fattr_init(fattr);
3496
3497 dprintk("NFS call lookup %s\n", name->name);
3498 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
3499 dprintk("NFS reply lookup: %d\n", status);
3500 return status;
3501 }
3502
3503 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
3504 {
3505 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
3506 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
3507 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
3508 fattr->nlink = 2;
3509 }
3510
3511 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
3512 struct qstr *name, struct nfs_fh *fhandle,
3513 struct nfs_fattr *fattr, struct nfs4_label *label)
3514 {
3515 struct nfs4_exception exception = { };
3516 struct rpc_clnt *client = *clnt;
3517 int err;
3518 do {
3519 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr, label);
3520 trace_nfs4_lookup(dir, name, err);
3521 switch (err) {
3522 case -NFS4ERR_BADNAME:
3523 err = -ENOENT;
3524 goto out;
3525 case -NFS4ERR_MOVED:
3526 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
3527 if (err == -NFS4ERR_MOVED)
3528 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3529 goto out;
3530 case -NFS4ERR_WRONGSEC:
3531 err = -EPERM;
3532 if (client != *clnt)
3533 goto out;
3534 client = nfs4_negotiate_security(client, dir, name);
3535 if (IS_ERR(client))
3536 return PTR_ERR(client);
3537
3538 exception.retry = 1;
3539 break;
3540 default:
3541 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3542 }
3543 } while (exception.retry);
3544
3545 out:
3546 if (err == 0)
3547 *clnt = client;
3548 else if (client != *clnt)
3549 rpc_shutdown_client(client);
3550
3551 return err;
3552 }
3553
3554 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
3555 struct nfs_fh *fhandle, struct nfs_fattr *fattr,
3556 struct nfs4_label *label)
3557 {
3558 int status;
3559 struct rpc_clnt *client = NFS_CLIENT(dir);
3560
3561 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, label);
3562 if (client != NFS_CLIENT(dir)) {
3563 rpc_shutdown_client(client);
3564 nfs_fixup_secinfo_attributes(fattr);
3565 }
3566 return status;
3567 }
3568
3569 struct rpc_clnt *
3570 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
3571 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
3572 {
3573 struct rpc_clnt *client = NFS_CLIENT(dir);
3574 int status;
3575
3576 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL);
3577 if (status < 0)
3578 return ERR_PTR(status);
3579 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
3580 }
3581
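/*
 * Translate the generic MAY_* permission mask into NFS4_ACCESS_* bits
 * (directories and regular files use different write/execute bits),
 * issue the ACCESS call, and record the server's answer in the access
 * cache entry.
 */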
3582 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3583 {
3584 struct nfs_server *server = NFS_SERVER(inode);
3585 struct nfs4_accessargs args = {
3586 .fh = NFS_FH(inode),
3587 .bitmask = server->cache_consistency_bitmask,
3588 };
3589 struct nfs4_accessres res = {
3590 .server = server,
3591 };
3592 struct rpc_message msg = {
3593 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
3594 .rpc_argp = &args,
3595 .rpc_resp = &res,
3596 .rpc_cred = entry->cred,
3597 };
3598 int mode = entry->mask;
3599 int status = 0;
3600
3601 /*
3602 * Determine which access bits we want to ask for...
3603 */
3604 if (mode & MAY_READ)
3605 args.access |= NFS4_ACCESS_READ;
3606 if (S_ISDIR(inode->i_mode)) {
3607 if (mode & MAY_WRITE)
3608 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
3609 if (mode & MAY_EXEC)
3610 args.access |= NFS4_ACCESS_LOOKUP;
3611 } else {
3612 if (mode & MAY_WRITE)
3613 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
3614 if (mode & MAY_EXEC)
3615 args.access |= NFS4_ACCESS_EXECUTE;
3616 }
3617
3618 res.fattr = nfs_alloc_fattr();
3619 if (res.fattr == NULL)
3620 return -ENOMEM;
3621
3622 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3623 if (!status) {
3624 nfs_access_set_mask(entry, res.access);
3625 nfs_refresh_inode(inode, res.fattr);
3626 }
3627 nfs_free_fattr(res.fattr);
3628 return status;
3629 }
3630
3631 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3632 {
3633 struct nfs4_exception exception = { };
3634 int err;
3635 do {
3636 err = _nfs4_proc_access(inode, entry);
3637 trace_nfs4_access(inode, err);
3638 err = nfs4_handle_exception(NFS_SERVER(inode), err,
3639 &exception);
3640 } while (exception.retry);
3641 return err;
3642 }
3643
3644 /*
3645 * TODO: For the time being, we don't try to get any attributes
3646 * along with any of the zero-copy operations READ, READDIR,
3647 * READLINK, WRITE.
3648 *
3649 * In the case of the first three, we want to put the GETATTR
3650 * after the read-type operation -- this is because it is hard
3651 * to predict the length of a GETATTR response in v4, and thus
3652 * align the READ data correctly. This means that the GETATTR
3653 * may end up partially falling into the page cache, and we should
3654 * shift it into the 'tail' of the xdr_buf before processing.
3655 * To do this efficiently, we need to know the total length
3656 * of data received, which doesn't seem to be available outside
3657 * of the RPC layer.
3658 *
3659 * In the case of WRITE, we also want to put the GETATTR after
3660 * the operation -- in this case because we want to make sure
3661 * we get the post-operation mtime and size.
3662 *
3663 * Both of these changes to the XDR layer would in fact be quite
3664 * minor, but I decided to leave them for a subsequent patch.
3665 */
3666 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
3667 unsigned int pgbase, unsigned int pglen)
3668 {
3669 struct nfs4_readlink args = {
3670 .fh = NFS_FH(inode),
3671 .pgbase = pgbase,
3672 .pglen = pglen,
3673 .pages = &page,
3674 };
3675 struct nfs4_readlink_res res;
3676 struct rpc_message msg = {
3677 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
3678 .rpc_argp = &args,
3679 .rpc_resp = &res,
3680 };
3681
3682 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
3683 }
3684
3685 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
3686 unsigned int pgbase, unsigned int pglen)
3687 {
3688 struct nfs4_exception exception = { };
3689 int err;
3690 do {
3691 err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
3692 trace_nfs4_readlink(inode, err);
3693 err = nfs4_handle_exception(NFS_SERVER(inode), err,
3694 &exception);
3695 } while (exception.retry);
3696 return err;
3697 }
3698
3699 /*
3700 * This is just for mknod. open(O_CREAT) will always do ->open_context().
3701 */
3702 static int
3703 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
3704 int flags)
3705 {
3706 struct nfs4_label l, *ilabel = NULL;
3707 struct nfs_open_context *ctx;
3708 struct nfs4_state *state;
3709 int status = 0;
3710
3711 ctx = alloc_nfs_open_context(dentry, FMODE_READ);
3712 if (IS_ERR(ctx))
3713 return PTR_ERR(ctx);
3714
3715 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
3716
3717 sattr->ia_mode &= ~current_umask();
3718 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL);
3719 if (IS_ERR(state)) {
3720 status = PTR_ERR(state);
3721 goto out;
3722 }
3723 out:
3724 nfs4_label_release_security(ilabel);
3725 put_nfs_open_context(ctx);
3726 return status;
3727 }
3728
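/*
 * Send a synchronous REMOVE and, on success, fold the post-operation
 * change attribute into the directory's cache so its cached contents
 * get revalidated.
 */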
3729 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
3730 {
3731 struct nfs_server *server = NFS_SERVER(dir);
3732 struct nfs_removeargs args = {
3733 .fh = NFS_FH(dir),
3734 .name = *name,
3735 };
3736 struct nfs_removeres res = {
3737 .server = server,
3738 };
3739 struct rpc_message msg = {
3740 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
3741 .rpc_argp = &args,
3742 .rpc_resp = &res,
3743 };
3744 int status;
3745
3746 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
3747 if (status == 0)
3748 update_changeattr(dir, &res.cinfo);
3749 return status;
3750 }
3751
3752 static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
3753 {
3754 struct nfs4_exception exception = { };
3755 int err;
3756 do {
3757 err = _nfs4_proc_remove(dir, name);
3758 trace_nfs4_remove(dir, name, err);
3759 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3760 &exception);
3761 } while (exception.retry);
3762 return err;
3763 }
3764
3765 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
3766 {
3767 struct nfs_server *server = NFS_SERVER(dir);
3768 struct nfs_removeargs *args = msg->rpc_argp;
3769 struct nfs_removeres *res = msg->rpc_resp;
3770
3771 res->server = server;
3772 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
3773 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1);
3774
3775 nfs_fattr_init(res->dir_attr);
3776 }
3777
3778 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
3779 {
3780 nfs4_setup_sequence(NFS_SERVER(data->dir),
3781 &data->args.seq_args,
3782 &data->res.seq_res,
3783 task);
3784 }
3785
3786 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
3787 {
3788 struct nfs_unlinkdata *data = task->tk_calldata;
3789 struct nfs_removeres *res = &data->res;
3790
3791 if (!nfs4_sequence_done(task, &res->seq_res))
3792 return 0;
3793 if (nfs4_async_handle_error(task, res->server, NULL,
3794 &data->timeout) == -EAGAIN)
3795 return 0;
3796 update_changeattr(dir, &res->cinfo);
3797 return 1;
3798 }
3799
3800 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
3801 {
3802 struct nfs_server *server = NFS_SERVER(dir);
3803 struct nfs_renameargs *arg = msg->rpc_argp;
3804 struct nfs_renameres *res = msg->rpc_resp;
3805
3806 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
3807 res->server = server;
3808 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1);
3809 }
3810
3811 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
3812 {
3813 nfs4_setup_sequence(NFS_SERVER(data->old_dir),
3814 &data->args.seq_args,
3815 &data->res.seq_res,
3816 task);
3817 }
3818
3819 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
3820 struct inode *new_dir)
3821 {
3822 struct nfs_renamedata *data = task->tk_calldata;
3823 struct nfs_renameres *res = &data->res;
3824
3825 if (!nfs4_sequence_done(task, &res->seq_res))
3826 return 0;
3827 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
3828 return 0;
3829
3830 update_changeattr(old_dir, &res->old_cinfo);
3831 update_changeattr(new_dir, &res->new_cinfo);
3832 return 1;
3833 }
3834
3835 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3836 {
3837 struct nfs_server *server = NFS_SERVER(inode);
3838 struct nfs4_link_arg arg = {
3839 .fh = NFS_FH(inode),
3840 .dir_fh = NFS_FH(dir),
3841 .name = name,
3842 .bitmask = server->attr_bitmask,
3843 };
3844 struct nfs4_link_res res = {
3845 .server = server,
3846 .label = NULL,
3847 };
3848 struct rpc_message msg = {
3849 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
3850 .rpc_argp = &arg,
3851 .rpc_resp = &res,
3852 };
3853 int status = -ENOMEM;
3854
3855 res.fattr = nfs_alloc_fattr();
3856 if (res.fattr == NULL)
3857 goto out;
3858
3859 res.label = nfs4_label_alloc(server, GFP_KERNEL);
3860 if (IS_ERR(res.label)) {
3861 status = PTR_ERR(res.label);
3862 goto out;
3863 }
3864 arg.bitmask = nfs4_bitmask(server, res.label);
3865
3866 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3867 if (!status) {
3868 update_changeattr(dir, &res.cinfo);
3869 status = nfs_post_op_update_inode(inode, res.fattr);
3870 if (!status)
3871 nfs_setsecurity(inode, res.fattr, res.label);
3872 }
3873
3874
3875 nfs4_label_free(res.label);
3876
3877 out:
3878 nfs_free_fattr(res.fattr);
3879 return status;
3880 }
3881
3882 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3883 {
3884 struct nfs4_exception exception = { };
3885 int err;
3886 do {
3887 err = nfs4_handle_exception(NFS_SERVER(inode),
3888 _nfs4_proc_link(inode, dir, name),
3889 &exception);
3890 } while (exception.retry);
3891 return err;
3892 }
3893
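/*
 * Scratch state shared by the CREATE-class operations (symlink, mkdir,
 * mknod): the RPC message plus the preallocated filehandle, fattr and
 * label that the reply decoder fills in.
 */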
3894 struct nfs4_createdata {
3895 struct rpc_message msg;
3896 struct nfs4_create_arg arg;
3897 struct nfs4_create_res res;
3898 struct nfs_fh fh;
3899 struct nfs_fattr fattr;
3900 struct nfs4_label *label;
3901 };
3902
3903 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
3904 struct qstr *name, struct iattr *sattr, u32 ftype)
3905 {
3906 struct nfs4_createdata *data;
3907
3908 data = kzalloc(sizeof(*data), GFP_KERNEL);
3909 if (data != NULL) {
3910 struct nfs_server *server = NFS_SERVER(dir);
3911
3912 data->label = nfs4_label_alloc(server, GFP_KERNEL);
3913 if (IS_ERR(data->label))
3914 goto out_free;
3915
3916 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
3917 data->msg.rpc_argp = &data->arg;
3918 data->msg.rpc_resp = &data->res;
3919 data->arg.dir_fh = NFS_FH(dir);
3920 data->arg.server = server;
3921 data->arg.name = name;
3922 data->arg.attrs = sattr;
3923 data->arg.ftype = ftype;
3924 data->arg.bitmask = nfs4_bitmask(server, data->label);
3925 data->res.server = server;
3926 data->res.fh = &data->fh;
3927 data->res.fattr = &data->fattr;
3928 data->res.label = data->label;
3929 nfs_fattr_init(data->res.fattr);
3930 }
3931 return data;
3932 out_free:
3933 kfree(data);
3934 return NULL;
3935 }
3936
3937 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
3938 {
3939 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
3940 &data->arg.seq_args, &data->res.seq_res, 1);
3941 if (status == 0) {
3942 update_changeattr(dir, &data->res.dir_cinfo);
3943 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label);
3944 }
3945 return status;
3946 }
3947
3948 static void nfs4_free_createdata(struct nfs4_createdata *data)
3949 {
3950 nfs4_label_free(data->label);
3951 kfree(data);
3952 }
3953
3954 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3955 struct page *page, unsigned int len, struct iattr *sattr,
3956 struct nfs4_label *label)
3957 {
3958 struct nfs4_createdata *data;
3959 int status = -ENAMETOOLONG;
3960
3961 if (len > NFS4_MAXPATHLEN)
3962 goto out;
3963
3964 status = -ENOMEM;
3965 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
3966 if (data == NULL)
3967 goto out;
3968
3969 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
3970 data->arg.u.symlink.pages = &page;
3971 data->arg.u.symlink.len = len;
3972 data->arg.label = label;
3973
3974 status = nfs4_do_create(dir, dentry, data);
3975
3976 nfs4_free_createdata(data);
3977 out:
3978 return status;
3979 }
3980
3981 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3982 struct page *page, unsigned int len, struct iattr *sattr)
3983 {
3984 struct nfs4_exception exception = { };
3985 struct nfs4_label l, *label = NULL;
3986 int err;
3987
3988 label = nfs4_label_init_security(dir, dentry, sattr, &l);
3989
3990 do {
3991 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label);
3992 trace_nfs4_symlink(dir, &dentry->d_name, err);
3993 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3994 &exception);
3995 } while (exception.retry);
3996
3997 nfs4_label_release_security(label);
3998 return err;
3999 }
4000
4001 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
4002 struct iattr *sattr, struct nfs4_label *label)
4003 {
4004 struct nfs4_createdata *data;
4005 int status = -ENOMEM;
4006
4007 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
4008 if (data == NULL)
4009 goto out;
4010
4011 data->arg.label = label;
4012 status = nfs4_do_create(dir, dentry, data);
4013
4014 nfs4_free_createdata(data);
4015 out:
4016 return status;
4017 }
4018
4019 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
4020 struct iattr *sattr)
4021 {
4022 struct nfs4_exception exception = { };
4023 struct nfs4_label l, *label = NULL;
4024 int err;
4025
4026 label = nfs4_label_init_security(dir, dentry, sattr, &l);
4027
4028 sattr->ia_mode &= ~current_umask();
4029 do {
4030 err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
4031 trace_nfs4_mkdir(dir, &dentry->d_name, err);
4032 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4033 &exception);
4034 } while (exception.retry);
4035 nfs4_label_release_security(label);
4036
4037 return err;
4038 }
4039
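/*
 * Issue a READDIR starting at the given cookie. On success the server's
 * cookie verifier is saved in the nfs_inode so the next call can resume
 * the listing; the directory's cached atime is invalidated either way.
 */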
4040 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
4041 u64 cookie, struct page **pages, unsigned int count, int plus)
4042 {
4043 struct inode *dir = d_inode(dentry);
4044 struct nfs4_readdir_arg args = {
4045 .fh = NFS_FH(dir),
4046 .pages = pages,
4047 .pgbase = 0,
4048 .count = count,
4049 .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask,
4050 .plus = plus,
4051 };
4052 struct nfs4_readdir_res res;
4053 struct rpc_message msg = {
4054 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
4055 .rpc_argp = &args,
4056 .rpc_resp = &res,
4057 .rpc_cred = cred,
4058 };
4059 int status;
4060
4061 dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__,
4062 dentry,
4063 (unsigned long long)cookie);
4064 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
4065 res.pgbase = args.pgbase;
4066 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
4067 if (status >= 0) {
4068 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
4069 status += args.pgbase;
4070 }
4071
4072 nfs_invalidate_atime(dir);
4073
4074 dprintk("%s: returns %d\n", __func__, status);
4075 return status;
4076 }
4077
4078 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
4079 u64 cookie, struct page **pages, unsigned int count, int plus)
4080 {
4081 struct nfs4_exception exception = { };
4082 int err;
4083 do {
4084 err = _nfs4_proc_readdir(dentry, cred, cookie,
4085 pages, count, plus);
4086 trace_nfs4_readdir(d_inode(dentry), err);
4087 err = nfs4_handle_exception(NFS_SERVER(d_inode(dentry)), err,
4088 &exception);
4089 } while (exception.retry);
4090 return err;
4091 }
4092
4093 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
4094 struct iattr *sattr, struct nfs4_label *label, dev_t rdev)
4095 {
4096 struct nfs4_createdata *data;
4097 int mode = sattr->ia_mode;
4098 int status = -ENOMEM;
4099
4100 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
4101 if (data == NULL)
4102 goto out;
4103
4104 if (S_ISFIFO(mode))
4105 data->arg.ftype = NF4FIFO;
4106 else if (S_ISBLK(mode)) {
4107 data->arg.ftype = NF4BLK;
4108 data->arg.u.device.specdata1 = MAJOR(rdev);
4109 data->arg.u.device.specdata2 = MINOR(rdev);
4110 }
4111 else if (S_ISCHR(mode)) {
4112 data->arg.ftype = NF4CHR;
4113 data->arg.u.device.specdata1 = MAJOR(rdev);
4114 data->arg.u.device.specdata2 = MINOR(rdev);
4115 } else if (!S_ISSOCK(mode)) {
4116 status = -EINVAL;
4117 goto out_free;
4118 }
4119
4120 data->arg.label = label;
4121 status = nfs4_do_create(dir, dentry, data);
4122 out_free:
4123 nfs4_free_createdata(data);
4124 out:
4125 return status;
4126 }
4127
4128 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
4129 struct iattr *sattr, dev_t rdev)
4130 {
4131 struct nfs4_exception exception = { };
4132 struct nfs4_label l, *label = NULL;
4133 int err;
4134
4135 label = nfs4_label_init_security(dir, dentry, sattr, &l);
4136
4137 sattr->ia_mode &= ~current_umask();
4138 do {
4139 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
4140 trace_nfs4_mknod(dir, &dentry->d_name, err);
4141 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4142 &exception);
4143 } while (exception.retry);
4144
4145 nfs4_label_release_security(label);
4146
4147 return err;
4148 }
4149
4150 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
4151 struct nfs_fsstat *fsstat)
4152 {
4153 struct nfs4_statfs_arg args = {
4154 .fh = fhandle,
4155 .bitmask = server->attr_bitmask,
4156 };
4157 struct nfs4_statfs_res res = {
4158 .fsstat = fsstat,
4159 };
4160 struct rpc_message msg = {
4161 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
4162 .rpc_argp = &args,
4163 .rpc_resp = &res,
4164 };
4165
4166 nfs_fattr_init(fsstat->fattr);
4167 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4168 }
4169
4170 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
4171 {
4172 struct nfs4_exception exception = { };
4173 int err;
4174 do {
4175 err = nfs4_handle_exception(server,
4176 _nfs4_proc_statfs(server, fhandle, fsstat),
4177 &exception);
4178 } while (exception.retry);
4179 return err;
4180 }
4181
4182 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
4183 struct nfs_fsinfo *fsinfo)
4184 {
4185 struct nfs4_fsinfo_arg args = {
4186 .fh = fhandle,
4187 .bitmask = server->attr_bitmask,
4188 };
4189 struct nfs4_fsinfo_res res = {
4190 .fsinfo = fsinfo,
4191 };
4192 struct rpc_message msg = {
4193 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
4194 .rpc_argp = &args,
4195 .rpc_resp = &res,
4196 };
4197
4198 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4199 }
4200
4201 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4202 {
4203 struct nfs4_exception exception = { };
4204 unsigned long now = jiffies;
4205 int err;
4206
4207 do {
4208 err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
4209 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
4210 if (err == 0) {
4211 struct nfs_client *clp = server->nfs_client;
4212
4213 spin_lock(&clp->cl_lock);
4214 clp->cl_lease_time = fsinfo->lease_time * HZ;
4215 clp->cl_last_renewal = now;
4216 spin_unlock(&clp->cl_lock);
4217 break;
4218 }
4219 err = nfs4_handle_exception(server, err, &exception);
4220 } while (exception.retry);
4221 return err;
4222 }
4223
4224 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4225 {
4226 int error;
4227
4228 nfs_fattr_init(fsinfo->fattr);
4229 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
4230 if (error == 0) {
4231 /* block layout checks this! */
4232 server->pnfs_blksize = fsinfo->blksize;
4233 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype);
4234 }
4235
4236 return error;
4237 }
4238
4239 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4240 struct nfs_pathconf *pathconf)
4241 {
4242 struct nfs4_pathconf_arg args = {
4243 .fh = fhandle,
4244 .bitmask = server->attr_bitmask,
4245 };
4246 struct nfs4_pathconf_res res = {
4247 .pathconf = pathconf,
4248 };
4249 struct rpc_message msg = {
4250 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
4251 .rpc_argp = &args,
4252 .rpc_resp = &res,
4253 };
4254
4255 /* None of the pathconf attributes are mandatory to implement */
4256 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
4257 memset(pathconf, 0, sizeof(*pathconf));
4258 return 0;
4259 }
4260
4261 nfs_fattr_init(pathconf->fattr);
4262 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4263 }
4264
4265 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4266 struct nfs_pathconf *pathconf)
4267 {
4268 struct nfs4_exception exception = { };
4269 int err;
4270
4271 do {
4272 err = nfs4_handle_exception(server,
4273 _nfs4_proc_pathconf(server, fhandle, pathconf),
4274 &exception);
4275 } while (exception.retry);
4276 return err;
4277 }
4278
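/*
 * Pick the stateid to send with a READ or WRITE, based on the open
 * context's state and, when one exists, the lock context's lockowner.
 */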
4279 int nfs4_set_rw_stateid(nfs4_stateid *stateid,
4280 const struct nfs_open_context *ctx,
4281 const struct nfs_lock_context *l_ctx,
4282 fmode_t fmode)
4283 {
4284 const struct nfs_lockowner *lockowner = NULL;
4285
4286 if (l_ctx != NULL)
4287 lockowner = &l_ctx->lockowner;
4288 return nfs4_select_rw_stateid(stateid, ctx->state, fmode, lockowner);
4289 }
4290 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
4291
4292 static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
4293 const struct nfs_open_context *ctx,
4294 const struct nfs_lock_context *l_ctx,
4295 fmode_t fmode)
4296 {
4297 nfs4_stateid current_stateid;
4298
4299 /* If the current stateid represents a lost lock, then exit */
4300 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
4301 return true;
4302 return nfs4_stateid_match(stateid, &current_stateid);
4303 }
4304
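/*
 * Errors indicating that the stateid sent with an I/O request has been
 * revoked, has expired, or is otherwise no longer valid for the request.
 */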
4305 static bool nfs4_error_stateid_expired(int err)
4306 {
4307 switch (err) {
4308 case -NFS4ERR_DELEG_REVOKED:
4309 case -NFS4ERR_ADMIN_REVOKED:
4310 case -NFS4ERR_BAD_STATEID:
4311 case -NFS4ERR_STALE_STATEID:
4312 case -NFS4ERR_OLD_STATEID:
4313 case -NFS4ERR_OPENMODE:
4314 case -NFS4ERR_EXPIRED:
4315 return true;
4316 }
4317 return false;
4318 }
4319
4320 void __nfs4_read_done_cb(struct nfs_pgio_header *hdr)
4321 {
4322 nfs_invalidate_atime(hdr->inode);
4323 }
4324
4325 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
4326 {
4327 struct nfs_server *server = NFS_SERVER(hdr->inode);
4328
4329 trace_nfs4_read(hdr, task->tk_status);
4330 if (nfs4_async_handle_error(task, server,
4331 hdr->args.context->state,
4332 NULL) == -EAGAIN) {
4333 rpc_restart_call_prepare(task);
4334 return -EAGAIN;
4335 }
4336
4337 __nfs4_read_done_cb(hdr);
4338 if (task->tk_status > 0)
4339 renew_lease(server, hdr->timestamp);
4340 return 0;
4341 }
4342
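/*
 * If a READ failed with one of the stateid errors above and the stateid
 * it carried is no longer the current one for this context, restart the
 * RPC so it is re-sent with the up-to-date stateid.
 */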
4343 static bool nfs4_read_stateid_changed(struct rpc_task *task,
4344 struct nfs_pgio_args *args)
4345 {
4346
4347 if (!nfs4_error_stateid_expired(task->tk_status) ||
4348 nfs4_stateid_is_current(&args->stateid,
4349 args->context,
4350 args->lock_context,
4351 FMODE_READ))
4352 return false;
4353 rpc_restart_call_prepare(task);
4354 return true;
4355 }
4356
4357 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4358 {
4359
4360 dprintk("--> %s\n", __func__);
4361
4362 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4363 return -EAGAIN;
4364 if (nfs4_read_stateid_changed(task, &hdr->args))
4365 return -EAGAIN;
4366 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4367 nfs4_read_done_cb(task, hdr);
4368 }
4369
4370 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
4371 struct rpc_message *msg)
4372 {
4373 hdr->timestamp = jiffies;
4374 hdr->pgio_done_cb = nfs4_read_done_cb;
4375 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
4376 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0);
4377 }
4378
4379 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
4380 struct nfs_pgio_header *hdr)
4381 {
4382 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode),
4383 &hdr->args.seq_args,
4384 &hdr->res.seq_res,
4385 task))
4386 return 0;
4387 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
4388 hdr->args.lock_context,
4389 hdr->rw_ops->rw_mode) == -EIO)
4390 return -EIO;
4391 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags)))
4392 return -EIO;
4393 return 0;
4394 }
4395
4396 static int nfs4_write_done_cb(struct rpc_task *task,
4397 struct nfs_pgio_header *hdr)
4398 {
4399 struct inode *inode = hdr->inode;
4400
4401 trace_nfs4_write(hdr, task->tk_status);
4402 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
4403 hdr->args.context->state,
4404 NULL) == -EAGAIN) {
4405 rpc_restart_call_prepare(task);
4406 return -EAGAIN;
4407 }
4408 if (task->tk_status >= 0) {
4409 renew_lease(NFS_SERVER(inode), hdr->timestamp);
4410 nfs_writeback_update_inode(hdr);
4411 }
4412 return 0;
4413 }
4414
4415 static bool nfs4_write_stateid_changed(struct rpc_task *task,
4416 struct nfs_pgio_args *args)
4417 {
4418
4419 if (!nfs4_error_stateid_expired(task->tk_status) ||
4420 nfs4_stateid_is_current(&args->stateid,
4421 args->context,
4422 args->lock_context,
4423 FMODE_WRITE))
4424 return false;
4425 rpc_restart_call_prepare(task);
4426 return true;
4427 }
4428
4429 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4430 {
4431 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4432 return -EAGAIN;
4433 if (nfs4_write_stateid_changed(task, &hdr->args))
4434 return -EAGAIN;
4435 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4436 nfs4_write_done_cb(task, hdr);
4437 }
4438
4439 static
4440 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
4441 {
4442 /* Don't request attributes for pNFS or O_DIRECT writes */
4443 if (hdr->ds_clp != NULL || hdr->dreq != NULL)
4444 return false;
4445 /* Otherwise, request attributes if and only if we don't hold
4446 * a delegation
4447 */
4448 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
4449 }
4450
4451 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
4452 struct rpc_message *msg)
4453 {
4454 struct nfs_server *server = NFS_SERVER(hdr->inode);
4455
4456 if (!nfs4_write_need_cache_consistency_data(hdr)) {
4457 hdr->args.bitmask = NULL;
4458 hdr->res.fattr = NULL;
4459 } else
4460 hdr->args.bitmask = server->cache_consistency_bitmask;
4461
4462 if (!hdr->pgio_done_cb)
4463 hdr->pgio_done_cb = nfs4_write_done_cb;
4464 hdr->res.server = server;
4465 hdr->timestamp = jiffies;
4466
4467 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
4468 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1);
4469 }
4470
4471 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
4472 {
4473 nfs4_setup_sequence(NFS_SERVER(data->inode),
4474 &data->args.seq_args,
4475 &data->res.seq_res,
4476 task);
4477 }
4478
4479 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
4480 {
4481 struct inode *inode = data->inode;
4482
4483 trace_nfs4_commit(data, task->tk_status);
4484 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
4485 NULL, NULL) == -EAGAIN) {
4486 rpc_restart_call_prepare(task);
4487 return -EAGAIN;
4488 }
4489 return 0;
4490 }
4491
4492 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
4493 {
4494 if (!nfs4_sequence_done(task, &data->res.seq_res))
4495 return -EAGAIN;
4496 return data->commit_done_cb(task, data);
4497 }
4498
4499 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
4500 {
4501 struct nfs_server *server = NFS_SERVER(data->inode);
4502
4503 if (data->commit_done_cb == NULL)
4504 data->commit_done_cb = nfs4_commit_done_cb;
4505 data->res.server = server;
4506 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
4507 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
4508 }
4509
4510 struct nfs4_renewdata {
4511 struct nfs_client *client;
4512 unsigned long timestamp;
4513 };
4514
4515 /*
4516 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
4517 * standalone procedure for queueing an asynchronous RENEW.
4518 */
4519 static void nfs4_renew_release(void *calldata)
4520 {
4521 struct nfs4_renewdata *data = calldata;
4522 struct nfs_client *clp = data->client;
4523
4524 if (atomic_read(&clp->cl_count) > 1)
4525 nfs4_schedule_state_renewal(clp);
4526 nfs_put_client(clp);
4527 kfree(data);
4528 }
4529
4530 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
4531 {
4532 struct nfs4_renewdata *data = calldata;
4533 struct nfs_client *clp = data->client;
4534 unsigned long timestamp = data->timestamp;
4535
4536 trace_nfs4_renew_async(clp, task->tk_status);
4537 switch (task->tk_status) {
4538 case 0:
4539 break;
4540 case -NFS4ERR_LEASE_MOVED:
4541 nfs4_schedule_lease_moved_recovery(clp);
4542 break;
4543 default:
4544 /* Unless we're shutting down, schedule state recovery! */
4545 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
4546 return;
4547 if (task->tk_status != -NFS4ERR_CB_PATH_DOWN) {
4548 nfs4_schedule_lease_recovery(clp);
4549 return;
4550 }
4551 nfs4_schedule_path_down_recovery(clp);
4552 }
4553 do_renew_lease(clp, timestamp);
4554 }
4555
4556 static const struct rpc_call_ops nfs4_renew_ops = {
4557 .rpc_call_done = nfs4_renew_done,
4558 .rpc_release = nfs4_renew_release,
4559 };
4560
4561 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
4562 {
4563 struct rpc_message msg = {
4564 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
4565 .rpc_argp = clp,
4566 .rpc_cred = cred,
4567 };
4568 struct nfs4_renewdata *data;
4569
4570 if (renew_flags == 0)
4571 return 0;
4572 if (!atomic_inc_not_zero(&clp->cl_count))
4573 return -EIO;
4574 data = kmalloc(sizeof(*data), GFP_NOFS);
4575 if (data == NULL)
4576 return -ENOMEM;
4577 data->client = clp;
4578 data->timestamp = jiffies;
4579 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
4580 &nfs4_renew_ops, data);
4581 }
4582
4583 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
4584 {
4585 struct rpc_message msg = {
4586 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
4587 .rpc_argp = clp,
4588 .rpc_cred = cred,
4589 };
4590 unsigned long now = jiffies;
4591 int status;
4592
4593 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4594 if (status < 0)
4595 return status;
4596 do_renew_lease(clp, now);
4597 return 0;
4598 }
4599
4600 static inline int nfs4_server_supports_acls(struct nfs_server *server)
4601 {
4602 return server->caps & NFS_CAP_ACLS;
4603 }
4604
4605 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
4606 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
4607 * the stack.
4608 */
4609 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
4610
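/*
 * Copy a contiguous ACL buffer into freshly allocated pages for the RPC
 * layer. Returns the number of pages filled, or -ENOMEM after freeing
 * any pages that were already allocated.
 */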
4611 static int buf_to_pages_noslab(const void *buf, size_t buflen,
4612 struct page **pages)
4613 {
4614 struct page *newpage, **spages;
4615 int rc = 0;
4616 size_t len;
4617 spages = pages;
4618
4619 do {
4620 len = min_t(size_t, PAGE_SIZE, buflen);
4621 newpage = alloc_page(GFP_KERNEL);
4622
4623 if (newpage == NULL)
4624 goto unwind;
4625 memcpy(page_address(newpage), buf, len);
4626 buf += len;
4627 buflen -= len;
4628 *pages++ = newpage;
4629 rc++;
4630 } while (buflen != 0);
4631
4632 return rc;
4633
4634 unwind:
4635 for (; rc > 0; rc--)
4636 __free_page(spages[rc-1]);
4637 return -ENOMEM;
4638 }
4639
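/*
 * Per-inode ACL cache entry: if 'cached' is set, 'data' holds the ACL
 * itself; otherwise only its length is remembered and a later read has
 * to go back to the server.
 */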
4640 struct nfs4_cached_acl {
4641 int cached;
4642 size_t len;
4643 char data[0];
4644 };
4645
4646 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
4647 {
4648 struct nfs_inode *nfsi = NFS_I(inode);
4649
4650 spin_lock(&inode->i_lock);
4651 kfree(nfsi->nfs4_acl);
4652 nfsi->nfs4_acl = acl;
4653 spin_unlock(&inode->i_lock);
4654 }
4655
4656 static void nfs4_zap_acl_attr(struct inode *inode)
4657 {
4658 nfs4_set_cached_acl(inode, NULL);
4659 }
4660
4661 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
4662 {
4663 struct nfs_inode *nfsi = NFS_I(inode);
4664 struct nfs4_cached_acl *acl;
4665 int ret = -ENOENT;
4666
4667 spin_lock(&inode->i_lock);
4668 acl = nfsi->nfs4_acl;
4669 if (acl == NULL)
4670 goto out;
4671 if (buf == NULL) /* user is just asking for length */
4672 goto out_len;
4673 if (acl->cached == 0)
4674 goto out;
4675 ret = -ERANGE; /* see getxattr(2) man page */
4676 if (acl->len > buflen)
4677 goto out;
4678 memcpy(buf, acl->data, acl->len);
4679 out_len:
4680 ret = acl->len;
4681 out:
4682 spin_unlock(&inode->i_lock);
4683 return ret;
4684 }
4685
4686 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
4687 {
4688 struct nfs4_cached_acl *acl;
4689 size_t buflen = sizeof(*acl) + acl_len;
4690
4691 if (buflen <= PAGE_SIZE) {
4692 acl = kmalloc(buflen, GFP_KERNEL);
4693 if (acl == NULL)
4694 goto out;
4695 acl->cached = 1;
4696 _copy_from_pages(acl->data, pages, pgbase, acl_len);
4697 } else {
4698 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
4699 if (acl == NULL)
4700 goto out;
4701 acl->cached = 0;
4702 }
4703 acl->len = acl_len;
4704 out:
4705 nfs4_set_cached_acl(inode, acl);
4706 }
4707
4708 /*
4709 * The getxattr API returns the required buffer length when called with a
4710 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
4711 * the required buf. On a NULL buf, we request a page of data from the server,
4712 * guessing that the ACL can be serviced by a single page. If so, we cache
4713 * up to that page of ACL data, and the 2nd call to getxattr is serviced by
4714 * the cache. If not, we throw away the page and cache only the required
4715 * length. The next getxattr call will then produce another round trip to
4716 * the server, this time with the input buf of the required size.
4717 */
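/*
 * Illustrative sketch only (userspace, not part of this file): the
 * two-step pattern described above looks roughly like
 *
 *	len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	buf = malloc(len);
 *	getxattr(path, "system.nfs4_acl", buf, len);
 *
 * i.e. a length probe followed by a second call with a buffer of the
 * reported size.
 */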
4718 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4719 {
4720 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
4721 struct nfs_getaclargs args = {
4722 .fh = NFS_FH(inode),
4723 .acl_pages = pages,
4724 .acl_len = buflen,
4725 };
4726 struct nfs_getaclres res = {
4727 .acl_len = buflen,
4728 };
4729 struct rpc_message msg = {
4730 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
4731 .rpc_argp = &args,
4732 .rpc_resp = &res,
4733 };
4734 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4735 int ret = -ENOMEM, i;
4736
4737 /* As long as we're doing a round trip to the server anyway,
4738 * let's be prepared for a page of acl data. */
4739 if (npages == 0)
4740 npages = 1;
4741 if (npages > ARRAY_SIZE(pages))
4742 return -ERANGE;
4743
4744 for (i = 0; i < npages; i++) {
4745 pages[i] = alloc_page(GFP_KERNEL);
4746 if (!pages[i])
4747 goto out_free;
4748 }
4749
4750 /* for decoding across pages */
4751 res.acl_scratch = alloc_page(GFP_KERNEL);
4752 if (!res.acl_scratch)
4753 goto out_free;
4754
4755 args.acl_len = npages * PAGE_SIZE;
4756
4757 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
4758 __func__, buf, buflen, npages, args.acl_len);
4759 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
4760 &msg, &args.seq_args, &res.seq_res, 0);
4761 if (ret)
4762 goto out_free;
4763
4764 /* Handle the case where the passed-in buffer is too short */
4765 if (res.acl_flags & NFS4_ACL_TRUNC) {
4766 /* Did the user only issue a request for the acl length? */
4767 if (buf == NULL)
4768 goto out_ok;
4769 ret = -ERANGE;
4770 goto out_free;
4771 }
4772 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
4773 if (buf) {
4774 if (res.acl_len > buflen) {
4775 ret = -ERANGE;
4776 goto out_free;
4777 }
4778 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
4779 }
4780 out_ok:
4781 ret = res.acl_len;
4782 out_free:
4783 for (i = 0; i < npages; i++)
4784 if (pages[i])
4785 __free_page(pages[i]);
4786 if (res.acl_scratch)
4787 __free_page(res.acl_scratch);
4788 return ret;
4789 }
4790
4791 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4792 {
4793 struct nfs4_exception exception = { };
4794 ssize_t ret;
4795 do {
4796 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
4797 trace_nfs4_get_acl(inode, ret);
4798 if (ret >= 0)
4799 break;
4800 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
4801 } while (exception.retry);
4802 return ret;
4803 }
4804
4805 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
4806 {
4807 struct nfs_server *server = NFS_SERVER(inode);
4808 int ret;
4809
4810 if (!nfs4_server_supports_acls(server))
4811 return -EOPNOTSUPP;
4812 ret = nfs_revalidate_inode(server, inode);
4813 if (ret < 0)
4814 return ret;
4815 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
4816 nfs_zap_acl_cache(inode);
4817 ret = nfs4_read_cached_acl(inode, buf, buflen);
4818 if (ret != -ENOENT)
4819 /* -ENOENT is returned if there is no ACL or if there is an ACL
4820 * but no cached acl data, just the acl length */
4821 return ret;
4822 return nfs4_get_acl_uncached(inode, buf, buflen);
4823 }
4824
4825 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4826 {
4827 struct nfs_server *server = NFS_SERVER(inode);
4828 struct page *pages[NFS4ACL_MAXPAGES];
4829 struct nfs_setaclargs arg = {
4830 .fh = NFS_FH(inode),
4831 .acl_pages = pages,
4832 .acl_len = buflen,
4833 };
4834 struct nfs_setaclres res;
4835 struct rpc_message msg = {
4836 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
4837 .rpc_argp = &arg,
4838 .rpc_resp = &res,
4839 };
4840 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4841 int ret, i;
4842
4843 if (!nfs4_server_supports_acls(server))
4844 return -EOPNOTSUPP;
4845 if (npages > ARRAY_SIZE(pages))
4846 return -ERANGE;
4847 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages);
4848 if (i < 0)
4849 return i;
4850 nfs4_inode_return_delegation(inode);
4851 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4852
4853 /*
4854 * Free each page after tx, so the only ref left is
4855 * held by the network stack
4856 */
4857 for (; i > 0; i--)
4858 put_page(pages[i-1]);
4859
4860 /*
4861 * An ACL update can result in an inode attribute update,
4862 * so mark the attribute cache invalid.
4863 */
4864 spin_lock(&inode->i_lock);
4865 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
4866 spin_unlock(&inode->i_lock);
4867 nfs_access_zap_cache(inode);
4868 nfs_zap_acl_cache(inode);
4869 return ret;
4870 }
4871
4872 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4873 {
4874 struct nfs4_exception exception = { };
4875 int err;
4876 do {
4877 err = __nfs4_proc_set_acl(inode, buf, buflen);
4878 trace_nfs4_set_acl(inode, err);
4879 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4880 &exception);
4881 } while (exception.retry);
4882 return err;
4883 }
4884
4885 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
4886 static int _nfs4_get_security_label(struct inode *inode, void *buf,
4887 size_t buflen)
4888 {
4889 struct nfs_server *server = NFS_SERVER(inode);
4890 struct nfs_fattr fattr;
4891 struct nfs4_label label = {0, 0, buflen, buf};
4892
4893 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
4894 struct nfs4_getattr_arg arg = {
4895 .fh = NFS_FH(inode),
4896 .bitmask = bitmask,
4897 };
4898 struct nfs4_getattr_res res = {
4899 .fattr = &fattr,
4900 .label = &label,
4901 .server = server,
4902 };
4903 struct rpc_message msg = {
4904 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
4905 .rpc_argp = &arg,
4906 .rpc_resp = &res,
4907 };
4908 int ret;
4909
4910 nfs_fattr_init(&fattr);
4911
4912 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
4913 if (ret)
4914 return ret;
4915 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
4916 return -ENOENT;
4917 if (buflen < label.len)
4918 return -ERANGE;
4919 return 0;
4920 }
4921
4922 static int nfs4_get_security_label(struct inode *inode, void *buf,
4923 size_t buflen)
4924 {
4925 struct nfs4_exception exception = { };
4926 int err;
4927
4928 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
4929 return -EOPNOTSUPP;
4930
4931 do {
4932 err = _nfs4_get_security_label(inode, buf, buflen);
4933 trace_nfs4_get_security_label(inode, err);
4934 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4935 &exception);
4936 } while (exception.retry);
4937 return err;
4938 }
4939
4940 static int _nfs4_do_set_security_label(struct inode *inode,
4941 struct nfs4_label *ilabel,
4942 struct nfs_fattr *fattr,
4943 struct nfs4_label *olabel)
4944 {
4945
4946 struct iattr sattr = {0};
4947 struct nfs_server *server = NFS_SERVER(inode);
4948 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
4949 struct nfs_setattrargs arg = {
4950 .fh = NFS_FH(inode),
4951 .iap = &sattr,
4952 .server = server,
4953 .bitmask = bitmask,
4954 .label = ilabel,
4955 };
4956 struct nfs_setattrres res = {
4957 .fattr = fattr,
4958 .label = olabel,
4959 .server = server,
4960 };
4961 struct rpc_message msg = {
4962 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
4963 .rpc_argp = &arg,
4964 .rpc_resp = &res,
4965 };
4966 int status;
4967
4968 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
4969
4970 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4971 if (status)
4972 dprintk("%s failed: %d\n", __func__, status);
4973
4974 return status;
4975 }
4976
4977 static int nfs4_do_set_security_label(struct inode *inode,
4978 struct nfs4_label *ilabel,
4979 struct nfs_fattr *fattr,
4980 struct nfs4_label *olabel)
4981 {
4982 struct nfs4_exception exception = { };
4983 int err;
4984
4985 do {
4986 err = _nfs4_do_set_security_label(inode, ilabel,
4987 fattr, olabel);
4988 trace_nfs4_set_security_label(inode, err);
4989 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4990 &exception);
4991 } while (exception.retry);
4992 return err;
4993 }
4994
4995 static int
4996 nfs4_set_security_label(struct dentry *dentry, const void *buf, size_t buflen)
4997 {
4998 struct nfs4_label ilabel, *olabel = NULL;
4999 struct nfs_fattr fattr;
5000 struct rpc_cred *cred;
5001 struct inode *inode = d_inode(dentry);
5002 int status;
5003
5004 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
5005 return -EOPNOTSUPP;
5006
5007 nfs_fattr_init(&fattr);
5008
5009 ilabel.pi = 0;
5010 ilabel.lfs = 0;
5011 ilabel.label = (char *)buf;
5012 ilabel.len = buflen;
5013
5014 cred = rpc_lookup_cred();
5015 if (IS_ERR(cred))
5016 return PTR_ERR(cred);
5017
5018 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
5019 if (IS_ERR(olabel)) {
5020 status = PTR_ERR(olabel);
5021 goto out;
5022 }
5023
5024 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel);
5025 if (status == 0)
5026 nfs_setsecurity(inode, &fattr, olabel);
5027
5028 nfs4_label_free(olabel);
5029 out:
5030 put_rpccred(cred);
5031 return status;
5032 }
5033 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */
5034
5035
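/*
 * Build the boot verifier sent with SETCLIENTID/EXCHANGE_ID. It normally
 * encodes the recorded boot time; while client state is being purged an
 * impossible timestamp is substituted so the verifier cannot match any
 * previously generated one.
 */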
5036 static void nfs4_init_boot_verifier(const struct nfs_client *clp,
5037 nfs4_verifier *bootverf)
5038 {
5039 __be32 verf[2];
5040
5041 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
5042 /* An impossible timestamp guarantees this value
5043 * will never match a generated boot time. */
5044 verf[0] = 0;
5045 verf[1] = cpu_to_be32(NSEC_PER_SEC + 1);
5046 } else {
5047 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
5048 verf[0] = cpu_to_be32(nn->boot_time.tv_sec);
5049 verf[1] = cpu_to_be32(nn->boot_time.tv_nsec);
5050 }
5051 memcpy(bootverf->data, verf, sizeof(bootverf->data));
5052 }
5053
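/*
 * Helpers that build the nfs_client_id4 string presented to the server.
 * The "nonuniform" form below embeds the client's IP address and
 * transport protocol; the "uniform" form further down uses only the
 * node name (optionally combined with nfs4_client_id_uniquifier) so it
 * does not change when the client's address does.
 */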
5054 static int
5055 nfs4_init_nonuniform_client_string(struct nfs_client *clp)
5056 {
5057 size_t len;
5058 char *str;
5059
5060 if (clp->cl_owner_id != NULL)
5061 return 0;
5062
5063 rcu_read_lock();
5064 len = 14 + strlen(clp->cl_ipaddr) + 1 +
5065 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
5066 1 +
5067 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)) +
5068 1;
5069 rcu_read_unlock();
5070
5071 if (len > NFS4_OPAQUE_LIMIT + 1)
5072 return -EINVAL;
5073
5074 /*
5075 * Since this string is allocated at mount time, and held until the
5076 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5077 * about a memory-reclaim deadlock.
5078 */
5079 str = kmalloc(len, GFP_KERNEL);
5080 if (!str)
5081 return -ENOMEM;
5082
5083 rcu_read_lock();
5084 scnprintf(str, len, "Linux NFSv4.0 %s/%s %s",
5085 clp->cl_ipaddr,
5086 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
5087 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO));
5088 rcu_read_unlock();
5089
5090 clp->cl_owner_id = str;
5091 return 0;
5092 }
5093
5094 static int
5095 nfs4_init_uniquifier_client_string(struct nfs_client *clp)
5096 {
5097 size_t len;
5098 char *str;
5099
5100 len = 10 + 10 + 1 + 10 + 1 +
5101 strlen(nfs4_client_id_uniquifier) + 1 +
5102 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5103
5104 if (len > NFS4_OPAQUE_LIMIT + 1)
5105 return -EINVAL;
5106
5107 /*
5108 * Since this string is allocated at mount time, and held until the
5109 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5110 * about a memory-reclaim deadlock.
5111 */
5112 str = kmalloc(len, GFP_KERNEL);
5113 if (!str)
5114 return -ENOMEM;
5115
5116 scnprintf(str, len, "Linux NFSv%u.%u %s/%s",
5117 clp->rpc_ops->version, clp->cl_minorversion,
5118 nfs4_client_id_uniquifier,
5119 clp->cl_rpcclient->cl_nodename);
5120 clp->cl_owner_id = str;
5121 return 0;
5122 }
5123
5124 static int
5125 nfs4_init_uniform_client_string(struct nfs_client *clp)
5126 {
5127 size_t len;
5128 char *str;
5129
5130 if (clp->cl_owner_id != NULL)
5131 return 0;
5132
5133 if (nfs4_client_id_uniquifier[0] != '\0')
5134 return nfs4_init_uniquifier_client_string(clp);
5135
5136 len = 10 + 10 + 1 + 10 + 1 +
5137 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5138
5139 if (len > NFS4_OPAQUE_LIMIT + 1)
5140 return -EINVAL;
5141
5142 /*
5143 * Since this string is allocated at mount time, and held until the
5144 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5145 * about a memory-reclaim deadlock.
5146 */
5147 str = kmalloc(len, GFP_KERNEL);
5148 if (!str)
5149 return -ENOMEM;
5150
5151 scnprintf(str, len, "Linux NFSv%u.%u %s",
5152 clp->rpc_ops->version, clp->cl_minorversion,
5153 clp->cl_rpcclient->cl_nodename);
5154 clp->cl_owner_id = str;
5155 return 0;
5156 }
5157
5158 /*
5159 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
5160 * services. Advertise one based on the address family of the
5161 * clientaddr.
5162 */
5163 static unsigned int
5164 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
5165 {
5166 if (strchr(clp->cl_ipaddr, ':') != NULL)
5167 return scnprintf(buf, len, "tcp6");
5168 else
5169 return scnprintf(buf, len, "tcp");
5170 }
5171
5172 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
5173 {
5174 struct nfs4_setclientid *sc = calldata;
5175
5176 if (task->tk_status == 0)
5177 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
5178 }
5179
5180 static const struct rpc_call_ops nfs4_setclientid_ops = {
5181 .rpc_call_done = nfs4_setclientid_done,
5182 };
5183
5184 /**
5185 * nfs4_proc_setclientid - Negotiate client ID
5186 * @clp: state data structure
5187 * @program: RPC program for NFSv4 callback service
5188 * @port: IP port number for NFS4 callback service
5189 * @cred: RPC credential to use for this call
5190 * @res: where to place the result
5191 *
5192 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5193 */
5194 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
5195 unsigned short port, struct rpc_cred *cred,
5196 struct nfs4_setclientid_res *res)
5197 {
5198 nfs4_verifier sc_verifier;
5199 struct nfs4_setclientid setclientid = {
5200 .sc_verifier = &sc_verifier,
5201 .sc_prog = program,
5202 .sc_clnt = clp,
5203 };
5204 struct rpc_message msg = {
5205 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
5206 .rpc_argp = &setclientid,
5207 .rpc_resp = res,
5208 .rpc_cred = cred,
5209 };
5210 struct rpc_task *task;
5211 struct rpc_task_setup task_setup_data = {
5212 .rpc_client = clp->cl_rpcclient,
5213 .rpc_message = &msg,
5214 .callback_ops = &nfs4_setclientid_ops,
5215 .callback_data = &setclientid,
5216 .flags = RPC_TASK_TIMEOUT,
5217 };
5218 int status;
5219
5220 /* nfs_client_id4 */
5221 nfs4_init_boot_verifier(clp, &sc_verifier);
5222
5223 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
5224 status = nfs4_init_uniform_client_string(clp);
5225 else
5226 status = nfs4_init_nonuniform_client_string(clp);
5227
5228 if (status)
5229 goto out;
5230
5231 /* cb_client4 */
5232 setclientid.sc_netid_len =
5233 nfs4_init_callback_netid(clp,
5234 setclientid.sc_netid,
5235 sizeof(setclientid.sc_netid));
5236 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
5237 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
5238 clp->cl_ipaddr, port >> 8, port & 255);
5239
5240 dprintk("NFS call setclientid auth=%s, '%s'\n",
5241 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5242 clp->cl_owner_id);
5243 task = rpc_run_task(&task_setup_data);
5244 if (IS_ERR(task)) {
5245 status = PTR_ERR(task);
5246 goto out;
5247 }
5248 status = task->tk_status;
5249 if (setclientid.sc_cred) {
5250 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
5251 put_rpccred(setclientid.sc_cred);
5252 }
5253 rpc_put_task(task);
5254 out:
5255 trace_nfs4_setclientid(clp, status);
5256 dprintk("NFS reply setclientid: %d\n", status);
5257 return status;
5258 }
5259
5260 /**
5261 * nfs4_proc_setclientid_confirm - Confirm client ID
5262 * @clp: state data structure
5263 * @arg: result of a previous SETCLIENTID
5264 * @cred: RPC credential to use for this call
5265 *
5266 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5267 */
5268 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
5269 struct nfs4_setclientid_res *arg,
5270 struct rpc_cred *cred)
5271 {
5272 struct rpc_message msg = {
5273 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
5274 .rpc_argp = arg,
5275 .rpc_cred = cred,
5276 };
5277 int status;
5278
5279 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
5280 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5281 clp->cl_clientid);
5282 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5283 trace_nfs4_setclientid_confirm(clp, status);
5284 dprintk("NFS reply setclientid_confirm: %d\n", status);
5285 return status;
5286 }
5287
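/*
 * State carried across an asynchronous DELEGRETURN: the filehandle and
 * stateid being returned, the attributes that come back in the reply,
 * and the pNFS return-on-close (roc) barrier bookkeeping.
 */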
5288 struct nfs4_delegreturndata {
5289 struct nfs4_delegreturnargs args;
5290 struct nfs4_delegreturnres res;
5291 struct nfs_fh fh;
5292 nfs4_stateid stateid;
5293 unsigned long timestamp;
5294 struct nfs_fattr fattr;
5295 int rpc_status;
5296 struct inode *inode;
5297 bool roc;
5298 u32 roc_barrier;
5299 };
5300
5301 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
5302 {
5303 struct nfs4_delegreturndata *data = calldata;
5304
5305 if (!nfs4_sequence_done(task, &data->res.seq_res))
5306 return;
5307
5308 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
5309 switch (task->tk_status) {
5310 case 0:
5311 renew_lease(data->res.server, data->timestamp);
5312 case -NFS4ERR_ADMIN_REVOKED:
5313 case -NFS4ERR_DELEG_REVOKED:
5314 case -NFS4ERR_BAD_STATEID:
5315 case -NFS4ERR_OLD_STATEID:
5316 case -NFS4ERR_STALE_STATEID:
5317 case -NFS4ERR_EXPIRED:
5318 task->tk_status = 0;
5319 if (data->roc)
5320 pnfs_roc_set_barrier(data->inode, data->roc_barrier);
5321 break;
5322 default:
5323 if (nfs4_async_handle_error(task, data->res.server,
5324 NULL, NULL) == -EAGAIN) {
5325 rpc_restart_call_prepare(task);
5326 return;
5327 }
5328 }
5329 data->rpc_status = task->tk_status;
5330 }
5331
5332 static void nfs4_delegreturn_release(void *calldata)
5333 {
5334 struct nfs4_delegreturndata *data = calldata;
5335 struct inode *inode = data->inode;
5336
5337 if (inode) {
5338 if (data->roc)
5339 pnfs_roc_release(inode);
5340 nfs_iput_and_deactive(inode);
5341 }
5342 kfree(calldata);
5343 }
5344
5345 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
5346 {
5347 struct nfs4_delegreturndata *d_data;
5348
5349 d_data = (struct nfs4_delegreturndata *)data;
5350
5351 if (nfs4_wait_on_layoutreturn(d_data->inode, task))
5352 return;
5353
5354 if (d_data->roc)
5355 pnfs_roc_get_barrier(d_data->inode, &d_data->roc_barrier);
5356
5357 nfs4_setup_sequence(d_data->res.server,
5358 &d_data->args.seq_args,
5359 &d_data->res.seq_res,
5360 task);
5361 }
5362
5363 static const struct rpc_call_ops nfs4_delegreturn_ops = {
5364 .rpc_call_prepare = nfs4_delegreturn_prepare,
5365 .rpc_call_done = nfs4_delegreturn_done,
5366 .rpc_release = nfs4_delegreturn_release,
5367 };
5368
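/*
 * Queue an asynchronous DELEGRETURN. When 'issync' is set, wait for the
 * reply and fold the returned attributes back into the inode; otherwise
 * return as soon as the task has been started.
 */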
5369 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5370 {
5371 struct nfs4_delegreturndata *data;
5372 struct nfs_server *server = NFS_SERVER(inode);
5373 struct rpc_task *task;
5374 struct rpc_message msg = {
5375 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
5376 .rpc_cred = cred,
5377 };
5378 struct rpc_task_setup task_setup_data = {
5379 .rpc_client = server->client,
5380 .rpc_message = &msg,
5381 .callback_ops = &nfs4_delegreturn_ops,
5382 .flags = RPC_TASK_ASYNC,
5383 };
5384 int status = 0;
5385
5386 data = kzalloc(sizeof(*data), GFP_NOFS);
5387 if (data == NULL)
5388 return -ENOMEM;
5389 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
5390
5391 nfs4_state_protect(server->nfs_client,
5392 NFS_SP4_MACH_CRED_CLEANUP,
5393 &task_setup_data.rpc_client, &msg);
5394
5395 data->args.fhandle = &data->fh;
5396 data->args.stateid = &data->stateid;
5397 data->args.bitmask = server->cache_consistency_bitmask;
5398 nfs_copy_fh(&data->fh, NFS_FH(inode));
5399 nfs4_stateid_copy(&data->stateid, stateid);
5400 data->res.fattr = &data->fattr;
5401 data->res.server = server;
5402 nfs_fattr_init(data->res.fattr);
5403 data->timestamp = jiffies;
5404 data->rpc_status = 0;
5405 data->inode = nfs_igrab_and_active(inode);
5406 if (data->inode)
5407 data->roc = nfs4_roc(inode);
5408
5409 task_setup_data.callback_data = data;
5410 msg.rpc_argp = &data->args;
5411 msg.rpc_resp = &data->res;
5412 task = rpc_run_task(&task_setup_data);
5413 if (IS_ERR(task))
5414 return PTR_ERR(task);
5415 if (!issync)
5416 goto out;
5417 status = nfs4_wait_for_completion_rpc_task(task);
5418 if (status != 0)
5419 goto out;
5420 status = data->rpc_status;
5421 if (status == 0)
5422 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
5423 else
5424 nfs_refresh_inode(inode, &data->fattr);
5425 out:
5426 rpc_put_task(task);
5427 return status;
5428 }
5429
5430 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5431 {
5432 struct nfs_server *server = NFS_SERVER(inode);
5433 struct nfs4_exception exception = { };
5434 int err;
5435 do {
5436 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
5437 trace_nfs4_delegreturn(inode, stateid, err);
5438 switch (err) {
5439 case -NFS4ERR_STALE_STATEID:
5440 case -NFS4ERR_EXPIRED:
5441 case 0:
5442 return 0;
5443 }
5444 err = nfs4_handle_exception(server, err, &exception);
5445 } while (exception.retry);
5446 return err;
5447 }
5448
5449 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
5450 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
5451
5452 /*
5453 * sleep, with exponential backoff, and retry the LOCK operation.
5454 */
5455 static unsigned long
5456 nfs4_set_lock_task_retry(unsigned long timeout)
5457 {
5458 freezable_schedule_timeout_killable_unsafe(timeout);
5459 timeout <<= 1;
5460 if (timeout > NFS4_LOCK_MAXTIMEOUT)
5461 return NFS4_LOCK_MAXTIMEOUT;
5462 return timeout;
5463 }
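/*
 * Starting from NFS4_LOCK_MINTIMEOUT, the doubling above means that
 * successive retries sleep for roughly 1s, 2s, 4s, 8s, 16s and then a
 * capped 30s between LOCK attempts.
 */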
5464
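/*
 * LOCKT: ask the server whether the requested lock could be granted.
 * On success the request is marked F_UNLCK (no conflicting lock held);
 * NFS4ERR_DENIED leaves the conflicting lock's description in 'request'.
 */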
5465 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5466 {
5467 struct inode *inode = state->inode;
5468 struct nfs_server *server = NFS_SERVER(inode);
5469 struct nfs_client *clp = server->nfs_client;
5470 struct nfs_lockt_args arg = {
5471 .fh = NFS_FH(inode),
5472 .fl = request,
5473 };
5474 struct nfs_lockt_res res = {
5475 .denied = request,
5476 };
5477 struct rpc_message msg = {
5478 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
5479 .rpc_argp = &arg,
5480 .rpc_resp = &res,
5481 .rpc_cred = state->owner->so_cred,
5482 };
5483 struct nfs4_lock_state *lsp;
5484 int status;
5485
5486 arg.lock_owner.clientid = clp->cl_clientid;
5487 status = nfs4_set_lock_state(state, request);
5488 if (status != 0)
5489 goto out;
5490 lsp = request->fl_u.nfs4_fl.owner;
5491 arg.lock_owner.id = lsp->ls_seqid.owner_id;
5492 arg.lock_owner.s_dev = server->s_dev;
5493 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
5494 switch (status) {
5495 case 0:
5496 request->fl_type = F_UNLCK;
5497 break;
5498 case -NFS4ERR_DENIED:
5499 status = 0;
5500 }
5501 request->fl_ops->fl_release_private(request);
5502 request->fl_ops = NULL;
5503 out:
5504 return status;
5505 }
5506
5507 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5508 {
5509 struct nfs4_exception exception = { };
5510 int err;
5511
5512 do {
5513 err = _nfs4_proc_getlk(state, cmd, request);
5514 trace_nfs4_get_lock(request, state, cmd, err);
5515 err = nfs4_handle_exception(NFS_SERVER(state->inode), err,
5516 &exception);
5517 } while (exception.retry);
5518 return err;
5519 }
5520
5521 static int do_vfs_lock(struct inode *inode, struct file_lock *fl)
5522 {
5523 return locks_lock_inode_wait(inode, fl);
5524 }
5525
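/*
 * Everything an asynchronous LOCKU needs to outlive its caller: a
 * private copy of the file_lock, the lock state and seqid being
 * released, and the open context pinned so the file cannot be closed
 * while the unlock is still in flight.
 */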
5526 struct nfs4_unlockdata {
5527 struct nfs_locku_args arg;
5528 struct nfs_locku_res res;
5529 struct nfs4_lock_state *lsp;
5530 struct nfs_open_context *ctx;
5531 struct file_lock fl;
5532 struct nfs_server *server;
5533 unsigned long timestamp;
5534 };
5535
5536 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
5537 struct nfs_open_context *ctx,
5538 struct nfs4_lock_state *lsp,
5539 struct nfs_seqid *seqid)
5540 {
5541 struct nfs4_unlockdata *p;
5542 struct inode *inode = lsp->ls_state->inode;
5543
5544 p = kzalloc(sizeof(*p), GFP_NOFS);
5545 if (p == NULL)
5546 return NULL;
5547 p->arg.fh = NFS_FH(inode);
5548 p->arg.fl = &p->fl;
5549 p->arg.seqid = seqid;
5550 p->res.seqid = seqid;
5551 p->lsp = lsp;
5552 atomic_inc(&lsp->ls_count);
5553 /* Ensure we don't close the file until we're done freeing locks! */
5554 p->ctx = get_nfs_open_context(ctx);
5555 memcpy(&p->fl, fl, sizeof(p->fl));
5556 p->server = NFS_SERVER(inode);
5557 return p;
5558 }
5559
5560 static void nfs4_locku_release_calldata(void *data)
5561 {
5562 struct nfs4_unlockdata *calldata = data;
5563 nfs_free_seqid(calldata->arg.seqid);
5564 nfs4_put_lock_state(calldata->lsp);
5565 put_nfs_open_context(calldata->ctx);
5566 kfree(calldata);
5567 }
5568
5569 static void nfs4_locku_done(struct rpc_task *task, void *data)
5570 {
5571 struct nfs4_unlockdata *calldata = data;
5572
5573 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
5574 return;
5575 switch (task->tk_status) {
5576 case 0:
5577 renew_lease(calldata->server, calldata->timestamp);
5578 do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl);
5579 if (nfs4_update_lock_stateid(calldata->lsp,
5580 &calldata->res.stateid))
5581 break;
5582 case -NFS4ERR_BAD_STATEID:
5583 case -NFS4ERR_OLD_STATEID:
5584 case -NFS4ERR_STALE_STATEID:
5585 case -NFS4ERR_EXPIRED:
5586 if (!nfs4_stateid_match(&calldata->arg.stateid,
5587 &calldata->lsp->ls_stateid))
5588 rpc_restart_call_prepare(task);
5589 break;
5590 default:
5591 if (nfs4_async_handle_error(task, calldata->server,
5592 NULL, NULL) == -EAGAIN)
5593 rpc_restart_call_prepare(task);
5594 }
5595 nfs_release_seqid(calldata->arg.seqid);
5596 }
5597
5598 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
5599 {
5600 struct nfs4_unlockdata *calldata = data;
5601
5602 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
5603 goto out_wait;
5604 nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid);
5605 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
5606 /* Note: exit _without_ running nfs4_locku_done */
5607 goto out_no_action;
5608 }
5609 calldata->timestamp = jiffies;
5610 if (nfs4_setup_sequence(calldata->server,
5611 &calldata->arg.seq_args,
5612 &calldata->res.seq_res,
5613 task) != 0)
5614 nfs_release_seqid(calldata->arg.seqid);
5615 return;
5616 out_no_action:
5617 task->tk_action = NULL;
5618 out_wait:
5619 nfs4_sequence_done(task, &calldata->res.seq_res);
5620 }
5621
5622 static const struct rpc_call_ops nfs4_locku_ops = {
5623 .rpc_call_prepare = nfs4_locku_prepare,
5624 .rpc_call_done = nfs4_locku_done,
5625 .rpc_release = nfs4_locku_release_calldata,
5626 };
5627
5628 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
5629 struct nfs_open_context *ctx,
5630 struct nfs4_lock_state *lsp,
5631 struct nfs_seqid *seqid)
5632 {
5633 struct nfs4_unlockdata *data;
5634 struct rpc_message msg = {
5635 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
5636 .rpc_cred = ctx->cred,
5637 };
5638 struct rpc_task_setup task_setup_data = {
5639 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
5640 .rpc_message = &msg,
5641 .callback_ops = &nfs4_locku_ops,
5642 .workqueue = nfsiod_workqueue,
5643 .flags = RPC_TASK_ASYNC,
5644 };
5645
5646 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
5647 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
5648
5649 /* Ensure this is an unlock - when canceling a lock, the
5650 * canceled lock is passed in, and it won't be an unlock.
5651 */
5652 fl->fl_type = F_UNLCK;
5653
5654 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
5655 if (data == NULL) {
5656 nfs_free_seqid(seqid);
5657 return ERR_PTR(-ENOMEM);
5658 }
5659
5660 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
5661 msg.rpc_argp = &data->arg;
5662 msg.rpc_resp = &data->res;
5663 task_setup_data.callback_data = data;
5664 return rpc_run_task(&task_setup_data);
5665 }
5666
5667 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
5668 {
5669 struct inode *inode = state->inode;
5670 struct nfs4_state_owner *sp = state->owner;
5671 struct nfs_inode *nfsi = NFS_I(inode);
5672 struct nfs_seqid *seqid;
5673 struct nfs4_lock_state *lsp;
5674 struct rpc_task *task;
5675 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
5676 int status = 0;
5677 unsigned char fl_flags = request->fl_flags;
5678
5679 status = nfs4_set_lock_state(state, request);
5680 /* Unlock _before_ we do the RPC call */
5681 request->fl_flags |= FL_EXISTS;
5682 /* Exclude nfs_delegation_claim_locks() */
5683 mutex_lock(&sp->so_delegreturn_mutex);
5684 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
5685 down_read(&nfsi->rwsem);
5686 if (do_vfs_lock(inode, request) == -ENOENT) {
5687 up_read(&nfsi->rwsem);
5688 mutex_unlock(&sp->so_delegreturn_mutex);
5689 goto out;
5690 }
5691 up_read(&nfsi->rwsem);
5692 mutex_unlock(&sp->so_delegreturn_mutex);
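	/* The local lock has been released; now bail out if the lock
	 * state could not be set up for the RPC call. */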
5693 if (status != 0)
5694 goto out;
5695 /* Is this a delegated lock? */
5696 lsp = request->fl_u.nfs4_fl.owner;
5697 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
5698 goto out;
5699 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
5700 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
5701 status = -ENOMEM;
5702 if (IS_ERR(seqid))
5703 goto out;
5704 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
5705 status = PTR_ERR(task);
5706 if (IS_ERR(task))
5707 goto out;
5708 status = nfs4_wait_for_completion_rpc_task(task);
5709 rpc_put_task(task);
5710 out:
5711 request->fl_flags = fl_flags;
5712 trace_nfs4_unlock(request, state, F_SETLK, status);
5713 return status;
5714 }
5715
5716 struct nfs4_lockdata {
5717 struct nfs_lock_args arg;
5718 struct nfs_lock_res res;
5719 struct nfs4_lock_state *lsp;
5720 struct nfs_open_context *ctx;
5721 struct file_lock fl;
5722 unsigned long timestamp;
5723 int rpc_status;
5724 int cancelled;
5725 struct nfs_server *server;
5726 };
5727
5728 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
5729 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
5730 gfp_t gfp_mask)
5731 {
5732 struct nfs4_lockdata *p;
5733 struct inode *inode = lsp->ls_state->inode;
5734 struct nfs_server *server = NFS_SERVER(inode);
5735 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
5736
5737 p = kzalloc(sizeof(*p), gfp_mask);
5738 if (p == NULL)
5739 return NULL;
5740
5741 p->arg.fh = NFS_FH(inode);
5742 p->arg.fl = &p->fl;
5743 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
5744 if (IS_ERR(p->arg.open_seqid))
5745 goto out_free;
5746 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
5747 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
5748 if (IS_ERR(p->arg.lock_seqid))
5749 goto out_free_seqid;
5750 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
5751 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
5752 p->arg.lock_owner.s_dev = server->s_dev;
5753 p->res.lock_seqid = p->arg.lock_seqid;
5754 p->lsp = lsp;
5755 p->server = server;
5756 atomic_inc(&lsp->ls_count);
5757 p->ctx = get_nfs_open_context(ctx);
5758 get_file(fl->fl_file);
5759 memcpy(&p->fl, fl, sizeof(p->fl));
5760 return p;
5761 out_free_seqid:
5762 nfs_free_seqid(p->arg.open_seqid);
5763 out_free:
5764 kfree(p);
5765 return NULL;
5766 }
5767
5768 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
5769 {
5770 struct nfs4_lockdata *data = calldata;
5771 struct nfs4_state *state = data->lsp->ls_state;
5772
5773 dprintk("%s: begin!\n", __func__);
5774 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
5775 goto out_wait;
5776 /* Do we need to do an open_to_lock_owner? */
5777 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
5778 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
5779 goto out_release_lock_seqid;
5780 }
5781 nfs4_stateid_copy(&data->arg.open_stateid,
5782 &state->open_stateid);
5783 data->arg.new_lock_owner = 1;
5784 data->res.open_seqid = data->arg.open_seqid;
5785 } else {
5786 data->arg.new_lock_owner = 0;
5787 nfs4_stateid_copy(&data->arg.lock_stateid,
5788 &data->lsp->ls_stateid);
5789 }
5790 if (!nfs4_valid_open_stateid(state)) {
5791 data->rpc_status = -EBADF;
5792 task->tk_action = NULL;
5793 goto out_release_open_seqid;
5794 }
5795 data->timestamp = jiffies;
5796 if (nfs4_setup_sequence(data->server,
5797 &data->arg.seq_args,
5798 &data->res.seq_res,
5799 task) == 0)
5800 return;
5801 out_release_open_seqid:
5802 nfs_release_seqid(data->arg.open_seqid);
5803 out_release_lock_seqid:
5804 nfs_release_seqid(data->arg.lock_seqid);
5805 out_wait:
5806 nfs4_sequence_done(task, &data->res.seq_res);
5807 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
5808 }
5809
5810 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
5811 {
5812 struct nfs4_lockdata *data = calldata;
5813 struct nfs4_lock_state *lsp = data->lsp;
5814
5815 dprintk("%s: begin!\n", __func__);
5816
5817 if (!nfs4_sequence_done(task, &data->res.seq_res))
5818 return;
5819
5820 data->rpc_status = task->tk_status;
5821 switch (task->tk_status) {
5822 case 0:
5823 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
5824 data->timestamp);
5825 if (data->arg.new_lock) {
5826 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
5827 if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) {
5828 rpc_restart_call_prepare(task);
5829 break;
5830 }
5831 }
5832 if (data->arg.new_lock_owner != 0) {
5833 nfs_confirm_seqid(&lsp->ls_seqid, 0);
5834 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
5835 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
5836 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
5837 rpc_restart_call_prepare(task);
5838 break;
5839 case -NFS4ERR_BAD_STATEID:
5840 case -NFS4ERR_OLD_STATEID:
5841 case -NFS4ERR_STALE_STATEID:
5842 case -NFS4ERR_EXPIRED:
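		/* If the stateid we sent is no longer current, restart the
		 * call so that rpc_call_prepare() resends it with the
		 * updated stateid. Otherwise leave the error to the caller,
		 * which will schedule state recovery. */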
5843 if (data->arg.new_lock_owner != 0) {
5844 if (!nfs4_stateid_match(&data->arg.open_stateid,
5845 &lsp->ls_state->open_stateid))
5846 rpc_restart_call_prepare(task);
5847 } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
5848 &lsp->ls_stateid))
5849 rpc_restart_call_prepare(task);
5850 }
5851 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
5852 }
5853
5854 static void nfs4_lock_release(void *calldata)
5855 {
5856 struct nfs4_lockdata *data = calldata;
5857
5858 dprintk("%s: begin!\n", __func__);
5859 nfs_free_seqid(data->arg.open_seqid);
5860 if (data->cancelled != 0) {
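		/* The LOCK call was interrupted before it completed: send
		 * a LOCKU in case the server granted the lock anyway. */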
5861 struct rpc_task *task;
5862 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
5863 data->arg.lock_seqid);
5864 if (!IS_ERR(task))
5865 rpc_put_task_async(task);
5866 dprintk("%s: cancelling lock!\n", __func__);
5867 } else
5868 nfs_free_seqid(data->arg.lock_seqid);
5869 nfs4_put_lock_state(data->lsp);
5870 put_nfs_open_context(data->ctx);
5871 fput(data->fl.fl_file);
5872 kfree(data);
5873 dprintk("%s: done!\n", __func__);
5874 }
5875
5876 static const struct rpc_call_ops nfs4_lock_ops = {
5877 .rpc_call_prepare = nfs4_lock_prepare,
5878 .rpc_call_done = nfs4_lock_done,
5879 .rpc_release = nfs4_lock_release,
5880 };
5881
5882 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
5883 {
5884 switch (error) {
5885 case -NFS4ERR_ADMIN_REVOKED:
5886 case -NFS4ERR_BAD_STATEID:
5887 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
5888 if (new_lock_owner != 0 ||
5889 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
5890 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
5891 break;
5892 case -NFS4ERR_STALE_STATEID:
5893 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
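		/* Fall through: a stale stateid also requires lease recovery */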
5894 case -NFS4ERR_EXPIRED:
5895 nfs4_schedule_lease_recovery(server->nfs_client);
5896 	}
5897 }
5898
5899 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
5900 {
5901 struct nfs4_lockdata *data;
5902 struct rpc_task *task;
5903 struct rpc_message msg = {
5904 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
5905 .rpc_cred = state->owner->so_cred,
5906 };
5907 struct rpc_task_setup task_setup_data = {
5908 .rpc_client = NFS_CLIENT(state->inode),
5909 .rpc_message = &msg,
5910 .callback_ops = &nfs4_lock_ops,
5911 .workqueue = nfsiod_workqueue,
5912 .flags = RPC_TASK_ASYNC,
5913 };
5914 int ret;
5915
5916 dprintk("%s: begin!\n", __func__);
5917 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
5918 fl->fl_u.nfs4_fl.owner,
5919 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
5920 if (data == NULL)
5921 return -ENOMEM;
5922 if (IS_SETLKW(cmd))
5923 data->arg.block = 1;
5924 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
5925 msg.rpc_argp = &data->arg;
5926 msg.rpc_resp = &data->res;
5927 task_setup_data.callback_data = data;
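	/* Reclaim and expired-lock recovery must be able to run while the
	 * state manager is recovering the session, so mark the sequence
	 * as privileged. */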
5928 if (recovery_type > NFS_LOCK_NEW) {
5929 if (recovery_type == NFS_LOCK_RECLAIM)
5930 data->arg.reclaim = NFS_LOCK_RECLAIM;
5931 nfs4_set_sequence_privileged(&data->arg.seq_args);
5932 } else
5933 data->arg.new_lock = 1;
5934 task = rpc_run_task(&task_setup_data);
5935 if (IS_ERR(task))
5936 return PTR_ERR(task);
5937 ret = nfs4_wait_for_completion_rpc_task(task);
5938 if (ret == 0) {
5939 ret = data->rpc_status;
5940 if (ret)
5941 nfs4_handle_setlk_error(data->server, data->lsp,
5942 data->arg.new_lock_owner, ret);
5943 } else
5944 data->cancelled = 1;
5945 rpc_put_task(task);
5946 dprintk("%s: done, ret = %d!\n", __func__, ret);
5947 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
5948 return ret;
5949 }
5950
5951 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
5952 {
5953 struct nfs_server *server = NFS_SERVER(state->inode);
5954 struct nfs4_exception exception = {
5955 .inode = state->inode,
5956 };
5957 int err;
5958
5959 do {
5960 /* Cache the lock if possible... */
5961 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
5962 return 0;
5963 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
5964 if (err != -NFS4ERR_DELAY)
5965 break;
5966 nfs4_handle_exception(server, err, &exception);
5967 } while (exception.retry);
5968 return err;
5969 }
5970
5971 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
5972 {
5973 struct nfs_server *server = NFS_SERVER(state->inode);
5974 struct nfs4_exception exception = {
5975 .inode = state->inode,
5976 };
5977 int err;
5978
5979 err = nfs4_set_lock_state(state, request);
5980 if (err != 0)
5981 return err;
5982 if (!recover_lost_locks) {
5983 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
5984 return 0;
5985 }
5986 do {
5987 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
5988 return 0;
5989 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
5990 switch (err) {
5991 default:
5992 goto out;
5993 case -NFS4ERR_GRACE:
5994 case -NFS4ERR_DELAY:
5995 nfs4_handle_exception(server, err, &exception);
5996 err = 0;
5997 }
5998 } while (exception.retry);
5999 out:
6000 return err;
6001 }
6002
6003 #if defined(CONFIG_NFS_V4_1)
6004 /**
6005 * nfs41_check_expired_locks - possibly free a lock stateid
6006 *
6007 * @state: NFSv4 state for an inode
6008 *
6009 * Returns NFS_OK if recovery for this stateid is now finished.
6010 * Otherwise a negative NFS4ERR value is returned.
6011 */
6012 static int nfs41_check_expired_locks(struct nfs4_state *state)
6013 {
6014 int status, ret = -NFS4ERR_BAD_STATEID;
6015 struct nfs4_lock_state *lsp;
6016 struct nfs_server *server = NFS_SERVER(state->inode);
6017
6018 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
6019 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
6020 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
6021
6022 status = nfs41_test_stateid(server,
6023 &lsp->ls_stateid,
6024 cred);
6025 trace_nfs4_test_lock_stateid(state, lsp, status);
6026 if (status != NFS_OK) {
6027 /* Free the stateid unless the server
6028 * informs us the stateid is unrecognized. */
6029 if (status != -NFS4ERR_BAD_STATEID)
6030 nfs41_free_stateid(server,
6031 &lsp->ls_stateid,
6032 cred);
6033 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
6034 ret = status;
6035 }
6036 }
6037 	}
6038
6039 return ret;
6040 }
6041
6042 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
6043 {
6044 int status = NFS_OK;
6045
6046 if (test_bit(LK_STATE_IN_USE, &state->flags))
6047 status = nfs41_check_expired_locks(state);
6048 if (status != NFS_OK)
6049 status = nfs4_lock_expired(state, request);
6050 return status;
6051 }
6052 #endif
6053
6054 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6055 {
6056 struct nfs_inode *nfsi = NFS_I(state->inode);
6057 struct nfs4_state_owner *sp = state->owner;
6058 unsigned char fl_flags = request->fl_flags;
6059 int status = -ENOLCK;
6060
6061 if ((fl_flags & FL_POSIX) &&
6062 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
6063 goto out;
6064 /* Is this a delegated open? */
6065 status = nfs4_set_lock_state(state, request);
6066 if (status != 0)
6067 goto out;
6068 request->fl_flags |= FL_ACCESS;
6069 status = do_vfs_lock(state->inode, request);
6070 if (status < 0)
6071 goto out;
6072 mutex_lock(&sp->so_delegreturn_mutex);
6073 down_read(&nfsi->rwsem);
6074 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
6075 /* Yes: cache locks! */
6076 /* ...but avoid races with delegation recall... */
6077 request->fl_flags = fl_flags & ~FL_SLEEP;
6078 status = do_vfs_lock(state->inode, request);
6079 up_read(&nfsi->rwsem);
6080 mutex_unlock(&sp->so_delegreturn_mutex);
6081 goto out;
6082 }
6083 up_read(&nfsi->rwsem);
6084 mutex_unlock(&sp->so_delegreturn_mutex);
6085 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
6086 out:
6087 request->fl_flags = fl_flags;
6088 return status;
6089 }
6090
6091 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6092 {
6093 struct nfs4_exception exception = {
6094 .state = state,
6095 .inode = state->inode,
6096 };
6097 int err;
6098
6099 do {
6100 err = _nfs4_proc_setlk(state, cmd, request);
6101 if (err == -NFS4ERR_DENIED)
6102 err = -EAGAIN;
6103 err = nfs4_handle_exception(NFS_SERVER(state->inode),
6104 err, &exception);
6105 } while (exception.retry);
6106 return err;
6107 }
6108
6109 static int
6110 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
6111 {
6112 struct nfs_open_context *ctx;
6113 struct nfs4_state *state;
6114 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
6115 int status;
6116
6117 /* verify open state */
6118 ctx = nfs_file_open_context(filp);
6119 state = ctx->state;
6120
6121 if (request->fl_start < 0 || request->fl_end < 0)
6122 return -EINVAL;
6123
6124 if (IS_GETLK(cmd)) {
6125 if (state != NULL)
6126 return nfs4_proc_getlk(state, F_GETLK, request);
6127 return 0;
6128 }
6129
6130 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
6131 return -EINVAL;
6132
6133 if (request->fl_type == F_UNLCK) {
6134 if (state != NULL)
6135 return nfs4_proc_unlck(state, cmd, request);
6136 return 0;
6137 }
6138
6139 if (state == NULL)
6140 return -ENOLCK;
6141 /*
6142 * Don't rely on the VFS having checked the file open mode,
6143 * since it won't do this for flock() locks.
6144 */
6145 switch (request->fl_type) {
6146 case F_RDLCK:
6147 if (!(filp->f_mode & FMODE_READ))
6148 return -EBADF;
6149 break;
6150 case F_WRLCK:
6151 if (!(filp->f_mode & FMODE_WRITE))
6152 return -EBADF;
6153 }
6154
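	/* A blocking lock (SETLKW) that was denied is retried after a
	 * delay until it is granted or the process is signalled. */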
6155 do {
6156 status = nfs4_proc_setlk(state, cmd, request);
6157 if ((status != -EAGAIN) || IS_SETLK(cmd))
6158 break;
6159 timeout = nfs4_set_lock_task_retry(timeout);
6160 status = -ERESTARTSYS;
6161 if (signalled())
6162 break;
6163 } while(status < 0);
6164 return status;
6165 }
6166
6167 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
6168 {
6169 struct nfs_server *server = NFS_SERVER(state->inode);
6170 int err;
6171
6172 err = nfs4_set_lock_state(state, fl);
6173 if (err != 0)
6174 return err;
6175 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
6176 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
6177 }
6178
6179 struct nfs_release_lockowner_data {
6180 struct nfs4_lock_state *lsp;
6181 struct nfs_server *server;
6182 struct nfs_release_lockowner_args args;
6183 struct nfs_release_lockowner_res res;
6184 unsigned long timestamp;
6185 };
6186
6187 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
6188 {
6189 struct nfs_release_lockowner_data *data = calldata;
6190 struct nfs_server *server = data->server;
6191 nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
6192 &data->args.seq_args, &data->res.seq_res, task);
6193 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6194 data->timestamp = jiffies;
6195 }
6196
6197 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
6198 {
6199 struct nfs_release_lockowner_data *data = calldata;
6200 struct nfs_server *server = data->server;
6201
6202 nfs40_sequence_done(task, &data->res.seq_res);
6203
6204 switch (task->tk_status) {
6205 case 0:
6206 renew_lease(server, data->timestamp);
6207 break;
6208 case -NFS4ERR_STALE_CLIENTID:
6209 case -NFS4ERR_EXPIRED:
6210 nfs4_schedule_lease_recovery(server->nfs_client);
6211 break;
6212 case -NFS4ERR_LEASE_MOVED:
6213 case -NFS4ERR_DELAY:
6214 if (nfs4_async_handle_error(task, server,
6215 NULL, NULL) == -EAGAIN)
6216 rpc_restart_call_prepare(task);
6217 }
6218 }
6219
6220 static void nfs4_release_lockowner_release(void *calldata)
6221 {
6222 struct nfs_release_lockowner_data *data = calldata;
6223 nfs4_free_lock_state(data->server, data->lsp);
6224 kfree(calldata);
6225 }
6226
6227 static const struct rpc_call_ops nfs4_release_lockowner_ops = {
6228 .rpc_call_prepare = nfs4_release_lockowner_prepare,
6229 .rpc_call_done = nfs4_release_lockowner_done,
6230 .rpc_release = nfs4_release_lockowner_release,
6231 };
6232
6233 static void
6234 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
6235 {
6236 struct nfs_release_lockowner_data *data;
6237 struct rpc_message msg = {
6238 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
6239 };
6240
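	/* The RELEASE_LOCKOWNER operation only exists in NFSv4.0 */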
6241 if (server->nfs_client->cl_mvops->minor_version != 0)
6242 return;
6243
6244 data = kmalloc(sizeof(*data), GFP_NOFS);
6245 if (!data)
6246 return;
6247 data->lsp = lsp;
6248 data->server = server;
6249 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6250 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
6251 data->args.lock_owner.s_dev = server->s_dev;
6252
6253 msg.rpc_argp = &data->args;
6254 msg.rpc_resp = &data->res;
6255 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
6256 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
6257 }
6258
6259 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
6260
6261 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
6262 struct dentry *dentry, const char *key,
6263 const void *buf, size_t buflen,
6264 int flags)
6265 {
6266 return nfs4_proc_set_acl(d_inode(dentry), buf, buflen);
6267 }
6268
6269 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
6270 struct dentry *dentry, const char *key,
6271 void *buf, size_t buflen)
6272 {
6273 return nfs4_proc_get_acl(d_inode(dentry), buf, buflen);
6274 }
6275
6276 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry)
6277 {
6278 return nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry)));
6279 }
6280
6281 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
6282
6283 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
6284 struct dentry *dentry, const char *key,
6285 const void *buf, size_t buflen,
6286 int flags)
6287 {
6288 if (security_ismaclabel(key))
6289 return nfs4_set_security_label(dentry, buf, buflen);
6290
6291 return -EOPNOTSUPP;
6292 }
6293
6294 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler,
6295 struct dentry *dentry, const char *key,
6296 void *buf, size_t buflen)
6297 {
6298 if (security_ismaclabel(key))
6299 return nfs4_get_security_label(d_inode(dentry), buf, buflen);
6300 return -EOPNOTSUPP;
6301 }
6302
6303 static ssize_t
6304 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
6305 {
6306 int len = 0;
6307
6308 if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) {
6309 len = security_inode_listsecurity(inode, list, list_len);
6310 if (list_len && len > list_len)
6311 return -ERANGE;
6312 }
6313 return len;
6314 }
6315
6316 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = {
6317 .prefix = XATTR_SECURITY_PREFIX,
6318 .get = nfs4_xattr_get_nfs4_label,
6319 .set = nfs4_xattr_set_nfs4_label,
6320 };
6321
6322 #else
6323
6324 static ssize_t
6325 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
6326 {
6327 return 0;
6328 }
6329
6330 #endif
6331
6332 /*
6333 * nfs_fhget will use either the mounted_on_fileid or the fileid
6334 */
6335 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
6336 {
6337 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
6338 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
6339 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
6340 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
6341 return;
6342
6343 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
6344 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
6345 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
6346 fattr->nlink = 2;
6347 }
6348
6349 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6350 const struct qstr *name,
6351 struct nfs4_fs_locations *fs_locations,
6352 struct page *page)
6353 {
6354 struct nfs_server *server = NFS_SERVER(dir);
6355 u32 bitmask[3] = {
6356 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6357 };
6358 struct nfs4_fs_locations_arg args = {
6359 .dir_fh = NFS_FH(dir),
6360 .name = name,
6361 .page = page,
6362 .bitmask = bitmask,
6363 };
6364 struct nfs4_fs_locations_res res = {
6365 .fs_locations = fs_locations,
6366 };
6367 struct rpc_message msg = {
6368 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6369 .rpc_argp = &args,
6370 .rpc_resp = &res,
6371 };
6372 int status;
6373
6374 dprintk("%s: start\n", __func__);
6375
6376 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
6377 * is not supported */
6378 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
6379 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
6380 else
6381 bitmask[0] |= FATTR4_WORD0_FILEID;
6382
6383 nfs_fattr_init(&fs_locations->fattr);
6384 fs_locations->server = server;
6385 fs_locations->nlocations = 0;
6386 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
6387 dprintk("%s: returned status = %d\n", __func__, status);
6388 return status;
6389 }
6390
6391 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6392 const struct qstr *name,
6393 struct nfs4_fs_locations *fs_locations,
6394 struct page *page)
6395 {
6396 struct nfs4_exception exception = { };
6397 int err;
6398 do {
6399 err = _nfs4_proc_fs_locations(client, dir, name,
6400 fs_locations, page);
6401 trace_nfs4_get_fs_locations(dir, name, err);
6402 err = nfs4_handle_exception(NFS_SERVER(dir), err,
6403 &exception);
6404 } while (exception.retry);
6405 return err;
6406 }
6407
6408 /*
6409 * This operation also signals the server that this client is
6410 * performing migration recovery. The server can stop returning
6411 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is
6412 * appended to this compound to identify the client ID which is
6413 * performing recovery.
6414 */
6415 static int _nfs40_proc_get_locations(struct inode *inode,
6416 struct nfs4_fs_locations *locations,
6417 struct page *page, struct rpc_cred *cred)
6418 {
6419 struct nfs_server *server = NFS_SERVER(inode);
6420 struct rpc_clnt *clnt = server->client;
6421 u32 bitmask[2] = {
6422 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6423 };
6424 struct nfs4_fs_locations_arg args = {
6425 .clientid = server->nfs_client->cl_clientid,
6426 .fh = NFS_FH(inode),
6427 .page = page,
6428 .bitmask = bitmask,
6429 .migration = 1, /* skip LOOKUP */
6430 .renew = 1, /* append RENEW */
6431 };
6432 struct nfs4_fs_locations_res res = {
6433 .fs_locations = locations,
6434 .migration = 1,
6435 .renew = 1,
6436 };
6437 struct rpc_message msg = {
6438 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6439 .rpc_argp = &args,
6440 .rpc_resp = &res,
6441 .rpc_cred = cred,
6442 };
6443 unsigned long now = jiffies;
6444 int status;
6445
6446 nfs_fattr_init(&locations->fattr);
6447 locations->server = server;
6448 locations->nlocations = 0;
6449
6450 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6451 nfs4_set_sequence_privileged(&args.seq_args);
6452 status = nfs4_call_sync_sequence(clnt, server, &msg,
6453 &args.seq_args, &res.seq_res);
6454 if (status)
6455 return status;
6456
6457 renew_lease(server, now);
6458 return 0;
6459 }
6460
6461 #ifdef CONFIG_NFS_V4_1
6462
6463 /*
6464 * This operation also signals the server that this client is
6465 * performing migration recovery. The server can stop asserting
6466 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID
6467 * performing this operation is identified in the SEQUENCE
6468 * operation in this compound.
6469 *
6470 * When the client supports GETATTR(fs_locations_info), it can
6471 * be plumbed in here.
6472 */
6473 static int _nfs41_proc_get_locations(struct inode *inode,
6474 struct nfs4_fs_locations *locations,
6475 struct page *page, struct rpc_cred *cred)
6476 {
6477 struct nfs_server *server = NFS_SERVER(inode);
6478 struct rpc_clnt *clnt = server->client;
6479 u32 bitmask[2] = {
6480 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6481 };
6482 struct nfs4_fs_locations_arg args = {
6483 .fh = NFS_FH(inode),
6484 .page = page,
6485 .bitmask = bitmask,
6486 .migration = 1, /* skip LOOKUP */
6487 };
6488 struct nfs4_fs_locations_res res = {
6489 .fs_locations = locations,
6490 .migration = 1,
6491 };
6492 struct rpc_message msg = {
6493 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6494 .rpc_argp = &args,
6495 .rpc_resp = &res,
6496 .rpc_cred = cred,
6497 };
6498 int status;
6499
6500 nfs_fattr_init(&locations->fattr);
6501 locations->server = server;
6502 locations->nlocations = 0;
6503
6504 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6505 nfs4_set_sequence_privileged(&args.seq_args);
6506 status = nfs4_call_sync_sequence(clnt, server, &msg,
6507 &args.seq_args, &res.seq_res);
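	/* A successful reply that still asserts SEQ4_STATUS_LEASE_MOVED
	 * means this client has more migration recovery to do. */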
6508 if (status == NFS4_OK &&
6509 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
6510 status = -NFS4ERR_LEASE_MOVED;
6511 return status;
6512 }
6513
6514 #endif /* CONFIG_NFS_V4_1 */
6515
6516 /**
6517 * nfs4_proc_get_locations - discover locations for a migrated FSID
6518 * @inode: inode on FSID that is migrating
6519 * @locations: result of query
6520 * @page: buffer
6521 * @cred: credential to use for this operation
6522 *
6523 * Returns NFS4_OK on success, a negative NFS4ERR status code if the
6524 * operation failed, or a negative errno if a local error occurred.
6525 *
6526 * On success, "locations" is filled in, but if the server has
6527 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
6528 * asserted.
6529 *
6530 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases
6531 * from this client that require migration recovery.
6532 */
6533 int nfs4_proc_get_locations(struct inode *inode,
6534 struct nfs4_fs_locations *locations,
6535 struct page *page, struct rpc_cred *cred)
6536 {
6537 struct nfs_server *server = NFS_SERVER(inode);
6538 struct nfs_client *clp = server->nfs_client;
6539 const struct nfs4_mig_recovery_ops *ops =
6540 clp->cl_mvops->mig_recovery_ops;
6541 struct nfs4_exception exception = { };
6542 int status;
6543
6544 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
6545 (unsigned long long)server->fsid.major,
6546 (unsigned long long)server->fsid.minor,
6547 clp->cl_hostname);
6548 nfs_display_fhandle(NFS_FH(inode), __func__);
6549
6550 do {
6551 status = ops->get_locations(inode, locations, page, cred);
6552 if (status != -NFS4ERR_DELAY)
6553 break;
6554 nfs4_handle_exception(server, status, &exception);
6555 } while (exception.retry);
6556 return status;
6557 }
6558
6559 /*
6560 * This operation also signals the server that this client is
6561 * performing "lease moved" recovery. The server can stop
6562 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation
6563 * is appended to this compound to identify the client ID which is
6564 * performing recovery.
6565 */
6566 static int _nfs40_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6567 {
6568 struct nfs_server *server = NFS_SERVER(inode);
6569 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
6570 struct rpc_clnt *clnt = server->client;
6571 struct nfs4_fsid_present_arg args = {
6572 .fh = NFS_FH(inode),
6573 .clientid = clp->cl_clientid,
6574 .renew = 1, /* append RENEW */
6575 };
6576 struct nfs4_fsid_present_res res = {
6577 .renew = 1,
6578 };
6579 struct rpc_message msg = {
6580 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
6581 .rpc_argp = &args,
6582 .rpc_resp = &res,
6583 .rpc_cred = cred,
6584 };
6585 unsigned long now = jiffies;
6586 int status;
6587
6588 res.fh = nfs_alloc_fhandle();
6589 if (res.fh == NULL)
6590 return -ENOMEM;
6591
6592 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6593 nfs4_set_sequence_privileged(&args.seq_args);
6594 status = nfs4_call_sync_sequence(clnt, server, &msg,
6595 &args.seq_args, &res.seq_res);
6596 nfs_free_fhandle(res.fh);
6597 if (status)
6598 return status;
6599
6600 do_renew_lease(clp, now);
6601 return 0;
6602 }
6603
6604 #ifdef CONFIG_NFS_V4_1
6605
6606 /*
6607 * This operation also signals the server that this client is
6608 * performing "lease moved" recovery. The server can stop asserting
6609 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing
6610 * this operation is identified in the SEQUENCE operation in this
6611 * compound.
6612 */
6613 static int _nfs41_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6614 {
6615 struct nfs_server *server = NFS_SERVER(inode);
6616 struct rpc_clnt *clnt = server->client;
6617 struct nfs4_fsid_present_arg args = {
6618 .fh = NFS_FH(inode),
6619 };
6620 struct nfs4_fsid_present_res res = {
6621 };
6622 struct rpc_message msg = {
6623 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
6624 .rpc_argp = &args,
6625 .rpc_resp = &res,
6626 .rpc_cred = cred,
6627 };
6628 int status;
6629
6630 res.fh = nfs_alloc_fhandle();
6631 if (res.fh == NULL)
6632 return -ENOMEM;
6633
6634 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6635 nfs4_set_sequence_privileged(&args.seq_args);
6636 status = nfs4_call_sync_sequence(clnt, server, &msg,
6637 &args.seq_args, &res.seq_res);
6638 nfs_free_fhandle(res.fh);
6639 if (status == NFS4_OK &&
6640 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
6641 status = -NFS4ERR_LEASE_MOVED;
6642 return status;
6643 }
6644
6645 #endif /* CONFIG_NFS_V4_1 */
6646
6647 /**
6648 * nfs4_proc_fsid_present - Is this FSID present or absent on server?
6649 * @inode: inode on FSID to check
6650 * @cred: credential to use for this operation
6651 *
6652 * Server indicates whether the FSID is present, moved, or not
6653 * recognized. This operation is necessary to clear a LEASE_MOVED
6654 * condition for this client ID.
6655 *
6656 * Returns NFS4_OK if the FSID is present on this server,
6657 * -NFS4ERR_MOVED if the FSID is no longer present, a negative
6658 * NFS4ERR code if some error occurred on the server, or a
6659 * negative errno if a local failure occurred.
6660 */
6661 int nfs4_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6662 {
6663 struct nfs_server *server = NFS_SERVER(inode);
6664 struct nfs_client *clp = server->nfs_client;
6665 const struct nfs4_mig_recovery_ops *ops =
6666 clp->cl_mvops->mig_recovery_ops;
6667 struct nfs4_exception exception = { };
6668 int status;
6669
6670 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
6671 (unsigned long long)server->fsid.major,
6672 (unsigned long long)server->fsid.minor,
6673 clp->cl_hostname);
6674 nfs_display_fhandle(NFS_FH(inode), __func__);
6675
6676 do {
6677 status = ops->fsid_present(inode, cred);
6678 if (status != -NFS4ERR_DELAY)
6679 break;
6680 nfs4_handle_exception(server, status, &exception);
6681 } while (exception.retry);
6682 return status;
6683 }
6684
6685 /**
6686  * If 'use_integrity' is true and the state management nfs_client
6687 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
6688 * and the machine credential as per RFC3530bis and RFC5661 Security
6689 * Considerations sections. Otherwise, just use the user cred with the
6690 * filesystem's rpc_client.
6691 */
6692 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
6693 {
6694 int status;
6695 struct nfs4_secinfo_arg args = {
6696 .dir_fh = NFS_FH(dir),
6697 .name = name,
6698 };
6699 struct nfs4_secinfo_res res = {
6700 .flavors = flavors,
6701 };
6702 struct rpc_message msg = {
6703 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
6704 .rpc_argp = &args,
6705 .rpc_resp = &res,
6706 };
6707 struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
6708 struct rpc_cred *cred = NULL;
6709
6710 if (use_integrity) {
6711 clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient;
6712 cred = nfs4_get_clid_cred(NFS_SERVER(dir)->nfs_client);
6713 msg.rpc_cred = cred;
6714 }
6715
6716 dprintk("NFS call secinfo %s\n", name->name);
6717
6718 nfs4_state_protect(NFS_SERVER(dir)->nfs_client,
6719 NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
6720
6721 status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args,
6722 &res.seq_res, 0);
6723 dprintk("NFS reply secinfo: %d\n", status);
6724
6725 if (cred)
6726 put_rpccred(cred);
6727
6728 return status;
6729 }
6730
6731 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
6732 struct nfs4_secinfo_flavors *flavors)
6733 {
6734 struct nfs4_exception exception = { };
6735 int err;
6736 do {
6737 err = -NFS4ERR_WRONGSEC;
6738
6739 /* try to use integrity protection with machine cred */
6740 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client))
6741 err = _nfs4_proc_secinfo(dir, name, flavors, true);
6742
6743 /*
6744 	 * If we are unable to use integrity protection, or if SECINFO with
6745 	 * integrity protection returns NFS4ERR_WRONGSEC (which is
6746 	 * disallowed by the spec but seen from deployed servers), use
6747 	 * the current filesystem's rpc_client and the user cred.
6748 */
6749 if (err == -NFS4ERR_WRONGSEC)
6750 err = _nfs4_proc_secinfo(dir, name, flavors, false);
6751
6752 trace_nfs4_secinfo(dir, name, err);
6753 err = nfs4_handle_exception(NFS_SERVER(dir), err,
6754 &exception);
6755 } while (exception.retry);
6756 return err;
6757 }
6758
6759 #ifdef CONFIG_NFS_V4_1
6760 /*
6761  * Check the exchange flags returned by the server for invalid flags: flags
6762  * outside the valid mask, both the PNFS and NON_PNFS flags set, or none of
6763  * the NON_PNFS, PNFS, or DS flags set.
6764 */
6765 static int nfs4_check_cl_exchange_flags(u32 flags)
6766 {
6767 if (flags & ~EXCHGID4_FLAG_MASK_R)
6768 goto out_inval;
6769 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
6770 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
6771 goto out_inval;
6772 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
6773 goto out_inval;
6774 return NFS_OK;
6775 out_inval:
6776 return -NFS4ERR_INVAL;
6777 }
6778
6779 static bool
6780 nfs41_same_server_scope(struct nfs41_server_scope *a,
6781 struct nfs41_server_scope *b)
6782 {
6783 if (a->server_scope_sz == b->server_scope_sz &&
6784 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
6785 return true;
6786
6787 return false;
6788 }
6789
6790 static void
6791 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
6792 {
6793 }
6794
6795 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = {
6796 .rpc_call_done = &nfs4_bind_one_conn_to_session_done,
6797 };
6798
6799 /*
6800 * nfs4_proc_bind_one_conn_to_session()
6801 *
6802 * The 4.1 client currently uses the same TCP connection for the
6803 * fore and backchannel.
6804 */
6805 static
6806 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt,
6807 struct rpc_xprt *xprt,
6808 struct nfs_client *clp,
6809 struct rpc_cred *cred)
6810 {
6811 int status;
6812 struct nfs41_bind_conn_to_session_args args = {
6813 .client = clp,
6814 .dir = NFS4_CDFC4_FORE_OR_BOTH,
6815 };
6816 struct nfs41_bind_conn_to_session_res res;
6817 struct rpc_message msg = {
6818 .rpc_proc =
6819 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
6820 .rpc_argp = &args,
6821 .rpc_resp = &res,
6822 .rpc_cred = cred,
6823 };
6824 struct rpc_task_setup task_setup_data = {
6825 .rpc_client = clnt,
6826 .rpc_xprt = xprt,
6827 .callback_ops = &nfs4_bind_one_conn_to_session_ops,
6828 .rpc_message = &msg,
6829 .flags = RPC_TASK_TIMEOUT,
6830 };
6831 struct rpc_task *task;
6832
6833 dprintk("--> %s\n", __func__);
6834
6835 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
6836 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
6837 args.dir = NFS4_CDFC4_FORE;
6838
6839 /* Do not set the backchannel flag unless this is clnt->cl_xprt */
6840 if (xprt != rcu_access_pointer(clnt->cl_xprt))
6841 args.dir = NFS4_CDFC4_FORE;
6842
6843 task = rpc_run_task(&task_setup_data);
6844 if (!IS_ERR(task)) {
6845 status = task->tk_status;
6846 rpc_put_task(task);
6847 } else
6848 status = PTR_ERR(task);
6849 trace_nfs4_bind_conn_to_session(clp, status);
6850 if (status == 0) {
6851 if (memcmp(res.sessionid.data,
6852 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
6853 dprintk("NFS: %s: Session ID mismatch\n", __func__);
6854 status = -EIO;
6855 goto out;
6856 }
6857 if ((res.dir & args.dir) != res.dir || res.dir == 0) {
6858 dprintk("NFS: %s: Unexpected direction from server\n",
6859 __func__);
6860 status = -EIO;
6861 goto out;
6862 }
6863 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
6864 dprintk("NFS: %s: Server returned RDMA mode = true\n",
6865 __func__);
6866 status = -EIO;
6867 goto out;
6868 }
6869 }
6870 out:
6871 dprintk("<-- %s status= %d\n", __func__, status);
6872 return status;
6873 }
6874
6875 struct rpc_bind_conn_calldata {
6876 struct nfs_client *clp;
6877 struct rpc_cred *cred;
6878 };
6879
6880 static int
6881 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt,
6882 struct rpc_xprt *xprt,
6883 void *calldata)
6884 {
6885 struct rpc_bind_conn_calldata *p = calldata;
6886
6887 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred);
6888 }
6889
6890 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
6891 {
6892 struct rpc_bind_conn_calldata data = {
6893 .clp = clp,
6894 .cred = cred,
6895 };
6896 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient,
6897 nfs4_proc_bind_conn_to_session_callback, &data);
6898 }
6899
6900 /*
6901  * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map,
6902  * plus the operations we'd like to see in the allow map to enable certain features
6903 */
6904 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = {
6905 .how = SP4_MACH_CRED,
6906 .enforce.u.words = {
6907 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
6908 1 << (OP_EXCHANGE_ID - 32) |
6909 1 << (OP_CREATE_SESSION - 32) |
6910 1 << (OP_DESTROY_SESSION - 32) |
6911 1 << (OP_DESTROY_CLIENTID - 32)
6912 },
6913 .allow.u.words = {
6914 [0] = 1 << (OP_CLOSE) |
6915 1 << (OP_OPEN_DOWNGRADE) |
6916 1 << (OP_LOCKU) |
6917 1 << (OP_DELEGRETURN) |
6918 1 << (OP_COMMIT),
6919 [1] = 1 << (OP_SECINFO - 32) |
6920 1 << (OP_SECINFO_NO_NAME - 32) |
6921 1 << (OP_LAYOUTRETURN - 32) |
6922 1 << (OP_TEST_STATEID - 32) |
6923 1 << (OP_FREE_STATEID - 32) |
6924 1 << (OP_WRITE - 32)
6925 }
6926 };
6927
6928 /*
6929 * Select the state protection mode for client `clp' given the server results
6930 * from exchange_id in `sp'.
6931 *
6932 * Returns 0 on success, negative errno otherwise.
6933 */
6934 static int nfs4_sp4_select_mode(struct nfs_client *clp,
6935 struct nfs41_state_protection *sp)
6936 {
6937 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = {
6938 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
6939 1 << (OP_EXCHANGE_ID - 32) |
6940 1 << (OP_CREATE_SESSION - 32) |
6941 1 << (OP_DESTROY_SESSION - 32) |
6942 1 << (OP_DESTROY_CLIENTID - 32)
6943 };
6944 unsigned int i;
6945
6946 if (sp->how == SP4_MACH_CRED) {
6947 /* Print state protect result */
6948 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n");
6949 for (i = 0; i <= LAST_NFS4_OP; i++) {
6950 if (test_bit(i, sp->enforce.u.longs))
6951 dfprintk(MOUNT, " enforce op %d\n", i);
6952 if (test_bit(i, sp->allow.u.longs))
6953 dfprintk(MOUNT, " allow op %d\n", i);
6954 }
6955
6956 /* make sure nothing is on enforce list that isn't supported */
6957 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) {
6958 if (sp->enforce.u.words[i] & ~supported_enforce[i]) {
6959 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
6960 return -EINVAL;
6961 }
6962 }
6963
6964 /*
6965 * Minimal mode - state operations are allowed to use machine
6966 * credential. Note this already happens by default, so the
6967 * client doesn't have to do anything more than the negotiation.
6968 *
6969 * NOTE: we don't care if EXCHANGE_ID is in the list -
6970 * we're already using the machine cred for exchange_id
6971 * and will never use a different cred.
6972 */
6973 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) &&
6974 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) &&
6975 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) &&
6976 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) {
6977 dfprintk(MOUNT, "sp4_mach_cred:\n");
6978 dfprintk(MOUNT, " minimal mode enabled\n");
6979 set_bit(NFS_SP4_MACH_CRED_MINIMAL, &clp->cl_sp4_flags);
6980 } else {
6981 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
6982 return -EINVAL;
6983 }
6984
6985 if (test_bit(OP_CLOSE, sp->allow.u.longs) &&
6986 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) &&
6987 test_bit(OP_DELEGRETURN, sp->allow.u.longs) &&
6988 test_bit(OP_LOCKU, sp->allow.u.longs)) {
6989 dfprintk(MOUNT, " cleanup mode enabled\n");
6990 set_bit(NFS_SP4_MACH_CRED_CLEANUP, &clp->cl_sp4_flags);
6991 }
6992
6993 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) {
6994 dfprintk(MOUNT, " pnfs cleanup mode enabled\n");
6995 set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP,
6996 &clp->cl_sp4_flags);
6997 }
6998
6999 if (test_bit(OP_SECINFO, sp->allow.u.longs) &&
7000 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) {
7001 dfprintk(MOUNT, " secinfo mode enabled\n");
7002 set_bit(NFS_SP4_MACH_CRED_SECINFO, &clp->cl_sp4_flags);
7003 }
7004
7005 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) &&
7006 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) {
7007 dfprintk(MOUNT, " stateid mode enabled\n");
7008 set_bit(NFS_SP4_MACH_CRED_STATEID, &clp->cl_sp4_flags);
7009 }
7010
7011 if (test_bit(OP_WRITE, sp->allow.u.longs)) {
7012 dfprintk(MOUNT, " write mode enabled\n");
7013 set_bit(NFS_SP4_MACH_CRED_WRITE, &clp->cl_sp4_flags);
7014 }
7015
7016 if (test_bit(OP_COMMIT, sp->allow.u.longs)) {
7017 dfprintk(MOUNT, " commit mode enabled\n");
7018 set_bit(NFS_SP4_MACH_CRED_COMMIT, &clp->cl_sp4_flags);
7019 }
7020 }
7021
7022 return 0;
7023 }
7024
7025 /*
7026 * _nfs4_proc_exchange_id()
7027 *
7028 * Wrapper for EXCHANGE_ID operation.
7029 */
7030 static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
7031 u32 sp4_how)
7032 {
7033 nfs4_verifier verifier;
7034 struct nfs41_exchange_id_args args = {
7035 .verifier = &verifier,
7036 .client = clp,
7037 #ifdef CONFIG_NFS_V4_1_MIGRATION
7038 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
7039 EXCHGID4_FLAG_BIND_PRINC_STATEID |
7040 EXCHGID4_FLAG_SUPP_MOVED_MIGR,
7041 #else
7042 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
7043 EXCHGID4_FLAG_BIND_PRINC_STATEID,
7044 #endif
7045 };
7046 struct nfs41_exchange_id_res res = {
7047 0
7048 };
7049 int status;
7050 struct rpc_message msg = {
7051 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
7052 .rpc_argp = &args,
7053 .rpc_resp = &res,
7054 .rpc_cred = cred,
7055 };
7056
7057 nfs4_init_boot_verifier(clp, &verifier);
7058
7059 status = nfs4_init_uniform_client_string(clp);
7060 if (status)
7061 goto out;
7062
7063 dprintk("NFS call exchange_id auth=%s, '%s'\n",
7064 clp->cl_rpcclient->cl_auth->au_ops->au_name,
7065 clp->cl_owner_id);
7066
7067 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
7068 GFP_NOFS);
7069 if (unlikely(res.server_owner == NULL)) {
7070 status = -ENOMEM;
7071 goto out;
7072 }
7073
7074 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
7075 GFP_NOFS);
7076 if (unlikely(res.server_scope == NULL)) {
7077 status = -ENOMEM;
7078 goto out_server_owner;
7079 }
7080
7081 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
7082 if (unlikely(res.impl_id == NULL)) {
7083 status = -ENOMEM;
7084 goto out_server_scope;
7085 }
7086
7087 switch (sp4_how) {
7088 case SP4_NONE:
7089 args.state_protect.how = SP4_NONE;
7090 break;
7091
7092 case SP4_MACH_CRED:
7093 args.state_protect = nfs4_sp4_mach_cred_request;
7094 break;
7095
7096 default:
7097 /* unsupported! */
7098 WARN_ON_ONCE(1);
7099 status = -EINVAL;
7100 goto out_impl_id;
7101 }
7102
7103 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7104 trace_nfs4_exchange_id(clp, status);
7105 if (status == 0)
7106 status = nfs4_check_cl_exchange_flags(res.flags);
7107
7108 if (status == 0)
7109 status = nfs4_sp4_select_mode(clp, &res.state_protect);
7110
7111 if (status == 0) {
7112 clp->cl_clientid = res.clientid;
7113 clp->cl_exchange_flags = res.flags;
7114 /* Client ID is not confirmed */
7115 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) {
7116 clear_bit(NFS4_SESSION_ESTABLISHED,
7117 &clp->cl_session->session_state);
7118 clp->cl_seqid = res.seqid;
7119 }
7120
7121 kfree(clp->cl_serverowner);
7122 clp->cl_serverowner = res.server_owner;
7123 res.server_owner = NULL;
7124
7125 /* use the most recent implementation id */
7126 kfree(clp->cl_implid);
7127 clp->cl_implid = res.impl_id;
7128 res.impl_id = NULL;
7129
7130 if (clp->cl_serverscope != NULL &&
7131 !nfs41_same_server_scope(clp->cl_serverscope,
7132 res.server_scope)) {
7133 dprintk("%s: server_scope mismatch detected\n",
7134 __func__);
7135 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
7136 kfree(clp->cl_serverscope);
7137 clp->cl_serverscope = NULL;
7138 }
7139
7140 if (clp->cl_serverscope == NULL) {
7141 clp->cl_serverscope = res.server_scope;
7142 res.server_scope = NULL;
7143 }
7144 }
7145
7146 out_impl_id:
7147 kfree(res.impl_id);
7148 out_server_scope:
7149 kfree(res.server_scope);
7150 out_server_owner:
7151 kfree(res.server_owner);
7152 out:
7153 if (clp->cl_implid != NULL)
7154 dprintk("NFS reply exchange_id: Server Implementation ID: "
7155 "domain: %s, name: %s, date: %llu,%u\n",
7156 clp->cl_implid->domain, clp->cl_implid->name,
7157 clp->cl_implid->date.seconds,
7158 clp->cl_implid->date.nseconds);
7159 dprintk("NFS reply exchange_id: %d\n", status);
7160 return status;
7161 }
7162
7163 /*
7164 * nfs4_proc_exchange_id()
7165 *
7166 * Returns zero, a negative errno, or a negative NFS4ERR status code.
7167 *
7168 * Since the clientid has expired, all compounds using sessions
7169 * associated with the stale clientid will be returning
7170 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
7171 * be in some phase of session reset.
7172 *
7173 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
7174 */
7175 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
7176 {
7177 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
7178 int status;
7179
7180 /* try SP4_MACH_CRED if krb5i/p */
7181 if (authflavor == RPC_AUTH_GSS_KRB5I ||
7182 authflavor == RPC_AUTH_GSS_KRB5P) {
7183 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
7184 if (!status)
7185 return 0;
7186 }
7187
7188 /* try SP4_NONE */
7189 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
7190 }
7191
7192 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
7193 struct rpc_cred *cred)
7194 {
7195 struct rpc_message msg = {
7196 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
7197 .rpc_argp = clp,
7198 .rpc_cred = cred,
7199 };
7200 int status;
7201
7202 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7203 trace_nfs4_destroy_clientid(clp, status);
7204 if (status)
7205 dprintk("NFS: Got error %d from the server %s on "
7206 			"DESTROY_CLIENTID.\n", status, clp->cl_hostname);
7207 return status;
7208 }
7209
7210 static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
7211 struct rpc_cred *cred)
7212 {
7213 unsigned int loop;
7214 int ret;
7215
7216 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
7217 ret = _nfs4_proc_destroy_clientid(clp, cred);
7218 switch (ret) {
7219 case -NFS4ERR_DELAY:
7220 case -NFS4ERR_CLIENTID_BUSY:
7221 ssleep(1);
7222 break;
7223 default:
7224 return ret;
7225 }
7226 }
7227 return 0;
7228 }
7229
7230 int nfs4_destroy_clientid(struct nfs_client *clp)
7231 {
7232 struct rpc_cred *cred;
7233 int ret = 0;
7234
7235 if (clp->cl_mvops->minor_version < 1)
7236 goto out;
7237 if (clp->cl_exchange_flags == 0)
7238 goto out;
7239 if (clp->cl_preserve_clid)
7240 goto out;
7241 cred = nfs4_get_clid_cred(clp);
7242 ret = nfs4_proc_destroy_clientid(clp, cred);
7243 if (cred)
7244 put_rpccred(cred);
7245 switch (ret) {
7246 case 0:
7247 case -NFS4ERR_STALE_CLIENTID:
7248 clp->cl_exchange_flags = 0;
7249 }
7250 out:
7251 return ret;
7252 }
7253
7254 struct nfs4_get_lease_time_data {
7255 struct nfs4_get_lease_time_args *args;
7256 struct nfs4_get_lease_time_res *res;
7257 struct nfs_client *clp;
7258 };
7259
7260 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
7261 void *calldata)
7262 {
7263 struct nfs4_get_lease_time_data *data =
7264 (struct nfs4_get_lease_time_data *)calldata;
7265
7266 dprintk("--> %s\n", __func__);
7267 	/* just set up the sequence; do not trigger session recovery
7268 	   since we're invoked from within one */
7269 nfs41_setup_sequence(data->clp->cl_session,
7270 &data->args->la_seq_args,
7271 &data->res->lr_seq_res,
7272 task);
7273 dprintk("<-- %s\n", __func__);
7274 }
7275
7276 /*
7277 * Called from nfs4_state_manager thread for session setup, so don't recover
7278 * from sequence operation or clientid errors.
7279 */
7280 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
7281 {
7282 struct nfs4_get_lease_time_data *data =
7283 (struct nfs4_get_lease_time_data *)calldata;
7284
7285 dprintk("--> %s\n", __func__);
7286 if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
7287 return;
7288 switch (task->tk_status) {
7289 case -NFS4ERR_DELAY:
7290 case -NFS4ERR_GRACE:
7291 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
7292 rpc_delay(task, NFS4_POLL_RETRY_MIN);
7293 task->tk_status = 0;
7294 /* fall through */
7295 case -NFS4ERR_RETRY_UNCACHED_REP:
7296 rpc_restart_call_prepare(task);
7297 return;
7298 }
7299 dprintk("<-- %s\n", __func__);
7300 }
7301
7302 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
7303 .rpc_call_prepare = nfs4_get_lease_time_prepare,
7304 .rpc_call_done = nfs4_get_lease_time_done,
7305 };
7306
7307 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
7308 {
7309 struct rpc_task *task;
7310 struct nfs4_get_lease_time_args args;
7311 struct nfs4_get_lease_time_res res = {
7312 .lr_fsinfo = fsinfo,
7313 };
7314 struct nfs4_get_lease_time_data data = {
7315 .args = &args,
7316 .res = &res,
7317 .clp = clp,
7318 };
7319 struct rpc_message msg = {
7320 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
7321 .rpc_argp = &args,
7322 .rpc_resp = &res,
7323 };
7324 struct rpc_task_setup task_setup = {
7325 .rpc_client = clp->cl_rpcclient,
7326 .rpc_message = &msg,
7327 .callback_ops = &nfs4_get_lease_time_ops,
7328 .callback_data = &data,
7329 .flags = RPC_TASK_TIMEOUT,
7330 };
7331 int status;
7332
7333 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
7334 nfs4_set_sequence_privileged(&args.la_seq_args);
7335 dprintk("--> %s\n", __func__);
7336 task = rpc_run_task(&task_setup);
7337
7338 if (IS_ERR(task))
7339 status = PTR_ERR(task);
7340 else {
7341 status = task->tk_status;
7342 rpc_put_task(task);
7343 }
7344 dprintk("<-- %s return %d\n", __func__, status);
7345
7346 return status;
7347 }
7348
7349 /*
7350 * Initialize the values to be used by the client in CREATE_SESSION
7351  * If nfs4_init_session has set the fore channel request and response sizes,
7352 * use them.
7353 *
7354 * Set the back channel max_resp_sz_cached to zero to force the client to
7355 * always set csa_cachethis to FALSE because the current implementation
7356 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
7357 */
7358 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
7359 {
7360 unsigned int max_rqst_sz, max_resp_sz;
7361
7362 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
7363 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
7364
7365 /* Fore channel attributes */
7366 args->fc_attrs.max_rqst_sz = max_rqst_sz;
7367 args->fc_attrs.max_resp_sz = max_resp_sz;
7368 args->fc_attrs.max_ops = NFS4_MAX_OPS;
7369 args->fc_attrs.max_reqs = max_session_slots;
7370
7371 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
7372 "max_ops=%u max_reqs=%u\n",
7373 __func__,
7374 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
7375 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
7376
7377 /* Back channel attributes */
7378 args->bc_attrs.max_rqst_sz = PAGE_SIZE;
7379 args->bc_attrs.max_resp_sz = PAGE_SIZE;
7380 args->bc_attrs.max_resp_sz_cached = 0;
7381 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
7382 args->bc_attrs.max_reqs = NFS41_BC_MAX_CALLBACKS;
7383
7384 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
7385 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
7386 __func__,
7387 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
7388 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
7389 args->bc_attrs.max_reqs);
7390 }
7391
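/*
 * Sanity check the fore channel attributes negotiated by the server:
 * the reply size must not exceed what we advertised, max_ops must be
 * at least what we requested, and the slot count is clamped to
 * NFS4_MAX_SLOT_TABLE.
 */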
7392 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
7393 struct nfs41_create_session_res *res)
7394 {
7395 struct nfs4_channel_attrs *sent = &args->fc_attrs;
7396 struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
7397
7398 if (rcvd->max_resp_sz > sent->max_resp_sz)
7399 return -EINVAL;
7400 /*
7401 * Our requested max_ops is the minimum we need; we're not
7402 * prepared to break up compounds into smaller pieces than that.
7403 * So, no point even trying to continue if the server won't
7404 * cooperate:
7405 */
7406 if (rcvd->max_ops < sent->max_ops)
7407 return -EINVAL;
7408 if (rcvd->max_reqs == 0)
7409 return -EINVAL;
7410 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
7411 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
7412 return 0;
7413 }
7414
7415 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
7416 struct nfs41_create_session_res *res)
7417 {
7418 struct nfs4_channel_attrs *sent = &args->bc_attrs;
7419 struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
7420
7421 if (!(res->flags & SESSION4_BACK_CHAN))
7422 goto out;
7423 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
7424 return -EINVAL;
7425 if (rcvd->max_resp_sz < sent->max_resp_sz)
7426 return -EINVAL;
7427 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
7428 return -EINVAL;
7429 /* These would render the backchannel useless: */
7430 if (rcvd->max_ops != sent->max_ops)
7431 return -EINVAL;
7432 if (rcvd->max_reqs != sent->max_reqs)
7433 return -EINVAL;
7434 out:
7435 return 0;
7436 }
7437
7438 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
7439 struct nfs41_create_session_res *res)
7440 {
7441 int ret;
7442
7443 ret = nfs4_verify_fore_channel_attrs(args, res);
7444 if (ret)
7445 return ret;
7446 return nfs4_verify_back_channel_attrs(args, res);
7447 }
7448
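/*
 * Record the newly created session: copy the session id and the
 * negotiated channel attributes, and mark both the client id and the
 * session as confirmed.
 */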
7449 static void nfs4_update_session(struct nfs4_session *session,
7450 struct nfs41_create_session_res *res)
7451 {
7452 nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
7453 /* Mark client id and session as being confirmed */
7454 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
7455 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
7456 session->flags = res->flags;
7457 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
7458 if (res->flags & SESSION4_BACK_CHAN)
7459 memcpy(&session->bc_attrs, &res->bc_attrs,
7460 sizeof(session->bc_attrs));
7461 }
7462
7463 static int _nfs4_proc_create_session(struct nfs_client *clp,
7464 struct rpc_cred *cred)
7465 {
7466 struct nfs4_session *session = clp->cl_session;
7467 struct nfs41_create_session_args args = {
7468 .client = clp,
7469 .clientid = clp->cl_clientid,
7470 .seqid = clp->cl_seqid,
7471 .cb_program = NFS4_CALLBACK,
7472 };
7473 struct nfs41_create_session_res res;
7474
7475 struct rpc_message msg = {
7476 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
7477 .rpc_argp = &args,
7478 .rpc_resp = &res,
7479 .rpc_cred = cred,
7480 };
7481 int status;
7482
7483 nfs4_init_channel_attrs(&args);
7484 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
7485
7486 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7487 trace_nfs4_create_session(clp, status);
7488
7489 if (!status) {
7490 /* Verify the session's negotiated channel_attrs values */
7491 status = nfs4_verify_channel_attrs(&args, &res);
7492 /* Increment the clientid slot sequence id */
7493 if (clp->cl_seqid == res.seqid)
7494 clp->cl_seqid++;
7495 if (status)
7496 goto out;
7497 nfs4_update_session(session, &res);
7498 }
7499 out:
7500 return status;
7501 }
7502
7503 /*
7504 * Issues a CREATE_SESSION operation to the server.
7505  * It is the caller's responsibility to verify that the session has
7506  * expired before calling this routine.
7507 */
7508 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
7509 {
7510 int status;
7511 unsigned *ptr;
7512 struct nfs4_session *session = clp->cl_session;
7513
7514 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
7515
7516 status = _nfs4_proc_create_session(clp, cred);
7517 if (status)
7518 goto out;
7519
7520 /* Init or reset the session slot tables */
7521 status = nfs4_setup_session_slot_tables(session);
7522 dprintk("slot table setup returned %d\n", status);
7523 if (status)
7524 goto out;
7525
7526 ptr = (unsigned *)&session->sess_id.data[0];
7527 	dprintk("%s clp->cl_seqid %d sessionid %u:%u:%u:%u\n", __func__,
7528 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
7529 out:
7530 dprintk("<-- %s\n", __func__);
7531 return status;
7532 }
7533
7534 /*
7535 * Issue the over-the-wire RPC DESTROY_SESSION.
7536 * The caller must serialize access to this routine.
7537 */
7538 int nfs4_proc_destroy_session(struct nfs4_session *session,
7539 struct rpc_cred *cred)
7540 {
7541 struct rpc_message msg = {
7542 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
7543 .rpc_argp = session,
7544 .rpc_cred = cred,
7545 };
7546 int status = 0;
7547
7548 dprintk("--> nfs4_proc_destroy_session\n");
7549
7550 	/* session is still being set up */
7551 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
7552 return 0;
7553
7554 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7555 trace_nfs4_destroy_session(session->clp, status);
7556
7557 if (status)
7558 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
7559 "Session has been destroyed regardless...\n", status);
7560
7561 dprintk("<-- nfs4_proc_destroy_session\n");
7562 return status;
7563 }
7564
7565 /*
7566 * Renew the cl_session lease.
7567 */
7568 struct nfs4_sequence_data {
7569 struct nfs_client *clp;
7570 struct nfs4_sequence_args args;
7571 struct nfs4_sequence_res res;
7572 };
7573
7574 static void nfs41_sequence_release(void *data)
7575 {
7576 struct nfs4_sequence_data *calldata = data;
7577 struct nfs_client *clp = calldata->clp;
7578
7579 if (atomic_read(&clp->cl_count) > 1)
7580 nfs4_schedule_state_renewal(clp);
7581 nfs_put_client(clp);
7582 kfree(calldata);
7583 }
7584
7585 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
7586 {
7587 switch(task->tk_status) {
7588 case -NFS4ERR_DELAY:
7589 rpc_delay(task, NFS4_POLL_RETRY_MAX);
7590 return -EAGAIN;
7591 default:
7592 nfs4_schedule_lease_recovery(clp);
7593 }
7594 return 0;
7595 }
7596
7597 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
7598 {
7599 struct nfs4_sequence_data *calldata = data;
7600 struct nfs_client *clp = calldata->clp;
7601
7602 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
7603 return;
7604
7605 trace_nfs4_sequence(clp, task->tk_status);
7606 if (task->tk_status < 0) {
7607 dprintk("%s ERROR %d\n", __func__, task->tk_status);
7608 if (atomic_read(&clp->cl_count) == 1)
7609 goto out;
7610
7611 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
7612 rpc_restart_call_prepare(task);
7613 return;
7614 }
7615 }
7616 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
7617 out:
7618 dprintk("<-- %s\n", __func__);
7619 }
7620
7621 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
7622 {
7623 struct nfs4_sequence_data *calldata = data;
7624 struct nfs_client *clp = calldata->clp;
7625 struct nfs4_sequence_args *args;
7626 struct nfs4_sequence_res *res;
7627
7628 args = task->tk_msg.rpc_argp;
7629 res = task->tk_msg.rpc_resp;
7630
7631 nfs41_setup_sequence(clp->cl_session, args, res, task);
7632 }
7633
7634 static const struct rpc_call_ops nfs41_sequence_ops = {
7635 .rpc_call_done = nfs41_sequence_call_done,
7636 .rpc_call_prepare = nfs41_sequence_prepare,
7637 .rpc_release = nfs41_sequence_release,
7638 };
7639
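/*
 * Start an asynchronous SEQUENCE call to renew the lease. A reference
 * to the nfs_client is taken here and dropped in the rpc_release
 * callback, which also schedules the next renewal unless the client
 * is being torn down.
 */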
7640 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
7641 struct rpc_cred *cred,
7642 bool is_privileged)
7643 {
7644 struct nfs4_sequence_data *calldata;
7645 struct rpc_message msg = {
7646 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
7647 .rpc_cred = cred,
7648 };
7649 struct rpc_task_setup task_setup_data = {
7650 .rpc_client = clp->cl_rpcclient,
7651 .rpc_message = &msg,
7652 .callback_ops = &nfs41_sequence_ops,
7653 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
7654 };
7655
7656 if (!atomic_inc_not_zero(&clp->cl_count))
7657 return ERR_PTR(-EIO);
7658 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7659 if (calldata == NULL) {
7660 nfs_put_client(clp);
7661 return ERR_PTR(-ENOMEM);
7662 }
7663 nfs4_init_sequence(&calldata->args, &calldata->res, 0);
7664 if (is_privileged)
7665 nfs4_set_sequence_privileged(&calldata->args);
7666 msg.rpc_argp = &calldata->args;
7667 msg.rpc_resp = &calldata->res;
7668 calldata->clp = clp;
7669 task_setup_data.callback_data = calldata;
7670
7671 return rpc_run_task(&task_setup_data);
7672 }
7673
7674 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
7675 {
7676 struct rpc_task *task;
7677 int ret = 0;
7678
7679 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
7680 return -EAGAIN;
7681 task = _nfs41_proc_sequence(clp, cred, false);
7682 if (IS_ERR(task))
7683 ret = PTR_ERR(task);
7684 else
7685 rpc_put_task_async(task);
7686 dprintk("<-- %s status=%d\n", __func__, ret);
7687 return ret;
7688 }
7689
7690 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
7691 {
7692 struct rpc_task *task;
7693 int ret;
7694
7695 task = _nfs41_proc_sequence(clp, cred, true);
7696 if (IS_ERR(task)) {
7697 ret = PTR_ERR(task);
7698 goto out;
7699 }
7700 ret = rpc_wait_for_completion_task(task);
7701 if (!ret)
7702 ret = task->tk_status;
7703 rpc_put_task(task);
7704 out:
7705 dprintk("<-- %s status=%d\n", __func__, ret);
7706 return ret;
7707 }
7708
7709 struct nfs4_reclaim_complete_data {
7710 struct nfs_client *clp;
7711 struct nfs41_reclaim_complete_args arg;
7712 struct nfs41_reclaim_complete_res res;
7713 };
7714
7715 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
7716 {
7717 struct nfs4_reclaim_complete_data *calldata = data;
7718
7719 nfs41_setup_sequence(calldata->clp->cl_session,
7720 &calldata->arg.seq_args,
7721 &calldata->res.seq_res,
7722 task);
7723 }
7724
7725 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
7726 {
7727 switch(task->tk_status) {
7728 case 0:
7729 case -NFS4ERR_COMPLETE_ALREADY:
7730 case -NFS4ERR_WRONG_CRED: /* What to do here? */
7731 break;
7732 case -NFS4ERR_DELAY:
7733 rpc_delay(task, NFS4_POLL_RETRY_MAX);
7734 /* fall through */
7735 case -NFS4ERR_RETRY_UNCACHED_REP:
7736 return -EAGAIN;
7737 default:
7738 nfs4_schedule_lease_recovery(clp);
7739 }
7740 return 0;
7741 }
7742
7743 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
7744 {
7745 struct nfs4_reclaim_complete_data *calldata = data;
7746 struct nfs_client *clp = calldata->clp;
7747 struct nfs4_sequence_res *res = &calldata->res.seq_res;
7748
7749 dprintk("--> %s\n", __func__);
7750 if (!nfs41_sequence_done(task, res))
7751 return;
7752
7753 trace_nfs4_reclaim_complete(clp, task->tk_status);
7754 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
7755 rpc_restart_call_prepare(task);
7756 return;
7757 }
7758 dprintk("<-- %s\n", __func__);
7759 }
7760
7761 static void nfs4_free_reclaim_complete_data(void *data)
7762 {
7763 struct nfs4_reclaim_complete_data *calldata = data;
7764
7765 kfree(calldata);
7766 }
7767
7768 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
7769 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
7770 .rpc_call_done = nfs4_reclaim_complete_done,
7771 .rpc_release = nfs4_free_reclaim_complete_data,
7772 };
7773
7774 /*
7775 * Issue a global reclaim complete.
7776 */
7777 static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
7778 struct rpc_cred *cred)
7779 {
7780 struct nfs4_reclaim_complete_data *calldata;
7781 struct rpc_task *task;
7782 struct rpc_message msg = {
7783 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
7784 .rpc_cred = cred,
7785 };
7786 struct rpc_task_setup task_setup_data = {
7787 .rpc_client = clp->cl_rpcclient,
7788 .rpc_message = &msg,
7789 .callback_ops = &nfs4_reclaim_complete_call_ops,
7790 .flags = RPC_TASK_ASYNC,
7791 };
7792 int status = -ENOMEM;
7793
7794 dprintk("--> %s\n", __func__);
7795 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7796 if (calldata == NULL)
7797 goto out;
7798 calldata->clp = clp;
7799 calldata->arg.one_fs = 0;
7800
7801 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
7802 nfs4_set_sequence_privileged(&calldata->arg.seq_args);
7803 msg.rpc_argp = &calldata->arg;
7804 msg.rpc_resp = &calldata->res;
7805 task_setup_data.callback_data = calldata;
7806 task = rpc_run_task(&task_setup_data);
7807 if (IS_ERR(task)) {
7808 status = PTR_ERR(task);
7809 goto out;
7810 }
7811 status = nfs4_wait_for_completion_rpc_task(task);
7812 if (status == 0)
7813 status = task->tk_status;
7814 rpc_put_task(task);
7815 return 0;
7816 out:
7817 dprintk("<-- %s status=%d\n", __func__, status);
7818 return status;
7819 }
7820
7821 static void
7822 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
7823 {
7824 struct nfs4_layoutget *lgp = calldata;
7825 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
7826 struct nfs4_session *session = nfs4_get_session(server);
7827 int ret;
7828
7829 dprintk("--> %s\n", __func__);
7830 	/* Note that there is a race here, where a CB_LAYOUTRECALL can come in
7831 * right now covering the LAYOUTGET we are about to send.
7832 * However, that is not so catastrophic, and there seems
7833 * to be no way to prevent it completely.
7834 */
7835 if (nfs41_setup_sequence(session, &lgp->args.seq_args,
7836 &lgp->res.seq_res, task))
7837 return;
7838 ret = pnfs_choose_layoutget_stateid(&lgp->args.stateid,
7839 NFS_I(lgp->args.inode)->layout,
7840 &lgp->args.range,
7841 lgp->args.ctx->state);
7842 if (ret < 0)
7843 rpc_exit(task, ret);
7844 }
7845
7846 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
7847 {
7848 struct nfs4_layoutget *lgp = calldata;
7849 struct inode *inode = lgp->args.inode;
7850 struct nfs_server *server = NFS_SERVER(inode);
7851 struct pnfs_layout_hdr *lo;
7852 struct nfs4_state *state = NULL;
7853 unsigned long timeo, now, giveup;
7854
7855 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
7856
7857 if (!nfs41_sequence_done(task, &lgp->res.seq_res))
7858 goto out;
7859
7860 switch (task->tk_status) {
7861 case 0:
7862 goto out;
7863
7864 /*
7865 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs
7866 	 * on the file. Set tk_status to -ENODATA to tell the upper layer to
7867 	 * retry the I/O through the MDS instead (inband).
7868 */
7869 case -NFS4ERR_LAYOUTUNAVAILABLE:
7870 task->tk_status = -ENODATA;
7871 goto out;
7872 /*
7873 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of
7874 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3).
7875 */
7876 case -NFS4ERR_BADLAYOUT:
7877 goto out_overflow;
7878 /*
7879 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
7880 * (or clients) writing to the same RAID stripe except when
7881 * the minlength argument is 0 (see RFC5661 section 18.43.3).
7882 */
7883 case -NFS4ERR_LAYOUTTRYLATER:
7884 if (lgp->args.minlength == 0)
7885 goto out_overflow;
7886 /*
7887 	 * NFS4ERR_RECALLCONFLICT indicates a conflict with ourselves (we must
7888 	 * recall an existing layout before getting a new one).
7889 */
7890 case -NFS4ERR_RECALLCONFLICT:
7891 timeo = rpc_get_timeout(task->tk_client);
7892 giveup = lgp->args.timestamp + timeo;
7893 now = jiffies;
7894 if (time_after(giveup, now)) {
7895 unsigned long delay;
7896
7897 /* Delay for:
7898 			 * - Not less than NFS4_POLL_RETRY_MIN.
7899 			 * - One last time, a jiffy before we give up.
7900 			 * - Exponential backoff (time_now minus start_attempt).
7901 */
7902 delay = max_t(unsigned long, NFS4_POLL_RETRY_MIN,
7903 min((giveup - now - 1),
7904 now - lgp->args.timestamp));
7905
7906 dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n",
7907 __func__, delay);
7908 rpc_delay(task, delay);
7909 /* Do not call nfs4_async_handle_error() */
7910 goto out_restart;
7911 }
7912 break;
7913 case -NFS4ERR_EXPIRED:
7914 case -NFS4ERR_BAD_STATEID:
7915 spin_lock(&inode->i_lock);
7916 if (nfs4_stateid_match(&lgp->args.stateid,
7917 &lgp->args.ctx->state->stateid)) {
7918 spin_unlock(&inode->i_lock);
7919 /* If the open stateid was bad, then recover it. */
7920 state = lgp->args.ctx->state;
7921 break;
7922 }
7923 lo = NFS_I(inode)->layout;
7924 if (lo && nfs4_stateid_match(&lgp->args.stateid,
7925 &lo->plh_stateid)) {
7926 LIST_HEAD(head);
7927
7928 /*
7929 * Mark the bad layout state as invalid, then retry
7930 * with the current stateid.
7931 */
7932 set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
7933 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
7934 spin_unlock(&inode->i_lock);
7935 pnfs_free_lseg_list(&head);
7936 } else
7937 spin_unlock(&inode->i_lock);
7938 goto out_restart;
7939 }
7940 if (nfs4_async_handle_error(task, server, state, &lgp->timeout) == -EAGAIN)
7941 goto out_restart;
7942 out:
7943 dprintk("<-- %s\n", __func__);
7944 return;
7945 out_restart:
7946 task->tk_status = 0;
7947 rpc_restart_call_prepare(task);
7948 return;
7949 out_overflow:
7950 task->tk_status = -EOVERFLOW;
7951 goto out;
7952 }
7953
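/*
 * Number of pages needed to hold the largest possible reply on the
 * session's fore channel; used to size the LAYOUTGET response buffer.
 */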
7954 static size_t max_response_pages(struct nfs_server *server)
7955 {
7956 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
7957 return nfs_page_array_len(0, max_resp_sz);
7958 }
7959
7960 static void nfs4_free_pages(struct page **pages, size_t size)
7961 {
7962 int i;
7963
7964 if (!pages)
7965 return;
7966
7967 for (i = 0; i < size; i++) {
7968 if (!pages[i])
7969 break;
7970 __free_page(pages[i]);
7971 }
7972 kfree(pages);
7973 }
7974
7975 static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
7976 {
7977 struct page **pages;
7978 int i;
7979
7980 pages = kcalloc(size, sizeof(struct page *), gfp_flags);
7981 if (!pages) {
7982 dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
7983 return NULL;
7984 }
7985
7986 for (i = 0; i < size; i++) {
7987 pages[i] = alloc_page(gfp_flags);
7988 if (!pages[i]) {
7989 dprintk("%s: failed to allocate page\n", __func__);
7990 nfs4_free_pages(pages, size);
7991 return NULL;
7992 }
7993 }
7994
7995 return pages;
7996 }
7997
7998 static void nfs4_layoutget_release(void *calldata)
7999 {
8000 struct nfs4_layoutget *lgp = calldata;
8001 struct inode *inode = lgp->args.inode;
8002 struct nfs_server *server = NFS_SERVER(inode);
8003 size_t max_pages = max_response_pages(server);
8004
8005 dprintk("--> %s\n", __func__);
8006 nfs4_free_pages(lgp->args.layout.pages, max_pages);
8007 pnfs_put_layout_hdr(NFS_I(inode)->layout);
8008 put_nfs_open_context(lgp->args.ctx);
8009 kfree(calldata);
8010 dprintk("<-- %s\n", __func__);
8011 }
8012
8013 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
8014 .rpc_call_prepare = nfs4_layoutget_prepare,
8015 .rpc_call_done = nfs4_layoutget_done,
8016 .rpc_release = nfs4_layoutget_release,
8017 };
8018
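/*
 * Send LAYOUTGET and wait for the result. The layout header reference
 * taken here is dropped in nfs4_layoutget_release(), and a successful
 * reply is turned into a layout segment via pnfs_layout_process().
 */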
8019 struct pnfs_layout_segment *
8020 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
8021 {
8022 struct inode *inode = lgp->args.inode;
8023 struct nfs_server *server = NFS_SERVER(inode);
8024 size_t max_pages = max_response_pages(server);
8025 struct rpc_task *task;
8026 struct rpc_message msg = {
8027 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
8028 .rpc_argp = &lgp->args,
8029 .rpc_resp = &lgp->res,
8030 .rpc_cred = lgp->cred,
8031 };
8032 struct rpc_task_setup task_setup_data = {
8033 .rpc_client = server->client,
8034 .rpc_message = &msg,
8035 .callback_ops = &nfs4_layoutget_call_ops,
8036 .callback_data = lgp,
8037 .flags = RPC_TASK_ASYNC,
8038 };
8039 struct pnfs_layout_segment *lseg = NULL;
8040 int status = 0;
8041
8042 dprintk("--> %s\n", __func__);
8043
8044 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
8045 pnfs_get_layout_hdr(NFS_I(inode)->layout);
8046
8047 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
8048 if (!lgp->args.layout.pages) {
8049 nfs4_layoutget_release(lgp);
8050 return ERR_PTR(-ENOMEM);
8051 }
8052 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
8053 lgp->args.timestamp = jiffies;
8054
8055 lgp->res.layoutp = &lgp->args.layout;
8056 lgp->res.seq_res.sr_slot = NULL;
8057 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
8058
8059 task = rpc_run_task(&task_setup_data);
8060 if (IS_ERR(task))
8061 return ERR_CAST(task);
8062 status = nfs4_wait_for_completion_rpc_task(task);
8063 if (status == 0)
8064 status = task->tk_status;
8065 trace_nfs4_layoutget(lgp->args.ctx,
8066 &lgp->args.range,
8067 &lgp->res.range,
8068 &lgp->res.stateid,
8069 status);
8070 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
8071 if (status == 0 && lgp->res.layoutp->len)
8072 lseg = pnfs_layout_process(lgp);
8073 rpc_put_task(task);
8074 dprintk("<-- %s status=%d\n", __func__, status);
8075 if (status)
8076 return ERR_PTR(status);
8077 return lseg;
8078 }
8079
8080 static void
8081 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
8082 {
8083 struct nfs4_layoutreturn *lrp = calldata;
8084
8085 dprintk("--> %s\n", __func__);
8086 nfs41_setup_sequence(lrp->clp->cl_session,
8087 &lrp->args.seq_args,
8088 &lrp->res.seq_res,
8089 task);
8090 }
8091
8092 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
8093 {
8094 struct nfs4_layoutreturn *lrp = calldata;
8095 struct nfs_server *server;
8096
8097 dprintk("--> %s\n", __func__);
8098
8099 if (!nfs41_sequence_done(task, &lrp->res.seq_res))
8100 return;
8101
8102 server = NFS_SERVER(lrp->args.inode);
8103 switch (task->tk_status) {
8104 default:
8105 task->tk_status = 0;
8106 case 0:
8107 break;
8108 case -NFS4ERR_DELAY:
8109 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
8110 break;
8111 rpc_restart_call_prepare(task);
8112 return;
8113 }
8114 dprintk("<-- %s\n", __func__);
8115 }
8116
8117 static void nfs4_layoutreturn_release(void *calldata)
8118 {
8119 struct nfs4_layoutreturn *lrp = calldata;
8120 struct pnfs_layout_hdr *lo = lrp->args.layout;
8121 LIST_HEAD(freeme);
8122
8123 dprintk("--> %s\n", __func__);
8124 spin_lock(&lo->plh_inode->i_lock);
8125 pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range);
8126 pnfs_mark_layout_returned_if_empty(lo);
8127 if (lrp->res.lrs_present)
8128 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
8129 pnfs_clear_layoutreturn_waitbit(lo);
8130 spin_unlock(&lo->plh_inode->i_lock);
8131 pnfs_free_lseg_list(&freeme);
8132 pnfs_put_layout_hdr(lrp->args.layout);
8133 nfs_iput_and_deactive(lrp->inode);
8134 kfree(calldata);
8135 dprintk("<-- %s\n", __func__);
8136 }
8137
8138 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
8139 .rpc_call_prepare = nfs4_layoutreturn_prepare,
8140 .rpc_call_done = nfs4_layoutreturn_done,
8141 .rpc_release = nfs4_layoutreturn_release,
8142 };
8143
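/*
 * Send LAYOUTRETURN, either synchronously or as an async RPC task.
 * In the async case a reference to the inode is taken and released in
 * nfs4_layoutreturn_release(). The RPC client and credential may be
 * overridden by nfs4_state_protect() for machine-credential cleanup.
 */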
8144 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
8145 {
8146 struct rpc_task *task;
8147 struct rpc_message msg = {
8148 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
8149 .rpc_argp = &lrp->args,
8150 .rpc_resp = &lrp->res,
8151 .rpc_cred = lrp->cred,
8152 };
8153 struct rpc_task_setup task_setup_data = {
8154 .rpc_client = NFS_SERVER(lrp->args.inode)->client,
8155 .rpc_message = &msg,
8156 .callback_ops = &nfs4_layoutreturn_call_ops,
8157 .callback_data = lrp,
8158 };
8159 int status = 0;
8160
8161 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client,
8162 NFS_SP4_MACH_CRED_PNFS_CLEANUP,
8163 &task_setup_data.rpc_client, &msg);
8164
8165 dprintk("--> %s\n", __func__);
8166 if (!sync) {
8167 lrp->inode = nfs_igrab_and_active(lrp->args.inode);
8168 if (!lrp->inode) {
8169 nfs4_layoutreturn_release(lrp);
8170 return -EAGAIN;
8171 }
8172 task_setup_data.flags |= RPC_TASK_ASYNC;
8173 }
8174 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
8175 task = rpc_run_task(&task_setup_data);
8176 if (IS_ERR(task))
8177 return PTR_ERR(task);
8178 if (sync)
8179 status = task->tk_status;
8180 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status);
8181 dprintk("<-- %s status=%d\n", __func__, status);
8182 rpc_put_task(task);
8183 return status;
8184 }
8185
8186 static int
8187 _nfs4_proc_getdeviceinfo(struct nfs_server *server,
8188 struct pnfs_device *pdev,
8189 struct rpc_cred *cred)
8190 {
8191 struct nfs4_getdeviceinfo_args args = {
8192 .pdev = pdev,
8193 .notify_types = NOTIFY_DEVICEID4_CHANGE |
8194 NOTIFY_DEVICEID4_DELETE,
8195 };
8196 struct nfs4_getdeviceinfo_res res = {
8197 .pdev = pdev,
8198 };
8199 struct rpc_message msg = {
8200 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
8201 .rpc_argp = &args,
8202 .rpc_resp = &res,
8203 .rpc_cred = cred,
8204 };
8205 int status;
8206
8207 dprintk("--> %s\n", __func__);
8208 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
8209 if (res.notification & ~args.notify_types)
8210 dprintk("%s: unsupported notification\n", __func__);
8211 if (res.notification != args.notify_types)
8212 pdev->nocache = 1;
8213
8214 dprintk("<-- %s status=%d\n", __func__, status);
8215
8216 return status;
8217 }
8218
8219 int nfs4_proc_getdeviceinfo(struct nfs_server *server,
8220 struct pnfs_device *pdev,
8221 struct rpc_cred *cred)
8222 {
8223 struct nfs4_exception exception = { };
8224 int err;
8225
8226 do {
8227 err = nfs4_handle_exception(server,
8228 _nfs4_proc_getdeviceinfo(server, pdev, cred),
8229 &exception);
8230 } while (exception.retry);
8231 return err;
8232 }
8233 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
8234
8235 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
8236 {
8237 struct nfs4_layoutcommit_data *data = calldata;
8238 struct nfs_server *server = NFS_SERVER(data->args.inode);
8239 struct nfs4_session *session = nfs4_get_session(server);
8240
8241 nfs41_setup_sequence(session,
8242 &data->args.seq_args,
8243 &data->res.seq_res,
8244 task);
8245 }
8246
8247 static void
8248 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
8249 {
8250 struct nfs4_layoutcommit_data *data = calldata;
8251 struct nfs_server *server = NFS_SERVER(data->args.inode);
8252
8253 if (!nfs41_sequence_done(task, &data->res.seq_res))
8254 return;
8255
8256 switch (task->tk_status) { /* Just ignore these failures */
8257 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
8258 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
8259 case -NFS4ERR_BADLAYOUT: /* no layout */
8260 	case -NFS4ERR_GRACE:	    /* loca_reclaim is always false */
8261 task->tk_status = 0;
8262 case 0:
8263 break;
8264 default:
8265 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
8266 rpc_restart_call_prepare(task);
8267 return;
8268 }
8269 }
8270 }
8271
8272 static void nfs4_layoutcommit_release(void *calldata)
8273 {
8274 struct nfs4_layoutcommit_data *data = calldata;
8275
8276 pnfs_cleanup_layoutcommit(data);
8277 nfs_post_op_update_inode_force_wcc(data->args.inode,
8278 data->res.fattr);
8279 put_rpccred(data->cred);
8280 nfs_iput_and_deactive(data->inode);
8281 kfree(data);
8282 }
8283
8284 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
8285 .rpc_call_prepare = nfs4_layoutcommit_prepare,
8286 .rpc_call_done = nfs4_layoutcommit_done,
8287 .rpc_release = nfs4_layoutcommit_release,
8288 };
8289
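/*
 * Send LAYOUTCOMMIT for the byte range described in @data, either
 * synchronously or as an async RPC task; in the async case the inode
 * reference is dropped in nfs4_layoutcommit_release().
 */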
8290 int
8291 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
8292 {
8293 struct rpc_message msg = {
8294 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
8295 .rpc_argp = &data->args,
8296 .rpc_resp = &data->res,
8297 .rpc_cred = data->cred,
8298 };
8299 struct rpc_task_setup task_setup_data = {
8300 .task = &data->task,
8301 .rpc_client = NFS_CLIENT(data->args.inode),
8302 .rpc_message = &msg,
8303 .callback_ops = &nfs4_layoutcommit_ops,
8304 .callback_data = data,
8305 };
8306 struct rpc_task *task;
8307 int status = 0;
8308
8309 dprintk("NFS: initiating layoutcommit call. sync %d "
8310 "lbw: %llu inode %lu\n", sync,
8311 data->args.lastbytewritten,
8312 data->args.inode->i_ino);
8313
8314 if (!sync) {
8315 data->inode = nfs_igrab_and_active(data->args.inode);
8316 if (data->inode == NULL) {
8317 nfs4_layoutcommit_release(data);
8318 return -EAGAIN;
8319 }
8320 task_setup_data.flags = RPC_TASK_ASYNC;
8321 }
8322 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
8323 task = rpc_run_task(&task_setup_data);
8324 if (IS_ERR(task))
8325 return PTR_ERR(task);
8326 if (sync)
8327 status = task->tk_status;
8328 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status);
8329 dprintk("%s: status %d\n", __func__, status);
8330 rpc_put_task(task);
8331 return status;
8332 }
8333
8334 /**
8335  * Use the state management nfs_client cl_rpcclient, which uses krb5i (if
8336  * possible) as per the RFC3530bis and RFC5661 Security Considerations sections.
8337 */
8338 static int
8339 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8340 struct nfs_fsinfo *info,
8341 struct nfs4_secinfo_flavors *flavors, bool use_integrity)
8342 {
8343 struct nfs41_secinfo_no_name_args args = {
8344 .style = SECINFO_STYLE_CURRENT_FH,
8345 };
8346 struct nfs4_secinfo_res res = {
8347 .flavors = flavors,
8348 };
8349 struct rpc_message msg = {
8350 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
8351 .rpc_argp = &args,
8352 .rpc_resp = &res,
8353 };
8354 struct rpc_clnt *clnt = server->client;
8355 struct rpc_cred *cred = NULL;
8356 int status;
8357
8358 if (use_integrity) {
8359 clnt = server->nfs_client->cl_rpcclient;
8360 cred = nfs4_get_clid_cred(server->nfs_client);
8361 msg.rpc_cred = cred;
8362 }
8363
8364 dprintk("--> %s\n", __func__);
8365 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
8366 &res.seq_res, 0);
8367 dprintk("<-- %s status=%d\n", __func__, status);
8368
8369 if (cred)
8370 put_rpccred(cred);
8371
8372 return status;
8373 }
8374
8375 static int
8376 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8377 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
8378 {
8379 struct nfs4_exception exception = { };
8380 int err;
8381 do {
8382 /* first try using integrity protection */
8383 err = -NFS4ERR_WRONGSEC;
8384
8385 /* try to use integrity protection with machine cred */
8386 if (_nfs4_is_integrity_protected(server->nfs_client))
8387 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
8388 flavors, true);
8389
8390 /*
8391 * if unable to use integrity protection, or SECINFO with
8392 * integrity protection returns NFS4ERR_WRONGSEC (which is
8393 * disallowed by spec, but exists in deployed servers) use
8394 * the current filesystem's rpc_client and the user cred.
8395 */
8396 if (err == -NFS4ERR_WRONGSEC)
8397 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
8398 flavors, false);
8399
8400 switch (err) {
8401 case 0:
8402 case -NFS4ERR_WRONGSEC:
8403 case -ENOTSUPP:
8404 goto out;
8405 default:
8406 err = nfs4_handle_exception(server, err, &exception);
8407 }
8408 } while (exception.retry);
8409 out:
8410 return err;
8411 }
8412
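/*
 * Discover a usable security flavor for the root filehandle by asking
 * the server via SECINFO_NO_NAME, then trying each returned flavor
 * that matches the mount's auth_info. Falls back to the "guess and
 * check" method (nfs4_find_root_sec) if SECINFO_NO_NAME is unsupported.
 */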
8413 static int
8414 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
8415 struct nfs_fsinfo *info)
8416 {
8417 int err;
8418 struct page *page;
8419 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
8420 struct nfs4_secinfo_flavors *flavors;
8421 struct nfs4_secinfo4 *secinfo;
8422 int i;
8423
8424 page = alloc_page(GFP_KERNEL);
8425 if (!page) {
8426 err = -ENOMEM;
8427 goto out;
8428 }
8429
8430 flavors = page_address(page);
8431 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
8432
8433 /*
8434 	 * Fall back on the "guess and check" method if
8435 	 * the server doesn't support SECINFO_NO_NAME.
8436 */
8437 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
8438 err = nfs4_find_root_sec(server, fhandle, info);
8439 goto out_freepage;
8440 }
8441 if (err)
8442 goto out_freepage;
8443
8444 for (i = 0; i < flavors->num_flavors; i++) {
8445 secinfo = &flavors->flavors[i];
8446
8447 switch (secinfo->flavor) {
8448 case RPC_AUTH_NULL:
8449 case RPC_AUTH_UNIX:
8450 case RPC_AUTH_GSS:
8451 flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
8452 &secinfo->flavor_info);
8453 break;
8454 default:
8455 flavor = RPC_AUTH_MAXFLAVOR;
8456 break;
8457 }
8458
8459 if (!nfs_auth_info_match(&server->auth_info, flavor))
8460 flavor = RPC_AUTH_MAXFLAVOR;
8461
8462 if (flavor != RPC_AUTH_MAXFLAVOR) {
8463 err = nfs4_lookup_root_sec(server, fhandle,
8464 info, flavor);
8465 if (!err)
8466 break;
8467 }
8468 }
8469
8470 if (flavor == RPC_AUTH_MAXFLAVOR)
8471 err = -EPERM;
8472
8473 out_freepage:
8474 put_page(page);
8475 if (err == -EACCES)
8476 return -EPERM;
8477 out:
8478 return err;
8479 }
8480
8481 static int _nfs41_test_stateid(struct nfs_server *server,
8482 nfs4_stateid *stateid,
8483 struct rpc_cred *cred)
8484 {
8485 int status;
8486 struct nfs41_test_stateid_args args = {
8487 .stateid = stateid,
8488 };
8489 struct nfs41_test_stateid_res res;
8490 struct rpc_message msg = {
8491 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
8492 .rpc_argp = &args,
8493 .rpc_resp = &res,
8494 .rpc_cred = cred,
8495 };
8496 struct rpc_clnt *rpc_client = server->client;
8497
8498 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
8499 &rpc_client, &msg);
8500
8501 dprintk("NFS call test_stateid %p\n", stateid);
8502 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
8503 nfs4_set_sequence_privileged(&args.seq_args);
8504 status = nfs4_call_sync_sequence(rpc_client, server, &msg,
8505 &args.seq_args, &res.seq_res);
8506 if (status != NFS_OK) {
8507 dprintk("NFS reply test_stateid: failed, %d\n", status);
8508 return status;
8509 }
8510 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
8511 return -res.status;
8512 }
8513
8514 /**
8515 * nfs41_test_stateid - perform a TEST_STATEID operation
8516 *
8517 * @server: server / transport on which to perform the operation
8518 * @stateid: state ID to test
8519 * @cred: credential
8520 *
8521 * Returns NFS_OK if the server recognizes that "stateid" is valid.
8522 * Otherwise a negative NFS4ERR value is returned if the operation
8523 * failed or the state ID is not currently valid.
8524 */
8525 static int nfs41_test_stateid(struct nfs_server *server,
8526 nfs4_stateid *stateid,
8527 struct rpc_cred *cred)
8528 {
8529 struct nfs4_exception exception = { };
8530 int err;
8531 do {
8532 err = _nfs41_test_stateid(server, stateid, cred);
8533 if (err != -NFS4ERR_DELAY)
8534 break;
8535 nfs4_handle_exception(server, err, &exception);
8536 } while (exception.retry);
8537 return err;
8538 }
8539
8540 struct nfs_free_stateid_data {
8541 struct nfs_server *server;
8542 struct nfs41_free_stateid_args args;
8543 struct nfs41_free_stateid_res res;
8544 };
8545
8546 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
8547 {
8548 struct nfs_free_stateid_data *data = calldata;
8549 nfs41_setup_sequence(nfs4_get_session(data->server),
8550 &data->args.seq_args,
8551 &data->res.seq_res,
8552 task);
8553 }
8554
8555 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
8556 {
8557 struct nfs_free_stateid_data *data = calldata;
8558
8559 nfs41_sequence_done(task, &data->res.seq_res);
8560
8561 switch (task->tk_status) {
8562 case -NFS4ERR_DELAY:
8563 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
8564 rpc_restart_call_prepare(task);
8565 }
8566 }
8567
8568 static void nfs41_free_stateid_release(void *calldata)
8569 {
8570 kfree(calldata);
8571 }
8572
8573 static const struct rpc_call_ops nfs41_free_stateid_ops = {
8574 .rpc_call_prepare = nfs41_free_stateid_prepare,
8575 .rpc_call_done = nfs41_free_stateid_done,
8576 .rpc_release = nfs41_free_stateid_release,
8577 };
8578
8579 static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server,
8580 nfs4_stateid *stateid,
8581 struct rpc_cred *cred,
8582 bool privileged)
8583 {
8584 struct rpc_message msg = {
8585 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
8586 .rpc_cred = cred,
8587 };
8588 struct rpc_task_setup task_setup = {
8589 .rpc_client = server->client,
8590 .rpc_message = &msg,
8591 .callback_ops = &nfs41_free_stateid_ops,
8592 .flags = RPC_TASK_ASYNC,
8593 };
8594 struct nfs_free_stateid_data *data;
8595
8596 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
8597 &task_setup.rpc_client, &msg);
8598
8599 dprintk("NFS call free_stateid %p\n", stateid);
8600 data = kmalloc(sizeof(*data), GFP_NOFS);
8601 if (!data)
8602 return ERR_PTR(-ENOMEM);
8603 data->server = server;
8604 nfs4_stateid_copy(&data->args.stateid, stateid);
8605
8606 task_setup.callback_data = data;
8607
8608 msg.rpc_argp = &data->args;
8609 msg.rpc_resp = &data->res;
8610 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
8611 if (privileged)
8612 nfs4_set_sequence_privileged(&data->args.seq_args);
8613
8614 return rpc_run_task(&task_setup);
8615 }
8616
8617 /**
8618 * nfs41_free_stateid - perform a FREE_STATEID operation
8619 *
8620 * @server: server / transport on which to perform the operation
8621 * @stateid: state ID to release
8622 * @cred: credential
8623 *
8624 * Returns NFS_OK if the server freed "stateid". Otherwise a
8625 * negative NFS4ERR value is returned.
8626 */
8627 static int nfs41_free_stateid(struct nfs_server *server,
8628 nfs4_stateid *stateid,
8629 struct rpc_cred *cred)
8630 {
8631 struct rpc_task *task;
8632 int ret;
8633
8634 task = _nfs41_free_stateid(server, stateid, cred, true);
8635 if (IS_ERR(task))
8636 return PTR_ERR(task);
8637 ret = rpc_wait_for_completion_task(task);
8638 if (!ret)
8639 ret = task->tk_status;
8640 rpc_put_task(task);
8641 return ret;
8642 }
8643
8644 static void
8645 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
8646 {
8647 struct rpc_task *task;
8648 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
8649
8650 task = _nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
8651 nfs4_free_lock_state(server, lsp);
8652 if (IS_ERR(task))
8653 return;
8654 rpc_put_task(task);
8655 }
8656
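/*
 * Two NFSv4.1 stateids match when their "other" fields are identical
 * and either the seqids are equal or one of them is zero (which the
 * client treats as matching any sequence number).
 */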
8657 static bool nfs41_match_stateid(const nfs4_stateid *s1,
8658 const nfs4_stateid *s2)
8659 {
8660 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
8661 return false;
8662
8663 if (s1->seqid == s2->seqid)
8664 return true;
8665 if (s1->seqid == 0 || s2->seqid == 0)
8666 return true;
8667
8668 return false;
8669 }
8670
8671 #endif /* CONFIG_NFS_V4_1 */
8672
8673 static bool nfs4_match_stateid(const nfs4_stateid *s1,
8674 const nfs4_stateid *s2)
8675 {
8676 return nfs4_stateid_match(s1, s2);
8677 }
8678
8679
8680 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
8681 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
8682 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
8683 .recover_open = nfs4_open_reclaim,
8684 .recover_lock = nfs4_lock_reclaim,
8685 .establish_clid = nfs4_init_clientid,
8686 .detect_trunking = nfs40_discover_server_trunking,
8687 };
8688
8689 #if defined(CONFIG_NFS_V4_1)
8690 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
8691 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
8692 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
8693 .recover_open = nfs4_open_reclaim,
8694 .recover_lock = nfs4_lock_reclaim,
8695 .establish_clid = nfs41_init_clientid,
8696 .reclaim_complete = nfs41_proc_reclaim_complete,
8697 .detect_trunking = nfs41_discover_server_trunking,
8698 };
8699 #endif /* CONFIG_NFS_V4_1 */
8700
8701 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
8702 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
8703 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
8704 .recover_open = nfs40_open_expired,
8705 .recover_lock = nfs4_lock_expired,
8706 .establish_clid = nfs4_init_clientid,
8707 };
8708
8709 #if defined(CONFIG_NFS_V4_1)
8710 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
8711 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
8712 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
8713 .recover_open = nfs41_open_expired,
8714 .recover_lock = nfs41_lock_expired,
8715 .establish_clid = nfs41_init_clientid,
8716 };
8717 #endif /* CONFIG_NFS_V4_1 */
8718
8719 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
8720 .sched_state_renewal = nfs4_proc_async_renew,
8721 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
8722 .renew_lease = nfs4_proc_renew,
8723 };
8724
8725 #if defined(CONFIG_NFS_V4_1)
8726 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
8727 .sched_state_renewal = nfs41_proc_async_sequence,
8728 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
8729 .renew_lease = nfs4_proc_sequence,
8730 };
8731 #endif
8732
8733 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
8734 .get_locations = _nfs40_proc_get_locations,
8735 .fsid_present = _nfs40_proc_fsid_present,
8736 };
8737
8738 #if defined(CONFIG_NFS_V4_1)
8739 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
8740 .get_locations = _nfs41_proc_get_locations,
8741 .fsid_present = _nfs41_proc_fsid_present,
8742 };
8743 #endif /* CONFIG_NFS_V4_1 */
8744
8745 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
8746 .minor_version = 0,
8747 .init_caps = NFS_CAP_READDIRPLUS
8748 | NFS_CAP_ATOMIC_OPEN
8749 | NFS_CAP_POSIX_LOCK,
8750 .init_client = nfs40_init_client,
8751 .shutdown_client = nfs40_shutdown_client,
8752 .match_stateid = nfs4_match_stateid,
8753 .find_root_sec = nfs4_find_root_sec,
8754 .free_lock_state = nfs4_release_lockowner,
8755 .alloc_seqid = nfs_alloc_seqid,
8756 .call_sync_ops = &nfs40_call_sync_ops,
8757 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
8758 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
8759 .state_renewal_ops = &nfs40_state_renewal_ops,
8760 .mig_recovery_ops = &nfs40_mig_recovery_ops,
8761 };
8762
8763 #if defined(CONFIG_NFS_V4_1)
8764 static struct nfs_seqid *
8765 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
8766 {
8767 return NULL;
8768 }
8769
8770 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
8771 .minor_version = 1,
8772 .init_caps = NFS_CAP_READDIRPLUS
8773 | NFS_CAP_ATOMIC_OPEN
8774 | NFS_CAP_POSIX_LOCK
8775 | NFS_CAP_STATEID_NFSV41
8776 | NFS_CAP_ATOMIC_OPEN_V1,
8777 .init_client = nfs41_init_client,
8778 .shutdown_client = nfs41_shutdown_client,
8779 .match_stateid = nfs41_match_stateid,
8780 .find_root_sec = nfs41_find_root_sec,
8781 .free_lock_state = nfs41_free_lock_state,
8782 .alloc_seqid = nfs_alloc_no_seqid,
8783 .call_sync_ops = &nfs41_call_sync_ops,
8784 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
8785 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
8786 .state_renewal_ops = &nfs41_state_renewal_ops,
8787 .mig_recovery_ops = &nfs41_mig_recovery_ops,
8788 };
8789 #endif
8790
8791 #if defined(CONFIG_NFS_V4_2)
8792 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
8793 .minor_version = 2,
8794 .init_caps = NFS_CAP_READDIRPLUS
8795 | NFS_CAP_ATOMIC_OPEN
8796 | NFS_CAP_POSIX_LOCK
8797 | NFS_CAP_STATEID_NFSV41
8798 | NFS_CAP_ATOMIC_OPEN_V1
8799 | NFS_CAP_ALLOCATE
8800 | NFS_CAP_DEALLOCATE
8801 | NFS_CAP_SEEK
8802 | NFS_CAP_LAYOUTSTATS
8803 | NFS_CAP_CLONE,
8804 .init_client = nfs41_init_client,
8805 .shutdown_client = nfs41_shutdown_client,
8806 .match_stateid = nfs41_match_stateid,
8807 .find_root_sec = nfs41_find_root_sec,
8808 .free_lock_state = nfs41_free_lock_state,
8809 .call_sync_ops = &nfs41_call_sync_ops,
8810 .alloc_seqid = nfs_alloc_no_seqid,
8811 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
8812 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
8813 .state_renewal_ops = &nfs41_state_renewal_ops,
8814 .mig_recovery_ops = &nfs41_mig_recovery_ops,
8815 };
8816 #endif
8817
8818 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
8819 [0] = &nfs_v4_0_minor_ops,
8820 #if defined(CONFIG_NFS_V4_1)
8821 [1] = &nfs_v4_1_minor_ops,
8822 #endif
8823 #if defined(CONFIG_NFS_V4_2)
8824 [2] = &nfs_v4_2_minor_ops,
8825 #endif
8826 };
8827
8828 ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
8829 {
8830 ssize_t error, error2;
8831
8832 error = generic_listxattr(dentry, list, size);
8833 if (error < 0)
8834 return error;
8835 if (list) {
8836 list += error;
8837 size -= error;
8838 }
8839
8840 error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size);
8841 if (error2 < 0)
8842 return error2;
8843 return error + error2;
8844 }
8845
8846 static const struct inode_operations nfs4_dir_inode_operations = {
8847 .create = nfs_create,
8848 .lookup = nfs_lookup,
8849 .atomic_open = nfs_atomic_open,
8850 .link = nfs_link,
8851 .unlink = nfs_unlink,
8852 .symlink = nfs_symlink,
8853 .mkdir = nfs_mkdir,
8854 .rmdir = nfs_rmdir,
8855 .mknod = nfs_mknod,
8856 .rename = nfs_rename,
8857 .permission = nfs_permission,
8858 .getattr = nfs_getattr,
8859 .setattr = nfs_setattr,
8860 .getxattr = generic_getxattr,
8861 .setxattr = generic_setxattr,
8862 .listxattr = nfs4_listxattr,
8863 .removexattr = generic_removexattr,
8864 };
8865
8866 static const struct inode_operations nfs4_file_inode_operations = {
8867 .permission = nfs_permission,
8868 .getattr = nfs_getattr,
8869 .setattr = nfs_setattr,
8870 .getxattr = generic_getxattr,
8871 .setxattr = generic_setxattr,
8872 .listxattr = nfs4_listxattr,
8873 .removexattr = generic_removexattr,
8874 };
8875
8876 const struct nfs_rpc_ops nfs_v4_clientops = {
8877 .version = 4, /* protocol version */
8878 .dentry_ops = &nfs4_dentry_operations,
8879 .dir_inode_ops = &nfs4_dir_inode_operations,
8880 .file_inode_ops = &nfs4_file_inode_operations,
8881 .file_ops = &nfs4_file_operations,
8882 .getroot = nfs4_proc_get_root,
8883 .submount = nfs4_submount,
8884 .try_mount = nfs4_try_mount,
8885 .getattr = nfs4_proc_getattr,
8886 .setattr = nfs4_proc_setattr,
8887 .lookup = nfs4_proc_lookup,
8888 .access = nfs4_proc_access,
8889 .readlink = nfs4_proc_readlink,
8890 .create = nfs4_proc_create,
8891 .remove = nfs4_proc_remove,
8892 .unlink_setup = nfs4_proc_unlink_setup,
8893 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
8894 .unlink_done = nfs4_proc_unlink_done,
8895 .rename_setup = nfs4_proc_rename_setup,
8896 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
8897 .rename_done = nfs4_proc_rename_done,
8898 .link = nfs4_proc_link,
8899 .symlink = nfs4_proc_symlink,
8900 .mkdir = nfs4_proc_mkdir,
8901 .rmdir = nfs4_proc_remove,
8902 .readdir = nfs4_proc_readdir,
8903 .mknod = nfs4_proc_mknod,
8904 .statfs = nfs4_proc_statfs,
8905 .fsinfo = nfs4_proc_fsinfo,
8906 .pathconf = nfs4_proc_pathconf,
8907 .set_capabilities = nfs4_server_capabilities,
8908 .decode_dirent = nfs4_decode_dirent,
8909 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
8910 .read_setup = nfs4_proc_read_setup,
8911 .read_done = nfs4_read_done,
8912 .write_setup = nfs4_proc_write_setup,
8913 .write_done = nfs4_write_done,
8914 .commit_setup = nfs4_proc_commit_setup,
8915 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
8916 .commit_done = nfs4_commit_done,
8917 .lock = nfs4_proc_lock,
8918 .clear_acl_cache = nfs4_zap_acl_attr,
8919 .close_context = nfs4_close_context,
8920 .open_context = nfs4_atomic_open,
8921 .have_delegation = nfs4_have_delegation,
8922 .return_delegation = nfs4_inode_return_delegation,
8923 .alloc_client = nfs4_alloc_client,
8924 .init_client = nfs4_init_client,
8925 .free_client = nfs4_free_client,
8926 .create_server = nfs4_create_server,
8927 .clone_server = nfs_clone_server,
8928 };
8929
8930 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
8931 .name = XATTR_NAME_NFSV4_ACL,
8932 .list = nfs4_xattr_list_nfs4_acl,
8933 .get = nfs4_xattr_get_nfs4_acl,
8934 .set = nfs4_xattr_set_nfs4_acl,
8935 };
8936
8937 const struct xattr_handler *nfs4_xattr_handlers[] = {
8938 &nfs4_xattr_nfs4_acl_handler,
8939 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
8940 &nfs4_xattr_nfs4_label_handler,
8941 #endif
8942 NULL
8943 };
8944
8945 /*
8946 * Local variables:
8947 * c-basic-offset: 8
8948 * End:
8949 */