1 /*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/file.h>
42 #include <linux/string.h>
43 #include <linux/ratelimit.h>
44 #include <linux/printk.h>
45 #include <linux/slab.h>
46 #include <linux/sunrpc/clnt.h>
47 #include <linux/nfs.h>
48 #include <linux/nfs4.h>
49 #include <linux/nfs_fs.h>
50 #include <linux/nfs_page.h>
51 #include <linux/nfs_mount.h>
52 #include <linux/namei.h>
53 #include <linux/mount.h>
54 #include <linux/module.h>
55 #include <linux/xattr.h>
56 #include <linux/utsname.h>
57 #include <linux/freezer.h>
58
59 #include "nfs4_fs.h"
60 #include "delegation.h"
61 #include "internal.h"
62 #include "iostat.h"
63 #include "callback.h"
64 #include "pnfs.h"
65 #include "netns.h"
66 #include "nfs4idmap.h"
67 #include "nfs4session.h"
68 #include "fscache.h"
69
70 #include "nfs4trace.h"
71
72 #define NFSDBG_FACILITY NFSDBG_PROC
73
74 #define NFS4_POLL_RETRY_MIN (HZ/10)
75 #define NFS4_POLL_RETRY_MAX (15*HZ)
76
77 struct nfs4_opendata;
78 static int _nfs4_proc_open(struct nfs4_opendata *data);
79 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
80 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
81 static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *, long *);
82 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
83 static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *label);
84 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label);
85 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
86 struct nfs_fattr *fattr, struct iattr *sattr,
87 struct nfs4_state *state, struct nfs4_label *ilabel,
88 struct nfs4_label *olabel);
89 #ifdef CONFIG_NFS_V4_1
90 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
91 struct rpc_cred *);
92 static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
93 struct rpc_cred *);
94 #endif
95
96 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
97 static inline struct nfs4_label *
98 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
99 struct iattr *sattr, struct nfs4_label *label)
100 {
101 int err;
102
103 if (label == NULL)
104 return NULL;
105
106 if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
107 return NULL;
108
109 err = security_dentry_init_security(dentry, sattr->ia_mode,
110 &dentry->d_name, (void **)&label->label, &label->len);
111 if (err == 0)
112 return label;
113
114 return NULL;
115 }
116 static inline void
117 nfs4_label_release_security(struct nfs4_label *label)
118 {
119 if (label)
120 security_release_secctx(label->label, label->len);
121 }
122 static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
123 {
124 if (label)
125 return server->attr_bitmask;
126
127 return server->attr_bitmask_nl;
128 }
129 #else
130 static inline struct nfs4_label *
131 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
132 struct iattr *sattr, struct nfs4_label *l)
133 { return NULL; }
134 static inline void
135 nfs4_label_release_security(struct nfs4_label *label)
136 { return; }
137 static inline u32 *
138 nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
139 { return server->attr_bitmask; }
140 #endif
141
142 /* Prevent leaks of NFSv4 errors into userland */
143 static int nfs4_map_errors(int err)
144 {
145 if (err >= -1000)
146 return err;
147 switch (err) {
148 case -NFS4ERR_RESOURCE:
149 case -NFS4ERR_LAYOUTTRYLATER:
150 case -NFS4ERR_RECALLCONFLICT:
151 return -EREMOTEIO;
152 case -NFS4ERR_WRONGSEC:
153 case -NFS4ERR_WRONG_CRED:
154 return -EPERM;
155 case -NFS4ERR_BADOWNER:
156 case -NFS4ERR_BADNAME:
157 return -EINVAL;
158 case -NFS4ERR_SHARE_DENIED:
159 return -EACCES;
160 case -NFS4ERR_MINOR_VERS_MISMATCH:
161 return -EPROTONOSUPPORT;
162 case -NFS4ERR_FILE_OPEN:
163 return -EBUSY;
164 default:
165 dprintk("%s could not handle NFSv4 error %d\n",
166 __func__, -err);
167 break;
168 }
169 return -EIO;
170 }
171
172 /*
173 * This is our standard bitmap for GETATTR requests.
174 */
175 const u32 nfs4_fattr_bitmap[3] = {
176 FATTR4_WORD0_TYPE
177 | FATTR4_WORD0_CHANGE
178 | FATTR4_WORD0_SIZE
179 | FATTR4_WORD0_FSID
180 | FATTR4_WORD0_FILEID,
181 FATTR4_WORD1_MODE
182 | FATTR4_WORD1_NUMLINKS
183 | FATTR4_WORD1_OWNER
184 | FATTR4_WORD1_OWNER_GROUP
185 | FATTR4_WORD1_RAWDEV
186 | FATTR4_WORD1_SPACE_USED
187 | FATTR4_WORD1_TIME_ACCESS
188 | FATTR4_WORD1_TIME_METADATA
189 | FATTR4_WORD1_TIME_MODIFY
190 | FATTR4_WORD1_MOUNTED_ON_FILEID,
191 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
192 FATTR4_WORD2_SECURITY_LABEL
193 #endif
194 };
195
196 static const u32 nfs4_pnfs_open_bitmap[3] = {
197 FATTR4_WORD0_TYPE
198 | FATTR4_WORD0_CHANGE
199 | FATTR4_WORD0_SIZE
200 | FATTR4_WORD0_FSID
201 | FATTR4_WORD0_FILEID,
202 FATTR4_WORD1_MODE
203 | FATTR4_WORD1_NUMLINKS
204 | FATTR4_WORD1_OWNER
205 | FATTR4_WORD1_OWNER_GROUP
206 | FATTR4_WORD1_RAWDEV
207 | FATTR4_WORD1_SPACE_USED
208 | FATTR4_WORD1_TIME_ACCESS
209 | FATTR4_WORD1_TIME_METADATA
210 | FATTR4_WORD1_TIME_MODIFY,
211 FATTR4_WORD2_MDSTHRESHOLD
212 };
213
214 static const u32 nfs4_open_noattr_bitmap[3] = {
215 FATTR4_WORD0_TYPE
216 | FATTR4_WORD0_CHANGE
217 | FATTR4_WORD0_FILEID,
218 };
219
220 const u32 nfs4_statfs_bitmap[3] = {
221 FATTR4_WORD0_FILES_AVAIL
222 | FATTR4_WORD0_FILES_FREE
223 | FATTR4_WORD0_FILES_TOTAL,
224 FATTR4_WORD1_SPACE_AVAIL
225 | FATTR4_WORD1_SPACE_FREE
226 | FATTR4_WORD1_SPACE_TOTAL
227 };
228
229 const u32 nfs4_pathconf_bitmap[3] = {
230 FATTR4_WORD0_MAXLINK
231 | FATTR4_WORD0_MAXNAME,
232 0
233 };
234
235 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
236 | FATTR4_WORD0_MAXREAD
237 | FATTR4_WORD0_MAXWRITE
238 | FATTR4_WORD0_LEASE_TIME,
239 FATTR4_WORD1_TIME_DELTA
240 | FATTR4_WORD1_FS_LAYOUT_TYPES,
241 FATTR4_WORD2_LAYOUT_BLKSIZE
242 };
243
244 const u32 nfs4_fs_locations_bitmap[3] = {
245 FATTR4_WORD0_TYPE
246 | FATTR4_WORD0_CHANGE
247 | FATTR4_WORD0_SIZE
248 | FATTR4_WORD0_FSID
249 | FATTR4_WORD0_FILEID
250 | FATTR4_WORD0_FS_LOCATIONS,
251 FATTR4_WORD1_MODE
252 | FATTR4_WORD1_NUMLINKS
253 | FATTR4_WORD1_OWNER
254 | FATTR4_WORD1_OWNER_GROUP
255 | FATTR4_WORD1_RAWDEV
256 | FATTR4_WORD1_SPACE_USED
257 | FATTR4_WORD1_TIME_ACCESS
258 | FATTR4_WORD1_TIME_METADATA
259 | FATTR4_WORD1_TIME_MODIFY
260 | FATTR4_WORD1_MOUNTED_ON_FILEID,
261 };
262
263 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
264 struct nfs4_readdir_arg *readdir)
265 {
266 __be32 *start, *p;
267
268 if (cookie > 2) {
269 readdir->cookie = cookie;
270 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
271 return;
272 }
273
274 readdir->cookie = 0;
275 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
276 if (cookie == 2)
277 return;
278
279 /*
280 * NFSv4 servers do not return entries for '.' and '..'
281 * Therefore, we fake these entries here. We let '.'
282 * have cookie 0 and '..' have cookie 1. Note that
283 * when talking to the server, we always send cookie 0
284 * instead of 1 or 2.
285 */
286 start = p = kmap_atomic(*readdir->pages);
287
288 if (cookie == 0) {
289 *p++ = xdr_one; /* next */
290 *p++ = xdr_zero; /* cookie, first word */
291 *p++ = xdr_one; /* cookie, second word */
292 *p++ = xdr_one; /* entry len */
293 memcpy(p, ".\0\0\0", 4); /* entry */
294 p++;
295 *p++ = xdr_one; /* bitmap length */
296 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
297 *p++ = htonl(8); /* attribute buffer length */
298 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
299 }
300
301 *p++ = xdr_one; /* next */
302 *p++ = xdr_zero; /* cookie, first word */
303 *p++ = xdr_two; /* cookie, second word */
304 *p++ = xdr_two; /* entry len */
305 memcpy(p, "..\0\0", 4); /* entry */
306 p++;
307 *p++ = xdr_one; /* bitmap length */
308 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
309 *p++ = htonl(8); /* attribute buffer length */
310 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
311
312 readdir->pgbase = (char *)p - (char *)start;
313 readdir->count -= readdir->pgbase;
314 kunmap_atomic(start);
315 }
316
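/*
 * Exponential backoff helper: clamp *timeout to the
 * [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX] window, return the
 * current value and double it for the next retry.
 */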
317 static long nfs4_update_delay(long *timeout)
318 {
319 long ret;
320 if (!timeout)
321 return NFS4_POLL_RETRY_MAX;
322 if (*timeout <= 0)
323 *timeout = NFS4_POLL_RETRY_MIN;
324 if (*timeout > NFS4_POLL_RETRY_MAX)
325 *timeout = NFS4_POLL_RETRY_MAX;
326 ret = *timeout;
327 *timeout <<= 1;
328 return ret;
329 }
330
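/*
 * Sleep for the delay computed by nfs4_update_delay() in a freezable,
 * killable wait; return -ERESTARTSYS if a fatal signal arrived.
 */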
331 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
332 {
333 int res = 0;
334
335 might_sleep();
336
337 freezable_schedule_timeout_killable_unsafe(
338 nfs4_update_delay(timeout));
339 if (fatal_signal_pending(current))
340 res = -ERESTARTSYS;
341 return res;
342 }
343
344 /* This is the error handling routine for processes that are allowed
345 * to sleep.
346 */
347 int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
348 {
349 struct nfs_client *clp = server->nfs_client;
350 struct nfs4_state *state = exception->state;
351 struct inode *inode = exception->inode;
352 int ret = errorcode;
353
354 exception->retry = 0;
355 switch(errorcode) {
356 case 0:
357 return 0;
358 case -NFS4ERR_OPENMODE:
359 case -NFS4ERR_DELEG_REVOKED:
360 case -NFS4ERR_ADMIN_REVOKED:
361 case -NFS4ERR_BAD_STATEID:
362 if (inode && nfs4_have_delegation(inode, FMODE_READ)) {
363 nfs4_inode_return_delegation(inode);
364 exception->retry = 1;
365 return 0;
366 }
367 if (state == NULL)
368 break;
369 ret = nfs4_schedule_stateid_recovery(server, state);
370 if (ret < 0)
371 break;
372 goto wait_on_recovery;
373 case -NFS4ERR_EXPIRED:
374 if (state != NULL) {
375 ret = nfs4_schedule_stateid_recovery(server, state);
376 if (ret < 0)
377 break;
378 }
379 case -NFS4ERR_STALE_STATEID:
380 case -NFS4ERR_STALE_CLIENTID:
381 nfs4_schedule_lease_recovery(clp);
382 goto wait_on_recovery;
383 case -NFS4ERR_MOVED:
384 ret = nfs4_schedule_migration_recovery(server);
385 if (ret < 0)
386 break;
387 goto wait_on_recovery;
388 case -NFS4ERR_LEASE_MOVED:
389 nfs4_schedule_lease_moved_recovery(clp);
390 goto wait_on_recovery;
391 #if defined(CONFIG_NFS_V4_1)
392 case -NFS4ERR_BADSESSION:
393 case -NFS4ERR_BADSLOT:
394 case -NFS4ERR_BAD_HIGH_SLOT:
395 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
396 case -NFS4ERR_DEADSESSION:
397 case -NFS4ERR_SEQ_FALSE_RETRY:
398 case -NFS4ERR_SEQ_MISORDERED:
399 dprintk("%s ERROR: %d Reset session\n", __func__,
400 errorcode);
401 nfs4_schedule_session_recovery(clp->cl_session, errorcode);
402 goto wait_on_recovery;
403 #endif /* defined(CONFIG_NFS_V4_1) */
404 case -NFS4ERR_FILE_OPEN:
405 if (exception->timeout > HZ) {
406 /* We have retried a decent amount, time to
407 * fail
408 */
409 ret = -EBUSY;
410 break;
411 }
412 case -NFS4ERR_GRACE:
413 case -NFS4ERR_DELAY:
414 ret = nfs4_delay(server->client, &exception->timeout);
415 if (ret != 0)
416 break;
417 case -NFS4ERR_RETRY_UNCACHED_REP:
418 case -NFS4ERR_OLD_STATEID:
419 exception->retry = 1;
420 break;
421 case -NFS4ERR_BADOWNER:
422 /* The following works around a Linux server bug! */
423 case -NFS4ERR_BADNAME:
424 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
425 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
426 exception->retry = 1;
427 printk(KERN_WARNING "NFS: v4 server %s "
428 "does not accept raw "
429 "uid/gids. "
430 "Reenabling the idmapper.\n",
431 server->nfs_client->cl_hostname);
432 }
433 }
434 /* We failed to handle the error */
435 return nfs4_map_errors(ret);
436 wait_on_recovery:
437 ret = nfs4_wait_clnt_recover(clp);
438 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
439 return -EIO;
440 if (ret == 0)
441 exception->retry = 1;
442 return ret;
443 }
444
445 /*
446 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
447 * or 'false' otherwise.
448 */
449 static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
450 {
451 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
452
453 if (flavor == RPC_AUTH_GSS_KRB5I ||
454 flavor == RPC_AUTH_GSS_KRB5P)
455 return true;
456
457 return false;
458 }
459
460 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
461 {
462 spin_lock(&clp->cl_lock);
463 if (time_before(clp->cl_last_renewal,timestamp))
464 clp->cl_last_renewal = timestamp;
465 spin_unlock(&clp->cl_lock);
466 }
467
468 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
469 {
470 struct nfs_client *clp = server->nfs_client;
471
472 if (!nfs4_has_session(clp))
473 do_renew_lease(clp, timestamp);
474 }
475
476 struct nfs4_call_sync_data {
477 const struct nfs_server *seq_server;
478 struct nfs4_sequence_args *seq_args;
479 struct nfs4_sequence_res *seq_res;
480 };
481
482 void nfs4_init_sequence(struct nfs4_sequence_args *args,
483 struct nfs4_sequence_res *res, int cache_reply)
484 {
485 args->sa_slot = NULL;
486 args->sa_cache_this = cache_reply;
487 args->sa_privileged = 0;
488
489 res->sr_slot = NULL;
490 }
491
492 static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
493 {
494 args->sa_privileged = 1;
495 }
496
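/*
 * Allocate a slot from the NFSv4.0 slot table and attach it to the
 * sequence arguments and results, or put the task to sleep on the
 * slot table waitqueue if no slot is available or the table is
 * draining (privileged tasks are exempt from the draining check).
 */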
497 int nfs40_setup_sequence(struct nfs4_slot_table *tbl,
498 struct nfs4_sequence_args *args,
499 struct nfs4_sequence_res *res,
500 struct rpc_task *task)
501 {
502 struct nfs4_slot *slot;
503
504 /* slot already allocated? */
505 if (res->sr_slot != NULL)
506 goto out_start;
507
508 spin_lock(&tbl->slot_tbl_lock);
509 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
510 goto out_sleep;
511
512 slot = nfs4_alloc_slot(tbl);
513 if (IS_ERR(slot)) {
514 if (slot == ERR_PTR(-ENOMEM))
515 task->tk_timeout = HZ >> 2;
516 goto out_sleep;
517 }
518 spin_unlock(&tbl->slot_tbl_lock);
519
520 args->sa_slot = slot;
521 res->sr_slot = slot;
522
523 out_start:
524 rpc_call_start(task);
525 return 0;
526
527 out_sleep:
528 if (args->sa_privileged)
529 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
530 NULL, RPC_PRIORITY_PRIVILEGED);
531 else
532 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
533 spin_unlock(&tbl->slot_tbl_lock);
534 return -EAGAIN;
535 }
536 EXPORT_SYMBOL_GPL(nfs40_setup_sequence);
537
538 static int nfs40_sequence_done(struct rpc_task *task,
539 struct nfs4_sequence_res *res)
540 {
541 struct nfs4_slot *slot = res->sr_slot;
542 struct nfs4_slot_table *tbl;
543
544 if (slot == NULL)
545 goto out;
546
547 tbl = slot->table;
548 spin_lock(&tbl->slot_tbl_lock);
549 if (!nfs41_wake_and_assign_slot(tbl, slot))
550 nfs4_free_slot(tbl, slot);
551 spin_unlock(&tbl->slot_tbl_lock);
552
553 res->sr_slot = NULL;
554 out:
555 return 1;
556 }
557
558 #if defined(CONFIG_NFS_V4_1)
559
560 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
561 {
562 struct nfs4_session *session;
563 struct nfs4_slot_table *tbl;
564 struct nfs4_slot *slot = res->sr_slot;
565 bool send_new_highest_used_slotid = false;
566
567 tbl = slot->table;
568 session = tbl->session;
569
570 spin_lock(&tbl->slot_tbl_lock);
571 /* Be nice to the server: try to ensure that the last transmitted
572 	 * value for highest_used_slotid <= target_highest_slotid
573 */
574 if (tbl->highest_used_slotid > tbl->target_highest_slotid)
575 send_new_highest_used_slotid = true;
576
577 if (nfs41_wake_and_assign_slot(tbl, slot)) {
578 send_new_highest_used_slotid = false;
579 goto out_unlock;
580 }
581 nfs4_free_slot(tbl, slot);
582
583 if (tbl->highest_used_slotid != NFS4_NO_SLOT)
584 send_new_highest_used_slotid = false;
585 out_unlock:
586 spin_unlock(&tbl->slot_tbl_lock);
587 res->sr_slot = NULL;
588 if (send_new_highest_used_slotid)
589 nfs41_server_notify_highest_slotid_update(session->clp);
590 }
591
592 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
593 {
594 struct nfs4_session *session;
595 struct nfs4_slot *slot = res->sr_slot;
596 struct nfs_client *clp;
597 bool interrupted = false;
598 int ret = 1;
599
600 if (slot == NULL)
601 goto out_noaction;
602 /* don't increment the sequence number if the task wasn't sent */
603 if (!RPC_WAS_SENT(task))
604 goto out;
605
606 session = slot->table->session;
607
608 if (slot->interrupted) {
609 slot->interrupted = 0;
610 interrupted = true;
611 }
612
613 trace_nfs4_sequence_done(session, res);
614 /* Check the SEQUENCE operation status */
615 switch (res->sr_status) {
616 case 0:
617 /* Update the slot's sequence and clientid lease timer */
618 ++slot->seq_nr;
619 clp = session->clp;
620 do_renew_lease(clp, res->sr_timestamp);
621 /* Check sequence flags */
622 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
623 nfs41_update_target_slotid(slot->table, slot, res);
624 break;
625 case 1:
626 /*
627 * sr_status remains 1 if an RPC level error occurred.
628 * The server may or may not have processed the sequence
629 	 * operation.
630 * Mark the slot as having hosted an interrupted RPC call.
631 */
632 slot->interrupted = 1;
633 goto out;
634 case -NFS4ERR_DELAY:
635 /* The server detected a resend of the RPC call and
636 * returned NFS4ERR_DELAY as per Section 2.10.6.2
637 * of RFC5661.
638 */
639 dprintk("%s: slot=%u seq=%u: Operation in progress\n",
640 __func__,
641 slot->slot_nr,
642 slot->seq_nr);
643 goto out_retry;
644 case -NFS4ERR_BADSLOT:
645 /*
646 * The slot id we used was probably retired. Try again
647 * using a different slot id.
648 */
649 goto retry_nowait;
650 case -NFS4ERR_SEQ_MISORDERED:
651 /*
652 * Was the last operation on this sequence interrupted?
653 * If so, retry after bumping the sequence number.
654 */
655 if (interrupted) {
656 ++slot->seq_nr;
657 goto retry_nowait;
658 }
659 /*
660 * Could this slot have been previously retired?
661 * If so, then the server may be expecting seq_nr = 1!
662 */
663 if (slot->seq_nr != 1) {
664 slot->seq_nr = 1;
665 goto retry_nowait;
666 }
667 break;
668 case -NFS4ERR_SEQ_FALSE_RETRY:
669 ++slot->seq_nr;
670 goto retry_nowait;
671 default:
672 /* Just update the slot sequence no. */
673 ++slot->seq_nr;
674 }
675 out:
676 /* The session may be reset by one of the error handlers. */
677 	dprintk("%s: Error %d free the slot\n", __func__, res->sr_status);
678 nfs41_sequence_free_slot(res);
679 out_noaction:
680 return ret;
681 retry_nowait:
682 if (rpc_restart_call_prepare(task)) {
683 task->tk_status = 0;
684 ret = 0;
685 }
686 goto out;
687 out_retry:
688 if (!rpc_restart_call(task))
689 goto out;
690 rpc_delay(task, NFS4_POLL_RETRY_MAX);
691 return 0;
692 }
693 EXPORT_SYMBOL_GPL(nfs41_sequence_done);
694
695 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
696 {
697 if (res->sr_slot == NULL)
698 return 1;
699 if (!res->sr_slot->table->session)
700 return nfs40_sequence_done(task, res);
701 return nfs41_sequence_done(task, res);
702 }
703 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
704
705 int nfs41_setup_sequence(struct nfs4_session *session,
706 struct nfs4_sequence_args *args,
707 struct nfs4_sequence_res *res,
708 struct rpc_task *task)
709 {
710 struct nfs4_slot *slot;
711 struct nfs4_slot_table *tbl;
712
713 dprintk("--> %s\n", __func__);
714 /* slot already allocated? */
715 if (res->sr_slot != NULL)
716 goto out_success;
717
718 tbl = &session->fc_slot_table;
719
720 task->tk_timeout = 0;
721
722 spin_lock(&tbl->slot_tbl_lock);
723 if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) &&
724 !args->sa_privileged) {
725 /* The state manager will wait until the slot table is empty */
726 dprintk("%s session is draining\n", __func__);
727 goto out_sleep;
728 }
729
730 slot = nfs4_alloc_slot(tbl);
731 if (IS_ERR(slot)) {
732 /* If out of memory, try again in 1/4 second */
733 if (slot == ERR_PTR(-ENOMEM))
734 task->tk_timeout = HZ >> 2;
735 dprintk("<-- %s: no free slots\n", __func__);
736 goto out_sleep;
737 }
738 spin_unlock(&tbl->slot_tbl_lock);
739
740 args->sa_slot = slot;
741
742 dprintk("<-- %s slotid=%u seqid=%u\n", __func__,
743 slot->slot_nr, slot->seq_nr);
744
745 res->sr_slot = slot;
746 res->sr_timestamp = jiffies;
747 res->sr_status_flags = 0;
748 /*
749 * sr_status is only set in decode_sequence, and so will remain
750 * set to 1 if an rpc level failure occurs.
751 */
752 res->sr_status = 1;
753 trace_nfs4_setup_sequence(session, args);
754 out_success:
755 rpc_call_start(task);
756 return 0;
757 out_sleep:
758 /* Privileged tasks are queued with top priority */
759 if (args->sa_privileged)
760 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
761 NULL, RPC_PRIORITY_PRIVILEGED);
762 else
763 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
764 spin_unlock(&tbl->slot_tbl_lock);
765 return -EAGAIN;
766 }
767 EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
768
769 static int nfs4_setup_sequence(const struct nfs_server *server,
770 struct nfs4_sequence_args *args,
771 struct nfs4_sequence_res *res,
772 struct rpc_task *task)
773 {
774 struct nfs4_session *session = nfs4_get_session(server);
775 int ret = 0;
776
777 if (!session)
778 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
779 args, res, task);
780
781 dprintk("--> %s clp %p session %p sr_slot %u\n",
782 __func__, session->clp, session, res->sr_slot ?
783 res->sr_slot->slot_nr : NFS4_NO_SLOT);
784
785 ret = nfs41_setup_sequence(session, args, res, task);
786
787 dprintk("<-- %s status=%d\n", __func__, ret);
788 return ret;
789 }
790
791 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
792 {
793 struct nfs4_call_sync_data *data = calldata;
794 struct nfs4_session *session = nfs4_get_session(data->seq_server);
795
796 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
797
798 nfs41_setup_sequence(session, data->seq_args, data->seq_res, task);
799 }
800
801 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
802 {
803 struct nfs4_call_sync_data *data = calldata;
804
805 nfs41_sequence_done(task, data->seq_res);
806 }
807
808 static const struct rpc_call_ops nfs41_call_sync_ops = {
809 .rpc_call_prepare = nfs41_call_sync_prepare,
810 .rpc_call_done = nfs41_call_sync_done,
811 };
812
813 #else /* !CONFIG_NFS_V4_1 */
814
815 static int nfs4_setup_sequence(const struct nfs_server *server,
816 struct nfs4_sequence_args *args,
817 struct nfs4_sequence_res *res,
818 struct rpc_task *task)
819 {
820 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
821 args, res, task);
822 }
823
824 int nfs4_sequence_done(struct rpc_task *task,
825 struct nfs4_sequence_res *res)
826 {
827 return nfs40_sequence_done(task, res);
828 }
829 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
830
831 #endif /* !CONFIG_NFS_V4_1 */
832
833 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
834 {
835 struct nfs4_call_sync_data *data = calldata;
836 nfs4_setup_sequence(data->seq_server,
837 data->seq_args, data->seq_res, task);
838 }
839
840 static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
841 {
842 struct nfs4_call_sync_data *data = calldata;
843 nfs4_sequence_done(task, data->seq_res);
844 }
845
846 static const struct rpc_call_ops nfs40_call_sync_ops = {
847 .rpc_call_prepare = nfs40_call_sync_prepare,
848 .rpc_call_done = nfs40_call_sync_done,
849 };
850
851 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
852 struct nfs_server *server,
853 struct rpc_message *msg,
854 struct nfs4_sequence_args *args,
855 struct nfs4_sequence_res *res)
856 {
857 int ret;
858 struct rpc_task *task;
859 struct nfs_client *clp = server->nfs_client;
860 struct nfs4_call_sync_data data = {
861 .seq_server = server,
862 .seq_args = args,
863 .seq_res = res,
864 };
865 struct rpc_task_setup task_setup = {
866 .rpc_client = clnt,
867 .rpc_message = msg,
868 .callback_ops = clp->cl_mvops->call_sync_ops,
869 .callback_data = &data
870 };
871
872 task = rpc_run_task(&task_setup);
873 if (IS_ERR(task))
874 ret = PTR_ERR(task);
875 else {
876 ret = task->tk_status;
877 rpc_put_task(task);
878 }
879 return ret;
880 }
881
882 int nfs4_call_sync(struct rpc_clnt *clnt,
883 struct nfs_server *server,
884 struct rpc_message *msg,
885 struct nfs4_sequence_args *args,
886 struct nfs4_sequence_res *res,
887 int cache_reply)
888 {
889 nfs4_init_sequence(args, res, cache_reply);
890 return nfs4_call_sync_sequence(clnt, server, msg, args, res);
891 }
892
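/*
 * A directory-modifying operation succeeded: invalidate the directory's
 * attribute and data caches, update i_version from the returned
 * change_info4, and force a lookup revalidation unless the change was
 * atomic and the pre-operation version still matches.
 */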
893 static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
894 {
895 struct nfs_inode *nfsi = NFS_I(dir);
896
897 spin_lock(&dir->i_lock);
898 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
899 if (!cinfo->atomic || cinfo->before != dir->i_version)
900 nfs_force_lookup_revalidate(dir);
901 dir->i_version = cinfo->after;
902 nfsi->attr_gencount = nfs_inc_attr_generation_counter();
903 nfs_fscache_invalidate(dir);
904 spin_unlock(&dir->i_lock);
905 }
906
907 struct nfs4_opendata {
908 struct kref kref;
909 struct nfs_openargs o_arg;
910 struct nfs_openres o_res;
911 struct nfs_open_confirmargs c_arg;
912 struct nfs_open_confirmres c_res;
913 struct nfs4_string owner_name;
914 struct nfs4_string group_name;
915 struct nfs4_label *a_label;
916 struct nfs_fattr f_attr;
917 struct nfs4_label *f_label;
918 struct dentry *dir;
919 struct dentry *dentry;
920 struct nfs4_state_owner *owner;
921 struct nfs4_state *state;
922 struct iattr attrs;
923 unsigned long timestamp;
924 unsigned int rpc_done : 1;
925 unsigned int file_created : 1;
926 unsigned int is_recover : 1;
927 int rpc_status;
928 int cancelled;
929 };
930
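/*
 * Work around servers that reject the NFSv4.1 atomic open extensions
 * with -EINVAL: drop NFS_CAP_ATOMIC_OPEN_V1 and ask the caller to retry
 * the open without them.
 */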
931 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
932 int err, struct nfs4_exception *exception)
933 {
934 if (err != -EINVAL)
935 return false;
936 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
937 return false;
938 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
939 exception->retry = 1;
940 return true;
941 }
942
943 static u32
944 nfs4_map_atomic_open_share(struct nfs_server *server,
945 fmode_t fmode, int openflags)
946 {
947 u32 res = 0;
948
949 switch (fmode & (FMODE_READ | FMODE_WRITE)) {
950 case FMODE_READ:
951 res = NFS4_SHARE_ACCESS_READ;
952 break;
953 case FMODE_WRITE:
954 res = NFS4_SHARE_ACCESS_WRITE;
955 break;
956 case FMODE_READ|FMODE_WRITE:
957 res = NFS4_SHARE_ACCESS_BOTH;
958 }
959 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
960 goto out;
961 /* Want no delegation if we're using O_DIRECT */
962 if (openflags & O_DIRECT)
963 res |= NFS4_SHARE_WANT_NO_DELEG;
964 out:
965 return res;
966 }
967
968 static enum open_claim_type4
969 nfs4_map_atomic_open_claim(struct nfs_server *server,
970 enum open_claim_type4 claim)
971 {
972 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
973 return claim;
974 switch (claim) {
975 default:
976 return claim;
977 case NFS4_OPEN_CLAIM_FH:
978 return NFS4_OPEN_CLAIM_NULL;
979 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
980 return NFS4_OPEN_CLAIM_DELEGATE_CUR;
981 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
982 return NFS4_OPEN_CLAIM_DELEGATE_PREV;
983 }
984 }
985
986 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
987 {
988 p->o_res.f_attr = &p->f_attr;
989 p->o_res.f_label = p->f_label;
990 p->o_res.seqid = p->o_arg.seqid;
991 p->c_res.seqid = p->c_arg.seqid;
992 p->o_res.server = p->o_arg.server;
993 p->o_res.access_request = p->o_arg.access;
994 nfs_fattr_init(&p->f_attr);
995 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
996 }
997
998 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
999 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
1000 const struct iattr *attrs,
1001 struct nfs4_label *label,
1002 enum open_claim_type4 claim,
1003 gfp_t gfp_mask)
1004 {
1005 struct dentry *parent = dget_parent(dentry);
1006 struct inode *dir = d_inode(parent);
1007 struct nfs_server *server = NFS_SERVER(dir);
1008 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
1009 struct nfs4_opendata *p;
1010
1011 p = kzalloc(sizeof(*p), gfp_mask);
1012 if (p == NULL)
1013 goto err;
1014
1015 p->f_label = nfs4_label_alloc(server, gfp_mask);
1016 if (IS_ERR(p->f_label))
1017 goto err_free_p;
1018
1019 p->a_label = nfs4_label_alloc(server, gfp_mask);
1020 if (IS_ERR(p->a_label))
1021 goto err_free_f;
1022
1023 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1024 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1025 if (IS_ERR(p->o_arg.seqid))
1026 goto err_free_label;
1027 nfs_sb_active(dentry->d_sb);
1028 p->dentry = dget(dentry);
1029 p->dir = parent;
1030 p->owner = sp;
1031 atomic_inc(&sp->so_count);
1032 p->o_arg.open_flags = flags;
1033 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
1034 p->o_arg.share_access = nfs4_map_atomic_open_share(server,
1035 fmode, flags);
1036 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
1037 * will return permission denied for all bits until close */
1038 if (!(flags & O_EXCL)) {
1039 /* ask server to check for all possible rights as results
1040 * are cached */
1041 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
1042 NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
1043 }
1044 p->o_arg.clientid = server->nfs_client->cl_clientid;
1045 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
1046 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
1047 p->o_arg.name = &dentry->d_name;
1048 p->o_arg.server = server;
1049 p->o_arg.bitmask = nfs4_bitmask(server, label);
1050 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1051 p->o_arg.label = nfs4_label_copy(p->a_label, label);
1052 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1053 switch (p->o_arg.claim) {
1054 case NFS4_OPEN_CLAIM_NULL:
1055 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1056 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1057 p->o_arg.fh = NFS_FH(dir);
1058 break;
1059 case NFS4_OPEN_CLAIM_PREVIOUS:
1060 case NFS4_OPEN_CLAIM_FH:
1061 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1062 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1063 p->o_arg.fh = NFS_FH(d_inode(dentry));
1064 }
1065 if (attrs != NULL && attrs->ia_valid != 0) {
1066 __u32 verf[2];
1067
1068 p->o_arg.u.attrs = &p->attrs;
1069 memcpy(&p->attrs, attrs, sizeof(p->attrs));
1070
1071 verf[0] = jiffies;
1072 verf[1] = current->pid;
1073 memcpy(p->o_arg.u.verifier.data, verf,
1074 sizeof(p->o_arg.u.verifier.data));
1075 }
1076 p->c_arg.fh = &p->o_res.fh;
1077 p->c_arg.stateid = &p->o_res.stateid;
1078 p->c_arg.seqid = p->o_arg.seqid;
1079 nfs4_init_opendata_res(p);
1080 kref_init(&p->kref);
1081 return p;
1082
1083 err_free_label:
1084 nfs4_label_free(p->a_label);
1085 err_free_f:
1086 nfs4_label_free(p->f_label);
1087 err_free_p:
1088 kfree(p);
1089 err:
1090 dput(parent);
1091 return NULL;
1092 }
1093
1094 static void nfs4_opendata_free(struct kref *kref)
1095 {
1096 struct nfs4_opendata *p = container_of(kref,
1097 struct nfs4_opendata, kref);
1098 struct super_block *sb = p->dentry->d_sb;
1099
1100 nfs_free_seqid(p->o_arg.seqid);
1101 if (p->state != NULL)
1102 nfs4_put_open_state(p->state);
1103 nfs4_put_state_owner(p->owner);
1104
1105 nfs4_label_free(p->a_label);
1106 nfs4_label_free(p->f_label);
1107
1108 dput(p->dir);
1109 dput(p->dentry);
1110 nfs_sb_deactive(sb);
1111 nfs_fattr_free_names(&p->f_attr);
1112 kfree(p->f_attr.mdsthreshold);
1113 kfree(p);
1114 }
1115
1116 static void nfs4_opendata_put(struct nfs4_opendata *p)
1117 {
1118 if (p != NULL)
1119 kref_put(&p->kref, nfs4_opendata_free);
1120 }
1121
1122 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
1123 {
1124 int ret;
1125
1126 ret = rpc_wait_for_completion_task(task);
1127 return ret;
1128 }
1129
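/*
 * Return non-zero if an existing open state already covers the
 * requested open mode, so the OPEN RPC can be skipped. O_EXCL and
 * O_TRUNC always force a trip to the server.
 */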
1130 static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
1131 {
1132 int ret = 0;
1133
1134 if (open_mode & (O_EXCL|O_TRUNC))
1135 goto out;
1136 switch (mode & (FMODE_READ|FMODE_WRITE)) {
1137 case FMODE_READ:
1138 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
1139 && state->n_rdonly != 0;
1140 break;
1141 case FMODE_WRITE:
1142 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
1143 && state->n_wronly != 0;
1144 break;
1145 case FMODE_READ|FMODE_WRITE:
1146 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
1147 && state->n_rdwr != 0;
1148 }
1149 out:
1150 return ret;
1151 }
1152
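/*
 * Return non-zero if the cached delegation covers the requested open
 * mode and is not currently being returned to the server. The
 * delegation is marked as referenced on success.
 */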
1153 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
1154 {
1155 if (delegation == NULL)
1156 return 0;
1157 if ((delegation->type & fmode) != fmode)
1158 return 0;
1159 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
1160 return 0;
1161 nfs_mark_delegation_referenced(delegation);
1162 return 1;
1163 }
1164
1165 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
1166 {
1167 switch (fmode) {
1168 case FMODE_WRITE:
1169 state->n_wronly++;
1170 break;
1171 case FMODE_READ:
1172 state->n_rdonly++;
1173 break;
1174 case FMODE_READ|FMODE_WRITE:
1175 state->n_rdwr++;
1176 }
1177 nfs4_state_set_mode_locked(state, state->state | fmode);
1178 }
1179
1180 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
1181 {
1182 struct nfs_client *clp = state->owner->so_server->nfs_client;
1183 bool need_recover = false;
1184
1185 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
1186 need_recover = true;
1187 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
1188 need_recover = true;
1189 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
1190 need_recover = true;
1191 if (need_recover)
1192 nfs4_state_mark_reclaim_nograce(clp, state);
1193 }
1194
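/*
 * Decide whether the stateid returned by the server should replace the
 * cached open stateid: always on the first OPEN, when the "other" field
 * changed (after clearing the now-stale open mode flags), or when the
 * sequence number is newer.
 */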
1195 static bool nfs_need_update_open_stateid(struct nfs4_state *state,
1196 nfs4_stateid *stateid)
1197 {
1198 if (test_and_set_bit(NFS_OPEN_STATE, &state->flags) == 0)
1199 return true;
1200 if (!nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1201 nfs_test_and_clear_all_open_stateid(state);
1202 return true;
1203 }
1204 if (nfs4_stateid_is_newer(stateid, &state->open_stateid))
1205 return true;
1206 return false;
1207 }
1208
1209 static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1210 {
1211 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
1212 return;
1213 if (state->n_wronly)
1214 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1215 if (state->n_rdonly)
1216 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1217 if (state->n_rdwr)
1218 set_bit(NFS_O_RDWR_STATE, &state->flags);
1219 set_bit(NFS_OPEN_STATE, &state->flags);
1220 }
1221
1222 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1223 nfs4_stateid *stateid, fmode_t fmode)
1224 {
1225 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1226 switch (fmode & (FMODE_READ|FMODE_WRITE)) {
1227 case FMODE_WRITE:
1228 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1229 break;
1230 case FMODE_READ:
1231 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1232 break;
1233 case 0:
1234 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1235 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1236 clear_bit(NFS_OPEN_STATE, &state->flags);
1237 }
1238 if (stateid == NULL)
1239 return;
1240 /* Handle races with OPEN */
1241 if (!nfs4_stateid_match_other(stateid, &state->open_stateid) ||
1242 !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
1243 nfs_resync_open_stateid_locked(state);
1244 return;
1245 }
1246 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1247 nfs4_stateid_copy(&state->stateid, stateid);
1248 nfs4_stateid_copy(&state->open_stateid, stateid);
1249 }
1250
1251 static void nfs_clear_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
1252 {
1253 write_seqlock(&state->seqlock);
1254 nfs_clear_open_stateid_locked(state, stateid, fmode);
1255 write_sequnlock(&state->seqlock);
1256 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1257 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1258 }
1259
1260 static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
1261 {
1262 switch (fmode) {
1263 case FMODE_READ:
1264 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1265 break;
1266 case FMODE_WRITE:
1267 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1268 break;
1269 case FMODE_READ|FMODE_WRITE:
1270 set_bit(NFS_O_RDWR_STATE, &state->flags);
1271 }
1272 if (!nfs_need_update_open_stateid(state, stateid))
1273 return;
1274 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1275 nfs4_stateid_copy(&state->stateid, stateid);
1276 nfs4_stateid_copy(&state->open_stateid, stateid);
1277 }
1278
1279 static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
1280 {
1281 /*
1282 * Protect the call to nfs4_state_set_mode_locked and
1283 * serialise the stateid update
1284 */
1285 write_seqlock(&state->seqlock);
1286 if (deleg_stateid != NULL) {
1287 nfs4_stateid_copy(&state->stateid, deleg_stateid);
1288 set_bit(NFS_DELEGATED_STATE, &state->flags);
1289 }
1290 if (open_stateid != NULL)
1291 nfs_set_open_stateid_locked(state, open_stateid, fmode);
1292 write_sequnlock(&state->seqlock);
1293 spin_lock(&state->owner->so_lock);
1294 update_open_stateflags(state, fmode);
1295 spin_unlock(&state->owner->so_lock);
1296 }
1297
1298 static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
1299 {
1300 struct nfs_inode *nfsi = NFS_I(state->inode);
1301 struct nfs_delegation *deleg_cur;
1302 int ret = 0;
1303
1304 fmode &= (FMODE_READ|FMODE_WRITE);
1305
1306 rcu_read_lock();
1307 deleg_cur = rcu_dereference(nfsi->delegation);
1308 if (deleg_cur == NULL)
1309 goto no_delegation;
1310
1311 spin_lock(&deleg_cur->lock);
1312 if (rcu_dereference(nfsi->delegation) != deleg_cur ||
1313 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
1314 (deleg_cur->type & fmode) != fmode)
1315 goto no_delegation_unlock;
1316
1317 if (delegation == NULL)
1318 delegation = &deleg_cur->stateid;
1319 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
1320 goto no_delegation_unlock;
1321
1322 nfs_mark_delegation_referenced(deleg_cur);
1323 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
1324 ret = 1;
1325 no_delegation_unlock:
1326 spin_unlock(&deleg_cur->lock);
1327 no_delegation:
1328 rcu_read_unlock();
1329
1330 if (!ret && open_stateid != NULL) {
1331 __update_open_stateid(state, open_stateid, NULL, fmode);
1332 ret = 1;
1333 }
1334 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1335 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1336
1337 return ret;
1338 }
1339
1340 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
1341 const nfs4_stateid *stateid)
1342 {
1343 struct nfs4_state *state = lsp->ls_state;
1344 bool ret = false;
1345
1346 spin_lock(&state->state_lock);
1347 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
1348 goto out_noupdate;
1349 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
1350 goto out_noupdate;
1351 nfs4_stateid_copy(&lsp->ls_stateid, stateid);
1352 ret = true;
1353 out_noupdate:
1354 spin_unlock(&state->state_lock);
1355 return ret;
1356 }
1357
1358 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1359 {
1360 struct nfs_delegation *delegation;
1361
1362 rcu_read_lock();
1363 delegation = rcu_dereference(NFS_I(inode)->delegation);
1364 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1365 rcu_read_unlock();
1366 return;
1367 }
1368 rcu_read_unlock();
1369 nfs4_inode_return_delegation(inode);
1370 }
1371
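/*
 * Try to satisfy an OPEN from state we already hold: either an existing
 * open of a compatible mode, or a delegation that covers the requested
 * mode. Returns the nfs4_state with an extra reference on success, or
 * an ERR_PTR (usually -EAGAIN, meaning a real OPEN call is needed).
 */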
1372 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1373 {
1374 struct nfs4_state *state = opendata->state;
1375 struct nfs_inode *nfsi = NFS_I(state->inode);
1376 struct nfs_delegation *delegation;
1377 int open_mode = opendata->o_arg.open_flags;
1378 fmode_t fmode = opendata->o_arg.fmode;
1379 nfs4_stateid stateid;
1380 int ret = -EAGAIN;
1381
1382 for (;;) {
1383 spin_lock(&state->owner->so_lock);
1384 if (can_open_cached(state, fmode, open_mode)) {
1385 update_open_stateflags(state, fmode);
1386 spin_unlock(&state->owner->so_lock);
1387 goto out_return_state;
1388 }
1389 spin_unlock(&state->owner->so_lock);
1390 rcu_read_lock();
1391 delegation = rcu_dereference(nfsi->delegation);
1392 if (!can_open_delegated(delegation, fmode)) {
1393 rcu_read_unlock();
1394 break;
1395 }
1396 /* Save the delegation */
1397 nfs4_stateid_copy(&stateid, &delegation->stateid);
1398 rcu_read_unlock();
1399 nfs_release_seqid(opendata->o_arg.seqid);
1400 if (!opendata->is_recover) {
1401 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1402 if (ret != 0)
1403 goto out;
1404 }
1405 ret = -EAGAIN;
1406
1407 /* Try to update the stateid using the delegation */
1408 if (update_open_stateid(state, NULL, &stateid, fmode))
1409 goto out_return_state;
1410 }
1411 out:
1412 return ERR_PTR(ret);
1413 out_return_state:
1414 atomic_inc(&state->count);
1415 return state;
1416 }
1417
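/*
 * The OPEN reply handed us a delegation: record it against the inode
 * (or reclaim it after a server reboot), and warn about broken servers
 * that return a delegation for OPEN(CLAIM_DELEGATE_CUR).
 */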
1418 static void
1419 nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1420 {
1421 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
1422 struct nfs_delegation *delegation;
1423 int delegation_flags = 0;
1424
1425 rcu_read_lock();
1426 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1427 if (delegation)
1428 delegation_flags = delegation->flags;
1429 rcu_read_unlock();
1430 if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
1431 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1432 "returning a delegation for "
1433 "OPEN(CLAIM_DELEGATE_CUR)\n",
1434 clp->cl_hostname);
1435 } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1436 nfs_inode_set_delegation(state->inode,
1437 data->owner->so_cred,
1438 &data->o_res);
1439 else
1440 nfs_inode_reclaim_delegation(state->inode,
1441 data->owner->so_cred,
1442 &data->o_res);
1443 }
1444
1445 /*
1446 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
1447 * and update the nfs4_state.
1448 */
1449 static struct nfs4_state *
1450 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
1451 {
1452 struct inode *inode = data->state->inode;
1453 struct nfs4_state *state = data->state;
1454 int ret;
1455
1456 if (!data->rpc_done) {
1457 if (data->rpc_status) {
1458 ret = data->rpc_status;
1459 goto err;
1460 }
1461 /* cached opens have already been processed */
1462 goto update;
1463 }
1464
1465 ret = nfs_refresh_inode(inode, &data->f_attr);
1466 if (ret)
1467 goto err;
1468
1469 if (data->o_res.delegation_type != 0)
1470 nfs4_opendata_check_deleg(data, state);
1471 update:
1472 update_open_stateid(state, &data->o_res.stateid, NULL,
1473 data->o_arg.fmode);
1474 atomic_inc(&state->count);
1475
1476 return state;
1477 err:
1478 return ERR_PTR(ret);
1479
1480 }
1481
1482 static struct nfs4_state *
1483 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1484 {
1485 struct inode *inode;
1486 struct nfs4_state *state = NULL;
1487 int ret;
1488
1489 if (!data->rpc_done) {
1490 state = nfs4_try_open_cached(data);
1491 goto out;
1492 }
1493
1494 ret = -EAGAIN;
1495 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1496 goto err;
1497 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr, data->f_label);
1498 ret = PTR_ERR(inode);
1499 if (IS_ERR(inode))
1500 goto err;
1501 ret = -ENOMEM;
1502 state = nfs4_get_open_state(inode, data->owner);
1503 if (state == NULL)
1504 goto err_put_inode;
1505 if (data->o_res.delegation_type != 0)
1506 nfs4_opendata_check_deleg(data, state);
1507 update_open_stateid(state, &data->o_res.stateid, NULL,
1508 data->o_arg.fmode);
1509 iput(inode);
1510 out:
1511 nfs_release_seqid(data->o_arg.seqid);
1512 return state;
1513 err_put_inode:
1514 iput(inode);
1515 err:
1516 return ERR_PTR(ret);
1517 }
1518
1519 static struct nfs4_state *
1520 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1521 {
1522 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
1523 return _nfs4_opendata_reclaim_to_nfs4_state(data);
1524 return _nfs4_opendata_to_nfs4_state(data);
1525 }
1526
1527 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1528 {
1529 struct nfs_inode *nfsi = NFS_I(state->inode);
1530 struct nfs_open_context *ctx;
1531
1532 spin_lock(&state->inode->i_lock);
1533 list_for_each_entry(ctx, &nfsi->open_files, list) {
1534 if (ctx->state != state)
1535 continue;
1536 get_nfs_open_context(ctx);
1537 spin_unlock(&state->inode->i_lock);
1538 return ctx;
1539 }
1540 spin_unlock(&state->inode->i_lock);
1541 return ERR_PTR(-ENOENT);
1542 }
1543
1544 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
1545 struct nfs4_state *state, enum open_claim_type4 claim)
1546 {
1547 struct nfs4_opendata *opendata;
1548
1549 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
1550 NULL, NULL, claim, GFP_NOFS);
1551 if (opendata == NULL)
1552 return ERR_PTR(-ENOMEM);
1553 opendata->state = state;
1554 atomic_inc(&state->count);
1555 return opendata;
1556 }
1557
1558 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
1559 {
1560 struct nfs4_state *newstate;
1561 int ret;
1562
1563 if ((opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
1564 opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEG_CUR_FH) &&
1565 (opendata->o_arg.u.delegation_type & fmode) != fmode)
1566 /* This mode can't have been delegated, so we must have
1567 		 * a valid open_stateid to cover it - no need to reclaim.
1568 */
1569 return 0;
1570 opendata->o_arg.open_flags = 0;
1571 opendata->o_arg.fmode = fmode;
1572 opendata->o_arg.share_access = nfs4_map_atomic_open_share(
1573 NFS_SB(opendata->dentry->d_sb),
1574 fmode, 0);
1575 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1576 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1577 nfs4_init_opendata_res(opendata);
1578 ret = _nfs4_recover_proc_open(opendata);
1579 if (ret != 0)
1580 return ret;
1581 newstate = nfs4_opendata_to_nfs4_state(opendata);
1582 if (IS_ERR(newstate))
1583 return PTR_ERR(newstate);
1584 nfs4_close_state(newstate, fmode);
1585 *res = newstate;
1586 return 0;
1587 }
1588
1589 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1590 {
1591 struct nfs4_state *newstate;
1592 int ret;
1593
1594 /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
1595 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1596 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1597 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1598 /* memory barrier prior to reading state->n_* */
1599 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1600 clear_bit(NFS_OPEN_STATE, &state->flags);
1601 smp_rmb();
1602 if (state->n_rdwr != 0) {
1603 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
1604 if (ret != 0)
1605 return ret;
1606 if (newstate != state)
1607 return -ESTALE;
1608 }
1609 if (state->n_wronly != 0) {
1610 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
1611 if (ret != 0)
1612 return ret;
1613 if (newstate != state)
1614 return -ESTALE;
1615 }
1616 if (state->n_rdonly != 0) {
1617 ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
1618 if (ret != 0)
1619 return ret;
1620 if (newstate != state)
1621 return -ESTALE;
1622 }
1623 /*
1624 * We may have performed cached opens for all three recoveries.
1625 * Check if we need to update the current stateid.
1626 */
1627 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1628 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
1629 write_seqlock(&state->seqlock);
1630 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1631 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1632 write_sequnlock(&state->seqlock);
1633 }
1634 return 0;
1635 }
1636
1637 /*
1638 * OPEN_RECLAIM:
1639 * reclaim state on the server after a reboot.
1640 */
1641 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1642 {
1643 struct nfs_delegation *delegation;
1644 struct nfs4_opendata *opendata;
1645 fmode_t delegation_type = 0;
1646 int status;
1647
1648 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1649 NFS4_OPEN_CLAIM_PREVIOUS);
1650 if (IS_ERR(opendata))
1651 return PTR_ERR(opendata);
1652 rcu_read_lock();
1653 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1654 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1655 delegation_type = delegation->type;
1656 rcu_read_unlock();
1657 opendata->o_arg.u.delegation_type = delegation_type;
1658 status = nfs4_open_recover(opendata, state);
1659 nfs4_opendata_put(opendata);
1660 return status;
1661 }
1662
1663 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1664 {
1665 struct nfs_server *server = NFS_SERVER(state->inode);
1666 struct nfs4_exception exception = { };
1667 int err;
1668 do {
1669 err = _nfs4_do_open_reclaim(ctx, state);
1670 trace_nfs4_open_reclaim(ctx, 0, err);
1671 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
1672 continue;
1673 if (err != -NFS4ERR_DELAY)
1674 break;
1675 nfs4_handle_exception(server, err, &exception);
1676 } while (exception.retry);
1677 return err;
1678 }
1679
1680 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
1681 {
1682 struct nfs_open_context *ctx;
1683 int ret;
1684
1685 ctx = nfs4_state_find_open_context(state);
1686 if (IS_ERR(ctx))
1687 return -EAGAIN;
1688 ret = nfs4_do_open_reclaim(ctx, state);
1689 put_nfs_open_context(ctx);
1690 return ret;
1691 }
1692
1693 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
1694 {
1695 switch (err) {
1696 default:
1697 printk(KERN_ERR "NFS: %s: unhandled error "
1698 "%d.\n", __func__, err);
1699 case 0:
1700 case -ENOENT:
1701 case -EAGAIN:
1702 case -ESTALE:
1703 break;
1704 case -NFS4ERR_BADSESSION:
1705 case -NFS4ERR_BADSLOT:
1706 case -NFS4ERR_BAD_HIGH_SLOT:
1707 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1708 case -NFS4ERR_DEADSESSION:
1709 set_bit(NFS_DELEGATED_STATE, &state->flags);
1710 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
1711 return -EAGAIN;
1712 case -NFS4ERR_STALE_CLIENTID:
1713 case -NFS4ERR_STALE_STATEID:
1714 set_bit(NFS_DELEGATED_STATE, &state->flags);
1715 case -NFS4ERR_EXPIRED:
1716 /* Don't recall a delegation if it was lost */
1717 nfs4_schedule_lease_recovery(server->nfs_client);
1718 return -EAGAIN;
1719 case -NFS4ERR_MOVED:
1720 nfs4_schedule_migration_recovery(server);
1721 return -EAGAIN;
1722 case -NFS4ERR_LEASE_MOVED:
1723 nfs4_schedule_lease_moved_recovery(server->nfs_client);
1724 return -EAGAIN;
1725 case -NFS4ERR_DELEG_REVOKED:
1726 case -NFS4ERR_ADMIN_REVOKED:
1727 case -NFS4ERR_BAD_STATEID:
1728 case -NFS4ERR_OPENMODE:
1729 nfs_inode_find_state_and_recover(state->inode,
1730 stateid);
1731 nfs4_schedule_stateid_recovery(server, state);
1732 return -EAGAIN;
1733 case -NFS4ERR_DELAY:
1734 case -NFS4ERR_GRACE:
1735 set_bit(NFS_DELEGATED_STATE, &state->flags);
1736 ssleep(1);
1737 return -EAGAIN;
1738 case -ENOMEM:
1739 case -NFS4ERR_DENIED:
1740 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
1741 return 0;
1742 }
1743 return err;
1744 }
1745
1746 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1747 {
1748 struct nfs_server *server = NFS_SERVER(state->inode);
1749 struct nfs4_opendata *opendata;
1750 int err;
1751
1752 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1753 NFS4_OPEN_CLAIM_DELEG_CUR_FH);
1754 if (IS_ERR(opendata))
1755 return PTR_ERR(opendata);
1756 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
1757 err = nfs4_open_recover(opendata, state);
1758 nfs4_opendata_put(opendata);
1759 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
1760 }
1761
1762 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
1763 {
1764 struct nfs4_opendata *data = calldata;
1765
1766 nfs40_setup_sequence(data->o_arg.server->nfs_client->cl_slot_tbl,
1767 &data->c_arg.seq_args, &data->c_res.seq_res, task);
1768 }
1769
1770 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1771 {
1772 struct nfs4_opendata *data = calldata;
1773
1774 nfs40_sequence_done(task, &data->c_res.seq_res);
1775
1776 data->rpc_status = task->tk_status;
1777 if (data->rpc_status == 0) {
1778 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
1779 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1780 renew_lease(data->o_res.server, data->timestamp);
1781 data->rpc_done = 1;
1782 }
1783 }
1784
1785 static void nfs4_open_confirm_release(void *calldata)
1786 {
1787 struct nfs4_opendata *data = calldata;
1788 struct nfs4_state *state = NULL;
1789
1790 /* If this request hasn't been cancelled, do nothing */
1791 if (data->cancelled == 0)
1792 goto out_free;
1793 /* In case of error, no cleanup! */
1794 if (!data->rpc_done)
1795 goto out_free;
1796 state = nfs4_opendata_to_nfs4_state(data);
1797 if (!IS_ERR(state))
1798 nfs4_close_state(state, data->o_arg.fmode);
1799 out_free:
1800 nfs4_opendata_put(data);
1801 }
1802
1803 static const struct rpc_call_ops nfs4_open_confirm_ops = {
1804 .rpc_call_prepare = nfs4_open_confirm_prepare,
1805 .rpc_call_done = nfs4_open_confirm_done,
1806 .rpc_release = nfs4_open_confirm_release,
1807 };
1808
1809 /*
1810 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
1811 */
1812 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
1813 {
1814 struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
1815 struct rpc_task *task;
1816 struct rpc_message msg = {
1817 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
1818 .rpc_argp = &data->c_arg,
1819 .rpc_resp = &data->c_res,
1820 .rpc_cred = data->owner->so_cred,
1821 };
1822 struct rpc_task_setup task_setup_data = {
1823 .rpc_client = server->client,
1824 .rpc_message = &msg,
1825 .callback_ops = &nfs4_open_confirm_ops,
1826 .callback_data = data,
1827 .workqueue = nfsiod_workqueue,
1828 .flags = RPC_TASK_ASYNC,
1829 };
1830 int status;
1831
1832 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1);
1833 kref_get(&data->kref);
1834 data->rpc_done = 0;
1835 data->rpc_status = 0;
1836 data->timestamp = jiffies;
1837 task = rpc_run_task(&task_setup_data);
1838 if (IS_ERR(task))
1839 return PTR_ERR(task);
1840 status = nfs4_wait_for_completion_rpc_task(task);
1841 if (status != 0) {
1842 data->cancelled = 1;
1843 smp_wmb();
1844 } else
1845 status = data->rpc_status;
1846 rpc_put_task(task);
1847 return status;
1848 }
1849
1850 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1851 {
1852 struct nfs4_opendata *data = calldata;
1853 struct nfs4_state_owner *sp = data->owner;
1854 struct nfs_client *clp = sp->so_server->nfs_client;
1855
1856 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
1857 goto out_wait;
1858 /*
1859 * Check if we still need to send an OPEN call, or if we can use
1860 * a delegation instead.
1861 */
1862 if (data->state != NULL) {
1863 struct nfs_delegation *delegation;
1864
1865 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
1866 goto out_no_action;
1867 rcu_read_lock();
1868 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
1869 if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
1870 data->o_arg.claim != NFS4_OPEN_CLAIM_DELEG_CUR_FH &&
1871 can_open_delegated(delegation, data->o_arg.fmode))
1872 goto unlock_no_action;
1873 rcu_read_unlock();
1874 }
1875 /* Update client id. */
1876 data->o_arg.clientid = clp->cl_clientid;
1877 switch (data->o_arg.claim) {
1878 case NFS4_OPEN_CLAIM_PREVIOUS:
1879 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1880 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1881 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
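		/* Fall through: these claim types also use OPEN_NOATTR and the
		 * cached filehandle, just like NFS4_OPEN_CLAIM_FH. */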
1882 case NFS4_OPEN_CLAIM_FH:
1883 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
1884 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
1885 }
1886 data->timestamp = jiffies;
1887 if (nfs4_setup_sequence(data->o_arg.server,
1888 &data->o_arg.seq_args,
1889 &data->o_res.seq_res,
1890 task) != 0)
1891 nfs_release_seqid(data->o_arg.seqid);
1892
1893 /* Set the create mode (note dependency on the session type) */
1894 data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
1895 if (data->o_arg.open_flags & O_EXCL) {
1896 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
1897 if (nfs4_has_persistent_session(clp))
1898 data->o_arg.createmode = NFS4_CREATE_GUARDED;
1899 else if (clp->cl_mvops->minor_version > 0)
1900 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
1901 }
1902 return;
1903 unlock_no_action:
1904 rcu_read_unlock();
1905 out_no_action:
1906 task->tk_action = NULL;
1907 out_wait:
1908 nfs4_sequence_done(task, &data->o_res.seq_res);
1909 }
1910
1911 static void nfs4_open_done(struct rpc_task *task, void *calldata)
1912 {
1913 struct nfs4_opendata *data = calldata;
1914
1915 data->rpc_status = task->tk_status;
1916
1917 if (!nfs4_sequence_done(task, &data->o_res.seq_res))
1918 return;
1919
1920 if (task->tk_status == 0) {
1921 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
1922 switch (data->o_res.f_attr->mode & S_IFMT) {
1923 case S_IFREG:
1924 break;
1925 case S_IFLNK:
1926 data->rpc_status = -ELOOP;
1927 break;
1928 case S_IFDIR:
1929 data->rpc_status = -EISDIR;
1930 break;
1931 default:
1932 data->rpc_status = -ENOTDIR;
1933 }
1934 }
1935 renew_lease(data->o_res.server, data->timestamp);
1936 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
1937 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1938 }
1939 data->rpc_done = 1;
1940 }
1941
1942 static void nfs4_open_release(void *calldata)
1943 {
1944 struct nfs4_opendata *data = calldata;
1945 struct nfs4_state *state = NULL;
1946
1947 /* If this request hasn't been cancelled, do nothing */
1948 if (data->cancelled == 0)
1949 goto out_free;
1950 /* In case of error, no cleanup! */
1951 if (data->rpc_status != 0 || !data->rpc_done)
1952 goto out_free;
1953 /* In case we need an open_confirm, no cleanup! */
1954 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
1955 goto out_free;
1956 state = nfs4_opendata_to_nfs4_state(data);
1957 if (!IS_ERR(state))
1958 nfs4_close_state(state, data->o_arg.fmode);
1959 out_free:
1960 nfs4_opendata_put(data);
1961 }
1962
1963 static const struct rpc_call_ops nfs4_open_ops = {
1964 .rpc_call_prepare = nfs4_open_prepare,
1965 .rpc_call_done = nfs4_open_done,
1966 .rpc_release = nfs4_open_release,
1967 };
1968
1969 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
1970 {
1971 struct inode *dir = d_inode(data->dir);
1972 struct nfs_server *server = NFS_SERVER(dir);
1973 struct nfs_openargs *o_arg = &data->o_arg;
1974 struct nfs_openres *o_res = &data->o_res;
1975 struct rpc_task *task;
1976 struct rpc_message msg = {
1977 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
1978 .rpc_argp = o_arg,
1979 .rpc_resp = o_res,
1980 .rpc_cred = data->owner->so_cred,
1981 };
1982 struct rpc_task_setup task_setup_data = {
1983 .rpc_client = server->client,
1984 .rpc_message = &msg,
1985 .callback_ops = &nfs4_open_ops,
1986 .callback_data = data,
1987 .workqueue = nfsiod_workqueue,
1988 .flags = RPC_TASK_ASYNC,
1989 };
1990 int status;
1991
1992 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
1993 kref_get(&data->kref);
1994 data->rpc_done = 0;
1995 data->rpc_status = 0;
1996 data->cancelled = 0;
1997 data->is_recover = 0;
1998 if (isrecover) {
1999 nfs4_set_sequence_privileged(&o_arg->seq_args);
2000 data->is_recover = 1;
2001 }
2002 task = rpc_run_task(&task_setup_data);
2003 if (IS_ERR(task))
2004 return PTR_ERR(task);
2005 status = nfs4_wait_for_completion_rpc_task(task);
2006 if (status != 0) {
2007 data->cancelled = 1;
2008 smp_wmb();
2009 } else
2010 status = data->rpc_status;
2011 rpc_put_task(task);
2012
2013 return status;
2014 }
2015
2016 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
2017 {
2018 struct inode *dir = d_inode(data->dir);
2019 struct nfs_openres *o_res = &data->o_res;
2020 int status;
2021
2022 status = nfs4_run_open_task(data, 1);
2023 if (status != 0 || !data->rpc_done)
2024 return status;
2025
2026 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
2027
2028 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2029 status = _nfs4_proc_open_confirm(data);
2030 if (status != 0)
2031 return status;
2032 }
2033
2034 return status;
2035 }
2036
2037 /*
2038 * Additional permission checks in order to distinguish between an
2039 * open for read, and an open for execute. This works around the
2040 * fact that NFSv4 OPEN treats read and execute permissions as being
2041 * the same.
2042 * Note that in the non-execute case, we want to turn off permission
2043 * checking if we just created a new file (POSIX open() semantics).
2044 */
2045 static int nfs4_opendata_access(struct rpc_cred *cred,
2046 struct nfs4_opendata *opendata,
2047 struct nfs4_state *state, fmode_t fmode,
2048 int openflags)
2049 {
2050 struct nfs_access_entry cache;
2051 u32 mask;
2052
2053 /* access call failed or for some reason the server doesn't
2054 * support any access modes -- defer access call until later */
2055 if (opendata->o_res.access_supported == 0)
2056 return 0;
2057
2058 mask = 0;
2059 /*
2060 * Use openflags to check for exec, because fmode won't
2061 	 * always have FMODE_EXEC set when a file is opened for exec.
2062 */
2063 if (openflags & __FMODE_EXEC) {
2064 /* ONLY check for exec rights */
2065 mask = MAY_EXEC;
2066 } else if ((fmode & FMODE_READ) && !opendata->file_created)
2067 mask = MAY_READ;
2068
2069 cache.cred = cred;
2070 cache.jiffies = jiffies;
2071 nfs_access_set_mask(&cache, opendata->o_res.access_result);
2072 nfs_access_add_cache(state->inode, &cache);
2073
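	/* If none of the bits we need (MAY_READ/MAY_EXEC) is missing from the
	 * server's ACCESS reply, the open can proceed as-is. */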
2074 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
2075 return 0;
2076
2077 /* even though OPEN succeeded, access is denied. Close the file */
2078 nfs4_close_state(state, fmode);
2079 return -EACCES;
2080 }
2081
2082 /*
2083 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
2084 */
2085 static int _nfs4_proc_open(struct nfs4_opendata *data)
2086 {
2087 struct inode *dir = d_inode(data->dir);
2088 struct nfs_server *server = NFS_SERVER(dir);
2089 struct nfs_openargs *o_arg = &data->o_arg;
2090 struct nfs_openres *o_res = &data->o_res;
2091 int status;
2092
2093 status = nfs4_run_open_task(data, 0);
2094 if (!data->rpc_done)
2095 return status;
2096 if (status != 0) {
2097 if (status == -NFS4ERR_BADNAME &&
2098 !(o_arg->open_flags & O_CREAT))
2099 return -ENOENT;
2100 return status;
2101 }
2102
2103 nfs_fattr_map_and_free_names(server, &data->f_attr);
2104
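	/* For O_CREAT opens, update the directory change attribute and work
	 * out whether this OPEN actually created the file: a successful
	 * O_EXCL open must have created it, otherwise a change in the
	 * directory's change attribute is taken as evidence of creation. */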
2105 if (o_arg->open_flags & O_CREAT) {
2106 update_changeattr(dir, &o_res->cinfo);
2107 if (o_arg->open_flags & O_EXCL)
2108 data->file_created = 1;
2109 else if (o_res->cinfo.before != o_res->cinfo.after)
2110 data->file_created = 1;
2111 }
2112 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
2113 server->caps &= ~NFS_CAP_POSIX_LOCK;
2114 	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2115 status = _nfs4_proc_open_confirm(data);
2116 if (status != 0)
2117 return status;
2118 }
2119 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
2120 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label);
2121 return 0;
2122 }
2123
2124 static int nfs4_recover_expired_lease(struct nfs_server *server)
2125 {
2126 return nfs4_client_recover_expired_lease(server->nfs_client);
2127 }
2128
2129 /*
2130 * OPEN_EXPIRED:
2131 * reclaim state on the server after a network partition.
2132 * Assumes caller holds the appropriate lock
2133 */
2134 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2135 {
2136 struct nfs4_opendata *opendata;
2137 int ret;
2138
2139 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2140 NFS4_OPEN_CLAIM_FH);
2141 if (IS_ERR(opendata))
2142 return PTR_ERR(opendata);
2143 ret = nfs4_open_recover(opendata, state);
2144 if (ret == -ESTALE)
2145 d_drop(ctx->dentry);
2146 nfs4_opendata_put(opendata);
2147 return ret;
2148 }
2149
2150 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2151 {
2152 struct nfs_server *server = NFS_SERVER(state->inode);
2153 struct nfs4_exception exception = { };
2154 int err;
2155
2156 do {
2157 err = _nfs4_open_expired(ctx, state);
2158 trace_nfs4_open_expired(ctx, 0, err);
2159 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2160 continue;
2161 switch (err) {
2162 default:
2163 goto out;
2164 case -NFS4ERR_GRACE:
2165 case -NFS4ERR_DELAY:
2166 nfs4_handle_exception(server, err, &exception);
2167 err = 0;
2168 }
2169 } while (exception.retry);
2170 out:
2171 return err;
2172 }
2173
2174 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2175 {
2176 struct nfs_open_context *ctx;
2177 int ret;
2178
2179 ctx = nfs4_state_find_open_context(state);
2180 if (IS_ERR(ctx))
2181 return -EAGAIN;
2182 ret = nfs4_do_open_expired(ctx, state);
2183 put_nfs_open_context(ctx);
2184 return ret;
2185 }
2186
2187 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state)
2188 {
2189 nfs_remove_bad_delegation(state->inode);
2190 write_seqlock(&state->seqlock);
2191 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2192 write_sequnlock(&state->seqlock);
2193 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2194 }
2195
2196 static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
2197 {
2198 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
2199 nfs_finish_clear_delegation_stateid(state);
2200 }
2201
2202 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2203 {
2204 /* NFSv4.0 doesn't allow for delegation recovery on open expire */
2205 nfs40_clear_delegation_stateid(state);
2206 return nfs4_open_expired(sp, state);
2207 }
2208
2209 #if defined(CONFIG_NFS_V4_1)
2210 static void nfs41_check_delegation_stateid(struct nfs4_state *state)
2211 {
2212 struct nfs_server *server = NFS_SERVER(state->inode);
2213 nfs4_stateid stateid;
2214 struct nfs_delegation *delegation;
2215 struct rpc_cred *cred;
2216 int status;
2217
2218 /* Get the delegation credential for use by test/free_stateid */
2219 rcu_read_lock();
2220 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2221 if (delegation == NULL) {
2222 rcu_read_unlock();
2223 return;
2224 }
2225
2226 nfs4_stateid_copy(&stateid, &delegation->stateid);
2227 cred = get_rpccred(delegation->cred);
2228 rcu_read_unlock();
2229 status = nfs41_test_stateid(server, &stateid, cred);
2230 trace_nfs4_test_delegation_stateid(state, NULL, status);
2231
2232 if (status != NFS_OK) {
2233 /* Free the stateid unless the server explicitly
2234 * informs us the stateid is unrecognized. */
2235 if (status != -NFS4ERR_BAD_STATEID)
2236 nfs41_free_stateid(server, &stateid, cred);
2237 nfs_finish_clear_delegation_stateid(state);
2238 }
2239
2240 put_rpccred(cred);
2241 }
2242
2243 /**
2244 * nfs41_check_open_stateid - possibly free an open stateid
2245 *
2246 * @state: NFSv4 state for an inode
2247 *
2248 * Returns NFS_OK if recovery for this stateid is now finished.
2249 * Otherwise a negative NFS4ERR value is returned.
2250 */
2251 static int nfs41_check_open_stateid(struct nfs4_state *state)
2252 {
2253 struct nfs_server *server = NFS_SERVER(state->inode);
2254 nfs4_stateid *stateid = &state->open_stateid;
2255 struct rpc_cred *cred = state->owner->so_cred;
2256 int status;
2257
2258 /* If a state reset has been done, test_stateid is unneeded */
2259 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) &&
2260 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) &&
2261 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0))
2262 return -NFS4ERR_BAD_STATEID;
2263
2264 status = nfs41_test_stateid(server, stateid, cred);
2265 trace_nfs4_test_open_stateid(state, NULL, status);
2266 if (status != NFS_OK) {
2267 /* Free the stateid unless the server explicitly
2268 * informs us the stateid is unrecognized. */
2269 if (status != -NFS4ERR_BAD_STATEID)
2270 nfs41_free_stateid(server, stateid, cred);
2271
2272 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2273 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2274 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2275 clear_bit(NFS_OPEN_STATE, &state->flags);
2276 }
2277 return status;
2278 }
2279
2280 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2281 {
2282 int status;
2283
2284 nfs41_check_delegation_stateid(state);
2285 status = nfs41_check_open_stateid(state);
2286 if (status != NFS_OK)
2287 status = nfs4_open_expired(sp, state);
2288 return status;
2289 }
2290 #endif
2291
2292 /*
2293 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
2294 * fields corresponding to attributes that were used to store the verifier.
2295 * Make sure we clobber those fields in the later setattr call
2296 */
2297 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
2298 {
2299 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
2300 !(sattr->ia_valid & ATTR_ATIME_SET))
2301 sattr->ia_valid |= ATTR_ATIME;
2302
2303 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
2304 !(sattr->ia_valid & ATTR_MTIME_SET))
2305 sattr->ia_valid |= ATTR_MTIME;
2306 }
2307
2308 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
2309 fmode_t fmode,
2310 int flags,
2311 struct nfs_open_context *ctx)
2312 {
2313 struct nfs4_state_owner *sp = opendata->owner;
2314 struct nfs_server *server = sp->so_server;
2315 struct dentry *dentry;
2316 struct nfs4_state *state;
2317 unsigned int seq;
2318 int ret;
2319
2320 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
2321
2322 ret = _nfs4_proc_open(opendata);
2323 if (ret != 0)
2324 goto out;
2325
2326 state = nfs4_opendata_to_nfs4_state(opendata);
2327 ret = PTR_ERR(state);
2328 if (IS_ERR(state))
2329 goto out;
2330 if (server->caps & NFS_CAP_POSIX_LOCK)
2331 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
2332
2333 dentry = opendata->dentry;
2334 if (d_really_is_negative(dentry)) {
2335 /* FIXME: Is this d_drop() ever needed? */
2336 d_drop(dentry);
2337 dentry = d_add_unique(dentry, igrab(state->inode));
2338 if (dentry == NULL) {
2339 dentry = opendata->dentry;
2340 } else if (dentry != ctx->dentry) {
2341 dput(ctx->dentry);
2342 ctx->dentry = dget(dentry);
2343 }
2344 nfs_set_verifier(dentry,
2345 nfs_save_change_attribute(d_inode(opendata->dir)));
2346 }
2347
2348 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags);
2349 if (ret != 0)
2350 goto out;
2351
2352 ctx->state = state;
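	/* Attach the open context to the inode; if state recovery ran while
	 * the OPEN was in flight (the reclaim seqcount changed), make sure
	 * this new state gets recovered as well. */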
2353 if (d_inode(dentry) == state->inode) {
2354 nfs_inode_attach_open_context(ctx);
2355 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
2356 nfs4_schedule_stateid_recovery(server, state);
2357 }
2358 out:
2359 return ret;
2360 }
2361
2362 /*
2363  * On success, returns a referenced nfs4_state via ctx->state
2364 */
2365 static int _nfs4_do_open(struct inode *dir,
2366 struct nfs_open_context *ctx,
2367 int flags,
2368 struct iattr *sattr,
2369 struct nfs4_label *label,
2370 int *opened)
2371 {
2372 struct nfs4_state_owner *sp;
2373 struct nfs4_state *state = NULL;
2374 struct nfs_server *server = NFS_SERVER(dir);
2375 struct nfs4_opendata *opendata;
2376 struct dentry *dentry = ctx->dentry;
2377 struct rpc_cred *cred = ctx->cred;
2378 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
2379 fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
2380 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
2381 struct nfs4_label *olabel = NULL;
2382 int status;
2383
2384 /* Protect against reboot recovery conflicts */
2385 status = -ENOMEM;
2386 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
2387 if (sp == NULL) {
2388 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
2389 goto out_err;
2390 }
2391 status = nfs4_recover_expired_lease(server);
2392 if (status != 0)
2393 goto err_put_state_owner;
2394 if (d_really_is_positive(dentry))
2395 nfs4_return_incompatible_delegation(d_inode(dentry), fmode);
2396 status = -ENOMEM;
2397 if (d_really_is_positive(dentry))
2398 claim = NFS4_OPEN_CLAIM_FH;
2399 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr,
2400 label, claim, GFP_KERNEL);
2401 if (opendata == NULL)
2402 goto err_put_state_owner;
2403
2404 if (label) {
2405 olabel = nfs4_label_alloc(server, GFP_KERNEL);
2406 if (IS_ERR(olabel)) {
2407 status = PTR_ERR(olabel);
2408 goto err_opendata_put;
2409 }
2410 }
2411
2412 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
2413 if (!opendata->f_attr.mdsthreshold) {
2414 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
2415 if (!opendata->f_attr.mdsthreshold)
2416 goto err_free_label;
2417 }
2418 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
2419 }
2420 if (d_really_is_positive(dentry))
2421 opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
2422
2423 status = _nfs4_open_and_get_state(opendata, fmode, flags, ctx);
2424 if (status != 0)
2425 goto err_free_label;
2426 state = ctx->state;
2427
2428 if ((opendata->o_arg.open_flags & O_EXCL) &&
2429 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
2430 nfs4_exclusive_attrset(opendata, sattr);
2431
2432 nfs_fattr_init(opendata->o_res.f_attr);
2433 status = nfs4_do_setattr(state->inode, cred,
2434 opendata->o_res.f_attr, sattr,
2435 state, label, olabel);
2436 if (status == 0) {
2437 nfs_setattr_update_inode(state->inode, sattr,
2438 opendata->o_res.f_attr);
2439 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
2440 }
2441 }
2442 if (opendata->file_created)
2443 *opened |= FILE_CREATED;
2444
2445 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
2446 *ctx_th = opendata->f_attr.mdsthreshold;
2447 opendata->f_attr.mdsthreshold = NULL;
2448 }
2449
2450 nfs4_label_free(olabel);
2451
2452 nfs4_opendata_put(opendata);
2453 nfs4_put_state_owner(sp);
2454 return 0;
2455 err_free_label:
2456 nfs4_label_free(olabel);
2457 err_opendata_put:
2458 nfs4_opendata_put(opendata);
2459 err_put_state_owner:
2460 nfs4_put_state_owner(sp);
2461 out_err:
2462 return status;
2463 }
2464
2465
2466 static struct nfs4_state *nfs4_do_open(struct inode *dir,
2467 struct nfs_open_context *ctx,
2468 int flags,
2469 struct iattr *sattr,
2470 struct nfs4_label *label,
2471 int *opened)
2472 {
2473 struct nfs_server *server = NFS_SERVER(dir);
2474 struct nfs4_exception exception = { };
2475 struct nfs4_state *res;
2476 int status;
2477
2478 do {
2479 status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened);
2480 res = ctx->state;
2481 trace_nfs4_open_file(ctx, flags, status);
2482 if (status == 0)
2483 break;
2484 /* NOTE: BAD_SEQID means the server and client disagree about the
2485 * book-keeping w.r.t. state-changing operations
2486 * (OPEN/CLOSE/LOCK/LOCKU...)
2487 * It is actually a sign of a bug on the client or on the server.
2488 *
2489 * If we receive a BAD_SEQID error in the particular case of
2490 * doing an OPEN, we assume that nfs_increment_open_seqid() will
2491 * have unhashed the old state_owner for us, and that we can
2492 * therefore safely retry using a new one. We should still warn
2493 * the user though...
2494 */
2495 if (status == -NFS4ERR_BAD_SEQID) {
2496 pr_warn_ratelimited("NFS: v4 server %s "
2497 			"returned a bad sequence-id error!\n",
2498 NFS_SERVER(dir)->nfs_client->cl_hostname);
2499 exception.retry = 1;
2500 continue;
2501 }
2502 /*
2503 * BAD_STATEID on OPEN means that the server cancelled our
2504 * state before it received the OPEN_CONFIRM.
2505 * Recover by retrying the request as per the discussion
2506 * on Page 181 of RFC3530.
2507 */
2508 if (status == -NFS4ERR_BAD_STATEID) {
2509 exception.retry = 1;
2510 continue;
2511 }
2512 if (status == -EAGAIN) {
2513 /* We must have found a delegation */
2514 exception.retry = 1;
2515 continue;
2516 }
2517 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
2518 continue;
2519 res = ERR_PTR(nfs4_handle_exception(server,
2520 status, &exception));
2521 } while (exception.retry);
2522 return res;
2523 }
2524
2525 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2526 struct nfs_fattr *fattr, struct iattr *sattr,
2527 struct nfs4_state *state, struct nfs4_label *ilabel,
2528 struct nfs4_label *olabel)
2529 {
2530 struct nfs_server *server = NFS_SERVER(inode);
2531 struct nfs_setattrargs arg = {
2532 .fh = NFS_FH(inode),
2533 .iap = sattr,
2534 .server = server,
2535 .bitmask = server->attr_bitmask,
2536 .label = ilabel,
2537 };
2538 struct nfs_setattrres res = {
2539 .fattr = fattr,
2540 .label = olabel,
2541 .server = server,
2542 };
2543 struct rpc_message msg = {
2544 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
2545 .rpc_argp = &arg,
2546 .rpc_resp = &res,
2547 .rpc_cred = cred,
2548 };
2549 unsigned long timestamp = jiffies;
2550 fmode_t fmode;
2551 bool truncate;
2552 int status;
2553
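	/* Pick the attribute bitmask: request the security label in the
	 * reply only when a label is being set (ilabel) and the caller
	 * supplied an output label buffer (olabel); otherwise use the
	 * label-free bitmask. */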
2554 arg.bitmask = nfs4_bitmask(server, ilabel);
2555 if (ilabel)
2556 arg.bitmask = nfs4_bitmask(server, olabel);
2557
2558 nfs_fattr_init(fattr);
2559
2560 /* Servers should only apply open mode checks for file size changes */
2561 truncate = (sattr->ia_valid & ATTR_SIZE) ? true : false;
2562 fmode = truncate ? FMODE_WRITE : FMODE_READ;
2563
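	/* Pick a stateid for the SETATTR: prefer a delegation stateid, fall
	 * back to an open/lock stateid when truncating an open file, and
	 * otherwise use the anonymous (zero) stateid. */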
2564 if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) {
2565 /* Use that stateid */
2566 } else if (truncate && state != NULL) {
2567 struct nfs_lockowner lockowner = {
2568 .l_owner = current->files,
2569 .l_pid = current->tgid,
2570 };
2571 if (!nfs4_valid_open_stateid(state))
2572 return -EBADF;
2573 if (nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
2574 &lockowner) == -EIO)
2575 return -EBADF;
2576 } else
2577 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
2578
2579 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2580 if (status == 0 && state != NULL)
2581 renew_lease(server, timestamp);
2582 return status;
2583 }
2584
2585 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2586 struct nfs_fattr *fattr, struct iattr *sattr,
2587 struct nfs4_state *state, struct nfs4_label *ilabel,
2588 struct nfs4_label *olabel)
2589 {
2590 struct nfs_server *server = NFS_SERVER(inode);
2591 struct nfs4_exception exception = {
2592 .state = state,
2593 .inode = inode,
2594 };
2595 int err;
2596 do {
2597 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state, ilabel, olabel);
2598 trace_nfs4_setattr(inode, err);
2599 switch (err) {
2600 case -NFS4ERR_OPENMODE:
2601 if (!(sattr->ia_valid & ATTR_SIZE)) {
2602 pr_warn_once("NFSv4: server %s is incorrectly "
2603 "applying open mode checks to "
2604 "a SETATTR that is not "
2605 "changing file size.\n",
2606 server->nfs_client->cl_hostname);
2607 }
2608 if (state && !(state->state & FMODE_WRITE)) {
2609 err = -EBADF;
2610 if (sattr->ia_valid & ATTR_OPEN)
2611 err = -EACCES;
2612 goto out;
2613 }
2614 }
2615 err = nfs4_handle_exception(server, err, &exception);
2616 } while (exception.retry);
2617 out:
2618 return err;
2619 }
2620
2621 struct nfs4_closedata {
2622 struct inode *inode;
2623 struct nfs4_state *state;
2624 struct nfs_closeargs arg;
2625 struct nfs_closeres res;
2626 struct nfs_fattr fattr;
2627 unsigned long timestamp;
2628 bool roc;
2629 u32 roc_barrier;
2630 };
2631
2632 static void nfs4_free_closedata(void *data)
2633 {
2634 struct nfs4_closedata *calldata = data;
2635 struct nfs4_state_owner *sp = calldata->state->owner;
2636 struct super_block *sb = calldata->state->inode->i_sb;
2637
2638 if (calldata->roc)
2639 pnfs_roc_release(calldata->state->inode);
2640 nfs4_put_open_state(calldata->state);
2641 nfs_free_seqid(calldata->arg.seqid);
2642 nfs4_put_state_owner(sp);
2643 nfs_sb_deactive(sb);
2644 kfree(calldata);
2645 }
2646
2647 static void nfs4_close_done(struct rpc_task *task, void *data)
2648 {
2649 struct nfs4_closedata *calldata = data;
2650 struct nfs4_state *state = calldata->state;
2651 struct nfs_server *server = NFS_SERVER(calldata->inode);
2652 nfs4_stateid *res_stateid = NULL;
2653
2654 dprintk("%s: begin!\n", __func__);
2655 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
2656 return;
2657 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
2658 /* hmm. we are done with the inode, and in the process of freeing
2659 * the state_owner. we keep this around to process errors
2660 */
2661 switch (task->tk_status) {
2662 case 0:
2663 res_stateid = &calldata->res.stateid;
2664 if (calldata->arg.fmode == 0 && calldata->roc)
2665 pnfs_roc_set_barrier(state->inode,
2666 calldata->roc_barrier);
2667 renew_lease(server, calldata->timestamp);
2668 break;
2669 case -NFS4ERR_ADMIN_REVOKED:
2670 case -NFS4ERR_STALE_STATEID:
2671 case -NFS4ERR_OLD_STATEID:
2672 case -NFS4ERR_BAD_STATEID:
2673 case -NFS4ERR_EXPIRED:
2674 if (!nfs4_stateid_match(&calldata->arg.stateid,
2675 &state->open_stateid)) {
2676 rpc_restart_call_prepare(task);
2677 goto out_release;
2678 }
2679 if (calldata->arg.fmode == 0)
2680 break;
2681 default:
2682 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) {
2683 rpc_restart_call_prepare(task);
2684 goto out_release;
2685 }
2686 }
2687 nfs_clear_open_stateid(state, res_stateid, calldata->arg.fmode);
2688 out_release:
2689 nfs_release_seqid(calldata->arg.seqid);
2690 nfs_refresh_inode(calldata->inode, calldata->res.fattr);
2691 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
2692 }
2693
2694 static void nfs4_close_prepare(struct rpc_task *task, void *data)
2695 {
2696 struct nfs4_closedata *calldata = data;
2697 struct nfs4_state *state = calldata->state;
2698 struct inode *inode = calldata->inode;
2699 bool is_rdonly, is_wronly, is_rdwr;
2700 int call_close = 0;
2701
2702 dprintk("%s: begin!\n", __func__);
2703 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
2704 goto out_wait;
2705
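	/* Default to OPEN_DOWNGRADE; this is switched to CLOSE below once we
	 * know that no open modes remain on this state. */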
2706 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
2707 spin_lock(&state->owner->so_lock);
2708 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
2709 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
2710 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
2711 nfs4_stateid_copy(&calldata->arg.stateid, &state->open_stateid);
2712 /* Calculate the change in open mode */
2713 calldata->arg.fmode = 0;
2714 if (state->n_rdwr == 0) {
2715 if (state->n_rdonly == 0)
2716 call_close |= is_rdonly;
2717 else if (is_rdonly)
2718 calldata->arg.fmode |= FMODE_READ;
2719 if (state->n_wronly == 0)
2720 call_close |= is_wronly;
2721 else if (is_wronly)
2722 calldata->arg.fmode |= FMODE_WRITE;
2723 } else if (is_rdwr)
2724 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
2725
2726 if (calldata->arg.fmode == 0)
2727 call_close |= is_rdwr;
2728
2729 if (!nfs4_valid_open_stateid(state))
2730 call_close = 0;
2731 spin_unlock(&state->owner->so_lock);
2732
2733 if (!call_close) {
2734 /* Note: exit _without_ calling nfs4_close_done */
2735 goto out_no_action;
2736 }
2737
2738 if (calldata->arg.fmode == 0) {
2739 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
2740 if (calldata->roc &&
2741 pnfs_roc_drain(inode, &calldata->roc_barrier, task)) {
2742 nfs_release_seqid(calldata->arg.seqid);
2743 goto out_wait;
2744 }
2745 }
2746 calldata->arg.share_access =
2747 nfs4_map_atomic_open_share(NFS_SERVER(inode),
2748 calldata->arg.fmode, 0);
2749
2750 nfs_fattr_init(calldata->res.fattr);
2751 calldata->timestamp = jiffies;
2752 if (nfs4_setup_sequence(NFS_SERVER(inode),
2753 &calldata->arg.seq_args,
2754 &calldata->res.seq_res,
2755 task) != 0)
2756 nfs_release_seqid(calldata->arg.seqid);
2757 dprintk("%s: done!\n", __func__);
2758 return;
2759 out_no_action:
2760 task->tk_action = NULL;
2761 out_wait:
2762 nfs4_sequence_done(task, &calldata->res.seq_res);
2763 }
2764
2765 static const struct rpc_call_ops nfs4_close_ops = {
2766 .rpc_call_prepare = nfs4_close_prepare,
2767 .rpc_call_done = nfs4_close_done,
2768 .rpc_release = nfs4_free_closedata,
2769 };
2770
2771 static bool nfs4_roc(struct inode *inode)
2772 {
2773 if (!nfs_have_layout(inode))
2774 return false;
2775 return pnfs_roc(inode);
2776 }
2777
2778 /*
2779 * It is possible for data to be read/written from a mem-mapped file
2780 * after the sys_close call (which hits the vfs layer as a flush).
2781  * This means that we can't safely call NFSv4 close on a file until
2782  * the inode is cleared. This in turn means that we are not good
2783  * NFSv4 citizens - we do not tell the server to update the file's
2784  * share state even when we are done with one of the three share
2785  * stateids in the inode.
2786 *
2787 * NOTE: Caller must be holding the sp->so_owner semaphore!
2788 */
2789 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
2790 {
2791 struct nfs_server *server = NFS_SERVER(state->inode);
2792 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
2793 struct nfs4_closedata *calldata;
2794 struct nfs4_state_owner *sp = state->owner;
2795 struct rpc_task *task;
2796 struct rpc_message msg = {
2797 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
2798 .rpc_cred = state->owner->so_cred,
2799 };
2800 struct rpc_task_setup task_setup_data = {
2801 .rpc_client = server->client,
2802 .rpc_message = &msg,
2803 .callback_ops = &nfs4_close_ops,
2804 .workqueue = nfsiod_workqueue,
2805 .flags = RPC_TASK_ASYNC,
2806 };
2807 int status = -ENOMEM;
2808
2809 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
2810 &task_setup_data.rpc_client, &msg);
2811
2812 calldata = kzalloc(sizeof(*calldata), gfp_mask);
2813 if (calldata == NULL)
2814 goto out;
2815 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
2816 calldata->inode = state->inode;
2817 calldata->state = state;
2818 calldata->arg.fh = NFS_FH(state->inode);
2819 /* Serialization for the sequence id */
2820 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
2821 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
2822 if (IS_ERR(calldata->arg.seqid))
2823 goto out_free_calldata;
2824 calldata->arg.fmode = 0;
2825 calldata->arg.bitmask = server->cache_consistency_bitmask;
2826 calldata->res.fattr = &calldata->fattr;
2827 calldata->res.seqid = calldata->arg.seqid;
2828 calldata->res.server = server;
2829 calldata->roc = nfs4_roc(state->inode);
2830 nfs_sb_active(calldata->inode->i_sb);
2831
2832 msg.rpc_argp = &calldata->arg;
2833 msg.rpc_resp = &calldata->res;
2834 task_setup_data.callback_data = calldata;
2835 task = rpc_run_task(&task_setup_data);
2836 if (IS_ERR(task))
2837 return PTR_ERR(task);
2838 status = 0;
2839 if (wait)
2840 status = rpc_wait_for_completion_task(task);
2841 rpc_put_task(task);
2842 return status;
2843 out_free_calldata:
2844 kfree(calldata);
2845 out:
2846 nfs4_put_open_state(state);
2847 nfs4_put_state_owner(sp);
2848 return status;
2849 }
2850
2851 static struct inode *
2852 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
2853 int open_flags, struct iattr *attr, int *opened)
2854 {
2855 struct nfs4_state *state;
2856 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
2857
2858 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
2859
2860 /* Protect against concurrent sillydeletes */
2861 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
2862
2863 nfs4_label_release_security(label);
2864
2865 if (IS_ERR(state))
2866 return ERR_CAST(state);
2867 return state->inode;
2868 }
2869
2870 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
2871 {
2872 if (ctx->state == NULL)
2873 return;
2874 if (is_sync)
2875 nfs4_close_sync(ctx->state, ctx->mode);
2876 else
2877 nfs4_close_state(ctx->state, ctx->mode);
2878 }
2879
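/* Masks covering every attribute bit up to (and including) the highest
 * attribute bit defined by the given minor version; used below to sanitize
 * the bitmask advertised by the server. */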
2880 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
2881 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
2882 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_SECURITY_LABEL - 1UL)
2883
2884 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2885 {
2886 struct nfs4_server_caps_arg args = {
2887 .fhandle = fhandle,
2888 };
2889 struct nfs4_server_caps_res res = {};
2890 struct rpc_message msg = {
2891 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
2892 .rpc_argp = &args,
2893 .rpc_resp = &res,
2894 };
2895 int status;
2896
2897 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2898 if (status == 0) {
2899 /* Sanity check the server answers */
2900 switch (server->nfs_client->cl_minorversion) {
2901 case 0:
2902 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
2903 res.attr_bitmask[2] = 0;
2904 break;
2905 case 1:
2906 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
2907 break;
2908 case 2:
2909 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
2910 }
2911 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
2912 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
2913 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
2914 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
2915 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
2916 NFS_CAP_CTIME|NFS_CAP_MTIME|
2917 NFS_CAP_SECURITY_LABEL);
2918 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
2919 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
2920 server->caps |= NFS_CAP_ACLS;
2921 if (res.has_links != 0)
2922 server->caps |= NFS_CAP_HARDLINKS;
2923 if (res.has_symlinks != 0)
2924 server->caps |= NFS_CAP_SYMLINKS;
2925 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
2926 server->caps |= NFS_CAP_FILEID;
2927 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
2928 server->caps |= NFS_CAP_MODE;
2929 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
2930 server->caps |= NFS_CAP_NLINK;
2931 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
2932 server->caps |= NFS_CAP_OWNER;
2933 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
2934 server->caps |= NFS_CAP_OWNER_GROUP;
2935 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
2936 server->caps |= NFS_CAP_ATIME;
2937 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
2938 server->caps |= NFS_CAP_CTIME;
2939 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
2940 server->caps |= NFS_CAP_MTIME;
2941 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
2942 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
2943 server->caps |= NFS_CAP_SECURITY_LABEL;
2944 #endif
2945 memcpy(server->attr_bitmask_nl, res.attr_bitmask,
2946 sizeof(server->attr_bitmask));
2947 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
2948
2949 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
2950 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
2951 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
2952 server->cache_consistency_bitmask[2] = 0;
2953 server->acl_bitmask = res.acl_bitmask;
2954 server->fh_expire_type = res.fh_expire_type;
2955 }
2956
2957 return status;
2958 }
2959
2960 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2961 {
2962 struct nfs4_exception exception = { };
2963 int err;
2964 do {
2965 err = nfs4_handle_exception(server,
2966 _nfs4_server_capabilities(server, fhandle),
2967 &exception);
2968 } while (exception.retry);
2969 return err;
2970 }
2971
2972 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2973 struct nfs_fsinfo *info)
2974 {
2975 u32 bitmask[3];
2976 struct nfs4_lookup_root_arg args = {
2977 .bitmask = bitmask,
2978 };
2979 struct nfs4_lookup_res res = {
2980 .server = server,
2981 .fattr = info->fattr,
2982 .fh = fhandle,
2983 };
2984 struct rpc_message msg = {
2985 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
2986 .rpc_argp = &args,
2987 .rpc_resp = &res,
2988 };
2989
2990 bitmask[0] = nfs4_fattr_bitmap[0];
2991 bitmask[1] = nfs4_fattr_bitmap[1];
2992 /*
2993 * Process the label in the upcoming getfattr
2994 */
2995 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
2996
2997 nfs_fattr_init(info->fattr);
2998 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2999 }
3000
3001 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
3002 struct nfs_fsinfo *info)
3003 {
3004 struct nfs4_exception exception = { };
3005 int err;
3006 do {
3007 err = _nfs4_lookup_root(server, fhandle, info);
3008 trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
3009 switch (err) {
3010 case 0:
3011 case -NFS4ERR_WRONGSEC:
3012 goto out;
3013 default:
3014 err = nfs4_handle_exception(server, err, &exception);
3015 }
3016 } while (exception.retry);
3017 out:
3018 return err;
3019 }
3020
3021 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3022 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
3023 {
3024 struct rpc_auth_create_args auth_args = {
3025 .pseudoflavor = flavor,
3026 };
3027 struct rpc_auth *auth;
3028 int ret;
3029
3030 auth = rpcauth_create(&auth_args, server->client);
3031 if (IS_ERR(auth)) {
3032 ret = -EACCES;
3033 goto out;
3034 }
3035 ret = nfs4_lookup_root(server, fhandle, info);
3036 out:
3037 return ret;
3038 }
3039
3040 /*
3041 * Retry pseudoroot lookup with various security flavors. We do this when:
3042 *
3043 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
3044 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation
3045 *
3046 * Returns zero on success, or a negative NFS4ERR value, or a
3047 * negative errno value.
3048 */
3049 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3050 struct nfs_fsinfo *info)
3051 {
3052 /* Per 3530bis 15.33.5 */
3053 static const rpc_authflavor_t flav_array[] = {
3054 RPC_AUTH_GSS_KRB5P,
3055 RPC_AUTH_GSS_KRB5I,
3056 RPC_AUTH_GSS_KRB5,
3057 RPC_AUTH_UNIX, /* courtesy */
3058 RPC_AUTH_NULL,
3059 };
3060 int status = -EPERM;
3061 size_t i;
3062
3063 if (server->auth_info.flavor_len > 0) {
3064 /* try each flavor specified by user */
3065 for (i = 0; i < server->auth_info.flavor_len; i++) {
3066 status = nfs4_lookup_root_sec(server, fhandle, info,
3067 server->auth_info.flavors[i]);
3068 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3069 continue;
3070 break;
3071 }
3072 } else {
3073 /* no flavors specified by user, try default list */
3074 for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
3075 status = nfs4_lookup_root_sec(server, fhandle, info,
3076 flav_array[i]);
3077 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3078 continue;
3079 break;
3080 }
3081 }
3082
3083 /*
3084 	 * -EACCES could mean that the user doesn't have correct permissions
3085 * to access the mount. It could also mean that we tried to mount
3086 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
3087 * existing mount programs don't handle -EACCES very well so it should
3088 * be mapped to -EPERM instead.
3089 */
3090 if (status == -EACCES)
3091 status = -EPERM;
3092 return status;
3093 }
3094
3095 static int nfs4_do_find_root_sec(struct nfs_server *server,
3096 struct nfs_fh *fhandle, struct nfs_fsinfo *info)
3097 {
3098 int mv = server->nfs_client->cl_minorversion;
3099 return nfs_v4_minor_ops[mv]->find_root_sec(server, fhandle, info);
3100 }
3101
3102 /**
3103 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
3104 * @server: initialized nfs_server handle
3105 * @fhandle: we fill in the pseudo-fs root file handle
3106 * @info: we fill in an FSINFO struct
3107 * @auth_probe: probe the auth flavours
3108 *
3109 * Returns zero on success, or a negative errno.
3110 */
3111 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
3112 struct nfs_fsinfo *info,
3113 bool auth_probe)
3114 {
3115 int status = 0;
3116
3117 if (!auth_probe)
3118 status = nfs4_lookup_root(server, fhandle, info);
3119
3120 	if (auth_probe || status == -NFS4ERR_WRONGSEC)
3121 status = nfs4_do_find_root_sec(server, fhandle, info);
3122
3123 if (status == 0)
3124 status = nfs4_server_capabilities(server, fhandle);
3125 if (status == 0)
3126 status = nfs4_do_fsinfo(server, fhandle, info);
3127
3128 return nfs4_map_errors(status);
3129 }
3130
3131 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
3132 struct nfs_fsinfo *info)
3133 {
3134 int error;
3135 struct nfs_fattr *fattr = info->fattr;
3136 struct nfs4_label *label = NULL;
3137
3138 error = nfs4_server_capabilities(server, mntfh);
3139 if (error < 0) {
3140 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
3141 return error;
3142 }
3143
3144 label = nfs4_label_alloc(server, GFP_KERNEL);
3145 if (IS_ERR(label))
3146 return PTR_ERR(label);
3147
3148 error = nfs4_proc_getattr(server, mntfh, fattr, label);
3149 if (error < 0) {
3150 dprintk("nfs4_get_root: getattr error = %d\n", -error);
3151 goto err_free_label;
3152 }
3153
3154 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
3155 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
3156 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
3157
3158 err_free_label:
3159 nfs4_label_free(label);
3160
3161 return error;
3162 }
3163
3164 /*
3165 * Get locations and (maybe) other attributes of a referral.
3166 * Note that we'll actually follow the referral later when
3167 * we detect fsid mismatch in inode revalidation
3168 */
3169 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
3170 const struct qstr *name, struct nfs_fattr *fattr,
3171 struct nfs_fh *fhandle)
3172 {
3173 int status = -ENOMEM;
3174 struct page *page = NULL;
3175 struct nfs4_fs_locations *locations = NULL;
3176
3177 page = alloc_page(GFP_KERNEL);
3178 if (page == NULL)
3179 goto out;
3180 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
3181 if (locations == NULL)
3182 goto out;
3183
3184 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
3185 if (status != 0)
3186 goto out;
3187
3188 /*
3189 * If the fsid didn't change, this is a migration event, not a
3190 * referral. Cause us to drop into the exception handler, which
3191 * will kick off migration recovery.
3192 */
3193 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
3194 dprintk("%s: server did not return a different fsid for"
3195 " a referral at %s\n", __func__, name->name);
3196 status = -NFS4ERR_MOVED;
3197 goto out;
3198 }
3199 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
3200 nfs_fixup_referral_attributes(&locations->fattr);
3201
3202 /* replace the lookup nfs_fattr with the locations nfs_fattr */
3203 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
3204 memset(fhandle, 0, sizeof(struct nfs_fh));
3205 out:
3206 if (page)
3207 __free_page(page);
3208 kfree(locations);
3209 return status;
3210 }
3211
3212 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3213 struct nfs_fattr *fattr, struct nfs4_label *label)
3214 {
3215 struct nfs4_getattr_arg args = {
3216 .fh = fhandle,
3217 .bitmask = server->attr_bitmask,
3218 };
3219 struct nfs4_getattr_res res = {
3220 .fattr = fattr,
3221 .label = label,
3222 .server = server,
3223 };
3224 struct rpc_message msg = {
3225 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
3226 .rpc_argp = &args,
3227 .rpc_resp = &res,
3228 };
3229
3230 args.bitmask = nfs4_bitmask(server, label);
3231
3232 nfs_fattr_init(fattr);
3233 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3234 }
3235
3236 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3237 struct nfs_fattr *fattr, struct nfs4_label *label)
3238 {
3239 struct nfs4_exception exception = { };
3240 int err;
3241 do {
3242 err = _nfs4_proc_getattr(server, fhandle, fattr, label);
3243 trace_nfs4_getattr(server, fhandle, fattr, err);
3244 err = nfs4_handle_exception(server, err,
3245 &exception);
3246 } while (exception.retry);
3247 return err;
3248 }
3249
3250 /*
3251  * The file is not closed if it is opened due to a request to change
3252 * the size of the file. The open call will not be needed once the
3253 * VFS layer lookup-intents are implemented.
3254 *
3255 * Close is called when the inode is destroyed.
3256 * If we haven't opened the file for O_WRONLY, we
3257 * need to in the size_change case to obtain a stateid.
3258 *
3259 * Got race?
3260  * Because OPEN is always done by name in NFSv4, it is
3261 * possible that we opened a different file by the same
3262 * name. We can recognize this race condition, but we
3263 * can't do anything about it besides returning an error.
3264 *
3265 * This will be fixed with VFS changes (lookup-intent).
3266 */
3267 static int
3268 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
3269 struct iattr *sattr)
3270 {
3271 struct inode *inode = d_inode(dentry);
3272 struct rpc_cred *cred = NULL;
3273 struct nfs4_state *state = NULL;
3274 struct nfs4_label *label = NULL;
3275 int status;
3276
3277 if (pnfs_ld_layoutret_on_setattr(inode) &&
3278 sattr->ia_valid & ATTR_SIZE &&
3279 sattr->ia_size < i_size_read(inode))
3280 pnfs_commit_and_return_layout(inode);
3281
3282 nfs_fattr_init(fattr);
3283
3284 /* Deal with open(O_TRUNC) */
3285 if (sattr->ia_valid & ATTR_OPEN)
3286 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
3287
3288 /* Optimization: if the end result is no change, don't RPC */
3289 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
3290 return 0;
3291
3292 /* Search for an existing open(O_WRITE) file */
3293 if (sattr->ia_valid & ATTR_FILE) {
3294 struct nfs_open_context *ctx;
3295
3296 ctx = nfs_file_open_context(sattr->ia_file);
3297 if (ctx) {
3298 cred = ctx->cred;
3299 state = ctx->state;
3300 }
3301 }
3302
3303 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
3304 if (IS_ERR(label))
3305 return PTR_ERR(label);
3306
3307 status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label);
3308 if (status == 0) {
3309 nfs_setattr_update_inode(inode, sattr, fattr);
3310 nfs_setsecurity(inode, fattr, label);
3311 }
3312 nfs4_label_free(label);
3313 return status;
3314 }
3315
3316 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
3317 const struct qstr *name, struct nfs_fh *fhandle,
3318 struct nfs_fattr *fattr, struct nfs4_label *label)
3319 {
3320 struct nfs_server *server = NFS_SERVER(dir);
3321 int status;
3322 struct nfs4_lookup_arg args = {
3323 .bitmask = server->attr_bitmask,
3324 .dir_fh = NFS_FH(dir),
3325 .name = name,
3326 };
3327 struct nfs4_lookup_res res = {
3328 .server = server,
3329 .fattr = fattr,
3330 .label = label,
3331 .fh = fhandle,
3332 };
3333 struct rpc_message msg = {
3334 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
3335 .rpc_argp = &args,
3336 .rpc_resp = &res,
3337 };
3338
3339 args.bitmask = nfs4_bitmask(server, label);
3340
3341 nfs_fattr_init(fattr);
3342
3343 dprintk("NFS call lookup %s\n", name->name);
3344 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
3345 dprintk("NFS reply lookup: %d\n", status);
3346 return status;
3347 }
3348
3349 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
3350 {
3351 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
3352 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
3353 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
3354 fattr->nlink = 2;
3355 }
3356
3357 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
3358 struct qstr *name, struct nfs_fh *fhandle,
3359 struct nfs_fattr *fattr, struct nfs4_label *label)
3360 {
3361 struct nfs4_exception exception = { };
3362 struct rpc_clnt *client = *clnt;
3363 int err;
3364 do {
3365 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr, label);
3366 trace_nfs4_lookup(dir, name, err);
3367 switch (err) {
3368 case -NFS4ERR_BADNAME:
3369 err = -ENOENT;
3370 goto out;
3371 case -NFS4ERR_MOVED:
3372 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
3373 if (err == -NFS4ERR_MOVED)
3374 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3375 goto out;
3376 case -NFS4ERR_WRONGSEC:
3377 err = -EPERM;
3378 if (client != *clnt)
3379 goto out;
3380 client = nfs4_negotiate_security(client, dir, name);
3381 if (IS_ERR(client))
3382 return PTR_ERR(client);
3383
3384 exception.retry = 1;
3385 break;
3386 default:
3387 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3388 }
3389 } while (exception.retry);
3390
3391 out:
3392 if (err == 0)
3393 *clnt = client;
3394 else if (client != *clnt)
3395 rpc_shutdown_client(client);
3396
3397 return err;
3398 }
3399
3400 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
3401 struct nfs_fh *fhandle, struct nfs_fattr *fattr,
3402 struct nfs4_label *label)
3403 {
3404 int status;
3405 struct rpc_clnt *client = NFS_CLIENT(dir);
3406
3407 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, label);
3408 if (client != NFS_CLIENT(dir)) {
3409 rpc_shutdown_client(client);
3410 nfs_fixup_secinfo_attributes(fattr);
3411 }
3412 return status;
3413 }
3414
3415 struct rpc_clnt *
3416 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
3417 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
3418 {
3419 struct rpc_clnt *client = NFS_CLIENT(dir);
3420 int status;
3421
3422 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL);
3423 if (status < 0)
3424 return ERR_PTR(status);
3425 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
3426 }
3427
3428 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3429 {
3430 struct nfs_server *server = NFS_SERVER(inode);
3431 struct nfs4_accessargs args = {
3432 .fh = NFS_FH(inode),
3433 .bitmask = server->cache_consistency_bitmask,
3434 };
3435 struct nfs4_accessres res = {
3436 .server = server,
3437 };
3438 struct rpc_message msg = {
3439 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
3440 .rpc_argp = &args,
3441 .rpc_resp = &res,
3442 .rpc_cred = entry->cred,
3443 };
3444 int mode = entry->mask;
3445 int status = 0;
3446
3447 /*
3448 * Determine which access bits we want to ask for...
3449 */
3450 if (mode & MAY_READ)
3451 args.access |= NFS4_ACCESS_READ;
3452 if (S_ISDIR(inode->i_mode)) {
3453 if (mode & MAY_WRITE)
3454 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
3455 if (mode & MAY_EXEC)
3456 args.access |= NFS4_ACCESS_LOOKUP;
3457 } else {
3458 if (mode & MAY_WRITE)
3459 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
3460 if (mode & MAY_EXEC)
3461 args.access |= NFS4_ACCESS_EXECUTE;
3462 }
3463
3464 res.fattr = nfs_alloc_fattr();
3465 if (res.fattr == NULL)
3466 return -ENOMEM;
3467
3468 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3469 if (!status) {
3470 nfs_access_set_mask(entry, res.access);
3471 nfs_refresh_inode(inode, res.fattr);
3472 }
3473 nfs_free_fattr(res.fattr);
3474 return status;
3475 }
3476
3477 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3478 {
3479 struct nfs4_exception exception = { };
3480 int err;
3481 do {
3482 err = _nfs4_proc_access(inode, entry);
3483 trace_nfs4_access(inode, err);
3484 err = nfs4_handle_exception(NFS_SERVER(inode), err,
3485 &exception);
3486 } while (exception.retry);
3487 return err;
3488 }
3489
3490 /*
3491 * TODO: For the time being, we don't try to get any attributes
3492 * along with any of the zero-copy operations READ, READDIR,
3493 * READLINK, WRITE.
3494 *
3495 * In the case of the first three, we want to put the GETATTR
3496 * after the read-type operation -- this is because it is hard
3497 * to predict the length of a GETATTR response in v4, and thus
3498 * align the READ data correctly. This means that the GETATTR
3499 * may end up partially falling into the page cache, and we should
3500 * shift it into the 'tail' of the xdr_buf before processing.
3501 * To do this efficiently, we need to know the total length
3502 * of data received, which doesn't seem to be available outside
3503 * of the RPC layer.
3504 *
3505 * In the case of WRITE, we also want to put the GETATTR after
3506 * the operation -- in this case because we want to make sure
3507 * we get the post-operation mtime and size.
3508 *
3509 * Both of these changes to the XDR layer would in fact be quite
3510 * minor, but I decided to leave them for a subsequent patch.
3511 */
3512 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
3513 unsigned int pgbase, unsigned int pglen)
3514 {
3515 struct nfs4_readlink args = {
3516 .fh = NFS_FH(inode),
3517 .pgbase = pgbase,
3518 .pglen = pglen,
3519 .pages = &page,
3520 };
3521 struct nfs4_readlink_res res;
3522 struct rpc_message msg = {
3523 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
3524 .rpc_argp = &args,
3525 .rpc_resp = &res,
3526 };
3527
3528 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
3529 }
3530
3531 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
3532 unsigned int pgbase, unsigned int pglen)
3533 {
3534 struct nfs4_exception exception = { };
3535 int err;
3536 do {
3537 err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
3538 trace_nfs4_readlink(inode, err);
3539 err = nfs4_handle_exception(NFS_SERVER(inode), err,
3540 &exception);
3541 } while (exception.retry);
3542 return err;
3543 }
3544
3545 /*
3546 * This is just for mknod. open(O_CREAT) will always do ->open_context().
3547 */
3548 static int
3549 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
3550 int flags)
3551 {
3552 struct nfs4_label l, *ilabel = NULL;
3553 struct nfs_open_context *ctx;
3554 struct nfs4_state *state;
3555 int opened = 0;
3556 int status = 0;
3557
3558 ctx = alloc_nfs_open_context(dentry, FMODE_READ);
3559 if (IS_ERR(ctx))
3560 return PTR_ERR(ctx);
3561
3562 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
3563
3564 sattr->ia_mode &= ~current_umask();
3565 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, &opened);
3566 if (IS_ERR(state)) {
3567 status = PTR_ERR(state);
3568 goto out;
3569 }
3570 out:
3571 nfs4_label_release_security(ilabel);
3572 put_nfs_open_context(ctx);
3573 return status;
3574 }
3575
3576 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
3577 {
3578 struct nfs_server *server = NFS_SERVER(dir);
3579 struct nfs_removeargs args = {
3580 .fh = NFS_FH(dir),
3581 .name = *name,
3582 };
3583 struct nfs_removeres res = {
3584 .server = server,
3585 };
3586 struct rpc_message msg = {
3587 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
3588 .rpc_argp = &args,
3589 .rpc_resp = &res,
3590 };
3591 int status;
3592
3593 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
3594 if (status == 0)
3595 update_changeattr(dir, &res.cinfo);
3596 return status;
3597 }
3598
3599 static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
3600 {
3601 struct nfs4_exception exception = { };
3602 int err;
3603 do {
3604 err = _nfs4_proc_remove(dir, name);
3605 trace_nfs4_remove(dir, name, err);
3606 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3607 &exception);
3608 } while (exception.retry);
3609 return err;
3610 }
3611
3612 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
3613 {
3614 struct nfs_server *server = NFS_SERVER(dir);
3615 struct nfs_removeargs *args = msg->rpc_argp;
3616 struct nfs_removeres *res = msg->rpc_resp;
3617
3618 res->server = server;
3619 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
3620 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1);
3621
3622 nfs_fattr_init(res->dir_attr);
3623 }
3624
3625 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
3626 {
3627 nfs4_setup_sequence(NFS_SERVER(data->dir),
3628 &data->args.seq_args,
3629 &data->res.seq_res,
3630 task);
3631 }
3632
3633 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
3634 {
3635 struct nfs_unlinkdata *data = task->tk_calldata;
3636 struct nfs_removeres *res = &data->res;
3637
3638 if (!nfs4_sequence_done(task, &res->seq_res))
3639 return 0;
3640 if (nfs4_async_handle_error(task, res->server, NULL,
3641 &data->timeout) == -EAGAIN)
3642 return 0;
3643 update_changeattr(dir, &res->cinfo);
3644 return 1;
3645 }
3646
3647 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
3648 {
3649 struct nfs_server *server = NFS_SERVER(dir);
3650 struct nfs_renameargs *arg = msg->rpc_argp;
3651 struct nfs_renameres *res = msg->rpc_resp;
3652
3653 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
3654 res->server = server;
3655 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1);
3656 }
3657
3658 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
3659 {
3660 nfs4_setup_sequence(NFS_SERVER(data->old_dir),
3661 &data->args.seq_args,
3662 &data->res.seq_res,
3663 task);
3664 }
3665
3666 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
3667 struct inode *new_dir)
3668 {
3669 struct nfs_renamedata *data = task->tk_calldata;
3670 struct nfs_renameres *res = &data->res;
3671
3672 if (!nfs4_sequence_done(task, &res->seq_res))
3673 return 0;
3674 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
3675 return 0;
3676
3677 update_changeattr(old_dir, &res->old_cinfo);
3678 update_changeattr(new_dir, &res->new_cinfo);
3679 return 1;
3680 }
3681
3682 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3683 {
3684 struct nfs_server *server = NFS_SERVER(inode);
3685 struct nfs4_link_arg arg = {
3686 .fh = NFS_FH(inode),
3687 .dir_fh = NFS_FH(dir),
3688 .name = name,
3689 .bitmask = server->attr_bitmask,
3690 };
3691 struct nfs4_link_res res = {
3692 .server = server,
3693 .label = NULL,
3694 };
3695 struct rpc_message msg = {
3696 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
3697 .rpc_argp = &arg,
3698 .rpc_resp = &res,
3699 };
3700 int status = -ENOMEM;
3701
3702 res.fattr = nfs_alloc_fattr();
3703 if (res.fattr == NULL)
3704 goto out;
3705
3706 res.label = nfs4_label_alloc(server, GFP_KERNEL);
3707 if (IS_ERR(res.label)) {
3708 status = PTR_ERR(res.label);
3709 goto out;
3710 }
3711 arg.bitmask = nfs4_bitmask(server, res.label);
3712
3713 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3714 if (!status) {
3715 update_changeattr(dir, &res.cinfo);
3716 status = nfs_post_op_update_inode(inode, res.fattr);
3717 if (!status)
3718 nfs_setsecurity(inode, res.fattr, res.label);
3719 }
3720
3721
3722 nfs4_label_free(res.label);
3723
3724 out:
3725 nfs_free_fattr(res.fattr);
3726 return status;
3727 }
3728
3729 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3730 {
3731 struct nfs4_exception exception = { };
3732 int err;
3733 do {
3734 err = nfs4_handle_exception(NFS_SERVER(inode),
3735 _nfs4_proc_link(inode, dir, name),
3736 &exception);
3737 } while (exception.retry);
3738 return err;
3739 }
3740
3741 struct nfs4_createdata {
3742 struct rpc_message msg;
3743 struct nfs4_create_arg arg;
3744 struct nfs4_create_res res;
3745 struct nfs_fh fh;
3746 struct nfs_fattr fattr;
3747 struct nfs4_label *label;
3748 };
3749
3750 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
3751 struct qstr *name, struct iattr *sattr, u32 ftype)
3752 {
3753 struct nfs4_createdata *data;
3754
3755 data = kzalloc(sizeof(*data), GFP_KERNEL);
3756 if (data != NULL) {
3757 struct nfs_server *server = NFS_SERVER(dir);
3758
3759 data->label = nfs4_label_alloc(server, GFP_KERNEL);
3760 if (IS_ERR(data->label))
3761 goto out_free;
3762
3763 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
3764 data->msg.rpc_argp = &data->arg;
3765 data->msg.rpc_resp = &data->res;
3766 data->arg.dir_fh = NFS_FH(dir);
3767 data->arg.server = server;
3768 data->arg.name = name;
3769 data->arg.attrs = sattr;
3770 data->arg.ftype = ftype;
3771 data->arg.bitmask = nfs4_bitmask(server, data->label);
3772 data->res.server = server;
3773 data->res.fh = &data->fh;
3774 data->res.fattr = &data->fattr;
3775 data->res.label = data->label;
3776 nfs_fattr_init(data->res.fattr);
3777 }
3778 return data;
3779 out_free:
3780 kfree(data);
3781 return NULL;
3782 }
3783
3784 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
3785 {
3786 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
3787 &data->arg.seq_args, &data->res.seq_res, 1);
3788 if (status == 0) {
3789 update_changeattr(dir, &data->res.dir_cinfo);
3790 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label);
3791 }
3792 return status;
3793 }
3794
3795 static void nfs4_free_createdata(struct nfs4_createdata *data)
3796 {
3797 nfs4_label_free(data->label);
3798 kfree(data);
3799 }
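/*
 * nfs4_alloc_createdata(), nfs4_do_create() and nfs4_free_createdata()
 * factor out the common CREATE machinery: one kzalloc'd nfs4_createdata
 * carries the rpc_message, the argument and result structures, the
 * returned filehandle/fattr and the security label, so the symlink,
 * mkdir and mknod paths below only fill in the pieces that differ
 * before calling nfs4_do_create().
 */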
3800
3801 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3802 struct page *page, unsigned int len, struct iattr *sattr,
3803 struct nfs4_label *label)
3804 {
3805 struct nfs4_createdata *data;
3806 int status = -ENAMETOOLONG;
3807
3808 if (len > NFS4_MAXPATHLEN)
3809 goto out;
3810
3811 status = -ENOMEM;
3812 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
3813 if (data == NULL)
3814 goto out;
3815
3816 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
3817 data->arg.u.symlink.pages = &page;
3818 data->arg.u.symlink.len = len;
3819 data->arg.label = label;
3820
3821 status = nfs4_do_create(dir, dentry, data);
3822
3823 nfs4_free_createdata(data);
3824 out:
3825 return status;
3826 }
3827
3828 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3829 struct page *page, unsigned int len, struct iattr *sattr)
3830 {
3831 struct nfs4_exception exception = { };
3832 struct nfs4_label l, *label = NULL;
3833 int err;
3834
3835 label = nfs4_label_init_security(dir, dentry, sattr, &l);
3836
3837 do {
3838 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label);
3839 trace_nfs4_symlink(dir, &dentry->d_name, err);
3840 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3841 &exception);
3842 } while (exception.retry);
3843
3844 nfs4_label_release_security(label);
3845 return err;
3846 }
3847
3848 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3849 struct iattr *sattr, struct nfs4_label *label)
3850 {
3851 struct nfs4_createdata *data;
3852 int status = -ENOMEM;
3853
3854 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
3855 if (data == NULL)
3856 goto out;
3857
3858 data->arg.label = label;
3859 status = nfs4_do_create(dir, dentry, data);
3860
3861 nfs4_free_createdata(data);
3862 out:
3863 return status;
3864 }
3865
3866 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3867 struct iattr *sattr)
3868 {
3869 struct nfs4_exception exception = { };
3870 struct nfs4_label l, *label = NULL;
3871 int err;
3872
3873 label = nfs4_label_init_security(dir, dentry, sattr, &l);
3874
3875 sattr->ia_mode &= ~current_umask();
3876 do {
3877 err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
3878 trace_nfs4_mkdir(dir, &dentry->d_name, err);
3879 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3880 &exception);
3881 } while (exception.retry);
3882 nfs4_label_release_security(label);
3883
3884 return err;
3885 }
3886
3887 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3888 u64 cookie, struct page **pages, unsigned int count, int plus)
3889 {
3890 struct inode *dir = d_inode(dentry);
3891 struct nfs4_readdir_arg args = {
3892 .fh = NFS_FH(dir),
3893 .pages = pages,
3894 .pgbase = 0,
3895 .count = count,
3896 .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask,
3897 .plus = plus,
3898 };
3899 struct nfs4_readdir_res res;
3900 struct rpc_message msg = {
3901 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
3902 .rpc_argp = &args,
3903 .rpc_resp = &res,
3904 .rpc_cred = cred,
3905 };
3906 int status;
3907
3908 dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__,
3909 dentry,
3910 (unsigned long long)cookie);
3911 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
3912 res.pgbase = args.pgbase;
3913 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
3914 if (status >= 0) {
3915 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
3916 status += args.pgbase;
3917 }
3918
3919 nfs_invalidate_atime(dir);
3920
3921 dprintk("%s: returns %d\n", __func__, status);
3922 return status;
3923 }
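/*
 * On success the server's cookie verifier is cached in the directory's
 * nfs_inode so later READDIR calls can resume from the same cookie, and
 * the positive return value is the length of the entry data placed in
 * the pages, adjusted by args.pgbase for the page offset.
 */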
3924
3925 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3926 u64 cookie, struct page **pages, unsigned int count, int plus)
3927 {
3928 struct nfs4_exception exception = { };
3929 int err;
3930 do {
3931 err = _nfs4_proc_readdir(dentry, cred, cookie,
3932 pages, count, plus);
3933 trace_nfs4_readdir(d_inode(dentry), err);
3934 err = nfs4_handle_exception(NFS_SERVER(d_inode(dentry)), err,
3935 &exception);
3936 } while (exception.retry);
3937 return err;
3938 }
3939
3940 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3941 struct iattr *sattr, struct nfs4_label *label, dev_t rdev)
3942 {
3943 struct nfs4_createdata *data;
3944 int mode = sattr->ia_mode;
3945 int status = -ENOMEM;
3946
3947 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
3948 if (data == NULL)
3949 goto out;
3950
3951 if (S_ISFIFO(mode))
3952 data->arg.ftype = NF4FIFO;
3953 else if (S_ISBLK(mode)) {
3954 data->arg.ftype = NF4BLK;
3955 data->arg.u.device.specdata1 = MAJOR(rdev);
3956 data->arg.u.device.specdata2 = MINOR(rdev);
3957 }
3958 else if (S_ISCHR(mode)) {
3959 data->arg.ftype = NF4CHR;
3960 data->arg.u.device.specdata1 = MAJOR(rdev);
3961 data->arg.u.device.specdata2 = MINOR(rdev);
3962 } else if (!S_ISSOCK(mode)) {
3963 status = -EINVAL;
3964 goto out_free;
3965 }
3966
3967 data->arg.label = label;
3968 status = nfs4_do_create(dir, dentry, data);
3969 out_free:
3970 nfs4_free_createdata(data);
3971 out:
3972 return status;
3973 }
3974
3975 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3976 struct iattr *sattr, dev_t rdev)
3977 {
3978 struct nfs4_exception exception = { };
3979 struct nfs4_label l, *label = NULL;
3980 int err;
3981
3982 label = nfs4_label_init_security(dir, dentry, sattr, &l);
3983
3984 sattr->ia_mode &= ~current_umask();
3985 do {
3986 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
3987 trace_nfs4_mknod(dir, &dentry->d_name, err);
3988 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3989 &exception);
3990 } while (exception.retry);
3991
3992 nfs4_label_release_security(label);
3993
3994 return err;
3995 }
3996
3997 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
3998 struct nfs_fsstat *fsstat)
3999 {
4000 struct nfs4_statfs_arg args = {
4001 .fh = fhandle,
4002 .bitmask = server->attr_bitmask,
4003 };
4004 struct nfs4_statfs_res res = {
4005 .fsstat = fsstat,
4006 };
4007 struct rpc_message msg = {
4008 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
4009 .rpc_argp = &args,
4010 .rpc_resp = &res,
4011 };
4012
4013 nfs_fattr_init(fsstat->fattr);
4014 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4015 }
4016
4017 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
4018 {
4019 struct nfs4_exception exception = { };
4020 int err;
4021 do {
4022 err = nfs4_handle_exception(server,
4023 _nfs4_proc_statfs(server, fhandle, fsstat),
4024 &exception);
4025 } while (exception.retry);
4026 return err;
4027 }
4028
4029 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
4030 struct nfs_fsinfo *fsinfo)
4031 {
4032 struct nfs4_fsinfo_arg args = {
4033 .fh = fhandle,
4034 .bitmask = server->attr_bitmask,
4035 };
4036 struct nfs4_fsinfo_res res = {
4037 .fsinfo = fsinfo,
4038 };
4039 struct rpc_message msg = {
4040 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
4041 .rpc_argp = &args,
4042 .rpc_resp = &res,
4043 };
4044
4045 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4046 }
4047
4048 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4049 {
4050 struct nfs4_exception exception = { };
4051 unsigned long now = jiffies;
4052 int err;
4053
4054 do {
4055 err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
4056 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
4057 if (err == 0) {
4058 struct nfs_client *clp = server->nfs_client;
4059
4060 spin_lock(&clp->cl_lock);
4061 clp->cl_lease_time = fsinfo->lease_time * HZ;
4062 clp->cl_last_renewal = now;
4063 spin_unlock(&clp->cl_lock);
4064 break;
4065 }
4066 err = nfs4_handle_exception(server, err, &exception);
4067 } while (exception.retry);
4068 return err;
4069 }
4070
4071 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4072 {
4073 int error;
4074
4075 nfs_fattr_init(fsinfo->fattr);
4076 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
4077 if (error == 0) {
4078 /* block layout checks this! */
4079 server->pnfs_blksize = fsinfo->blksize;
4080 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype);
4081 }
4082
4083 return error;
4084 }
4085
4086 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4087 struct nfs_pathconf *pathconf)
4088 {
4089 struct nfs4_pathconf_arg args = {
4090 .fh = fhandle,
4091 .bitmask = server->attr_bitmask,
4092 };
4093 struct nfs4_pathconf_res res = {
4094 .pathconf = pathconf,
4095 };
4096 struct rpc_message msg = {
4097 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
4098 .rpc_argp = &args,
4099 .rpc_resp = &res,
4100 };
4101
4102 /* None of the pathconf attributes are mandatory to implement */
4103 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
4104 memset(pathconf, 0, sizeof(*pathconf));
4105 return 0;
4106 }
4107
4108 nfs_fattr_init(pathconf->fattr);
4109 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4110 }
4111
4112 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4113 struct nfs_pathconf *pathconf)
4114 {
4115 struct nfs4_exception exception = { };
4116 int err;
4117
4118 do {
4119 err = nfs4_handle_exception(server,
4120 _nfs4_proc_pathconf(server, fhandle, pathconf),
4121 &exception);
4122 } while (exception.retry);
4123 return err;
4124 }
4125
4126 int nfs4_set_rw_stateid(nfs4_stateid *stateid,
4127 const struct nfs_open_context *ctx,
4128 const struct nfs_lock_context *l_ctx,
4129 fmode_t fmode)
4130 {
4131 const struct nfs_lockowner *lockowner = NULL;
4132
4133 if (l_ctx != NULL)
4134 lockowner = &l_ctx->lockowner;
4135 return nfs4_select_rw_stateid(stateid, ctx->state, fmode, lockowner);
4136 }
4137 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
4138
4139 static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
4140 const struct nfs_open_context *ctx,
4141 const struct nfs_lock_context *l_ctx,
4142 fmode_t fmode)
4143 {
4144 nfs4_stateid current_stateid;
4145
4146 /* If the current stateid represents a lost lock, then exit */
4147 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
4148 return true;
4149 return nfs4_stateid_match(stateid, &current_stateid);
4150 }
4151
4152 static bool nfs4_error_stateid_expired(int err)
4153 {
4154 switch (err) {
4155 case -NFS4ERR_DELEG_REVOKED:
4156 case -NFS4ERR_ADMIN_REVOKED:
4157 case -NFS4ERR_BAD_STATEID:
4158 case -NFS4ERR_STALE_STATEID:
4159 case -NFS4ERR_OLD_STATEID:
4160 case -NFS4ERR_OPENMODE:
4161 case -NFS4ERR_EXPIRED:
4162 return true;
4163 }
4164 return false;
4165 }
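/*
 * nfs4_error_stateid_expired() flags the errors that mean the stateid
 * used for a READ or WRITE is no longer valid on the server.  The
 * nfs4_read_stateid_changed()/nfs4_write_stateid_changed() helpers
 * below rely on it: if the I/O failed with one of these errors and the
 * open/lock context has since produced a newer stateid, the RPC is
 * restarted with the current stateid instead of failing outright.
 */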
4166
4167 void __nfs4_read_done_cb(struct nfs_pgio_header *hdr)
4168 {
4169 nfs_invalidate_atime(hdr->inode);
4170 }
4171
4172 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
4173 {
4174 struct nfs_server *server = NFS_SERVER(hdr->inode);
4175
4176 trace_nfs4_read(hdr, task->tk_status);
4177 if (nfs4_async_handle_error(task, server,
4178 hdr->args.context->state,
4179 NULL) == -EAGAIN) {
4180 rpc_restart_call_prepare(task);
4181 return -EAGAIN;
4182 }
4183
4184 __nfs4_read_done_cb(hdr);
4185 if (task->tk_status > 0)
4186 renew_lease(server, hdr->timestamp);
4187 return 0;
4188 }
4189
4190 static bool nfs4_read_stateid_changed(struct rpc_task *task,
4191 struct nfs_pgio_args *args)
4192 {
4193
4194 if (!nfs4_error_stateid_expired(task->tk_status) ||
4195 nfs4_stateid_is_current(&args->stateid,
4196 args->context,
4197 args->lock_context,
4198 FMODE_READ))
4199 return false;
4200 rpc_restart_call_prepare(task);
4201 return true;
4202 }
4203
4204 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4205 {
4206
4207 dprintk("--> %s\n", __func__);
4208
4209 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4210 return -EAGAIN;
4211 if (nfs4_read_stateid_changed(task, &hdr->args))
4212 return -EAGAIN;
4213 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4214 nfs4_read_done_cb(task, hdr);
4215 }
4216
4217 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
4218 struct rpc_message *msg)
4219 {
4220 hdr->timestamp = jiffies;
4221 hdr->pgio_done_cb = nfs4_read_done_cb;
4222 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
4223 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0);
4224 }
4225
4226 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
4227 struct nfs_pgio_header *hdr)
4228 {
4229 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode),
4230 &hdr->args.seq_args,
4231 &hdr->res.seq_res,
4232 task))
4233 return 0;
4234 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
4235 hdr->args.lock_context,
4236 hdr->rw_ops->rw_mode) == -EIO)
4237 return -EIO;
4238 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags)))
4239 return -EIO;
4240 return 0;
4241 }
4242
4243 static int nfs4_write_done_cb(struct rpc_task *task,
4244 struct nfs_pgio_header *hdr)
4245 {
4246 struct inode *inode = hdr->inode;
4247
4248 trace_nfs4_write(hdr, task->tk_status);
4249 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
4250 hdr->args.context->state,
4251 NULL) == -EAGAIN) {
4252 rpc_restart_call_prepare(task);
4253 return -EAGAIN;
4254 }
4255 if (task->tk_status >= 0) {
4256 renew_lease(NFS_SERVER(inode), hdr->timestamp);
4257 nfs_writeback_update_inode(hdr);
4258 }
4259 return 0;
4260 }
4261
4262 static bool nfs4_write_stateid_changed(struct rpc_task *task,
4263 struct nfs_pgio_args *args)
4264 {
4265
4266 if (!nfs4_error_stateid_expired(task->tk_status) ||
4267 nfs4_stateid_is_current(&args->stateid,
4268 args->context,
4269 args->lock_context,
4270 FMODE_WRITE))
4271 return false;
4272 rpc_restart_call_prepare(task);
4273 return true;
4274 }
4275
4276 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4277 {
4278 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4279 return -EAGAIN;
4280 if (nfs4_write_stateid_changed(task, &hdr->args))
4281 return -EAGAIN;
4282 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4283 nfs4_write_done_cb(task, hdr);
4284 }
4285
4286 static
4287 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
4288 {
4289 /* Don't request attributes for pNFS or O_DIRECT writes */
4290 if (hdr->ds_clp != NULL || hdr->dreq != NULL)
4291 return false;
4292 /* Otherwise, request attributes if and only if we don't hold
4293 * a delegation
4294 */
4295 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
4296 }
4297
4298 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
4299 struct rpc_message *msg)
4300 {
4301 struct nfs_server *server = NFS_SERVER(hdr->inode);
4302
4303 if (!nfs4_write_need_cache_consistency_data(hdr)) {
4304 hdr->args.bitmask = NULL;
4305 hdr->res.fattr = NULL;
4306 } else
4307 hdr->args.bitmask = server->cache_consistency_bitmask;
4308
4309 if (!hdr->pgio_done_cb)
4310 hdr->pgio_done_cb = nfs4_write_done_cb;
4311 hdr->res.server = server;
4312 hdr->timestamp = jiffies;
4313
4314 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
4315 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1);
4316 }
4317
4318 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
4319 {
4320 nfs4_setup_sequence(NFS_SERVER(data->inode),
4321 &data->args.seq_args,
4322 &data->res.seq_res,
4323 task);
4324 }
4325
4326 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
4327 {
4328 struct inode *inode = data->inode;
4329
4330 trace_nfs4_commit(data, task->tk_status);
4331 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
4332 NULL, NULL) == -EAGAIN) {
4333 rpc_restart_call_prepare(task);
4334 return -EAGAIN;
4335 }
4336 return 0;
4337 }
4338
4339 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
4340 {
4341 if (!nfs4_sequence_done(task, &data->res.seq_res))
4342 return -EAGAIN;
4343 return data->commit_done_cb(task, data);
4344 }
4345
4346 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
4347 {
4348 struct nfs_server *server = NFS_SERVER(data->inode);
4349
4350 if (data->commit_done_cb == NULL)
4351 data->commit_done_cb = nfs4_commit_done_cb;
4352 data->res.server = server;
4353 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
4354 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
4355 }
4356
4357 struct nfs4_renewdata {
4358 struct nfs_client *client;
4359 unsigned long timestamp;
4360 };
4361
4362 /*
4363 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
4364 * standalone procedure for queueing an asynchronous RENEW.
4365 */
4366 static void nfs4_renew_release(void *calldata)
4367 {
4368 struct nfs4_renewdata *data = calldata;
4369 struct nfs_client *clp = data->client;
4370
4371 if (atomic_read(&clp->cl_count) > 1)
4372 nfs4_schedule_state_renewal(clp);
4373 nfs_put_client(clp);
4374 kfree(data);
4375 }
4376
4377 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
4378 {
4379 struct nfs4_renewdata *data = calldata;
4380 struct nfs_client *clp = data->client;
4381 unsigned long timestamp = data->timestamp;
4382
4383 trace_nfs4_renew_async(clp, task->tk_status);
4384 switch (task->tk_status) {
4385 case 0:
4386 break;
4387 case -NFS4ERR_LEASE_MOVED:
4388 nfs4_schedule_lease_moved_recovery(clp);
4389 break;
4390 default:
4391 /* Unless we're shutting down, schedule state recovery! */
4392 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
4393 return;
4394 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
4395 nfs4_schedule_lease_recovery(clp);
4396 return;
4397 }
4398 nfs4_schedule_path_down_recovery(clp);
4399 }
4400 do_renew_lease(clp, timestamp);
4401 }
4402
4403 static const struct rpc_call_ops nfs4_renew_ops = {
4404 .rpc_call_done = nfs4_renew_done,
4405 .rpc_release = nfs4_renew_release,
4406 };
4407
4408 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
4409 {
4410 struct rpc_message msg = {
4411 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
4412 .rpc_argp = clp,
4413 .rpc_cred = cred,
4414 };
4415 struct nfs4_renewdata *data;
4416
4417 if (renew_flags == 0)
4418 return 0;
4419 if (!atomic_inc_not_zero(&clp->cl_count))
4420 return -EIO;
4421 data = kmalloc(sizeof(*data), GFP_NOFS);
4422 if (data == NULL)
4423 return -ENOMEM;
4424 data->client = clp;
4425 data->timestamp = jiffies;
4426 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
4427 &nfs4_renew_ops, data);
4428 }
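/*
 * The atomic_inc_not_zero() above takes a reference on the nfs_client
 * for the lifetime of the asynchronous RENEW; it is dropped again in
 * nfs4_renew_release() via nfs_put_client() once the RPC has completed,
 * after possibly scheduling the next renewal.
 */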
4429
4430 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
4431 {
4432 struct rpc_message msg = {
4433 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
4434 .rpc_argp = clp,
4435 .rpc_cred = cred,
4436 };
4437 unsigned long now = jiffies;
4438 int status;
4439
4440 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4441 if (status < 0)
4442 return status;
4443 do_renew_lease(clp, now);
4444 return 0;
4445 }
4446
4447 static inline int nfs4_server_supports_acls(struct nfs_server *server)
4448 {
4449 return server->caps & NFS_CAP_ACLS;
4450 }
4451
4452 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
4453 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
4454 * the stack.
4455 */
4456 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
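/*
 * For example, with the usual XATTR_SIZE_MAX of 64KB and 4KB pages this
 * works out to a 16-entry array of page pointers, which is small enough
 * to keep on the stack in the ACL get/set paths below.
 */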
4457
4458 static int buf_to_pages_noslab(const void *buf, size_t buflen,
4459 struct page **pages, unsigned int *pgbase)
4460 {
4461 struct page *newpage, **spages;
4462 int rc = 0;
4463 size_t len;
4464 spages = pages;
4465
4466 do {
4467 len = min_t(size_t, PAGE_SIZE, buflen);
4468 newpage = alloc_page(GFP_KERNEL);
4469
4470 if (newpage == NULL)
4471 goto unwind;
4472 memcpy(page_address(newpage), buf, len);
4473 buf += len;
4474 buflen -= len;
4475 *pages++ = newpage;
4476 rc++;
4477 } while (buflen != 0);
4478
4479 return rc;
4480
4481 unwind:
4482 for(; rc > 0; rc--)
4483 __free_page(spages[rc-1]);
4484 return -ENOMEM;
4485 }
4486
4487 struct nfs4_cached_acl {
4488 int cached;
4489 size_t len;
4490 char data[0];
4491 };
4492
4493 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
4494 {
4495 struct nfs_inode *nfsi = NFS_I(inode);
4496
4497 spin_lock(&inode->i_lock);
4498 kfree(nfsi->nfs4_acl);
4499 nfsi->nfs4_acl = acl;
4500 spin_unlock(&inode->i_lock);
4501 }
4502
4503 static void nfs4_zap_acl_attr(struct inode *inode)
4504 {
4505 nfs4_set_cached_acl(inode, NULL);
4506 }
4507
4508 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
4509 {
4510 struct nfs_inode *nfsi = NFS_I(inode);
4511 struct nfs4_cached_acl *acl;
4512 int ret = -ENOENT;
4513
4514 spin_lock(&inode->i_lock);
4515 acl = nfsi->nfs4_acl;
4516 if (acl == NULL)
4517 goto out;
4518 if (buf == NULL) /* user is just asking for length */
4519 goto out_len;
4520 if (acl->cached == 0)
4521 goto out;
4522 ret = -ERANGE; /* see getxattr(2) man page */
4523 if (acl->len > buflen)
4524 goto out;
4525 memcpy(buf, acl->data, acl->len);
4526 out_len:
4527 ret = acl->len;
4528 out:
4529 spin_unlock(&inode->i_lock);
4530 return ret;
4531 }
4532
4533 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
4534 {
4535 struct nfs4_cached_acl *acl;
4536 size_t buflen = sizeof(*acl) + acl_len;
4537
4538 if (buflen <= PAGE_SIZE) {
4539 acl = kmalloc(buflen, GFP_KERNEL);
4540 if (acl == NULL)
4541 goto out;
4542 acl->cached = 1;
4543 _copy_from_pages(acl->data, pages, pgbase, acl_len);
4544 } else {
4545 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
4546 if (acl == NULL)
4547 goto out;
4548 acl->cached = 0;
4549 }
4550 acl->len = acl_len;
4551 out:
4552 nfs4_set_cached_acl(inode, acl);
4553 }
4554
4555 /*
4556 * The getxattr API returns the required buffer length when called with a
4557 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
4558 * the required buf. On a NULL buf, we send a page of data to the server
4559 * guessing that the ACL request can be serviced by a page. If so, we cache
4560 * up to the page of ACL data, and the 2nd call to getxattr is serviced by
4561 * the cache. If not so, we throw away the page, and cache the required
4562 * length. The next getxattr call will then produce another round trip to
4563 * the server, this time with the input buf of the required size.
4564 */
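/*
 * A typical consumer therefore makes two getxattr() calls, e.g. from
 * user space, using the xattr name the NFS client exposes for NFSv4
 * ACLs:
 *
 *   len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *   buf = malloc(len);
 *   len = getxattr(path, "system.nfs4_acl", buf, len);
 *
 * The second call is usually satisfied from the cache filled in by the
 * speculative one-page fetch described above.
 */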
4565 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4566 {
4567 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
4568 struct nfs_getaclargs args = {
4569 .fh = NFS_FH(inode),
4570 .acl_pages = pages,
4571 .acl_len = buflen,
4572 };
4573 struct nfs_getaclres res = {
4574 .acl_len = buflen,
4575 };
4576 struct rpc_message msg = {
4577 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
4578 .rpc_argp = &args,
4579 .rpc_resp = &res,
4580 };
4581 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4582 int ret = -ENOMEM, i;
4583
4584 /* As long as we're doing a round trip to the server anyway,
4585 * let's be prepared for a page of acl data. */
4586 if (npages == 0)
4587 npages = 1;
4588 if (npages > ARRAY_SIZE(pages))
4589 return -ERANGE;
4590
4591 for (i = 0; i < npages; i++) {
4592 pages[i] = alloc_page(GFP_KERNEL);
4593 if (!pages[i])
4594 goto out_free;
4595 }
4596
4597 /* for decoding across pages */
4598 res.acl_scratch = alloc_page(GFP_KERNEL);
4599 if (!res.acl_scratch)
4600 goto out_free;
4601
4602 args.acl_len = npages * PAGE_SIZE;
4603 args.acl_pgbase = 0;
4604
4605 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
4606 __func__, buf, buflen, npages, args.acl_len);
4607 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
4608 &msg, &args.seq_args, &res.seq_res, 0);
4609 if (ret)
4610 goto out_free;
4611
4612 /* Handle the case where the passed-in buffer is too short */
4613 if (res.acl_flags & NFS4_ACL_TRUNC) {
4614 /* Did the user only issue a request for the acl length? */
4615 if (buf == NULL)
4616 goto out_ok;
4617 ret = -ERANGE;
4618 goto out_free;
4619 }
4620 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
4621 if (buf) {
4622 if (res.acl_len > buflen) {
4623 ret = -ERANGE;
4624 goto out_free;
4625 }
4626 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
4627 }
4628 out_ok:
4629 ret = res.acl_len;
4630 out_free:
4631 for (i = 0; i < npages; i++)
4632 if (pages[i])
4633 __free_page(pages[i]);
4634 if (res.acl_scratch)
4635 __free_page(res.acl_scratch);
4636 return ret;
4637 }
4638
4639 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4640 {
4641 struct nfs4_exception exception = { };
4642 ssize_t ret;
4643 do {
4644 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
4645 trace_nfs4_get_acl(inode, ret);
4646 if (ret >= 0)
4647 break;
4648 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
4649 } while (exception.retry);
4650 return ret;
4651 }
4652
4653 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
4654 {
4655 struct nfs_server *server = NFS_SERVER(inode);
4656 int ret;
4657
4658 if (!nfs4_server_supports_acls(server))
4659 return -EOPNOTSUPP;
4660 ret = nfs_revalidate_inode(server, inode);
4661 if (ret < 0)
4662 return ret;
4663 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
4664 nfs_zap_acl_cache(inode);
4665 ret = nfs4_read_cached_acl(inode, buf, buflen);
4666 if (ret != -ENOENT)
4667 /* -ENOENT is returned if there is no ACL or if there is an ACL
4668 * but no cached acl data, just the acl length */
4669 return ret;
4670 return nfs4_get_acl_uncached(inode, buf, buflen);
4671 }
4672
4673 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4674 {
4675 struct nfs_server *server = NFS_SERVER(inode);
4676 struct page *pages[NFS4ACL_MAXPAGES];
4677 struct nfs_setaclargs arg = {
4678 .fh = NFS_FH(inode),
4679 .acl_pages = pages,
4680 .acl_len = buflen,
4681 };
4682 struct nfs_setaclres res;
4683 struct rpc_message msg = {
4684 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
4685 .rpc_argp = &arg,
4686 .rpc_resp = &res,
4687 };
4688 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4689 int ret, i;
4690
4691 if (!nfs4_server_supports_acls(server))
4692 return -EOPNOTSUPP;
4693 if (npages > ARRAY_SIZE(pages))
4694 return -ERANGE;
4695 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
4696 if (i < 0)
4697 return i;
4698 nfs4_inode_return_delegation(inode);
4699 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4700
4701 /*
4702 * Free each page after tx, so the only ref left is
4703 * held by the network stack
4704 */
4705 for (; i > 0; i--)
4706 put_page(pages[i-1]);
4707
4708 /*
4709 * An ACL update can result in an inode attribute update,
4710 * so mark the attribute cache invalid.
4711 */
4712 spin_lock(&inode->i_lock);
4713 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
4714 spin_unlock(&inode->i_lock);
4715 nfs_access_zap_cache(inode);
4716 nfs_zap_acl_cache(inode);
4717 return ret;
4718 }
4719
4720 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4721 {
4722 struct nfs4_exception exception = { };
4723 int err;
4724 do {
4725 err = __nfs4_proc_set_acl(inode, buf, buflen);
4726 trace_nfs4_set_acl(inode, err);
4727 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4728 &exception);
4729 } while (exception.retry);
4730 return err;
4731 }
4732
4733 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
4734 static int _nfs4_get_security_label(struct inode *inode, void *buf,
4735 size_t buflen)
4736 {
4737 struct nfs_server *server = NFS_SERVER(inode);
4738 struct nfs_fattr fattr;
4739 struct nfs4_label label = {0, 0, buflen, buf};
4740
4741 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
4742 struct nfs4_getattr_arg arg = {
4743 .fh = NFS_FH(inode),
4744 .bitmask = bitmask,
4745 };
4746 struct nfs4_getattr_res res = {
4747 .fattr = &fattr,
4748 .label = &label,
4749 .server = server,
4750 };
4751 struct rpc_message msg = {
4752 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
4753 .rpc_argp = &arg,
4754 .rpc_resp = &res,
4755 };
4756 int ret;
4757
4758 nfs_fattr_init(&fattr);
4759
4760 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
4761 if (ret)
4762 return ret;
4763 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
4764 return -ENOENT;
4765 if (buflen < label.len)
4766 return -ERANGE;
4767 return 0;
4768 }
4769
4770 static int nfs4_get_security_label(struct inode *inode, void *buf,
4771 size_t buflen)
4772 {
4773 struct nfs4_exception exception = { };
4774 int err;
4775
4776 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
4777 return -EOPNOTSUPP;
4778
4779 do {
4780 err = _nfs4_get_security_label(inode, buf, buflen);
4781 trace_nfs4_get_security_label(inode, err);
4782 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4783 &exception);
4784 } while (exception.retry);
4785 return err;
4786 }
4787
4788 static int _nfs4_do_set_security_label(struct inode *inode,
4789 struct nfs4_label *ilabel,
4790 struct nfs_fattr *fattr,
4791 struct nfs4_label *olabel)
4792 {
4793
4794 struct iattr sattr = {0};
4795 struct nfs_server *server = NFS_SERVER(inode);
4796 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
4797 struct nfs_setattrargs arg = {
4798 .fh = NFS_FH(inode),
4799 .iap = &sattr,
4800 .server = server,
4801 .bitmask = bitmask,
4802 .label = ilabel,
4803 };
4804 struct nfs_setattrres res = {
4805 .fattr = fattr,
4806 .label = olabel,
4807 .server = server,
4808 };
4809 struct rpc_message msg = {
4810 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
4811 .rpc_argp = &arg,
4812 .rpc_resp = &res,
4813 };
4814 int status;
4815
4816 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
4817
4818 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4819 if (status)
4820 dprintk("%s failed: %d\n", __func__, status);
4821
4822 return status;
4823 }
4824
4825 static int nfs4_do_set_security_label(struct inode *inode,
4826 struct nfs4_label *ilabel,
4827 struct nfs_fattr *fattr,
4828 struct nfs4_label *olabel)
4829 {
4830 struct nfs4_exception exception = { };
4831 int err;
4832
4833 do {
4834 err = _nfs4_do_set_security_label(inode, ilabel,
4835 fattr, olabel);
4836 trace_nfs4_set_security_label(inode, err);
4837 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4838 &exception);
4839 } while (exception.retry);
4840 return err;
4841 }
4842
4843 static int
4844 nfs4_set_security_label(struct dentry *dentry, const void *buf, size_t buflen)
4845 {
4846 struct nfs4_label ilabel, *olabel = NULL;
4847 struct nfs_fattr fattr;
4848 struct rpc_cred *cred;
4849 struct inode *inode = d_inode(dentry);
4850 int status;
4851
4852 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
4853 return -EOPNOTSUPP;
4854
4855 nfs_fattr_init(&fattr);
4856
4857 ilabel.pi = 0;
4858 ilabel.lfs = 0;
4859 ilabel.label = (char *)buf;
4860 ilabel.len = buflen;
4861
4862 cred = rpc_lookup_cred();
4863 if (IS_ERR(cred))
4864 return PTR_ERR(cred);
4865
4866 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
4867 if (IS_ERR(olabel)) {
4868 status = PTR_ERR(olabel);
4869 goto out;
4870 }
4871
4872 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel);
4873 if (status == 0)
4874 nfs_setsecurity(inode, &fattr, olabel);
4875
4876 nfs4_label_free(olabel);
4877 out:
4878 put_rpccred(cred);
4879 return status;
4880 }
4881 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */
4882
4883
4884 static int
4885 nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
4886 struct nfs4_state *state, long *timeout)
4887 {
4888 struct nfs_client *clp = server->nfs_client;
4889
4890 if (task->tk_status >= 0)
4891 return 0;
4892 switch(task->tk_status) {
4893 case -NFS4ERR_DELEG_REVOKED:
4894 case -NFS4ERR_ADMIN_REVOKED:
4895 case -NFS4ERR_BAD_STATEID:
4896 case -NFS4ERR_OPENMODE:
4897 if (state == NULL)
4898 break;
4899 if (nfs4_schedule_stateid_recovery(server, state) < 0)
4900 goto recovery_failed;
4901 goto wait_on_recovery;
4902 case -NFS4ERR_EXPIRED:
4903 if (state != NULL) {
4904 if (nfs4_schedule_stateid_recovery(server, state) < 0)
4905 goto recovery_failed;
4906 }
4907 case -NFS4ERR_STALE_STATEID:
4908 case -NFS4ERR_STALE_CLIENTID:
4909 nfs4_schedule_lease_recovery(clp);
4910 goto wait_on_recovery;
4911 case -NFS4ERR_MOVED:
4912 if (nfs4_schedule_migration_recovery(server) < 0)
4913 goto recovery_failed;
4914 goto wait_on_recovery;
4915 case -NFS4ERR_LEASE_MOVED:
4916 nfs4_schedule_lease_moved_recovery(clp);
4917 goto wait_on_recovery;
4918 #if defined(CONFIG_NFS_V4_1)
4919 case -NFS4ERR_BADSESSION:
4920 case -NFS4ERR_BADSLOT:
4921 case -NFS4ERR_BAD_HIGH_SLOT:
4922 case -NFS4ERR_DEADSESSION:
4923 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
4924 case -NFS4ERR_SEQ_FALSE_RETRY:
4925 case -NFS4ERR_SEQ_MISORDERED:
4926 dprintk("%s ERROR %d, Reset session\n", __func__,
4927 task->tk_status);
4928 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
4929 goto wait_on_recovery;
4930 #endif /* CONFIG_NFS_V4_1 */
4931 case -NFS4ERR_DELAY:
4932 nfs_inc_server_stats(server, NFSIOS_DELAY);
4933 rpc_delay(task, nfs4_update_delay(timeout));
4934 goto restart_call;
4935 case -NFS4ERR_GRACE:
4936 rpc_delay(task, NFS4_POLL_RETRY_MAX);
4937 case -NFS4ERR_RETRY_UNCACHED_REP:
4938 case -NFS4ERR_OLD_STATEID:
4939 goto restart_call;
4940 }
4941 task->tk_status = nfs4_map_errors(task->tk_status);
4942 return 0;
4943 recovery_failed:
4944 task->tk_status = -EIO;
4945 return 0;
4946 wait_on_recovery:
4947 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
4948 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
4949 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
4950 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
4951 goto recovery_failed;
4952 restart_call:
4953 task->tk_status = 0;
4954 return -EAGAIN;
4955 }
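/*
 * Summary of the asynchronous error handling above: stateid errors kick
 * off recovery of the affected open/lock state, stale or moved client
 * IDs trigger lease or migration recovery (with the task sleeping on
 * the client's recovery waitqueue), NFS4ERR_DELAY/GRACE simply delay
 * and restart the call, and anything else is mapped to a POSIX errno
 * and returned to the caller.
 */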
4956
4957 static void nfs4_init_boot_verifier(const struct nfs_client *clp,
4958 nfs4_verifier *bootverf)
4959 {
4960 __be32 verf[2];
4961
4962 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
4963 /* An impossible timestamp guarantees this value
4964 * will never match a generated boot time. */
4965 verf[0] = 0;
4966 verf[1] = cpu_to_be32(NSEC_PER_SEC + 1);
4967 } else {
4968 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
4969 verf[0] = cpu_to_be32(nn->boot_time.tv_sec);
4970 verf[1] = cpu_to_be32(nn->boot_time.tv_nsec);
4971 }
4972 memcpy(bootverf->data, verf, sizeof(bootverf->data));
4973 }
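/*
 * Note that a valid tv_nsec is always strictly less than NSEC_PER_SEC,
 * so the { 0, NSEC_PER_SEC + 1 } verifier used while purging state can
 * never collide with a verifier derived from a real boot time.
 */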
4974
4975 static int
4976 nfs4_init_nonuniform_client_string(struct nfs_client *clp)
4977 {
4978 int result;
4979 size_t len;
4980 char *str;
4981 bool retried = false;
4982
4983 if (clp->cl_owner_id != NULL)
4984 return 0;
4985 retry:
4986 rcu_read_lock();
4987 len = 10 + strlen(clp->cl_ipaddr) + 1 +
4988 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
4989 1 +
4990 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)) +
4991 1;
4992 rcu_read_unlock();
4993
4994 if (len > NFS4_OPAQUE_LIMIT + 1)
4995 return -EINVAL;
4996
4997 /*
4998 * Since this string is allocated at mount time, and held until the
4999 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5000 * about a memory-reclaim deadlock.
5001 */
5002 str = kmalloc(len, GFP_KERNEL);
5003 if (!str)
5004 return -ENOMEM;
5005
5006 rcu_read_lock();
5007 result = scnprintf(str, len, "Linux NFSv4.0 %s/%s %s",
5008 clp->cl_ipaddr,
5009 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
5010 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO));
5011 rcu_read_unlock();
5012
5013 /* Did something change? */
5014 if (result >= len) {
5015 kfree(str);
5016 if (retried)
5017 return -EINVAL;
5018 retried = true;
5019 goto retry;
5020 }
5021 clp->cl_owner_id = str;
5022 return 0;
5023 }
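/*
 * The nonuniform client identifier built above has the form
 * "Linux NFSv4.0 <local ip>/<server address> <transport>".  If
 * scnprintf() reports truncation, the rpc_clnt's address strings must
 * have changed between the length calculation and the formatting, so
 * the whole computation is retried once before giving up.
 */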
5024
5025 static int
5026 nfs4_init_uniquifier_client_string(struct nfs_client *clp)
5027 {
5028 int result;
5029 size_t len;
5030 char *str;
5031
5032 len = 10 + 10 + 1 + 10 + 1 +
5033 strlen(nfs4_client_id_uniquifier) + 1 +
5034 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5035
5036 if (len > NFS4_OPAQUE_LIMIT + 1)
5037 return -EINVAL;
5038
5039 /*
5040 * Since this string is allocated at mount time, and held until the
5041 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5042 * about a memory-reclaim deadlock.
5043 */
5044 str = kmalloc(len, GFP_KERNEL);
5045 if (!str)
5046 return -ENOMEM;
5047
5048 result = scnprintf(str, len, "Linux NFSv%u.%u %s/%s",
5049 clp->rpc_ops->version, clp->cl_minorversion,
5050 nfs4_client_id_uniquifier,
5051 clp->cl_rpcclient->cl_nodename);
5052 if (result >= len) {
5053 kfree(str);
5054 return -EINVAL;
5055 }
5056 clp->cl_owner_id = str;
5057 return 0;
5058 }
5059
5060 static int
5061 nfs4_init_uniform_client_string(struct nfs_client *clp)
5062 {
5063 int result;
5064 size_t len;
5065 char *str;
5066
5067 if (clp->cl_owner_id != NULL)
5068 return 0;
5069
5070 if (nfs4_client_id_uniquifier[0] != '\0')
5071 return nfs4_init_uniquifier_client_string(clp);
5072
5073 len = 10 + 10 + 1 + 10 + 1 +
5074 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5075
5076 if (len > NFS4_OPAQUE_LIMIT + 1)
5077 return -EINVAL;
5078
5079 /*
5080 * Since this string is allocated at mount time, and held until the
5081 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5082 * about a memory-reclaim deadlock.
5083 */
5084 str = kmalloc(len, GFP_KERNEL);
5085 if (!str)
5086 return -ENOMEM;
5087
5088 result = scnprintf(str, len, "Linux NFSv%u.%u %s",
5089 clp->rpc_ops->version, clp->cl_minorversion,
5090 clp->cl_rpcclient->cl_nodename);
5091 if (result >= len) {
5092 kfree(str);
5093 return -EINVAL;
5094 }
5095 clp->cl_owner_id = str;
5096 return 0;
5097 }
5098
5099 /*
5100 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
5101 * services. Advertise one based on the address family of the
5102 * clientaddr.
5103 */
5104 static unsigned int
5105 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
5106 {
5107 if (strchr(clp->cl_ipaddr, ':') != NULL)
5108 return scnprintf(buf, len, "tcp6");
5109 else
5110 return scnprintf(buf, len, "tcp");
5111 }
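/*
 * The choice is made purely on the syntax of the mount's clientaddr: an
 * address containing ':' (e.g. "2001:db8::1") selects the "tcp6" netid,
 * anything else (e.g. "192.0.2.1") selects "tcp".
 */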
5112
5113 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
5114 {
5115 struct nfs4_setclientid *sc = calldata;
5116
5117 if (task->tk_status == 0)
5118 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
5119 }
5120
5121 static const struct rpc_call_ops nfs4_setclientid_ops = {
5122 .rpc_call_done = nfs4_setclientid_done,
5123 };
5124
5125 /**
5126 * nfs4_proc_setclientid - Negotiate client ID
5127 * @clp: state data structure
5128 * @program: RPC program for NFSv4 callback service
5129 * @port: IP port number for NFS4 callback service
5130 * @cred: RPC credential to use for this call
5131 * @res: where to place the result
5132 *
5133 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5134 */
5135 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
5136 unsigned short port, struct rpc_cred *cred,
5137 struct nfs4_setclientid_res *res)
5138 {
5139 nfs4_verifier sc_verifier;
5140 struct nfs4_setclientid setclientid = {
5141 .sc_verifier = &sc_verifier,
5142 .sc_prog = program,
5143 .sc_clnt = clp,
5144 };
5145 struct rpc_message msg = {
5146 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
5147 .rpc_argp = &setclientid,
5148 .rpc_resp = res,
5149 .rpc_cred = cred,
5150 };
5151 struct rpc_task *task;
5152 struct rpc_task_setup task_setup_data = {
5153 .rpc_client = clp->cl_rpcclient,
5154 .rpc_message = &msg,
5155 .callback_ops = &nfs4_setclientid_ops,
5156 .callback_data = &setclientid,
5157 .flags = RPC_TASK_TIMEOUT,
5158 };
5159 int status;
5160
5161 /* nfs_client_id4 */
5162 nfs4_init_boot_verifier(clp, &sc_verifier);
5163
5164 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
5165 status = nfs4_init_uniform_client_string(clp);
5166 else
5167 status = nfs4_init_nonuniform_client_string(clp);
5168
5169 if (status)
5170 goto out;
5171
5172 /* cb_client4 */
5173 setclientid.sc_netid_len =
5174 nfs4_init_callback_netid(clp,
5175 setclientid.sc_netid,
5176 sizeof(setclientid.sc_netid));
5177 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
5178 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
5179 clp->cl_ipaddr, port >> 8, port & 255);
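/*
 * This is the RPC "universal address" form: the textual IP address
 * followed by the port split into its high and low octets.  For
 * example, clientaddr 192.0.2.1 with callback port 40001 would be
 * advertised as "192.0.2.1.156.65".
 */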
5180
5181 dprintk("NFS call setclientid auth=%s, '%s'\n",
5182 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5183 clp->cl_owner_id);
5184 task = rpc_run_task(&task_setup_data);
5185 if (IS_ERR(task)) {
5186 status = PTR_ERR(task);
5187 goto out;
5188 }
5189 status = task->tk_status;
5190 if (setclientid.sc_cred) {
5191 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
5192 put_rpccred(setclientid.sc_cred);
5193 }
5194 rpc_put_task(task);
5195 out:
5196 trace_nfs4_setclientid(clp, status);
5197 dprintk("NFS reply setclientid: %d\n", status);
5198 return status;
5199 }
5200
5201 /**
5202 * nfs4_proc_setclientid_confirm - Confirm client ID
5203 * @clp: state data structure
5204 * @arg: result of a previous SETCLIENTID
5205 * @cred: RPC credential to use for this call
5206 *
5207 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5208 */
5209 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
5210 struct nfs4_setclientid_res *arg,
5211 struct rpc_cred *cred)
5212 {
5213 struct rpc_message msg = {
5214 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
5215 .rpc_argp = arg,
5216 .rpc_cred = cred,
5217 };
5218 int status;
5219
5220 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
5221 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5222 clp->cl_clientid);
5223 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5224 trace_nfs4_setclientid_confirm(clp, status);
5225 dprintk("NFS reply setclientid_confirm: %d\n", status);
5226 return status;
5227 }
5228
5229 struct nfs4_delegreturndata {
5230 struct nfs4_delegreturnargs args;
5231 struct nfs4_delegreturnres res;
5232 struct nfs_fh fh;
5233 nfs4_stateid stateid;
5234 unsigned long timestamp;
5235 struct nfs_fattr fattr;
5236 int rpc_status;
5237 struct inode *inode;
5238 bool roc;
5239 u32 roc_barrier;
5240 };
5241
5242 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
5243 {
5244 struct nfs4_delegreturndata *data = calldata;
5245
5246 if (!nfs4_sequence_done(task, &data->res.seq_res))
5247 return;
5248
5249 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
5250 switch (task->tk_status) {
5251 case 0:
5252 renew_lease(data->res.server, data->timestamp);
5253 case -NFS4ERR_ADMIN_REVOKED:
5254 case -NFS4ERR_DELEG_REVOKED:
5255 case -NFS4ERR_BAD_STATEID:
5256 case -NFS4ERR_OLD_STATEID:
5257 case -NFS4ERR_STALE_STATEID:
5258 case -NFS4ERR_EXPIRED:
5259 task->tk_status = 0;
5260 if (data->roc)
5261 pnfs_roc_set_barrier(data->inode, data->roc_barrier);
5262 break;
5263 default:
5264 if (nfs4_async_handle_error(task, data->res.server,
5265 NULL, NULL) == -EAGAIN) {
5266 rpc_restart_call_prepare(task);
5267 return;
5268 }
5269 }
5270 data->rpc_status = task->tk_status;
5271 }
5272
5273 static void nfs4_delegreturn_release(void *calldata)
5274 {
5275 struct nfs4_delegreturndata *data = calldata;
5276 struct inode *inode = data->inode;
5277
5278 if (inode) {
5279 if (data->roc)
5280 pnfs_roc_release(inode);
5281 nfs_iput_and_deactive(inode);
5282 }
5283 kfree(calldata);
5284 }
5285
5286 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
5287 {
5288 struct nfs4_delegreturndata *d_data;
5289
5290 d_data = (struct nfs4_delegreturndata *)data;
5291
5292 if (d_data->roc &&
5293 pnfs_roc_drain(d_data->inode, &d_data->roc_barrier, task))
5294 return;
5295
5296 nfs4_setup_sequence(d_data->res.server,
5297 &d_data->args.seq_args,
5298 &d_data->res.seq_res,
5299 task);
5300 }
5301
5302 static const struct rpc_call_ops nfs4_delegreturn_ops = {
5303 .rpc_call_prepare = nfs4_delegreturn_prepare,
5304 .rpc_call_done = nfs4_delegreturn_done,
5305 .rpc_release = nfs4_delegreturn_release,
5306 };
5307
5308 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5309 {
5310 struct nfs4_delegreturndata *data;
5311 struct nfs_server *server = NFS_SERVER(inode);
5312 struct rpc_task *task;
5313 struct rpc_message msg = {
5314 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
5315 .rpc_cred = cred,
5316 };
5317 struct rpc_task_setup task_setup_data = {
5318 .rpc_client = server->client,
5319 .rpc_message = &msg,
5320 .callback_ops = &nfs4_delegreturn_ops,
5321 .flags = RPC_TASK_ASYNC,
5322 };
5323 int status = 0;
5324
5325 data = kzalloc(sizeof(*data), GFP_NOFS);
5326 if (data == NULL)
5327 return -ENOMEM;
5328 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
5329 data->args.fhandle = &data->fh;
5330 data->args.stateid = &data->stateid;
5331 data->args.bitmask = server->cache_consistency_bitmask;
5332 nfs_copy_fh(&data->fh, NFS_FH(inode));
5333 nfs4_stateid_copy(&data->stateid, stateid);
5334 data->res.fattr = &data->fattr;
5335 data->res.server = server;
5336 nfs_fattr_init(data->res.fattr);
5337 data->timestamp = jiffies;
5338 data->rpc_status = 0;
5339 data->inode = nfs_igrab_and_active(inode);
5340 if (data->inode)
5341 data->roc = nfs4_roc(inode);
5342
5343 task_setup_data.callback_data = data;
5344 msg.rpc_argp = &data->args;
5345 msg.rpc_resp = &data->res;
5346 task = rpc_run_task(&task_setup_data);
5347 if (IS_ERR(task))
5348 return PTR_ERR(task);
5349 if (!issync)
5350 goto out;
5351 status = nfs4_wait_for_completion_rpc_task(task);
5352 if (status != 0)
5353 goto out;
5354 status = data->rpc_status;
5355 if (status == 0)
5356 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
5357 else
5358 nfs_refresh_inode(inode, &data->fattr);
5359 out:
5360 rpc_put_task(task);
5361 return status;
5362 }
5363
5364 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5365 {
5366 struct nfs_server *server = NFS_SERVER(inode);
5367 struct nfs4_exception exception = { };
5368 int err;
5369 do {
5370 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
5371 trace_nfs4_delegreturn(inode, err);
5372 switch (err) {
5373 case -NFS4ERR_STALE_STATEID:
5374 case -NFS4ERR_EXPIRED:
5375 case 0:
5376 return 0;
5377 }
5378 err = nfs4_handle_exception(server, err, &exception);
5379 } while (exception.retry);
5380 return err;
5381 }
5382
5383 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
5384 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
5385
5386 /*
5387 * Sleep, with exponential backoff, and retry the LOCK operation.
5388 */
5389 static unsigned long
5390 nfs4_set_lock_task_retry(unsigned long timeout)
5391 {
5392 freezable_schedule_timeout_killable_unsafe(timeout);
5393 timeout <<= 1;
5394 if (timeout > NFS4_LOCK_MAXTIMEOUT)
5395 return NFS4_LOCK_MAXTIMEOUT;
5396 return timeout;
5397 }
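/*
 * Each blocked LOCK attempt therefore sleeps for the current timeout
 * and then doubles it for the next round: starting from
 * NFS4_LOCK_MINTIMEOUT the waits are roughly 1s, 2s, 4s, 8s, 16s, and
 * from then on stay capped at NFS4_LOCK_MAXTIMEOUT (30s).
 */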
5398
5399 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5400 {
5401 struct inode *inode = state->inode;
5402 struct nfs_server *server = NFS_SERVER(inode);
5403 struct nfs_client *clp = server->nfs_client;
5404 struct nfs_lockt_args arg = {
5405 .fh = NFS_FH(inode),
5406 .fl = request,
5407 };
5408 struct nfs_lockt_res res = {
5409 .denied = request,
5410 };
5411 struct rpc_message msg = {
5412 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
5413 .rpc_argp = &arg,
5414 .rpc_resp = &res,
5415 .rpc_cred = state->owner->so_cred,
5416 };
5417 struct nfs4_lock_state *lsp;
5418 int status;
5419
5420 arg.lock_owner.clientid = clp->cl_clientid;
5421 status = nfs4_set_lock_state(state, request);
5422 if (status != 0)
5423 goto out;
5424 lsp = request->fl_u.nfs4_fl.owner;
5425 arg.lock_owner.id = lsp->ls_seqid.owner_id;
5426 arg.lock_owner.s_dev = server->s_dev;
5427 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
5428 switch (status) {
5429 case 0:
5430 request->fl_type = F_UNLCK;
5431 break;
5432 case -NFS4ERR_DENIED:
5433 status = 0;
5434 }
5435 request->fl_ops->fl_release_private(request);
5436 request->fl_ops = NULL;
5437 out:
5438 return status;
5439 }
5440
5441 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5442 {
5443 struct nfs4_exception exception = { };
5444 int err;
5445
5446 do {
5447 err = _nfs4_proc_getlk(state, cmd, request);
5448 trace_nfs4_get_lock(request, state, cmd, err);
5449 err = nfs4_handle_exception(NFS_SERVER(state->inode), err,
5450 &exception);
5451 } while (exception.retry);
5452 return err;
5453 }
5454
5455 static int do_vfs_lock(struct inode *inode, struct file_lock *fl)
5456 {
5457 int res = 0;
5458 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
5459 case FL_POSIX:
5460 res = posix_lock_inode_wait(inode, fl);
5461 break;
5462 case FL_FLOCK:
5463 res = flock_lock_inode_wait(inode, fl);
5464 break;
5465 default:
5466 BUG();
5467 }
5468 return res;
5469 }
5470
5471 struct nfs4_unlockdata {
5472 struct nfs_locku_args arg;
5473 struct nfs_locku_res res;
5474 struct nfs4_lock_state *lsp;
5475 struct nfs_open_context *ctx;
5476 struct file_lock fl;
5477 const struct nfs_server *server;
5478 unsigned long timestamp;
5479 };
5480
5481 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
5482 struct nfs_open_context *ctx,
5483 struct nfs4_lock_state *lsp,
5484 struct nfs_seqid *seqid)
5485 {
5486 struct nfs4_unlockdata *p;
5487 struct inode *inode = lsp->ls_state->inode;
5488
5489 p = kzalloc(sizeof(*p), GFP_NOFS);
5490 if (p == NULL)
5491 return NULL;
5492 p->arg.fh = NFS_FH(inode);
5493 p->arg.fl = &p->fl;
5494 p->arg.seqid = seqid;
5495 p->res.seqid = seqid;
5496 p->lsp = lsp;
5497 atomic_inc(&lsp->ls_count);
5498 /* Ensure we don't close the file until we're done freeing locks! */
5499 p->ctx = get_nfs_open_context(ctx);
5500 memcpy(&p->fl, fl, sizeof(p->fl));
5501 p->server = NFS_SERVER(inode);
5502 return p;
5503 }
5504
5505 static void nfs4_locku_release_calldata(void *data)
5506 {
5507 struct nfs4_unlockdata *calldata = data;
5508 nfs_free_seqid(calldata->arg.seqid);
5509 nfs4_put_lock_state(calldata->lsp);
5510 put_nfs_open_context(calldata->ctx);
5511 kfree(calldata);
5512 }
5513
5514 static void nfs4_locku_done(struct rpc_task *task, void *data)
5515 {
5516 struct nfs4_unlockdata *calldata = data;
5517
5518 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
5519 return;
5520 switch (task->tk_status) {
5521 case 0:
5522 renew_lease(calldata->server, calldata->timestamp);
5523 do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl);
5524 if (nfs4_update_lock_stateid(calldata->lsp,
5525 &calldata->res.stateid))
5526 break;
5527 case -NFS4ERR_BAD_STATEID:
5528 case -NFS4ERR_OLD_STATEID:
5529 case -NFS4ERR_STALE_STATEID:
5530 case -NFS4ERR_EXPIRED:
5531 if (!nfs4_stateid_match(&calldata->arg.stateid,
5532 &calldata->lsp->ls_stateid))
5533 rpc_restart_call_prepare(task);
5534 break;
5535 default:
5536 if (nfs4_async_handle_error(task, calldata->server,
5537 NULL, NULL) == -EAGAIN)
5538 rpc_restart_call_prepare(task);
5539 }
5540 nfs_release_seqid(calldata->arg.seqid);
5541 }
5542
5543 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
5544 {
5545 struct nfs4_unlockdata *calldata = data;
5546
5547 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
5548 goto out_wait;
5549 nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid);
5550 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
5551 /* Note: exit _without_ running nfs4_locku_done */
5552 goto out_no_action;
5553 }
5554 calldata->timestamp = jiffies;
5555 if (nfs4_setup_sequence(calldata->server,
5556 &calldata->arg.seq_args,
5557 &calldata->res.seq_res,
5558 task) != 0)
5559 nfs_release_seqid(calldata->arg.seqid);
5560 return;
5561 out_no_action:
5562 task->tk_action = NULL;
5563 out_wait:
5564 nfs4_sequence_done(task, &calldata->res.seq_res);
5565 }
5566
5567 static const struct rpc_call_ops nfs4_locku_ops = {
5568 .rpc_call_prepare = nfs4_locku_prepare,
5569 .rpc_call_done = nfs4_locku_done,
5570 .rpc_release = nfs4_locku_release_calldata,
5571 };
5572
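/*
 * Set up and run an asynchronous LOCKU request. The caller's file_lock is
 * forced to F_UNLCK, and ownership of @seqid passes to the RPC task; it is
 * freed here only if allocation of the calldata fails.
 */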
5573 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
5574 struct nfs_open_context *ctx,
5575 struct nfs4_lock_state *lsp,
5576 struct nfs_seqid *seqid)
5577 {
5578 struct nfs4_unlockdata *data;
5579 struct rpc_message msg = {
5580 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
5581 .rpc_cred = ctx->cred,
5582 };
5583 struct rpc_task_setup task_setup_data = {
5584 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
5585 .rpc_message = &msg,
5586 .callback_ops = &nfs4_locku_ops,
5587 .workqueue = nfsiod_workqueue,
5588 .flags = RPC_TASK_ASYNC,
5589 };
5590
5591 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
5592 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
5593
5594 /* Ensure this is an unlock - when canceling a lock, the
5595 * canceled lock is passed in, and it won't be an unlock.
5596 */
5597 fl->fl_type = F_UNLCK;
5598
5599 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
5600 if (data == NULL) {
5601 nfs_free_seqid(seqid);
5602 return ERR_PTR(-ENOMEM);
5603 }
5604
5605 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
5606 msg.rpc_argp = &data->arg;
5607 msg.rpc_resp = &data->res;
5608 task_setup_data.callback_data = data;
5609 return rpc_run_task(&task_setup_data);
5610 }
5611
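/*
 * Release a lock on behalf of the VFS. The local lock is always dropped
 * first, before the LOCKU RPC; locks that were never registered with the
 * server (e.g. cached under a delegation) are released locally only.
 */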
5612 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
5613 {
5614 struct inode *inode = state->inode;
5615 struct nfs4_state_owner *sp = state->owner;
5616 struct nfs_inode *nfsi = NFS_I(inode);
5617 struct nfs_seqid *seqid;
5618 struct nfs4_lock_state *lsp;
5619 struct rpc_task *task;
5620 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
5621 int status = 0;
5622 unsigned char fl_flags = request->fl_flags;
5623
5624 status = nfs4_set_lock_state(state, request);
5625 /* Unlock _before_ we do the RPC call */
5626 request->fl_flags |= FL_EXISTS;
5627 /* Exclude nfs_delegation_claim_locks() */
5628 mutex_lock(&sp->so_delegreturn_mutex);
5629 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
5630 down_read(&nfsi->rwsem);
5631 if (do_vfs_lock(inode, request) == -ENOENT) {
5632 up_read(&nfsi->rwsem);
5633 mutex_unlock(&sp->so_delegreturn_mutex);
5634 goto out;
5635 }
5636 up_read(&nfsi->rwsem);
5637 mutex_unlock(&sp->so_delegreturn_mutex);
5638 if (status != 0)
5639 goto out;
5640 /* Is this a delegated lock? */
5641 lsp = request->fl_u.nfs4_fl.owner;
5642 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
5643 goto out;
5644 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
5645 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
5646 status = -ENOMEM;
5647 if (IS_ERR(seqid))
5648 goto out;
5649 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
5650 status = PTR_ERR(task);
5651 if (IS_ERR(task))
5652 goto out;
5653 status = nfs4_wait_for_completion_rpc_task(task);
5654 rpc_put_task(task);
5655 out:
5656 request->fl_flags = fl_flags;
5657 trace_nfs4_unlock(request, state, F_SETLK, status);
5658 return status;
5659 }
5660
5661 struct nfs4_lockdata {
5662 struct nfs_lock_args arg;
5663 struct nfs_lock_res res;
5664 struct nfs4_lock_state *lsp;
5665 struct nfs_open_context *ctx;
5666 struct file_lock fl;
5667 unsigned long timestamp;
5668 int rpc_status;
5669 int cancelled;
5670 struct nfs_server *server;
5671 };
5672
5673 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
5674 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
5675 gfp_t gfp_mask)
5676 {
5677 struct nfs4_lockdata *p;
5678 struct inode *inode = lsp->ls_state->inode;
5679 struct nfs_server *server = NFS_SERVER(inode);
5680 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
5681
5682 p = kzalloc(sizeof(*p), gfp_mask);
5683 if (p == NULL)
5684 return NULL;
5685
5686 p->arg.fh = NFS_FH(inode);
5687 p->arg.fl = &p->fl;
5688 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
5689 if (IS_ERR(p->arg.open_seqid))
5690 goto out_free;
5691 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
5692 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
5693 if (IS_ERR(p->arg.lock_seqid))
5694 goto out_free_seqid;
5695 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
5696 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
5697 p->arg.lock_owner.s_dev = server->s_dev;
5698 p->res.lock_seqid = p->arg.lock_seqid;
5699 p->lsp = lsp;
5700 p->server = server;
5701 atomic_inc(&lsp->ls_count);
5702 p->ctx = get_nfs_open_context(ctx);
5703 get_file(fl->fl_file);
5704 memcpy(&p->fl, fl, sizeof(p->fl));
5705 return p;
5706 out_free_seqid:
5707 nfs_free_seqid(p->arg.open_seqid);
5708 out_free:
5709 kfree(p);
5710 return NULL;
5711 }
5712
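/*
 * Prepare a LOCK request: serialise on the lock-owner seqid, and if the
 * lock owner is not yet known to the server (the open_to_lock_owner case),
 * also serialise on the open-owner seqid and send the open stateid so the
 * server can establish the new lock owner.
 */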
5713 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
5714 {
5715 struct nfs4_lockdata *data = calldata;
5716 struct nfs4_state *state = data->lsp->ls_state;
5717
5718 dprintk("%s: begin!\n", __func__);
5719 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
5720 goto out_wait;
5721 /* Do we need to do an open_to_lock_owner? */
5722 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
5723 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
5724 goto out_release_lock_seqid;
5725 }
5726 nfs4_stateid_copy(&data->arg.open_stateid,
5727 &state->open_stateid);
5728 data->arg.new_lock_owner = 1;
5729 data->res.open_seqid = data->arg.open_seqid;
5730 } else {
5731 data->arg.new_lock_owner = 0;
5732 nfs4_stateid_copy(&data->arg.lock_stateid,
5733 &data->lsp->ls_stateid);
5734 }
5735 if (!nfs4_valid_open_stateid(state)) {
5736 data->rpc_status = -EBADF;
5737 task->tk_action = NULL;
5738 goto out_release_open_seqid;
5739 }
5740 data->timestamp = jiffies;
5741 if (nfs4_setup_sequence(data->server,
5742 &data->arg.seq_args,
5743 &data->res.seq_res,
5744 task) == 0)
5745 return;
5746 out_release_open_seqid:
5747 nfs_release_seqid(data->arg.open_seqid);
5748 out_release_lock_seqid:
5749 nfs_release_seqid(data->arg.lock_seqid);
5750 out_wait:
5751 nfs4_sequence_done(task, &data->res.seq_res);
5752 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
5753 }
5754
5755 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
5756 {
5757 struct nfs4_lockdata *data = calldata;
5758 struct nfs4_lock_state *lsp = data->lsp;
5759
5760 dprintk("%s: begin!\n", __func__);
5761
5762 if (!nfs4_sequence_done(task, &data->res.seq_res))
5763 return;
5764
5765 data->rpc_status = task->tk_status;
5766 switch (task->tk_status) {
5767 case 0:
5768 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
5769 data->timestamp);
5770 if (data->arg.new_lock) {
5771 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
5772 if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) {
5773 rpc_restart_call_prepare(task);
5774 break;
5775 }
5776 }
5777 if (data->arg.new_lock_owner != 0) {
5778 nfs_confirm_seqid(&lsp->ls_seqid, 0);
5779 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
5780 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
5781 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
5782 rpc_restart_call_prepare(task);
5783 break;
5784 case -NFS4ERR_BAD_STATEID:
5785 case -NFS4ERR_OLD_STATEID:
5786 case -NFS4ERR_STALE_STATEID:
5787 case -NFS4ERR_EXPIRED:
5788 if (data->arg.new_lock_owner != 0) {
5789 if (!nfs4_stateid_match(&data->arg.open_stateid,
5790 &lsp->ls_state->open_stateid))
5791 rpc_restart_call_prepare(task);
5792 } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
5793 &lsp->ls_stateid))
5794 rpc_restart_call_prepare(task);
5795 }
5796 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
5797 }
5798
5799 static void nfs4_lock_release(void *calldata)
5800 {
5801 struct nfs4_lockdata *data = calldata;
5802
5803 dprintk("%s: begin!\n", __func__);
5804 nfs_free_seqid(data->arg.open_seqid);
5805 if (data->cancelled != 0) {
5806 struct rpc_task *task;
5807 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
5808 data->arg.lock_seqid);
5809 if (!IS_ERR(task))
5810 rpc_put_task_async(task);
5811 dprintk("%s: cancelling lock!\n", __func__);
5812 } else
5813 nfs_free_seqid(data->arg.lock_seqid);
5814 nfs4_put_lock_state(data->lsp);
5815 put_nfs_open_context(data->ctx);
5816 fput(data->fl.fl_file);
5817 kfree(data);
5818 dprintk("%s: done!\n", __func__);
5819 }
5820
5821 static const struct rpc_call_ops nfs4_lock_ops = {
5822 .rpc_call_prepare = nfs4_lock_prepare,
5823 .rpc_call_done = nfs4_lock_done,
5824 .rpc_release = nfs4_lock_release,
5825 };
5826
5827 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
5828 {
5829 switch (error) {
5830 case -NFS4ERR_ADMIN_REVOKED:
5831 case -NFS4ERR_BAD_STATEID:
5832 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
5833 if (new_lock_owner != 0 ||
5834 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
5835 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
5836 break;
5837 case -NFS4ERR_STALE_STATEID:
5838 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
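		/* fall through */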
5839 case -NFS4ERR_EXPIRED:
5840 nfs4_schedule_lease_recovery(server->nfs_client);
5841 	}
5842 }
5843
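/*
 * Issue a LOCK request and wait for it to complete. If the wait is
 * interrupted, mark the request as cancelled so that nfs4_lock_release()
 * sends a matching LOCKU for any lock the server may have granted.
 */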
5844 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
5845 {
5846 struct nfs4_lockdata *data;
5847 struct rpc_task *task;
5848 struct rpc_message msg = {
5849 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
5850 .rpc_cred = state->owner->so_cred,
5851 };
5852 struct rpc_task_setup task_setup_data = {
5853 .rpc_client = NFS_CLIENT(state->inode),
5854 .rpc_message = &msg,
5855 .callback_ops = &nfs4_lock_ops,
5856 .workqueue = nfsiod_workqueue,
5857 .flags = RPC_TASK_ASYNC,
5858 };
5859 int ret;
5860
5861 dprintk("%s: begin!\n", __func__);
5862 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
5863 fl->fl_u.nfs4_fl.owner,
5864 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
5865 if (data == NULL)
5866 return -ENOMEM;
5867 if (IS_SETLKW(cmd))
5868 data->arg.block = 1;
5869 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
5870 msg.rpc_argp = &data->arg;
5871 msg.rpc_resp = &data->res;
5872 task_setup_data.callback_data = data;
5873 if (recovery_type > NFS_LOCK_NEW) {
5874 if (recovery_type == NFS_LOCK_RECLAIM)
5875 data->arg.reclaim = NFS_LOCK_RECLAIM;
5876 nfs4_set_sequence_privileged(&data->arg.seq_args);
5877 } else
5878 data->arg.new_lock = 1;
5879 task = rpc_run_task(&task_setup_data);
5880 if (IS_ERR(task))
5881 return PTR_ERR(task);
5882 ret = nfs4_wait_for_completion_rpc_task(task);
5883 if (ret == 0) {
5884 ret = data->rpc_status;
5885 if (ret)
5886 nfs4_handle_setlk_error(data->server, data->lsp,
5887 data->arg.new_lock_owner, ret);
5888 } else
5889 data->cancelled = 1;
5890 rpc_put_task(task);
5891 dprintk("%s: done, ret = %d!\n", __func__, ret);
5892 return ret;
5893 }
5894
5895 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
5896 {
5897 struct nfs_server *server = NFS_SERVER(state->inode);
5898 struct nfs4_exception exception = {
5899 .inode = state->inode,
5900 };
5901 int err;
5902
5903 do {
5904 /* Cache the lock if possible... */
5905 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
5906 return 0;
5907 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
5908 trace_nfs4_lock_reclaim(request, state, F_SETLK, err);
5909 if (err != -NFS4ERR_DELAY)
5910 break;
5911 nfs4_handle_exception(server, err, &exception);
5912 } while (exception.retry);
5913 return err;
5914 }
5915
5916 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
5917 {
5918 struct nfs_server *server = NFS_SERVER(state->inode);
5919 struct nfs4_exception exception = {
5920 .inode = state->inode,
5921 };
5922 int err;
5923
5924 err = nfs4_set_lock_state(state, request);
5925 if (err != 0)
5926 return err;
5927 if (!recover_lost_locks) {
5928 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
5929 return 0;
5930 }
5931 do {
5932 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
5933 return 0;
5934 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
5935 trace_nfs4_lock_expired(request, state, F_SETLK, err);
5936 switch (err) {
5937 default:
5938 goto out;
5939 case -NFS4ERR_GRACE:
5940 case -NFS4ERR_DELAY:
5941 nfs4_handle_exception(server, err, &exception);
5942 err = 0;
5943 }
5944 } while (exception.retry);
5945 out:
5946 return err;
5947 }
5948
5949 #if defined(CONFIG_NFS_V4_1)
5950 /**
5951 * nfs41_check_expired_locks - possibly free a lock stateid
5952 *
5953 * @state: NFSv4 state for an inode
5954 *
5955 * Returns NFS_OK if recovery for this stateid is now finished.
5956 * Otherwise a negative NFS4ERR value is returned.
5957 */
5958 static int nfs41_check_expired_locks(struct nfs4_state *state)
5959 {
5960 int status, ret = -NFS4ERR_BAD_STATEID;
5961 struct nfs4_lock_state *lsp;
5962 struct nfs_server *server = NFS_SERVER(state->inode);
5963
5964 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
5965 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
5966 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
5967
5968 status = nfs41_test_stateid(server,
5969 &lsp->ls_stateid,
5970 cred);
5971 trace_nfs4_test_lock_stateid(state, lsp, status);
5972 if (status != NFS_OK) {
5973 /* Free the stateid unless the server
5974 * informs us the stateid is unrecognized. */
5975 if (status != -NFS4ERR_BAD_STATEID)
5976 nfs41_free_stateid(server,
5977 &lsp->ls_stateid,
5978 cred);
5979 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
5980 ret = status;
5981 }
5982 }
5983 	}
5984
5985 return ret;
5986 }
5987
5988 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
5989 {
5990 int status = NFS_OK;
5991
5992 if (test_bit(LK_STATE_IN_USE, &state->flags))
5993 status = nfs41_check_expired_locks(state);
5994 if (status != NFS_OK)
5995 status = nfs4_lock_expired(state, request);
5996 return status;
5997 }
5998 #endif
5999
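/*
 * Set a lock on behalf of the VFS. The request is first tested locally
 * with FL_ACCESS; if we hold a delegation, the lock is then cached locally
 * without contacting the server, otherwise a LOCK request is sent.
 */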
6000 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6001 {
6002 struct nfs_inode *nfsi = NFS_I(state->inode);
6003 unsigned char fl_flags = request->fl_flags;
6004 int status = -ENOLCK;
6005
6006 if ((fl_flags & FL_POSIX) &&
6007 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
6008 goto out;
6009 /* Is this a delegated open? */
6010 status = nfs4_set_lock_state(state, request);
6011 if (status != 0)
6012 goto out;
6013 request->fl_flags |= FL_ACCESS;
6014 status = do_vfs_lock(state->inode, request);
6015 if (status < 0)
6016 goto out;
6017 down_read(&nfsi->rwsem);
6018 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
6019 /* Yes: cache locks! */
6020 /* ...but avoid races with delegation recall... */
6021 request->fl_flags = fl_flags & ~FL_SLEEP;
6022 status = do_vfs_lock(state->inode, request);
6023 up_read(&nfsi->rwsem);
6024 goto out;
6025 }
6026 up_read(&nfsi->rwsem);
6027 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
6028 out:
6029 request->fl_flags = fl_flags;
6030 return status;
6031 }
6032
6033 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6034 {
6035 struct nfs4_exception exception = {
6036 .state = state,
6037 .inode = state->inode,
6038 };
6039 int err;
6040
6041 do {
6042 err = _nfs4_proc_setlk(state, cmd, request);
6043 trace_nfs4_set_lock(request, state, cmd, err);
6044 if (err == -NFS4ERR_DENIED)
6045 err = -EAGAIN;
6046 err = nfs4_handle_exception(NFS_SERVER(state->inode),
6047 err, &exception);
6048 } while (exception.retry);
6049 return err;
6050 }
6051
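/*
 * Entry point for the lock() file operation. Validates the request and
 * dispatches to GETLK, unlock or setlk handling; blocking requests
 * (F_SETLKW) are retried with an increasing back-off until they succeed,
 * fail, or the process is signalled.
 */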
6052 static int
6053 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
6054 {
6055 struct nfs_open_context *ctx;
6056 struct nfs4_state *state;
6057 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
6058 int status;
6059
6060 /* verify open state */
6061 ctx = nfs_file_open_context(filp);
6062 state = ctx->state;
6063
6064 if (request->fl_start < 0 || request->fl_end < 0)
6065 return -EINVAL;
6066
6067 if (IS_GETLK(cmd)) {
6068 if (state != NULL)
6069 return nfs4_proc_getlk(state, F_GETLK, request);
6070 return 0;
6071 }
6072
6073 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
6074 return -EINVAL;
6075
6076 if (request->fl_type == F_UNLCK) {
6077 if (state != NULL)
6078 return nfs4_proc_unlck(state, cmd, request);
6079 return 0;
6080 }
6081
6082 if (state == NULL)
6083 return -ENOLCK;
6084 /*
6085 * Don't rely on the VFS having checked the file open mode,
6086 * since it won't do this for flock() locks.
6087 */
6088 switch (request->fl_type) {
6089 case F_RDLCK:
6090 if (!(filp->f_mode & FMODE_READ))
6091 return -EBADF;
6092 break;
6093 case F_WRLCK:
6094 if (!(filp->f_mode & FMODE_WRITE))
6095 return -EBADF;
6096 }
6097
6098 do {
6099 status = nfs4_proc_setlk(state, cmd, request);
6100 if ((status != -EAGAIN) || IS_SETLK(cmd))
6101 break;
6102 timeout = nfs4_set_lock_task_retry(timeout);
6103 status = -ERESTARTSYS;
6104 if (signalled())
6105 break;
6106 } while(status < 0);
6107 return status;
6108 }
6109
6110 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
6111 {
6112 struct nfs_server *server = NFS_SERVER(state->inode);
6113 int err;
6114
6115 err = nfs4_set_lock_state(state, fl);
6116 if (err != 0)
6117 return err;
6118 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
6119 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
6120 }
6121
6122 struct nfs_release_lockowner_data {
6123 struct nfs4_lock_state *lsp;
6124 struct nfs_server *server;
6125 struct nfs_release_lockowner_args args;
6126 struct nfs_release_lockowner_res res;
6127 unsigned long timestamp;
6128 };
6129
6130 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
6131 {
6132 struct nfs_release_lockowner_data *data = calldata;
6133 struct nfs_server *server = data->server;
6134 nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
6135 &data->args.seq_args, &data->res.seq_res, task);
6136 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6137 data->timestamp = jiffies;
6138 }
6139
6140 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
6141 {
6142 struct nfs_release_lockowner_data *data = calldata;
6143 struct nfs_server *server = data->server;
6144
6145 nfs40_sequence_done(task, &data->res.seq_res);
6146
6147 switch (task->tk_status) {
6148 case 0:
6149 renew_lease(server, data->timestamp);
6150 break;
6151 case -NFS4ERR_STALE_CLIENTID:
6152 case -NFS4ERR_EXPIRED:
6153 nfs4_schedule_lease_recovery(server->nfs_client);
6154 break;
6155 case -NFS4ERR_LEASE_MOVED:
6156 case -NFS4ERR_DELAY:
6157 if (nfs4_async_handle_error(task, server,
6158 NULL, NULL) == -EAGAIN)
6159 rpc_restart_call_prepare(task);
6160 }
6161 }
6162
6163 static void nfs4_release_lockowner_release(void *calldata)
6164 {
6165 struct nfs_release_lockowner_data *data = calldata;
6166 nfs4_free_lock_state(data->server, data->lsp);
6167 kfree(calldata);
6168 }
6169
6170 static const struct rpc_call_ops nfs4_release_lockowner_ops = {
6171 .rpc_call_prepare = nfs4_release_lockowner_prepare,
6172 .rpc_call_done = nfs4_release_lockowner_done,
6173 .rpc_release = nfs4_release_lockowner_release,
6174 };
6175
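/*
 * RELEASE_LOCKOWNER is an NFSv4.0-only operation, so this is a no-op for
 * minor versions >= 1. The request is sent asynchronously and the lock
 * state is freed in the rpc_release callback once the call completes.
 */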
6176 static void
6177 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
6178 {
6179 struct nfs_release_lockowner_data *data;
6180 struct rpc_message msg = {
6181 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
6182 };
6183
6184 if (server->nfs_client->cl_mvops->minor_version != 0)
6185 return;
6186
6187 data = kmalloc(sizeof(*data), GFP_NOFS);
6188 if (!data)
6189 return;
6190 data->lsp = lsp;
6191 data->server = server;
6192 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6193 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
6194 data->args.lock_owner.s_dev = server->s_dev;
6195
6196 msg.rpc_argp = &data->args;
6197 msg.rpc_resp = &data->res;
6198 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
6199 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
6200 }
6201
6202 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
6203
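/*
 * The NFSv4 ACL is exposed to userspace as the "system.nfs4_acl" extended
 * attribute. For example (assuming the standard attr and nfs4-acl-tools
 * packages), the raw XDR-encoded ACL could be read with something like:
 *
 *	getfattr -n system.nfs4_acl -e hex /mnt/nfs/file
 *
 * or manipulated more conveniently with nfs4_getfacl(1)/nfs4_setfacl(1).
 */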
6204 static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
6205 const void *buf, size_t buflen,
6206 int flags, int type)
6207 {
6208 if (strcmp(key, "") != 0)
6209 return -EINVAL;
6210
6211 return nfs4_proc_set_acl(d_inode(dentry), buf, buflen);
6212 }
6213
6214 static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
6215 void *buf, size_t buflen, int type)
6216 {
6217 if (strcmp(key, "") != 0)
6218 return -EINVAL;
6219
6220 return nfs4_proc_get_acl(d_inode(dentry), buf, buflen);
6221 }
6222
6223 static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
6224 size_t list_len, const char *name,
6225 size_t name_len, int type)
6226 {
6227 size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
6228
6229 if (!nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry))))
6230 return 0;
6231
6232 if (list && len <= list_len)
6233 memcpy(list, XATTR_NAME_NFSV4_ACL, len);
6234 return len;
6235 }
6236
6237 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
6238 static inline int nfs4_server_supports_labels(struct nfs_server *server)
6239 {
6240 return server->caps & NFS_CAP_SECURITY_LABEL;
6241 }
6242
6243 static int nfs4_xattr_set_nfs4_label(struct dentry *dentry, const char *key,
6244 const void *buf, size_t buflen,
6245 int flags, int type)
6246 {
6247 if (security_ismaclabel(key))
6248 return nfs4_set_security_label(dentry, buf, buflen);
6249
6250 return -EOPNOTSUPP;
6251 }
6252
6253 static int nfs4_xattr_get_nfs4_label(struct dentry *dentry, const char *key,
6254 void *buf, size_t buflen, int type)
6255 {
6256 if (security_ismaclabel(key))
6257 return nfs4_get_security_label(d_inode(dentry), buf, buflen);
6258 return -EOPNOTSUPP;
6259 }
6260
6261 static size_t nfs4_xattr_list_nfs4_label(struct dentry *dentry, char *list,
6262 size_t list_len, const char *name,
6263 size_t name_len, int type)
6264 {
6265 size_t len = 0;
6266
6267 if (nfs_server_capable(d_inode(dentry), NFS_CAP_SECURITY_LABEL)) {
6268 len = security_inode_listsecurity(d_inode(dentry), NULL, 0);
6269 if (list && len <= list_len)
6270 security_inode_listsecurity(d_inode(dentry), list, len);
6271 }
6272 return len;
6273 }
6274
6275 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = {
6276 .prefix = XATTR_SECURITY_PREFIX,
6277 .list = nfs4_xattr_list_nfs4_label,
6278 .get = nfs4_xattr_get_nfs4_label,
6279 .set = nfs4_xattr_set_nfs4_label,
6280 };
6281 #endif
6282
6283
6284 /*
6285 * nfs_fhget will use either the mounted_on_fileid or the fileid
6286 */
6287 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
6288 {
6289 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
6290 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
6291 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
6292 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
6293 return;
6294
6295 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
6296 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
6297 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
6298 fattr->nlink = 2;
6299 }
6300
6301 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6302 const struct qstr *name,
6303 struct nfs4_fs_locations *fs_locations,
6304 struct page *page)
6305 {
6306 struct nfs_server *server = NFS_SERVER(dir);
6307 u32 bitmask[3] = {
6308 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6309 };
6310 struct nfs4_fs_locations_arg args = {
6311 .dir_fh = NFS_FH(dir),
6312 .name = name,
6313 .page = page,
6314 .bitmask = bitmask,
6315 };
6316 struct nfs4_fs_locations_res res = {
6317 .fs_locations = fs_locations,
6318 };
6319 struct rpc_message msg = {
6320 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6321 .rpc_argp = &args,
6322 .rpc_resp = &res,
6323 };
6324 int status;
6325
6326 dprintk("%s: start\n", __func__);
6327
6328 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
6329 * is not supported */
6330 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
6331 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
6332 else
6333 bitmask[0] |= FATTR4_WORD0_FILEID;
6334
6335 nfs_fattr_init(&fs_locations->fattr);
6336 fs_locations->server = server;
6337 fs_locations->nlocations = 0;
6338 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
6339 dprintk("%s: returned status = %d\n", __func__, status);
6340 return status;
6341 }
6342
6343 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6344 const struct qstr *name,
6345 struct nfs4_fs_locations *fs_locations,
6346 struct page *page)
6347 {
6348 struct nfs4_exception exception = { };
6349 int err;
6350 do {
6351 err = _nfs4_proc_fs_locations(client, dir, name,
6352 fs_locations, page);
6353 trace_nfs4_get_fs_locations(dir, name, err);
6354 err = nfs4_handle_exception(NFS_SERVER(dir), err,
6355 &exception);
6356 } while (exception.retry);
6357 return err;
6358 }
6359
6360 /*
6361 * This operation also signals the server that this client is
6362 * performing migration recovery. The server can stop returning
6363 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is
6364 * appended to this compound to identify the client ID which is
6365 * performing recovery.
6366 */
6367 static int _nfs40_proc_get_locations(struct inode *inode,
6368 struct nfs4_fs_locations *locations,
6369 struct page *page, struct rpc_cred *cred)
6370 {
6371 struct nfs_server *server = NFS_SERVER(inode);
6372 struct rpc_clnt *clnt = server->client;
6373 u32 bitmask[2] = {
6374 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6375 };
6376 struct nfs4_fs_locations_arg args = {
6377 .clientid = server->nfs_client->cl_clientid,
6378 .fh = NFS_FH(inode),
6379 .page = page,
6380 .bitmask = bitmask,
6381 .migration = 1, /* skip LOOKUP */
6382 .renew = 1, /* append RENEW */
6383 };
6384 struct nfs4_fs_locations_res res = {
6385 .fs_locations = locations,
6386 .migration = 1,
6387 .renew = 1,
6388 };
6389 struct rpc_message msg = {
6390 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6391 .rpc_argp = &args,
6392 .rpc_resp = &res,
6393 .rpc_cred = cred,
6394 };
6395 unsigned long now = jiffies;
6396 int status;
6397
6398 nfs_fattr_init(&locations->fattr);
6399 locations->server = server;
6400 locations->nlocations = 0;
6401
6402 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6403 nfs4_set_sequence_privileged(&args.seq_args);
6404 status = nfs4_call_sync_sequence(clnt, server, &msg,
6405 &args.seq_args, &res.seq_res);
6406 if (status)
6407 return status;
6408
6409 renew_lease(server, now);
6410 return 0;
6411 }
6412
6413 #ifdef CONFIG_NFS_V4_1
6414
6415 /*
6416 * This operation also signals the server that this client is
6417 * performing migration recovery. The server can stop asserting
6418 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID
6419 * performing this operation is identified in the SEQUENCE
6420 * operation in this compound.
6421 *
6422 * When the client supports GETATTR(fs_locations_info), it can
6423 * be plumbed in here.
6424 */
6425 static int _nfs41_proc_get_locations(struct inode *inode,
6426 struct nfs4_fs_locations *locations,
6427 struct page *page, struct rpc_cred *cred)
6428 {
6429 struct nfs_server *server = NFS_SERVER(inode);
6430 struct rpc_clnt *clnt = server->client;
6431 u32 bitmask[2] = {
6432 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6433 };
6434 struct nfs4_fs_locations_arg args = {
6435 .fh = NFS_FH(inode),
6436 .page = page,
6437 .bitmask = bitmask,
6438 .migration = 1, /* skip LOOKUP */
6439 };
6440 struct nfs4_fs_locations_res res = {
6441 .fs_locations = locations,
6442 .migration = 1,
6443 };
6444 struct rpc_message msg = {
6445 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6446 .rpc_argp = &args,
6447 .rpc_resp = &res,
6448 .rpc_cred = cred,
6449 };
6450 int status;
6451
6452 nfs_fattr_init(&locations->fattr);
6453 locations->server = server;
6454 locations->nlocations = 0;
6455
6456 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6457 nfs4_set_sequence_privileged(&args.seq_args);
6458 status = nfs4_call_sync_sequence(clnt, server, &msg,
6459 &args.seq_args, &res.seq_res);
6460 if (status == NFS4_OK &&
6461 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
6462 status = -NFS4ERR_LEASE_MOVED;
6463 return status;
6464 }
6465
6466 #endif /* CONFIG_NFS_V4_1 */
6467
6468 /**
6469 * nfs4_proc_get_locations - discover locations for a migrated FSID
6470 * @inode: inode on FSID that is migrating
6471 * @locations: result of query
6472 * @page: buffer
6473 * @cred: credential to use for this operation
6474 *
6475 * Returns NFS4_OK on success, a negative NFS4ERR status code if the
6476 * operation failed, or a negative errno if a local error occurred.
6477 *
6478 * On success, "locations" is filled in, but if the server has
6479 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
6480 * asserted.
6481 *
6482 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases
6483 * from this client that require migration recovery.
6484 */
6485 int nfs4_proc_get_locations(struct inode *inode,
6486 struct nfs4_fs_locations *locations,
6487 struct page *page, struct rpc_cred *cred)
6488 {
6489 struct nfs_server *server = NFS_SERVER(inode);
6490 struct nfs_client *clp = server->nfs_client;
6491 const struct nfs4_mig_recovery_ops *ops =
6492 clp->cl_mvops->mig_recovery_ops;
6493 struct nfs4_exception exception = { };
6494 int status;
6495
6496 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
6497 (unsigned long long)server->fsid.major,
6498 (unsigned long long)server->fsid.minor,
6499 clp->cl_hostname);
6500 nfs_display_fhandle(NFS_FH(inode), __func__);
6501
6502 do {
6503 status = ops->get_locations(inode, locations, page, cred);
6504 if (status != -NFS4ERR_DELAY)
6505 break;
6506 nfs4_handle_exception(server, status, &exception);
6507 } while (exception.retry);
6508 return status;
6509 }
6510
6511 /*
6512 * This operation also signals the server that this client is
6513 * performing "lease moved" recovery. The server can stop
6514 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation
6515 * is appended to this compound to identify the client ID which is
6516 * performing recovery.
6517 */
6518 static int _nfs40_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6519 {
6520 struct nfs_server *server = NFS_SERVER(inode);
6521 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
6522 struct rpc_clnt *clnt = server->client;
6523 struct nfs4_fsid_present_arg args = {
6524 .fh = NFS_FH(inode),
6525 .clientid = clp->cl_clientid,
6526 .renew = 1, /* append RENEW */
6527 };
6528 struct nfs4_fsid_present_res res = {
6529 .renew = 1,
6530 };
6531 struct rpc_message msg = {
6532 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
6533 .rpc_argp = &args,
6534 .rpc_resp = &res,
6535 .rpc_cred = cred,
6536 };
6537 unsigned long now = jiffies;
6538 int status;
6539
6540 res.fh = nfs_alloc_fhandle();
6541 if (res.fh == NULL)
6542 return -ENOMEM;
6543
6544 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6545 nfs4_set_sequence_privileged(&args.seq_args);
6546 status = nfs4_call_sync_sequence(clnt, server, &msg,
6547 &args.seq_args, &res.seq_res);
6548 nfs_free_fhandle(res.fh);
6549 if (status)
6550 return status;
6551
6552 do_renew_lease(clp, now);
6553 return 0;
6554 }
6555
6556 #ifdef CONFIG_NFS_V4_1
6557
6558 /*
6559 * This operation also signals the server that this client is
6560 * performing "lease moved" recovery. The server can stop asserting
6561 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing
6562 * this operation is identified in the SEQUENCE operation in this
6563 * compound.
6564 */
6565 static int _nfs41_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6566 {
6567 struct nfs_server *server = NFS_SERVER(inode);
6568 struct rpc_clnt *clnt = server->client;
6569 struct nfs4_fsid_present_arg args = {
6570 .fh = NFS_FH(inode),
6571 };
6572 struct nfs4_fsid_present_res res = {
6573 };
6574 struct rpc_message msg = {
6575 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
6576 .rpc_argp = &args,
6577 .rpc_resp = &res,
6578 .rpc_cred = cred,
6579 };
6580 int status;
6581
6582 res.fh = nfs_alloc_fhandle();
6583 if (res.fh == NULL)
6584 return -ENOMEM;
6585
6586 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6587 nfs4_set_sequence_privileged(&args.seq_args);
6588 status = nfs4_call_sync_sequence(clnt, server, &msg,
6589 &args.seq_args, &res.seq_res);
6590 nfs_free_fhandle(res.fh);
6591 if (status == NFS4_OK &&
6592 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
6593 status = -NFS4ERR_LEASE_MOVED;
6594 return status;
6595 }
6596
6597 #endif /* CONFIG_NFS_V4_1 */
6598
6599 /**
6600 * nfs4_proc_fsid_present - Is this FSID present or absent on server?
6601 * @inode: inode on FSID to check
6602 * @cred: credential to use for this operation
6603 *
6604 * Server indicates whether the FSID is present, moved, or not
6605 * recognized. This operation is necessary to clear a LEASE_MOVED
6606 * condition for this client ID.
6607 *
6608 * Returns NFS4_OK if the FSID is present on this server,
6609 * -NFS4ERR_MOVED if the FSID is no longer present, a negative
6610 * NFS4ERR code if some error occurred on the server, or a
6611 * negative errno if a local failure occurred.
6612 */
6613 int nfs4_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6614 {
6615 struct nfs_server *server = NFS_SERVER(inode);
6616 struct nfs_client *clp = server->nfs_client;
6617 const struct nfs4_mig_recovery_ops *ops =
6618 clp->cl_mvops->mig_recovery_ops;
6619 struct nfs4_exception exception = { };
6620 int status;
6621
6622 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
6623 (unsigned long long)server->fsid.major,
6624 (unsigned long long)server->fsid.minor,
6625 clp->cl_hostname);
6626 nfs_display_fhandle(NFS_FH(inode), __func__);
6627
6628 do {
6629 status = ops->fsid_present(inode, cred);
6630 if (status != -NFS4ERR_DELAY)
6631 break;
6632 nfs4_handle_exception(server, status, &exception);
6633 } while (exception.retry);
6634 return status;
6635 }
6636
6637 /*
6638  * If 'use_integrity' is true and the state management nfs_client
6639 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
6640 * and the machine credential as per RFC3530bis and RFC5661 Security
6641 * Considerations sections. Otherwise, just use the user cred with the
6642 * filesystem's rpc_client.
6643 */
6644 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
6645 {
6646 int status;
6647 struct nfs4_secinfo_arg args = {
6648 .dir_fh = NFS_FH(dir),
6649 .name = name,
6650 };
6651 struct nfs4_secinfo_res res = {
6652 .flavors = flavors,
6653 };
6654 struct rpc_message msg = {
6655 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
6656 .rpc_argp = &args,
6657 .rpc_resp = &res,
6658 };
6659 struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
6660 struct rpc_cred *cred = NULL;
6661
6662 if (use_integrity) {
6663 clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient;
6664 cred = nfs4_get_clid_cred(NFS_SERVER(dir)->nfs_client);
6665 msg.rpc_cred = cred;
6666 }
6667
6668 dprintk("NFS call secinfo %s\n", name->name);
6669
6670 nfs4_state_protect(NFS_SERVER(dir)->nfs_client,
6671 NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
6672
6673 status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args,
6674 &res.seq_res, 0);
6675 dprintk("NFS reply secinfo: %d\n", status);
6676
6677 if (cred)
6678 put_rpccred(cred);
6679
6680 return status;
6681 }
6682
6683 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
6684 struct nfs4_secinfo_flavors *flavors)
6685 {
6686 struct nfs4_exception exception = { };
6687 int err;
6688 do {
6689 err = -NFS4ERR_WRONGSEC;
6690
6691 /* try to use integrity protection with machine cred */
6692 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client))
6693 err = _nfs4_proc_secinfo(dir, name, flavors, true);
6694
6695 /*
6696 		 * If unable to use integrity protection, or if SECINFO with
6697 		 * integrity protection returns NFS4ERR_WRONGSEC (which is
6698 		 * disallowed by the spec, but exists in deployed servers), use
6699 * the current filesystem's rpc_client and the user cred.
6700 */
6701 if (err == -NFS4ERR_WRONGSEC)
6702 err = _nfs4_proc_secinfo(dir, name, flavors, false);
6703
6704 trace_nfs4_secinfo(dir, name, err);
6705 err = nfs4_handle_exception(NFS_SERVER(dir), err,
6706 &exception);
6707 } while (exception.retry);
6708 return err;
6709 }
6710
6711 #ifdef CONFIG_NFS_V4_1
6712 /*
6713 * Check the exchange flags returned by the server for invalid flags, having
6714 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or
6715 * DS flags set.
6716 */
6717 static int nfs4_check_cl_exchange_flags(u32 flags)
6718 {
6719 if (flags & ~EXCHGID4_FLAG_MASK_R)
6720 goto out_inval;
6721 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
6722 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
6723 goto out_inval;
6724 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
6725 goto out_inval;
6726 return NFS_OK;
6727 out_inval:
6728 return -NFS4ERR_INVAL;
6729 }
6730
6731 static bool
6732 nfs41_same_server_scope(struct nfs41_server_scope *a,
6733 struct nfs41_server_scope *b)
6734 {
6735 if (a->server_scope_sz == b->server_scope_sz &&
6736 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
6737 return true;
6738
6739 return false;
6740 }
6741
6742 /*
6743 * nfs4_proc_bind_conn_to_session()
6744 *
6745 * The 4.1 client currently uses the same TCP connection for the
6746 * fore and backchannel.
6747 */
6748 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
6749 {
6750 int status;
6751 struct nfs41_bind_conn_to_session_args args = {
6752 .client = clp,
6753 .dir = NFS4_CDFC4_FORE_OR_BOTH,
6754 };
6755 struct nfs41_bind_conn_to_session_res res;
6756 struct rpc_message msg = {
6757 .rpc_proc =
6758 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
6759 .rpc_argp = &args,
6760 .rpc_resp = &res,
6761 .rpc_cred = cred,
6762 };
6763
6764 dprintk("--> %s\n", __func__);
6765
6766 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
6767 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
6768 args.dir = NFS4_CDFC4_FORE;
6769
6770 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
6771 trace_nfs4_bind_conn_to_session(clp, status);
6772 if (status == 0) {
6773 if (memcmp(res.sessionid.data,
6774 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
6775 dprintk("NFS: %s: Session ID mismatch\n", __func__);
6776 status = -EIO;
6777 goto out;
6778 }
6779 if ((res.dir & args.dir) != res.dir || res.dir == 0) {
6780 dprintk("NFS: %s: Unexpected direction from server\n",
6781 __func__);
6782 status = -EIO;
6783 goto out;
6784 }
6785 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
6786 dprintk("NFS: %s: Server returned RDMA mode = true\n",
6787 __func__);
6788 status = -EIO;
6789 goto out;
6790 }
6791 }
6792 out:
6793 dprintk("<-- %s status= %d\n", __func__, status);
6794 return status;
6795 }
6796
6797 /*
6798  * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map,
6799  * and operations we'd like to see to enable certain features in the allow map.
6800 */
6801 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = {
6802 .how = SP4_MACH_CRED,
6803 .enforce.u.words = {
6804 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
6805 1 << (OP_EXCHANGE_ID - 32) |
6806 1 << (OP_CREATE_SESSION - 32) |
6807 1 << (OP_DESTROY_SESSION - 32) |
6808 1 << (OP_DESTROY_CLIENTID - 32)
6809 },
6810 .allow.u.words = {
6811 [0] = 1 << (OP_CLOSE) |
6812 1 << (OP_LOCKU) |
6813 1 << (OP_COMMIT),
6814 [1] = 1 << (OP_SECINFO - 32) |
6815 1 << (OP_SECINFO_NO_NAME - 32) |
6816 1 << (OP_TEST_STATEID - 32) |
6817 1 << (OP_FREE_STATEID - 32) |
6818 1 << (OP_WRITE - 32)
6819 }
6820 };
6821
6822 /*
6823 * Select the state protection mode for client `clp' given the server results
6824 * from exchange_id in `sp'.
6825 *
6826 * Returns 0 on success, negative errno otherwise.
6827 */
6828 static int nfs4_sp4_select_mode(struct nfs_client *clp,
6829 struct nfs41_state_protection *sp)
6830 {
6831 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = {
6832 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
6833 1 << (OP_EXCHANGE_ID - 32) |
6834 1 << (OP_CREATE_SESSION - 32) |
6835 1 << (OP_DESTROY_SESSION - 32) |
6836 1 << (OP_DESTROY_CLIENTID - 32)
6837 };
6838 unsigned int i;
6839
6840 if (sp->how == SP4_MACH_CRED) {
6841 /* Print state protect result */
6842 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n");
6843 for (i = 0; i <= LAST_NFS4_OP; i++) {
6844 if (test_bit(i, sp->enforce.u.longs))
6845 dfprintk(MOUNT, " enforce op %d\n", i);
6846 if (test_bit(i, sp->allow.u.longs))
6847 dfprintk(MOUNT, " allow op %d\n", i);
6848 }
6849
6850 /* make sure nothing is on enforce list that isn't supported */
6851 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) {
6852 if (sp->enforce.u.words[i] & ~supported_enforce[i]) {
6853 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
6854 return -EINVAL;
6855 }
6856 }
6857
6858 /*
6859 * Minimal mode - state operations are allowed to use machine
6860 * credential. Note this already happens by default, so the
6861 * client doesn't have to do anything more than the negotiation.
6862 *
6863 * NOTE: we don't care if EXCHANGE_ID is in the list -
6864 * we're already using the machine cred for exchange_id
6865 * and will never use a different cred.
6866 */
6867 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) &&
6868 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) &&
6869 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) &&
6870 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) {
6871 dfprintk(MOUNT, "sp4_mach_cred:\n");
6872 dfprintk(MOUNT, " minimal mode enabled\n");
6873 set_bit(NFS_SP4_MACH_CRED_MINIMAL, &clp->cl_sp4_flags);
6874 } else {
6875 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
6876 return -EINVAL;
6877 }
6878
6879 if (test_bit(OP_CLOSE, sp->allow.u.longs) &&
6880 test_bit(OP_LOCKU, sp->allow.u.longs)) {
6881 dfprintk(MOUNT, " cleanup mode enabled\n");
6882 set_bit(NFS_SP4_MACH_CRED_CLEANUP, &clp->cl_sp4_flags);
6883 }
6884
6885 if (test_bit(OP_SECINFO, sp->allow.u.longs) &&
6886 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) {
6887 dfprintk(MOUNT, " secinfo mode enabled\n");
6888 set_bit(NFS_SP4_MACH_CRED_SECINFO, &clp->cl_sp4_flags);
6889 }
6890
6891 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) &&
6892 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) {
6893 dfprintk(MOUNT, " stateid mode enabled\n");
6894 set_bit(NFS_SP4_MACH_CRED_STATEID, &clp->cl_sp4_flags);
6895 }
6896
6897 if (test_bit(OP_WRITE, sp->allow.u.longs)) {
6898 dfprintk(MOUNT, " write mode enabled\n");
6899 set_bit(NFS_SP4_MACH_CRED_WRITE, &clp->cl_sp4_flags);
6900 }
6901
6902 if (test_bit(OP_COMMIT, sp->allow.u.longs)) {
6903 dfprintk(MOUNT, " commit mode enabled\n");
6904 set_bit(NFS_SP4_MACH_CRED_COMMIT, &clp->cl_sp4_flags);
6905 }
6906 }
6907
6908 return 0;
6909 }
6910
6911 /*
6912 * _nfs4_proc_exchange_id()
6913 *
6914 * Wrapper for EXCHANGE_ID operation.
6915 */
6916 static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
6917 u32 sp4_how)
6918 {
6919 nfs4_verifier verifier;
6920 struct nfs41_exchange_id_args args = {
6921 .verifier = &verifier,
6922 .client = clp,
6923 #ifdef CONFIG_NFS_V4_1_MIGRATION
6924 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
6925 EXCHGID4_FLAG_BIND_PRINC_STATEID |
6926 EXCHGID4_FLAG_SUPP_MOVED_MIGR,
6927 #else
6928 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
6929 EXCHGID4_FLAG_BIND_PRINC_STATEID,
6930 #endif
6931 };
6932 struct nfs41_exchange_id_res res = {
6933 0
6934 };
6935 int status;
6936 struct rpc_message msg = {
6937 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
6938 .rpc_argp = &args,
6939 .rpc_resp = &res,
6940 .rpc_cred = cred,
6941 };
6942
6943 nfs4_init_boot_verifier(clp, &verifier);
6944
6945 status = nfs4_init_uniform_client_string(clp);
6946 if (status)
6947 goto out;
6948
6949 dprintk("NFS call exchange_id auth=%s, '%s'\n",
6950 clp->cl_rpcclient->cl_auth->au_ops->au_name,
6951 clp->cl_owner_id);
6952
6953 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
6954 GFP_NOFS);
6955 if (unlikely(res.server_owner == NULL)) {
6956 status = -ENOMEM;
6957 goto out;
6958 }
6959
6960 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
6961 GFP_NOFS);
6962 if (unlikely(res.server_scope == NULL)) {
6963 status = -ENOMEM;
6964 goto out_server_owner;
6965 }
6966
6967 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
6968 if (unlikely(res.impl_id == NULL)) {
6969 status = -ENOMEM;
6970 goto out_server_scope;
6971 }
6972
6973 switch (sp4_how) {
6974 case SP4_NONE:
6975 args.state_protect.how = SP4_NONE;
6976 break;
6977
6978 case SP4_MACH_CRED:
6979 args.state_protect = nfs4_sp4_mach_cred_request;
6980 break;
6981
6982 default:
6983 /* unsupported! */
6984 WARN_ON_ONCE(1);
6985 status = -EINVAL;
6986 goto out_impl_id;
6987 }
6988
6989 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
6990 trace_nfs4_exchange_id(clp, status);
6991 if (status == 0)
6992 status = nfs4_check_cl_exchange_flags(res.flags);
6993
6994 if (status == 0)
6995 status = nfs4_sp4_select_mode(clp, &res.state_protect);
6996
6997 if (status == 0) {
6998 clp->cl_clientid = res.clientid;
6999 clp->cl_exchange_flags = res.flags;
7000 /* Client ID is not confirmed */
7001 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) {
7002 clear_bit(NFS4_SESSION_ESTABLISHED,
7003 &clp->cl_session->session_state);
7004 clp->cl_seqid = res.seqid;
7005 }
7006
7007 kfree(clp->cl_serverowner);
7008 clp->cl_serverowner = res.server_owner;
7009 res.server_owner = NULL;
7010
7011 /* use the most recent implementation id */
7012 kfree(clp->cl_implid);
7013 clp->cl_implid = res.impl_id;
7014 res.impl_id = NULL;
7015
7016 if (clp->cl_serverscope != NULL &&
7017 !nfs41_same_server_scope(clp->cl_serverscope,
7018 res.server_scope)) {
7019 dprintk("%s: server_scope mismatch detected\n",
7020 __func__);
7021 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
7022 kfree(clp->cl_serverscope);
7023 clp->cl_serverscope = NULL;
7024 }
7025
7026 if (clp->cl_serverscope == NULL) {
7027 clp->cl_serverscope = res.server_scope;
7028 res.server_scope = NULL;
7029 }
7030 }
7031
7032 out_impl_id:
7033 kfree(res.impl_id);
7034 out_server_scope:
7035 kfree(res.server_scope);
7036 out_server_owner:
7037 kfree(res.server_owner);
7038 out:
7039 if (clp->cl_implid != NULL)
7040 dprintk("NFS reply exchange_id: Server Implementation ID: "
7041 "domain: %s, name: %s, date: %llu,%u\n",
7042 clp->cl_implid->domain, clp->cl_implid->name,
7043 clp->cl_implid->date.seconds,
7044 clp->cl_implid->date.nseconds);
7045 dprintk("NFS reply exchange_id: %d\n", status);
7046 return status;
7047 }
7048
7049 /*
7050 * nfs4_proc_exchange_id()
7051 *
7052 * Returns zero, a negative errno, or a negative NFS4ERR status code.
7053 *
7054 * Since the clientid has expired, all compounds using sessions
7055 * associated with the stale clientid will be returning
7056 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
7057 * be in some phase of session reset.
7058 *
7059 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
7060 */
7061 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
7062 {
7063 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
7064 int status;
7065
7066 /* try SP4_MACH_CRED if krb5i/p */
7067 if (authflavor == RPC_AUTH_GSS_KRB5I ||
7068 authflavor == RPC_AUTH_GSS_KRB5P) {
7069 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
7070 if (!status)
7071 return 0;
7072 }
7073
7074 /* try SP4_NONE */
7075 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
7076 }
7077
7078 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
7079 struct rpc_cred *cred)
7080 {
7081 struct rpc_message msg = {
7082 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
7083 .rpc_argp = clp,
7084 .rpc_cred = cred,
7085 };
7086 int status;
7087
7088 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7089 trace_nfs4_destroy_clientid(clp, status);
7090 if (status)
7091 dprintk("NFS: Got error %d from the server %s on "
7092 "DESTROY_CLIENTID.", status, clp->cl_hostname);
7093 return status;
7094 }
7095
7096 static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
7097 struct rpc_cred *cred)
7098 {
7099 unsigned int loop;
7100 int ret;
7101
7102 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
7103 ret = _nfs4_proc_destroy_clientid(clp, cred);
7104 switch (ret) {
7105 case -NFS4ERR_DELAY:
7106 case -NFS4ERR_CLIENTID_BUSY:
7107 ssleep(1);
7108 break;
7109 default:
7110 return ret;
7111 }
7112 }
7113 return 0;
7114 }
7115
7116 int nfs4_destroy_clientid(struct nfs_client *clp)
7117 {
7118 struct rpc_cred *cred;
7119 int ret = 0;
7120
7121 if (clp->cl_mvops->minor_version < 1)
7122 goto out;
7123 if (clp->cl_exchange_flags == 0)
7124 goto out;
7125 if (clp->cl_preserve_clid)
7126 goto out;
7127 cred = nfs4_get_clid_cred(clp);
7128 ret = nfs4_proc_destroy_clientid(clp, cred);
7129 if (cred)
7130 put_rpccred(cred);
7131 switch (ret) {
7132 case 0:
7133 case -NFS4ERR_STALE_CLIENTID:
7134 clp->cl_exchange_flags = 0;
7135 }
7136 out:
7137 return ret;
7138 }
7139
7140 struct nfs4_get_lease_time_data {
7141 struct nfs4_get_lease_time_args *args;
7142 struct nfs4_get_lease_time_res *res;
7143 struct nfs_client *clp;
7144 };
7145
7146 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
7147 void *calldata)
7148 {
7149 struct nfs4_get_lease_time_data *data =
7150 (struct nfs4_get_lease_time_data *)calldata;
7151
7152 dprintk("--> %s\n", __func__);
7153 	/* just set up the sequence; do not trigger session recovery
7154 since we're invoked within one */
7155 nfs41_setup_sequence(data->clp->cl_session,
7156 &data->args->la_seq_args,
7157 &data->res->lr_seq_res,
7158 task);
7159 dprintk("<-- %s\n", __func__);
7160 }
7161
7162 /*
7163 * Called from nfs4_state_manager thread for session setup, so don't recover
7164 * from sequence operation or clientid errors.
7165 */
7166 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
7167 {
7168 struct nfs4_get_lease_time_data *data =
7169 (struct nfs4_get_lease_time_data *)calldata;
7170
7171 dprintk("--> %s\n", __func__);
7172 if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
7173 return;
7174 switch (task->tk_status) {
7175 case -NFS4ERR_DELAY:
7176 case -NFS4ERR_GRACE:
7177 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
7178 rpc_delay(task, NFS4_POLL_RETRY_MIN);
7179 task->tk_status = 0;
7180 /* fall through */
7181 case -NFS4ERR_RETRY_UNCACHED_REP:
7182 rpc_restart_call_prepare(task);
7183 return;
7184 }
7185 dprintk("<-- %s\n", __func__);
7186 }
7187
7188 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
7189 .rpc_call_prepare = nfs4_get_lease_time_prepare,
7190 .rpc_call_done = nfs4_get_lease_time_done,
7191 };
7192
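/*
 * Fetch the server's lease time via GET_LEASE_TIME. The sequence is marked
 * privileged because this runs from the state manager while the session is
 * still being set up, so it must not wait on normal session recovery (see
 * nfs4_get_lease_time_prepare/_done above).
 */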
7193 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
7194 {
7195 struct rpc_task *task;
7196 struct nfs4_get_lease_time_args args;
7197 struct nfs4_get_lease_time_res res = {
7198 .lr_fsinfo = fsinfo,
7199 };
7200 struct nfs4_get_lease_time_data data = {
7201 .args = &args,
7202 .res = &res,
7203 .clp = clp,
7204 };
7205 struct rpc_message msg = {
7206 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
7207 .rpc_argp = &args,
7208 .rpc_resp = &res,
7209 };
7210 struct rpc_task_setup task_setup = {
7211 .rpc_client = clp->cl_rpcclient,
7212 .rpc_message = &msg,
7213 .callback_ops = &nfs4_get_lease_time_ops,
7214 .callback_data = &data,
7215 .flags = RPC_TASK_TIMEOUT,
7216 };
7217 int status;
7218
7219 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
7220 nfs4_set_sequence_privileged(&args.la_seq_args);
7221 dprintk("--> %s\n", __func__);
7222 task = rpc_run_task(&task_setup);
7223
7224 if (IS_ERR(task))
7225 status = PTR_ERR(task);
7226 else {
7227 status = task->tk_status;
7228 rpc_put_task(task);
7229 }
7230 dprintk("<-- %s return %d\n", __func__, status);
7231
7232 return status;
7233 }
7234
7235 /*
7236  * Initialize the values to be used by the client in CREATE_SESSION.
7237 * If nfs4_init_session set the fore channel request and response sizes,
7238 * use them.
7239 *
7240 * Set the back channel max_resp_sz_cached to zero to force the client to
7241 * always set csa_cachethis to FALSE because the current implementation
7242 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
7243 */
7244 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
7245 {
7246 unsigned int max_rqst_sz, max_resp_sz;
7247
7248 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
7249 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
7250
7251 /* Fore channel attributes */
7252 args->fc_attrs.max_rqst_sz = max_rqst_sz;
7253 args->fc_attrs.max_resp_sz = max_resp_sz;
7254 args->fc_attrs.max_ops = NFS4_MAX_OPS;
7255 args->fc_attrs.max_reqs = max_session_slots;
7256
7257 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
7258 "max_ops=%u max_reqs=%u\n",
7259 __func__,
7260 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
7261 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
7262
7263 /* Back channel attributes */
7264 args->bc_attrs.max_rqst_sz = PAGE_SIZE;
7265 args->bc_attrs.max_resp_sz = PAGE_SIZE;
7266 args->bc_attrs.max_resp_sz_cached = 0;
7267 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
7268 args->bc_attrs.max_reqs = 1;
7269
7270 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
7271 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
7272 __func__,
7273 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
7274 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
7275 args->bc_attrs.max_reqs);
7276 }
7277
7278 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
7279 struct nfs41_create_session_res *res)
7280 {
7281 struct nfs4_channel_attrs *sent = &args->fc_attrs;
7282 struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
7283
7284 if (rcvd->max_resp_sz > sent->max_resp_sz)
7285 return -EINVAL;
7286 /*
7287 * Our requested max_ops is the minimum we need; we're not
7288 * prepared to break up compounds into smaller pieces than that.
7289 * So, no point even trying to continue if the server won't
7290 * cooperate:
7291 */
7292 if (rcvd->max_ops < sent->max_ops)
7293 return -EINVAL;
7294 if (rcvd->max_reqs == 0)
7295 return -EINVAL;
7296 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
7297 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
7298 return 0;
7299 }
7300
7301 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
7302 struct nfs41_create_session_res *res)
7303 {
7304 struct nfs4_channel_attrs *sent = &args->bc_attrs;
7305 struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
7306
7307 if (!(res->flags & SESSION4_BACK_CHAN))
7308 goto out;
7309 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
7310 return -EINVAL;
7311 if (rcvd->max_resp_sz < sent->max_resp_sz)
7312 return -EINVAL;
7313 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
7314 return -EINVAL;
7315 /* These would render the backchannel useless: */
7316 if (rcvd->max_ops != sent->max_ops)
7317 return -EINVAL;
7318 if (rcvd->max_reqs != sent->max_reqs)
7319 return -EINVAL;
7320 out:
7321 return 0;
7322 }
7323
7324 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
7325 struct nfs41_create_session_res *res)
7326 {
7327 int ret;
7328
7329 ret = nfs4_verify_fore_channel_attrs(args, res);
7330 if (ret)
7331 return ret;
7332 return nfs4_verify_back_channel_attrs(args, res);
7333 }
7334
7335 static void nfs4_update_session(struct nfs4_session *session,
7336 struct nfs41_create_session_res *res)
7337 {
7338 nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
7339 /* Mark client id and session as being confirmed */
7340 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
7341 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
7342 session->flags = res->flags;
7343 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
7344 if (res->flags & SESSION4_BACK_CHAN)
7345 memcpy(&session->bc_attrs, &res->bc_attrs,
7346 sizeof(session->bc_attrs));
7347 }
7348
7349 static int _nfs4_proc_create_session(struct nfs_client *clp,
7350 struct rpc_cred *cred)
7351 {
7352 struct nfs4_session *session = clp->cl_session;
7353 struct nfs41_create_session_args args = {
7354 .client = clp,
7355 .clientid = clp->cl_clientid,
7356 .seqid = clp->cl_seqid,
7357 .cb_program = NFS4_CALLBACK,
7358 };
7359 struct nfs41_create_session_res res;
7360
7361 struct rpc_message msg = {
7362 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
7363 .rpc_argp = &args,
7364 .rpc_resp = &res,
7365 .rpc_cred = cred,
7366 };
7367 int status;
7368
7369 nfs4_init_channel_attrs(&args);
7370 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
7371
7372 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7373 trace_nfs4_create_session(clp, status);
7374
7375 if (!status) {
7376 /* Verify the session's negotiated channel_attrs values */
7377 status = nfs4_verify_channel_attrs(&args, &res);
7378 /* Increment the clientid slot sequence id */
7379 if (clp->cl_seqid == res.seqid)
7380 clp->cl_seqid++;
7381 if (status)
7382 goto out;
7383 nfs4_update_session(session, &res);
7384 }
7385 out:
7386 return status;
7387 }
7388
7389 /*
7390 * Issues a CREATE_SESSION operation to the server.
7391 * It is the responsibility of the caller to verify the session is
7392 * expired before calling this routine.
7393 */
7394 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
7395 {
7396 int status;
7397 unsigned *ptr;
7398 struct nfs4_session *session = clp->cl_session;
7399
7400 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
7401
7402 status = _nfs4_proc_create_session(clp, cred);
7403 if (status)
7404 goto out;
7405
7406 /* Init or reset the session slot tables */
7407 status = nfs4_setup_session_slot_tables(session);
7408 dprintk("slot table setup returned %d\n", status);
7409 if (status)
7410 goto out;
7411
7412 ptr = (unsigned *)&session->sess_id.data[0];
7413 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
7414 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
7415 out:
7416 dprintk("<-- %s\n", __func__);
7417 return status;
7418 }
7419
7420 /*
7421 * Issue the over-the-wire RPC DESTROY_SESSION.
7422 * The caller must serialize access to this routine.
7423 */
7424 int nfs4_proc_destroy_session(struct nfs4_session *session,
7425 struct rpc_cred *cred)
7426 {
7427 struct rpc_message msg = {
7428 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
7429 .rpc_argp = session,
7430 .rpc_cred = cred,
7431 };
7432 int status = 0;
7433
7434 dprintk("--> nfs4_proc_destroy_session\n");
7435
7436 /* session is still being set up */
7437 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
7438 return 0;
7439
7440 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7441 trace_nfs4_destroy_session(session->clp, status);
7442
7443 if (status)
7444 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
7445 "Session has been destroyed regardless...\n", status);
7446
7447 dprintk("<-- nfs4_proc_destroy_session\n");
7448 return status;
7449 }
7450
7451 /*
7452 * Renew the cl_session lease.
7453 */
7454 struct nfs4_sequence_data {
7455 struct nfs_client *clp;
7456 struct nfs4_sequence_args args;
7457 struct nfs4_sequence_res res;
7458 };
7459
7460 static void nfs41_sequence_release(void *data)
7461 {
7462 struct nfs4_sequence_data *calldata = data;
7463 struct nfs_client *clp = calldata->clp;
7464
7465 if (atomic_read(&clp->cl_count) > 1)
7466 nfs4_schedule_state_renewal(clp);
7467 nfs_put_client(clp);
7468 kfree(calldata);
7469 }
7470
7471 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
7472 {
7473 switch(task->tk_status) {
7474 case -NFS4ERR_DELAY:
7475 rpc_delay(task, NFS4_POLL_RETRY_MAX);
7476 return -EAGAIN;
7477 default:
7478 nfs4_schedule_lease_recovery(clp);
7479 }
7480 return 0;
7481 }
7482
7483 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
7484 {
7485 struct nfs4_sequence_data *calldata = data;
7486 struct nfs_client *clp = calldata->clp;
7487
7488 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
7489 return;
7490
7491 trace_nfs4_sequence(clp, task->tk_status);
7492 if (task->tk_status < 0) {
7493 dprintk("%s ERROR %d\n", __func__, task->tk_status);
7494 if (atomic_read(&clp->cl_count) == 1)
7495 goto out;
7496
7497 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
7498 rpc_restart_call_prepare(task);
7499 return;
7500 }
7501 }
7502 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
7503 out:
7504 dprintk("<-- %s\n", __func__);
7505 }
7506
7507 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
7508 {
7509 struct nfs4_sequence_data *calldata = data;
7510 struct nfs_client *clp = calldata->clp;
7511 struct nfs4_sequence_args *args;
7512 struct nfs4_sequence_res *res;
7513
7514 args = task->tk_msg.rpc_argp;
7515 res = task->tk_msg.rpc_resp;
7516
7517 nfs41_setup_sequence(clp->cl_session, args, res, task);
7518 }
7519
7520 static const struct rpc_call_ops nfs41_sequence_ops = {
7521 .rpc_call_done = nfs41_sequence_call_done,
7522 .rpc_call_prepare = nfs41_sequence_prepare,
7523 .rpc_release = nfs41_sequence_release,
7524 };
7525
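/*
 * Build an asynchronous SEQUENCE task that acts as the lease renewal ping
 * for NFSv4.1. A reference to the nfs_client is held for the lifetime of
 * the task and dropped in the rpc_release callback; the is_privileged flag
 * lets state recovery issue the call even while the session is draining.
 */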
7526 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
7527 struct rpc_cred *cred,
7528 bool is_privileged)
7529 {
7530 struct nfs4_sequence_data *calldata;
7531 struct rpc_message msg = {
7532 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
7533 .rpc_cred = cred,
7534 };
7535 struct rpc_task_setup task_setup_data = {
7536 .rpc_client = clp->cl_rpcclient,
7537 .rpc_message = &msg,
7538 .callback_ops = &nfs41_sequence_ops,
7539 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
7540 };
7541
7542 if (!atomic_inc_not_zero(&clp->cl_count))
7543 return ERR_PTR(-EIO);
7544 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7545 if (calldata == NULL) {
7546 nfs_put_client(clp);
7547 return ERR_PTR(-ENOMEM);
7548 }
7549 nfs4_init_sequence(&calldata->args, &calldata->res, 0);
7550 if (is_privileged)
7551 nfs4_set_sequence_privileged(&calldata->args);
7552 msg.rpc_argp = &calldata->args;
7553 msg.rpc_resp = &calldata->res;
7554 calldata->clp = clp;
7555 task_setup_data.callback_data = calldata;
7556
7557 return rpc_run_task(&task_setup_data);
7558 }
7559
7560 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
7561 {
7562 struct rpc_task *task;
7563 int ret = 0;
7564
7565 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
7566 return -EAGAIN;
7567 task = _nfs41_proc_sequence(clp, cred, false);
7568 if (IS_ERR(task))
7569 ret = PTR_ERR(task);
7570 else
7571 rpc_put_task_async(task);
7572 dprintk("<-- %s status=%d\n", __func__, ret);
7573 return ret;
7574 }
7575
7576 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
7577 {
7578 struct rpc_task *task;
7579 int ret;
7580
7581 task = _nfs41_proc_sequence(clp, cred, true);
7582 if (IS_ERR(task)) {
7583 ret = PTR_ERR(task);
7584 goto out;
7585 }
7586 ret = rpc_wait_for_completion_task(task);
7587 if (!ret)
7588 ret = task->tk_status;
7589 rpc_put_task(task);
7590 out:
7591 dprintk("<-- %s status=%d\n", __func__, ret);
7592 return ret;
7593 }
7594
7595 struct nfs4_reclaim_complete_data {
7596 struct nfs_client *clp;
7597 struct nfs41_reclaim_complete_args arg;
7598 struct nfs41_reclaim_complete_res res;
7599 };
7600
7601 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
7602 {
7603 struct nfs4_reclaim_complete_data *calldata = data;
7604
7605 nfs41_setup_sequence(calldata->clp->cl_session,
7606 &calldata->arg.seq_args,
7607 &calldata->res.seq_res,
7608 task);
7609 }
7610
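/*
 * Map RECLAIM_COMPLETE errors: "already done" and wrong-credential replies
 * are ignored, NFS4ERR_DELAY and NFS4ERR_RETRY_UNCACHED_REP cause the call
 * to be retried, and anything else triggers lease recovery.
 */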
7611 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
7612 {
7613 switch(task->tk_status) {
7614 case 0:
7615 case -NFS4ERR_COMPLETE_ALREADY:
7616 case -NFS4ERR_WRONG_CRED: /* What to do here? */
7617 break;
7618 case -NFS4ERR_DELAY:
7619 rpc_delay(task, NFS4_POLL_RETRY_MAX);
7620 /* fall through */
7621 case -NFS4ERR_RETRY_UNCACHED_REP:
7622 return -EAGAIN;
7623 default:
7624 nfs4_schedule_lease_recovery(clp);
7625 }
7626 return 0;
7627 }
7628
7629 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
7630 {
7631 struct nfs4_reclaim_complete_data *calldata = data;
7632 struct nfs_client *clp = calldata->clp;
7633 struct nfs4_sequence_res *res = &calldata->res.seq_res;
7634
7635 dprintk("--> %s\n", __func__);
7636 if (!nfs41_sequence_done(task, res))
7637 return;
7638
7639 trace_nfs4_reclaim_complete(clp, task->tk_status);
7640 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
7641 rpc_restart_call_prepare(task);
7642 return;
7643 }
7644 dprintk("<-- %s\n", __func__);
7645 }
7646
7647 static void nfs4_free_reclaim_complete_data(void *data)
7648 {
7649 struct nfs4_reclaim_complete_data *calldata = data;
7650
7651 kfree(calldata);
7652 }
7653
7654 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
7655 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
7656 .rpc_call_done = nfs4_reclaim_complete_done,
7657 .rpc_release = nfs4_free_reclaim_complete_data,
7658 };
7659
7660 /*
7661 * Issue a global reclaim complete.
7662 */
7663 static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
7664 struct rpc_cred *cred)
7665 {
7666 struct nfs4_reclaim_complete_data *calldata;
7667 struct rpc_task *task;
7668 struct rpc_message msg = {
7669 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
7670 .rpc_cred = cred,
7671 };
7672 struct rpc_task_setup task_setup_data = {
7673 .rpc_client = clp->cl_rpcclient,
7674 .rpc_message = &msg,
7675 .callback_ops = &nfs4_reclaim_complete_call_ops,
7676 .flags = RPC_TASK_ASYNC,
7677 };
7678 int status = -ENOMEM;
7679
7680 dprintk("--> %s\n", __func__);
7681 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7682 if (calldata == NULL)
7683 goto out;
7684 calldata->clp = clp;
7685 calldata->arg.one_fs = 0;
7686
7687 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
7688 nfs4_set_sequence_privileged(&calldata->arg.seq_args);
7689 msg.rpc_argp = &calldata->arg;
7690 msg.rpc_resp = &calldata->res;
7691 task_setup_data.callback_data = calldata;
7692 task = rpc_run_task(&task_setup_data);
7693 if (IS_ERR(task)) {
7694 status = PTR_ERR(task);
7695 goto out;
7696 }
7697 status = nfs4_wait_for_completion_rpc_task(task);
7698 if (status == 0)
7699 status = task->tk_status;
7700 rpc_put_task(task);
7701 return 0;
7702 out:
7703 dprintk("<-- %s status=%d\n", __func__, status);
7704 return status;
7705 }
7706
7707 static void
7708 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
7709 {
7710 struct nfs4_layoutget *lgp = calldata;
7711 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
7712 struct nfs4_session *session = nfs4_get_session(server);
7713
7714 dprintk("--> %s\n", __func__);
7715 /* Note there is a race here, where a CB_LAYOUTRECALL can come in
7716 * right now covering the LAYOUTGET we are about to send.
7717 * However, that is not so catastrophic, and there seems
7718 * to be no way to prevent it completely.
7719 */
7720 if (nfs41_setup_sequence(session, &lgp->args.seq_args,
7721 &lgp->res.seq_res, task))
7722 return;
7723 if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
7724 NFS_I(lgp->args.inode)->layout,
7725 &lgp->args.range,
7726 lgp->args.ctx->state)) {
7727 rpc_exit(task, NFS4_OK);
7728 }
7729 }
7730
7731 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
7732 {
7733 struct nfs4_layoutget *lgp = calldata;
7734 struct inode *inode = lgp->args.inode;
7735 struct nfs_server *server = NFS_SERVER(inode);
7736 struct pnfs_layout_hdr *lo;
7737 struct nfs4_state *state = NULL;
7738 unsigned long timeo, now, giveup;
7739
7740 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
7741
7742 if (!nfs41_sequence_done(task, &lgp->res.seq_res))
7743 goto out;
7744
7745 switch (task->tk_status) {
7746 case 0:
7747 goto out;
7748 /*
7749 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
7750 * (or clients) writing to the same RAID stripe
7751 */
7752 case -NFS4ERR_LAYOUTTRYLATER:
7753 /*
7754 * NFS4ERR_RECALLCONFLICT is a conflict with ourselves (we must
7755 * recall an existing layout before getting a new one).
7756 */
7757 case -NFS4ERR_RECALLCONFLICT:
7758 timeo = rpc_get_timeout(task->tk_client);
7759 giveup = lgp->args.timestamp + timeo;
7760 now = jiffies;
7761 if (time_after(giveup, now)) {
7762 unsigned long delay;
7763
7764 /* Delay for:
7765 * - Not less than NFS4_POLL_RETRY_MIN.
7766 * - One last retry at most a jiffy before we give up.
7767 * - Exponential backoff (time_now minus start_attempt).
7768 */
7769 delay = max_t(unsigned long, NFS4_POLL_RETRY_MIN,
7770 min((giveup - now - 1),
7771 now - lgp->args.timestamp));
7772
7773 dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n",
7774 __func__, delay);
7775 rpc_delay(task, delay);
7776 task->tk_status = 0;
7777 rpc_restart_call_prepare(task);
7778 goto out; /* Do not call nfs4_async_handle_error() */
7779 }
7780 break;
7781 case -NFS4ERR_EXPIRED:
7782 case -NFS4ERR_BAD_STATEID:
7783 spin_lock(&inode->i_lock);
7784 lo = NFS_I(inode)->layout;
7785 if (!lo || list_empty(&lo->plh_segs)) {
7786 spin_unlock(&inode->i_lock);
7787 /* If the open stateid was bad, then recover it. */
7788 state = lgp->args.ctx->state;
7789 } else {
7790 LIST_HEAD(head);
7791
7792 /*
7793 * Mark the bad layout state as invalid, then retry
7794 * with the current stateid.
7795 */
7796 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
7797 spin_unlock(&inode->i_lock);
7798 pnfs_free_lseg_list(&head);
7799
7800 task->tk_status = 0;
7801 rpc_restart_call_prepare(task);
7802 }
7803 }
7804 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN)
7805 rpc_restart_call_prepare(task);
7806 out:
7807 dprintk("<-- %s\n", __func__);
7808 }
7809
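/* Number of pages needed to hold a maximally sized fore channel reply */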
7810 static size_t max_response_pages(struct nfs_server *server)
7811 {
7812 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
7813 return nfs_page_array_len(0, max_resp_sz);
7814 }
7815
7816 static void nfs4_free_pages(struct page **pages, size_t size)
7817 {
7818 int i;
7819
7820 if (!pages)
7821 return;
7822
7823 for (i = 0; i < size; i++) {
7824 if (!pages[i])
7825 break;
7826 __free_page(pages[i]);
7827 }
7828 kfree(pages);
7829 }
7830
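/*
 * Allocate the page array used to receive the variable-length LAYOUTGET
 * reply; on any allocation failure everything allocated so far is freed
 * and NULL is returned.
 */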
7831 static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
7832 {
7833 struct page **pages;
7834 int i;
7835
7836 pages = kcalloc(size, sizeof(struct page *), gfp_flags);
7837 if (!pages) {
7838 dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
7839 return NULL;
7840 }
7841
7842 for (i = 0; i < size; i++) {
7843 pages[i] = alloc_page(gfp_flags);
7844 if (!pages[i]) {
7845 dprintk("%s: failed to allocate page\n", __func__);
7846 nfs4_free_pages(pages, size);
7847 return NULL;
7848 }
7849 }
7850
7851 return pages;
7852 }
7853
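/*
 * Undo the references taken in nfs4_proc_layoutget: free the reply pages,
 * drop the layout header and release the open context.
 */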
7854 static void nfs4_layoutget_release(void *calldata)
7855 {
7856 struct nfs4_layoutget *lgp = calldata;
7857 struct inode *inode = lgp->args.inode;
7858 struct nfs_server *server = NFS_SERVER(inode);
7859 size_t max_pages = max_response_pages(server);
7860
7861 dprintk("--> %s\n", __func__);
7862 nfs4_free_pages(lgp->args.layout.pages, max_pages);
7863 pnfs_put_layout_hdr(NFS_I(inode)->layout);
7864 put_nfs_open_context(lgp->args.ctx);
7865 kfree(calldata);
7866 dprintk("<-- %s\n", __func__);
7867 }
7868
7869 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
7870 .rpc_call_prepare = nfs4_layoutget_prepare,
7871 .rpc_call_done = nfs4_layoutget_done,
7872 .rpc_release = nfs4_layoutget_release,
7873 };
7874
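/*
 * Synchronous LAYOUTGET: allocate the reply pages, run the request as an
 * asynchronous RPC task, wait for completion, and on success hand the
 * returned layout to pnfs_layout_process() to build the layout segment.
 */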
7875 struct pnfs_layout_segment *
7876 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
7877 {
7878 struct inode *inode = lgp->args.inode;
7879 struct nfs_server *server = NFS_SERVER(inode);
7880 size_t max_pages = max_response_pages(server);
7881 struct rpc_task *task;
7882 struct rpc_message msg = {
7883 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
7884 .rpc_argp = &lgp->args,
7885 .rpc_resp = &lgp->res,
7886 .rpc_cred = lgp->cred,
7887 };
7888 struct rpc_task_setup task_setup_data = {
7889 .rpc_client = server->client,
7890 .rpc_message = &msg,
7891 .callback_ops = &nfs4_layoutget_call_ops,
7892 .callback_data = lgp,
7893 .flags = RPC_TASK_ASYNC,
7894 };
7895 struct pnfs_layout_segment *lseg = NULL;
7896 int status = 0;
7897
7898 dprintk("--> %s\n", __func__);
7899
7900 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
7901 pnfs_get_layout_hdr(NFS_I(inode)->layout);
7902
7903 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
7904 if (!lgp->args.layout.pages) {
7905 nfs4_layoutget_release(lgp);
7906 return ERR_PTR(-ENOMEM);
7907 }
7908 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
7909 lgp->args.timestamp = jiffies;
7910
7911 lgp->res.layoutp = &lgp->args.layout;
7912 lgp->res.seq_res.sr_slot = NULL;
7913 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
7914
7915 task = rpc_run_task(&task_setup_data);
7916 if (IS_ERR(task))
7917 return ERR_CAST(task);
7918 status = nfs4_wait_for_completion_rpc_task(task);
7919 if (status == 0)
7920 status = task->tk_status;
7921 trace_nfs4_layoutget(lgp->args.ctx,
7922 &lgp->args.range,
7923 &lgp->res.range,
7924 status);
7925 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
7926 if (status == 0 && lgp->res.layoutp->len)
7927 lseg = pnfs_layout_process(lgp);
7928 rpc_put_task(task);
7929 dprintk("<-- %s status=%d\n", __func__, status);
7930 if (status)
7931 return ERR_PTR(status);
7932 return lseg;
7933 }
7934
7935 static void
7936 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
7937 {
7938 struct nfs4_layoutreturn *lrp = calldata;
7939
7940 dprintk("--> %s\n", __func__);
7941 nfs41_setup_sequence(lrp->clp->cl_session,
7942 &lrp->args.seq_args,
7943 &lrp->res.seq_res,
7944 task);
7945 }
7946
7947 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
7948 {
7949 struct nfs4_layoutreturn *lrp = calldata;
7950 struct nfs_server *server;
7951
7952 dprintk("--> %s\n", __func__);
7953
7954 if (!nfs41_sequence_done(task, &lrp->res.seq_res))
7955 return;
7956
7957 server = NFS_SERVER(lrp->args.inode);
7958 switch (task->tk_status) {
7959 default:
7960 task->tk_status = 0;
7961 case 0:
7962 break;
7963 case -NFS4ERR_DELAY:
7964 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
7965 break;
7966 rpc_restart_call_prepare(task);
7967 return;
7968 }
7969 dprintk("<-- %s\n", __func__);
7970 }
7971
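/*
 * LAYOUTRETURN release: update the layout stateid if the server reports
 * that layout state remains, invalidate the segments covered by the
 * returned range, clear the layoutreturn wait bit and drop the references
 * taken for the call.
 */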
7972 static void nfs4_layoutreturn_release(void *calldata)
7973 {
7974 struct nfs4_layoutreturn *lrp = calldata;
7975 struct pnfs_layout_hdr *lo = lrp->args.layout;
7976 LIST_HEAD(freeme);
7977
7978 dprintk("--> %s\n", __func__);
7979 spin_lock(&lo->plh_inode->i_lock);
7980 if (lrp->res.lrs_present)
7981 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
7982 pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range);
7983 pnfs_clear_layoutreturn_waitbit(lo);
7984 lo->plh_block_lgets--;
7985 spin_unlock(&lo->plh_inode->i_lock);
7986 pnfs_free_lseg_list(&freeme);
7987 pnfs_put_layout_hdr(lrp->args.layout);
7988 nfs_iput_and_deactive(lrp->inode);
7989 kfree(calldata);
7990 dprintk("<-- %s\n", __func__);
7991 }
7992
7993 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
7994 .rpc_call_prepare = nfs4_layoutreturn_prepare,
7995 .rpc_call_done = nfs4_layoutreturn_done,
7996 .rpc_release = nfs4_layoutreturn_release,
7997 };
7998
7999 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
8000 {
8001 struct rpc_task *task;
8002 struct rpc_message msg = {
8003 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
8004 .rpc_argp = &lrp->args,
8005 .rpc_resp = &lrp->res,
8006 .rpc_cred = lrp->cred,
8007 };
8008 struct rpc_task_setup task_setup_data = {
8009 .rpc_client = NFS_SERVER(lrp->args.inode)->client,
8010 .rpc_message = &msg,
8011 .callback_ops = &nfs4_layoutreturn_call_ops,
8012 .callback_data = lrp,
8013 };
8014 int status = 0;
8015
8016 dprintk("--> %s\n", __func__);
8017 if (!sync) {
8018 lrp->inode = nfs_igrab_and_active(lrp->args.inode);
8019 if (!lrp->inode) {
8020 nfs4_layoutreturn_release(lrp);
8021 return -EAGAIN;
8022 }
8023 task_setup_data.flags |= RPC_TASK_ASYNC;
8024 }
8025 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
8026 task = rpc_run_task(&task_setup_data);
8027 if (IS_ERR(task))
8028 return PTR_ERR(task);
8029 if (sync)
8030 status = task->tk_status;
8031 trace_nfs4_layoutreturn(lrp->args.inode, status);
8032 dprintk("<-- %s status=%d\n", __func__, status);
8033 rpc_put_task(task);
8034 return status;
8035 }
8036
8037 static int
8038 _nfs4_proc_getdeviceinfo(struct nfs_server *server,
8039 struct pnfs_device *pdev,
8040 struct rpc_cred *cred)
8041 {
8042 struct nfs4_getdeviceinfo_args args = {
8043 .pdev = pdev,
8044 .notify_types = NOTIFY_DEVICEID4_CHANGE |
8045 NOTIFY_DEVICEID4_DELETE,
8046 };
8047 struct nfs4_getdeviceinfo_res res = {
8048 .pdev = pdev,
8049 };
8050 struct rpc_message msg = {
8051 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
8052 .rpc_argp = &args,
8053 .rpc_resp = &res,
8054 .rpc_cred = cred,
8055 };
8056 int status;
8057
8058 dprintk("--> %s\n", __func__);
8059 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
8060 if (res.notification & ~args.notify_types)
8061 dprintk("%s: unsupported notification\n", __func__);
8062 if (res.notification != args.notify_types)
8063 pdev->nocache = 1;
8064
8065 dprintk("<-- %s status=%d\n", __func__, status);
8066
8067 return status;
8068 }
8069
8070 int nfs4_proc_getdeviceinfo(struct nfs_server *server,
8071 struct pnfs_device *pdev,
8072 struct rpc_cred *cred)
8073 {
8074 struct nfs4_exception exception = { };
8075 int err;
8076
8077 do {
8078 err = nfs4_handle_exception(server,
8079 _nfs4_proc_getdeviceinfo(server, pdev, cred),
8080 &exception);
8081 } while (exception.retry);
8082 return err;
8083 }
8084 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
8085
8086 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
8087 {
8088 struct nfs4_layoutcommit_data *data = calldata;
8089 struct nfs_server *server = NFS_SERVER(data->args.inode);
8090 struct nfs4_session *session = nfs4_get_session(server);
8091
8092 nfs41_setup_sequence(session,
8093 &data->args.seq_args,
8094 &data->res.seq_res,
8095 task);
8096 }
8097
8098 static void
8099 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
8100 {
8101 struct nfs4_layoutcommit_data *data = calldata;
8102 struct nfs_server *server = NFS_SERVER(data->args.inode);
8103
8104 if (!nfs41_sequence_done(task, &data->res.seq_res))
8105 return;
8106
8107 switch (task->tk_status) { /* Just ignore these failures */
8108 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
8109 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
8110 case -NFS4ERR_BADLAYOUT: /* no layout */
8111 case -NFS4ERR_GRACE: /* loca_reclaim always false */
8112 task->tk_status = 0;
8113 case 0:
8114 break;
8115 default:
8116 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
8117 rpc_restart_call_prepare(task);
8118 return;
8119 }
8120 }
8121 }
8122
8123 static void nfs4_layoutcommit_release(void *calldata)
8124 {
8125 struct nfs4_layoutcommit_data *data = calldata;
8126
8127 pnfs_cleanup_layoutcommit(data);
8128 nfs_post_op_update_inode_force_wcc(data->args.inode,
8129 data->res.fattr);
8130 put_rpccred(data->cred);
8131 nfs_iput_and_deactive(data->inode);
8132 kfree(data);
8133 }
8134
8135 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
8136 .rpc_call_prepare = nfs4_layoutcommit_prepare,
8137 .rpc_call_done = nfs4_layoutcommit_done,
8138 .rpc_release = nfs4_layoutcommit_release,
8139 };
8140
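/*
 * Issue LAYOUTCOMMIT so the MDS can update the file's size and change
 * attributes to reflect data written through the layout. Asynchronous
 * callers pin the inode with nfs_igrab_and_active(); the reference is
 * dropped in nfs4_layoutcommit_release().
 */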
8141 int
8142 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
8143 {
8144 struct rpc_message msg = {
8145 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
8146 .rpc_argp = &data->args,
8147 .rpc_resp = &data->res,
8148 .rpc_cred = data->cred,
8149 };
8150 struct rpc_task_setup task_setup_data = {
8151 .task = &data->task,
8152 .rpc_client = NFS_CLIENT(data->args.inode),
8153 .rpc_message = &msg,
8154 .callback_ops = &nfs4_layoutcommit_ops,
8155 .callback_data = data,
8156 };
8157 struct rpc_task *task;
8158 int status = 0;
8159
8160 dprintk("NFS: initiating layoutcommit call. sync %d "
8161 "lbw: %llu inode %lu\n", sync,
8162 data->args.lastbytewritten,
8163 data->args.inode->i_ino);
8164
8165 if (!sync) {
8166 data->inode = nfs_igrab_and_active(data->args.inode);
8167 if (data->inode == NULL) {
8168 nfs4_layoutcommit_release(data);
8169 return -EAGAIN;
8170 }
8171 task_setup_data.flags = RPC_TASK_ASYNC;
8172 }
8173 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
8174 task = rpc_run_task(&task_setup_data);
8175 if (IS_ERR(task))
8176 return PTR_ERR(task);
8177 if (sync)
8178 status = task->tk_status;
8179 trace_nfs4_layoutcommit(data->args.inode, status);
8180 dprintk("%s: status %d\n", __func__, status);
8181 rpc_put_task(task);
8182 return status;
8183 }
8184
8185 /**
8186 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if
8187 * possible) as per RFC3530bis and RFC5661 Security Considerations sections
8188 */
8189 static int
8190 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8191 struct nfs_fsinfo *info,
8192 struct nfs4_secinfo_flavors *flavors, bool use_integrity)
8193 {
8194 struct nfs41_secinfo_no_name_args args = {
8195 .style = SECINFO_STYLE_CURRENT_FH,
8196 };
8197 struct nfs4_secinfo_res res = {
8198 .flavors = flavors,
8199 };
8200 struct rpc_message msg = {
8201 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
8202 .rpc_argp = &args,
8203 .rpc_resp = &res,
8204 };
8205 struct rpc_clnt *clnt = server->client;
8206 struct rpc_cred *cred = NULL;
8207 int status;
8208
8209 if (use_integrity) {
8210 clnt = server->nfs_client->cl_rpcclient;
8211 cred = nfs4_get_clid_cred(server->nfs_client);
8212 msg.rpc_cred = cred;
8213 }
8214
8215 dprintk("--> %s\n", __func__);
8216 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
8217 &res.seq_res, 0);
8218 dprintk("<-- %s status=%d\n", __func__, status);
8219
8220 if (cred)
8221 put_rpccred(cred);
8222
8223 return status;
8224 }
8225
8226 static int
8227 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8228 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
8229 {
8230 struct nfs4_exception exception = { };
8231 int err;
8232 do {
8233 /* first try using integrity protection */
8234 err = -NFS4ERR_WRONGSEC;
8235
8236 /* try to use integrity protection with machine cred */
8237 if (_nfs4_is_integrity_protected(server->nfs_client))
8238 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
8239 flavors, true);
8240
8241 /*
8242 * if unable to use integrity protection, or SECINFO with
8243 * integrity protection returns NFS4ERR_WRONGSEC (which is
8244 * disallowed by spec, but exists in deployed servers) use
8245 * the current filesystem's rpc_client and the user cred.
8246 */
8247 if (err == -NFS4ERR_WRONGSEC)
8248 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
8249 flavors, false);
8250
8251 switch (err) {
8252 case 0:
8253 case -NFS4ERR_WRONGSEC:
8254 case -ENOTSUPP:
8255 goto out;
8256 default:
8257 err = nfs4_handle_exception(server, err, &exception);
8258 }
8259 } while (exception.retry);
8260 out:
8261 return err;
8262 }
8263
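/*
 * Determine a usable security flavor for the root filehandle by asking the
 * server via SECINFO_NO_NAME and trying each advertised flavor that matches
 * the mount's auth_info. Servers without SECINFO_NO_NAME fall back to the
 * NFSv4.0 "guess and check" nfs4_find_root_sec().
 */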
8264 static int
8265 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
8266 struct nfs_fsinfo *info)
8267 {
8268 int err;
8269 struct page *page;
8270 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
8271 struct nfs4_secinfo_flavors *flavors;
8272 struct nfs4_secinfo4 *secinfo;
8273 int i;
8274
8275 page = alloc_page(GFP_KERNEL);
8276 if (!page) {
8277 err = -ENOMEM;
8278 goto out;
8279 }
8280
8281 flavors = page_address(page);
8282 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
8283
8284 /*
8285 * Fall back on "guess and check" method if
8286 * the server doesn't support SECINFO_NO_NAME
8287 */
8288 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
8289 err = nfs4_find_root_sec(server, fhandle, info);
8290 goto out_freepage;
8291 }
8292 if (err)
8293 goto out_freepage;
8294
8295 for (i = 0; i < flavors->num_flavors; i++) {
8296 secinfo = &flavors->flavors[i];
8297
8298 switch (secinfo->flavor) {
8299 case RPC_AUTH_NULL:
8300 case RPC_AUTH_UNIX:
8301 case RPC_AUTH_GSS:
8302 flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
8303 &secinfo->flavor_info);
8304 break;
8305 default:
8306 flavor = RPC_AUTH_MAXFLAVOR;
8307 break;
8308 }
8309
8310 if (!nfs_auth_info_match(&server->auth_info, flavor))
8311 flavor = RPC_AUTH_MAXFLAVOR;
8312
8313 if (flavor != RPC_AUTH_MAXFLAVOR) {
8314 err = nfs4_lookup_root_sec(server, fhandle,
8315 info, flavor);
8316 if (!err)
8317 break;
8318 }
8319 }
8320
8321 if (flavor == RPC_AUTH_MAXFLAVOR)
8322 err = -EPERM;
8323
8324 out_freepage:
8325 put_page(page);
8326 if (err == -EACCES)
8327 return -EPERM;
8328 out:
8329 return err;
8330 }
8331
8332 static int _nfs41_test_stateid(struct nfs_server *server,
8333 nfs4_stateid *stateid,
8334 struct rpc_cred *cred)
8335 {
8336 int status;
8337 struct nfs41_test_stateid_args args = {
8338 .stateid = stateid,
8339 };
8340 struct nfs41_test_stateid_res res;
8341 struct rpc_message msg = {
8342 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
8343 .rpc_argp = &args,
8344 .rpc_resp = &res,
8345 .rpc_cred = cred,
8346 };
8347 struct rpc_clnt *rpc_client = server->client;
8348
8349 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
8350 &rpc_client, &msg);
8351
8352 dprintk("NFS call test_stateid %p\n", stateid);
8353 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
8354 nfs4_set_sequence_privileged(&args.seq_args);
8355 status = nfs4_call_sync_sequence(rpc_client, server, &msg,
8356 &args.seq_args, &res.seq_res);
8357 if (status != NFS_OK) {
8358 dprintk("NFS reply test_stateid: failed, %d\n", status);
8359 return status;
8360 }
8361 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
8362 return -res.status;
8363 }
8364
8365 /**
8366 * nfs41_test_stateid - perform a TEST_STATEID operation
8367 *
8368 * @server: server / transport on which to perform the operation
8369 * @stateid: state ID to test
8370 * @cred: credential
8371 *
8372 * Returns NFS_OK if the server recognizes that "stateid" is valid.
8373 * Otherwise a negative NFS4ERR value is returned if the operation
8374 * failed or the state ID is not currently valid.
8375 */
8376 static int nfs41_test_stateid(struct nfs_server *server,
8377 nfs4_stateid *stateid,
8378 struct rpc_cred *cred)
8379 {
8380 struct nfs4_exception exception = { };
8381 int err;
8382 do {
8383 err = _nfs41_test_stateid(server, stateid, cred);
8384 if (err != -NFS4ERR_DELAY)
8385 break;
8386 nfs4_handle_exception(server, err, &exception);
8387 } while (exception.retry);
8388 return err;
8389 }
8390
8391 struct nfs_free_stateid_data {
8392 struct nfs_server *server;
8393 struct nfs41_free_stateid_args args;
8394 struct nfs41_free_stateid_res res;
8395 };
8396
8397 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
8398 {
8399 struct nfs_free_stateid_data *data = calldata;
8400 nfs41_setup_sequence(nfs4_get_session(data->server),
8401 &data->args.seq_args,
8402 &data->res.seq_res,
8403 task);
8404 }
8405
8406 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
8407 {
8408 struct nfs_free_stateid_data *data = calldata;
8409
8410 nfs41_sequence_done(task, &data->res.seq_res);
8411
8412 switch (task->tk_status) {
8413 case -NFS4ERR_DELAY:
8414 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
8415 rpc_restart_call_prepare(task);
8416 }
8417 }
8418
8419 static void nfs41_free_stateid_release(void *calldata)
8420 {
8421 kfree(calldata);
8422 }
8423
8424 static const struct rpc_call_ops nfs41_free_stateid_ops = {
8425 .rpc_call_prepare = nfs41_free_stateid_prepare,
8426 .rpc_call_done = nfs41_free_stateid_done,
8427 .rpc_release = nfs41_free_stateid_release,
8428 };
8429
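/*
 * Start an asynchronous FREE_STATEID task. The machine credential is
 * substituted when the server enforces SP4_MACH_CRED state protection, and
 * the caller owns the returned rpc_task.
 */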
8430 static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server,
8431 nfs4_stateid *stateid,
8432 struct rpc_cred *cred,
8433 bool privileged)
8434 {
8435 struct rpc_message msg = {
8436 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
8437 .rpc_cred = cred,
8438 };
8439 struct rpc_task_setup task_setup = {
8440 .rpc_client = server->client,
8441 .rpc_message = &msg,
8442 .callback_ops = &nfs41_free_stateid_ops,
8443 .flags = RPC_TASK_ASYNC,
8444 };
8445 struct nfs_free_stateid_data *data;
8446
8447 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
8448 &task_setup.rpc_client, &msg);
8449
8450 dprintk("NFS call free_stateid %p\n", stateid);
8451 data = kmalloc(sizeof(*data), GFP_NOFS);
8452 if (!data)
8453 return ERR_PTR(-ENOMEM);
8454 data->server = server;
8455 nfs4_stateid_copy(&data->args.stateid, stateid);
8456
8457 task_setup.callback_data = data;
8458
8459 msg.rpc_argp = &data->args;
8460 msg.rpc_resp = &data->res;
8461 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
8462 if (privileged)
8463 nfs4_set_sequence_privileged(&data->args.seq_args);
8464
8465 return rpc_run_task(&task_setup);
8466 }
8467
8468 /**
8469 * nfs41_free_stateid - perform a FREE_STATEID operation
8470 *
8471 * @server: server / transport on which to perform the operation
8472 * @stateid: state ID to release
8473 * @cred: credential
8474 *
8475 * Returns NFS_OK if the server freed "stateid". Otherwise a
8476 * negative NFS4ERR value is returned.
8477 */
8478 static int nfs41_free_stateid(struct nfs_server *server,
8479 nfs4_stateid *stateid,
8480 struct rpc_cred *cred)
8481 {
8482 struct rpc_task *task;
8483 int ret;
8484
8485 task = _nfs41_free_stateid(server, stateid, cred, true);
8486 if (IS_ERR(task))
8487 return PTR_ERR(task);
8488 ret = rpc_wait_for_completion_task(task);
8489 if (!ret)
8490 ret = task->tk_status;
8491 rpc_put_task(task);
8492 return ret;
8493 }
8494
8495 static void
8496 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
8497 {
8498 struct rpc_task *task;
8499 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
8500
8501 task = _nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
8502 nfs4_free_lock_state(server, lsp);
8503 if (IS_ERR(task))
8504 return;
8505 rpc_put_task(task);
8506 }
8507
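/*
 * NFSv4.1 stateid comparison: the "other" fields must match exactly, while
 * a seqid of zero on either side acts as a wildcard that matches any
 * sequence number.
 */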
8508 static bool nfs41_match_stateid(const nfs4_stateid *s1,
8509 const nfs4_stateid *s2)
8510 {
8511 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
8512 return false;
8513
8514 if (s1->seqid == s2->seqid)
8515 return true;
8516 if (s1->seqid == 0 || s2->seqid == 0)
8517 return true;
8518
8519 return false;
8520 }
8521
8522 #endif /* CONFIG_NFS_V4_1 */
8523
8524 static bool nfs4_match_stateid(const nfs4_stateid *s1,
8525 const nfs4_stateid *s2)
8526 {
8527 return nfs4_stateid_match(s1, s2);
8528 }
8529
8530
8531 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
8532 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
8533 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
8534 .recover_open = nfs4_open_reclaim,
8535 .recover_lock = nfs4_lock_reclaim,
8536 .establish_clid = nfs4_init_clientid,
8537 .detect_trunking = nfs40_discover_server_trunking,
8538 };
8539
8540 #if defined(CONFIG_NFS_V4_1)
8541 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
8542 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
8543 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
8544 .recover_open = nfs4_open_reclaim,
8545 .recover_lock = nfs4_lock_reclaim,
8546 .establish_clid = nfs41_init_clientid,
8547 .reclaim_complete = nfs41_proc_reclaim_complete,
8548 .detect_trunking = nfs41_discover_server_trunking,
8549 };
8550 #endif /* CONFIG_NFS_V4_1 */
8551
8552 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
8553 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
8554 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
8555 .recover_open = nfs40_open_expired,
8556 .recover_lock = nfs4_lock_expired,
8557 .establish_clid = nfs4_init_clientid,
8558 };
8559
8560 #if defined(CONFIG_NFS_V4_1)
8561 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
8562 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
8563 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
8564 .recover_open = nfs41_open_expired,
8565 .recover_lock = nfs41_lock_expired,
8566 .establish_clid = nfs41_init_clientid,
8567 };
8568 #endif /* CONFIG_NFS_V4_1 */
8569
8570 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
8571 .sched_state_renewal = nfs4_proc_async_renew,
8572 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
8573 .renew_lease = nfs4_proc_renew,
8574 };
8575
8576 #if defined(CONFIG_NFS_V4_1)
8577 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
8578 .sched_state_renewal = nfs41_proc_async_sequence,
8579 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
8580 .renew_lease = nfs4_proc_sequence,
8581 };
8582 #endif
8583
8584 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
8585 .get_locations = _nfs40_proc_get_locations,
8586 .fsid_present = _nfs40_proc_fsid_present,
8587 };
8588
8589 #if defined(CONFIG_NFS_V4_1)
8590 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
8591 .get_locations = _nfs41_proc_get_locations,
8592 .fsid_present = _nfs41_proc_fsid_present,
8593 };
8594 #endif /* CONFIG_NFS_V4_1 */
8595
8596 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
8597 .minor_version = 0,
8598 .init_caps = NFS_CAP_READDIRPLUS
8599 | NFS_CAP_ATOMIC_OPEN
8600 | NFS_CAP_POSIX_LOCK,
8601 .init_client = nfs40_init_client,
8602 .shutdown_client = nfs40_shutdown_client,
8603 .match_stateid = nfs4_match_stateid,
8604 .find_root_sec = nfs4_find_root_sec,
8605 .free_lock_state = nfs4_release_lockowner,
8606 .alloc_seqid = nfs_alloc_seqid,
8607 .call_sync_ops = &nfs40_call_sync_ops,
8608 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
8609 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
8610 .state_renewal_ops = &nfs40_state_renewal_ops,
8611 .mig_recovery_ops = &nfs40_mig_recovery_ops,
8612 };
8613
8614 #if defined(CONFIG_NFS_V4_1)
8615 static struct nfs_seqid *
8616 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
8617 {
8618 return NULL;
8619 }
8620
8621 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
8622 .minor_version = 1,
8623 .init_caps = NFS_CAP_READDIRPLUS
8624 | NFS_CAP_ATOMIC_OPEN
8625 | NFS_CAP_POSIX_LOCK
8626 | NFS_CAP_STATEID_NFSV41
8627 | NFS_CAP_ATOMIC_OPEN_V1,
8628 .init_client = nfs41_init_client,
8629 .shutdown_client = nfs41_shutdown_client,
8630 .match_stateid = nfs41_match_stateid,
8631 .find_root_sec = nfs41_find_root_sec,
8632 .free_lock_state = nfs41_free_lock_state,
8633 .alloc_seqid = nfs_alloc_no_seqid,
8634 .call_sync_ops = &nfs41_call_sync_ops,
8635 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
8636 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
8637 .state_renewal_ops = &nfs41_state_renewal_ops,
8638 .mig_recovery_ops = &nfs41_mig_recovery_ops,
8639 };
8640 #endif
8641
8642 #if defined(CONFIG_NFS_V4_2)
8643 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
8644 .minor_version = 2,
8645 .init_caps = NFS_CAP_READDIRPLUS
8646 | NFS_CAP_ATOMIC_OPEN
8647 | NFS_CAP_POSIX_LOCK
8648 | NFS_CAP_STATEID_NFSV41
8649 | NFS_CAP_ATOMIC_OPEN_V1
8650 | NFS_CAP_ALLOCATE
8651 | NFS_CAP_DEALLOCATE
8652 | NFS_CAP_SEEK
8653 | NFS_CAP_LAYOUTSTATS,
8654 .init_client = nfs41_init_client,
8655 .shutdown_client = nfs41_shutdown_client,
8656 .match_stateid = nfs41_match_stateid,
8657 .find_root_sec = nfs41_find_root_sec,
8658 .free_lock_state = nfs41_free_lock_state,
8659 .call_sync_ops = &nfs41_call_sync_ops,
8660 .alloc_seqid = nfs_alloc_no_seqid,
8661 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
8662 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
8663 .state_renewal_ops = &nfs41_state_renewal_ops,
8664 };
8665 #endif
8666
8667 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
8668 [0] = &nfs_v4_0_minor_ops,
8669 #if defined(CONFIG_NFS_V4_1)
8670 [1] = &nfs_v4_1_minor_ops,
8671 #endif
8672 #if defined(CONFIG_NFS_V4_2)
8673 [2] = &nfs_v4_2_minor_ops,
8674 #endif
8675 };
8676
8677 static const struct inode_operations nfs4_dir_inode_operations = {
8678 .create = nfs_create,
8679 .lookup = nfs_lookup,
8680 .atomic_open = nfs_atomic_open,
8681 .link = nfs_link,
8682 .unlink = nfs_unlink,
8683 .symlink = nfs_symlink,
8684 .mkdir = nfs_mkdir,
8685 .rmdir = nfs_rmdir,
8686 .mknod = nfs_mknod,
8687 .rename = nfs_rename,
8688 .permission = nfs_permission,
8689 .getattr = nfs_getattr,
8690 .setattr = nfs_setattr,
8691 .getxattr = generic_getxattr,
8692 .setxattr = generic_setxattr,
8693 .listxattr = generic_listxattr,
8694 .removexattr = generic_removexattr,
8695 };
8696
8697 static const struct inode_operations nfs4_file_inode_operations = {
8698 .permission = nfs_permission,
8699 .getattr = nfs_getattr,
8700 .setattr = nfs_setattr,
8701 .getxattr = generic_getxattr,
8702 .setxattr = generic_setxattr,
8703 .listxattr = generic_listxattr,
8704 .removexattr = generic_removexattr,
8705 };
8706
8707 const struct nfs_rpc_ops nfs_v4_clientops = {
8708 .version = 4, /* protocol version */
8709 .dentry_ops = &nfs4_dentry_operations,
8710 .dir_inode_ops = &nfs4_dir_inode_operations,
8711 .file_inode_ops = &nfs4_file_inode_operations,
8712 .file_ops = &nfs4_file_operations,
8713 .getroot = nfs4_proc_get_root,
8714 .submount = nfs4_submount,
8715 .try_mount = nfs4_try_mount,
8716 .getattr = nfs4_proc_getattr,
8717 .setattr = nfs4_proc_setattr,
8718 .lookup = nfs4_proc_lookup,
8719 .access = nfs4_proc_access,
8720 .readlink = nfs4_proc_readlink,
8721 .create = nfs4_proc_create,
8722 .remove = nfs4_proc_remove,
8723 .unlink_setup = nfs4_proc_unlink_setup,
8724 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
8725 .unlink_done = nfs4_proc_unlink_done,
8726 .rename_setup = nfs4_proc_rename_setup,
8727 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
8728 .rename_done = nfs4_proc_rename_done,
8729 .link = nfs4_proc_link,
8730 .symlink = nfs4_proc_symlink,
8731 .mkdir = nfs4_proc_mkdir,
8732 .rmdir = nfs4_proc_remove,
8733 .readdir = nfs4_proc_readdir,
8734 .mknod = nfs4_proc_mknod,
8735 .statfs = nfs4_proc_statfs,
8736 .fsinfo = nfs4_proc_fsinfo,
8737 .pathconf = nfs4_proc_pathconf,
8738 .set_capabilities = nfs4_server_capabilities,
8739 .decode_dirent = nfs4_decode_dirent,
8740 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
8741 .read_setup = nfs4_proc_read_setup,
8742 .read_done = nfs4_read_done,
8743 .write_setup = nfs4_proc_write_setup,
8744 .write_done = nfs4_write_done,
8745 .commit_setup = nfs4_proc_commit_setup,
8746 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
8747 .commit_done = nfs4_commit_done,
8748 .lock = nfs4_proc_lock,
8749 .clear_acl_cache = nfs4_zap_acl_attr,
8750 .close_context = nfs4_close_context,
8751 .open_context = nfs4_atomic_open,
8752 .have_delegation = nfs4_have_delegation,
8753 .return_delegation = nfs4_inode_return_delegation,
8754 .alloc_client = nfs4_alloc_client,
8755 .init_client = nfs4_init_client,
8756 .free_client = nfs4_free_client,
8757 .create_server = nfs4_create_server,
8758 .clone_server = nfs_clone_server,
8759 };
8760
8761 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
8762 .prefix = XATTR_NAME_NFSV4_ACL,
8763 .list = nfs4_xattr_list_nfs4_acl,
8764 .get = nfs4_xattr_get_nfs4_acl,
8765 .set = nfs4_xattr_set_nfs4_acl,
8766 };
8767
8768 const struct xattr_handler *nfs4_xattr_handlers[] = {
8769 &nfs4_xattr_nfs4_acl_handler,
8770 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
8771 &nfs4_xattr_nfs4_label_handler,
8772 #endif
8773 NULL
8774 };
8775
8776 /*
8777 * Local variables:
8778 * c-basic-offset: 8
8779 * End:
8780 */