NFSv4: We must set NFS_OPEN_STATE flag in nfs_resync_open_stateid_locked
1 /*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/file.h>
42 #include <linux/string.h>
43 #include <linux/ratelimit.h>
44 #include <linux/printk.h>
45 #include <linux/slab.h>
46 #include <linux/sunrpc/clnt.h>
47 #include <linux/nfs.h>
48 #include <linux/nfs4.h>
49 #include <linux/nfs_fs.h>
50 #include <linux/nfs_page.h>
51 #include <linux/nfs_mount.h>
52 #include <linux/namei.h>
53 #include <linux/mount.h>
54 #include <linux/module.h>
55 #include <linux/xattr.h>
56 #include <linux/utsname.h>
57 #include <linux/freezer.h>
58
59 #include "nfs4_fs.h"
60 #include "delegation.h"
61 #include "internal.h"
62 #include "iostat.h"
63 #include "callback.h"
64 #include "pnfs.h"
65 #include "netns.h"
66 #include "nfs4idmap.h"
67 #include "nfs4session.h"
68 #include "fscache.h"
69
70 #include "nfs4trace.h"
71
72 #define NFSDBG_FACILITY NFSDBG_PROC
73
74 #define NFS4_POLL_RETRY_MIN (HZ/10)
75 #define NFS4_POLL_RETRY_MAX (15*HZ)
76
77 struct nfs4_opendata;
78 static int _nfs4_proc_open(struct nfs4_opendata *data);
79 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
80 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
81 static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *, long *);
82 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
83 static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *label);
84 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label);
85 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
86 struct nfs_fattr *fattr, struct iattr *sattr,
87 struct nfs4_state *state, struct nfs4_label *ilabel,
88 struct nfs4_label *olabel);
89 #ifdef CONFIG_NFS_V4_1
90 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
91 struct rpc_cred *);
92 static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
93 struct rpc_cred *);
94 #endif
95
96 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
97 static inline struct nfs4_label *
98 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
99 struct iattr *sattr, struct nfs4_label *label)
100 {
101 int err;
102
103 if (label == NULL)
104 return NULL;
105
106 if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
107 return NULL;
108
109 err = security_dentry_init_security(dentry, sattr->ia_mode,
110 &dentry->d_name, (void **)&label->label, &label->len);
111 if (err == 0)
112 return label;
113
114 return NULL;
115 }
116 static inline void
117 nfs4_label_release_security(struct nfs4_label *label)
118 {
119 if (label)
120 security_release_secctx(label->label, label->len);
121 }
122 static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
123 {
124 if (label)
125 return server->attr_bitmask;
126
127 return server->attr_bitmask_nl;
128 }
129 #else
130 static inline struct nfs4_label *
131 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
132 struct iattr *sattr, struct nfs4_label *l)
133 { return NULL; }
134 static inline void
135 nfs4_label_release_security(struct nfs4_label *label)
136 { return; }
137 static inline u32 *
138 nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
139 { return server->attr_bitmask; }
140 #endif
141
142 /* Prevent leaks of NFSv4 errors into userland */
143 static int nfs4_map_errors(int err)
144 {
145 if (err >= -1000)
146 return err;
147 switch (err) {
148 case -NFS4ERR_RESOURCE:
149 case -NFS4ERR_LAYOUTTRYLATER:
150 case -NFS4ERR_RECALLCONFLICT:
151 return -EREMOTEIO;
152 case -NFS4ERR_WRONGSEC:
153 case -NFS4ERR_WRONG_CRED:
154 return -EPERM;
155 case -NFS4ERR_BADOWNER:
156 case -NFS4ERR_BADNAME:
157 return -EINVAL;
158 case -NFS4ERR_SHARE_DENIED:
159 return -EACCES;
160 case -NFS4ERR_MINOR_VERS_MISMATCH:
161 return -EPROTONOSUPPORT;
162 case -NFS4ERR_FILE_OPEN:
163 return -EBUSY;
164 default:
165 dprintk("%s could not handle NFSv4 error %d\n",
166 __func__, -err);
167 break;
168 }
169 return -EIO;
170 }
171
172 /*
173 * This is our standard bitmap for GETATTR requests.
174 */
175 const u32 nfs4_fattr_bitmap[3] = {
176 FATTR4_WORD0_TYPE
177 | FATTR4_WORD0_CHANGE
178 | FATTR4_WORD0_SIZE
179 | FATTR4_WORD0_FSID
180 | FATTR4_WORD0_FILEID,
181 FATTR4_WORD1_MODE
182 | FATTR4_WORD1_NUMLINKS
183 | FATTR4_WORD1_OWNER
184 | FATTR4_WORD1_OWNER_GROUP
185 | FATTR4_WORD1_RAWDEV
186 | FATTR4_WORD1_SPACE_USED
187 | FATTR4_WORD1_TIME_ACCESS
188 | FATTR4_WORD1_TIME_METADATA
189 | FATTR4_WORD1_TIME_MODIFY
190 | FATTR4_WORD1_MOUNTED_ON_FILEID,
191 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
192 FATTR4_WORD2_SECURITY_LABEL
193 #endif
194 };
195
196 static const u32 nfs4_pnfs_open_bitmap[3] = {
197 FATTR4_WORD0_TYPE
198 | FATTR4_WORD0_CHANGE
199 | FATTR4_WORD0_SIZE
200 | FATTR4_WORD0_FSID
201 | FATTR4_WORD0_FILEID,
202 FATTR4_WORD1_MODE
203 | FATTR4_WORD1_NUMLINKS
204 | FATTR4_WORD1_OWNER
205 | FATTR4_WORD1_OWNER_GROUP
206 | FATTR4_WORD1_RAWDEV
207 | FATTR4_WORD1_SPACE_USED
208 | FATTR4_WORD1_TIME_ACCESS
209 | FATTR4_WORD1_TIME_METADATA
210 | FATTR4_WORD1_TIME_MODIFY,
211 FATTR4_WORD2_MDSTHRESHOLD
212 };
213
214 static const u32 nfs4_open_noattr_bitmap[3] = {
215 FATTR4_WORD0_TYPE
216 | FATTR4_WORD0_CHANGE
217 | FATTR4_WORD0_FILEID,
218 };
219
220 const u32 nfs4_statfs_bitmap[3] = {
221 FATTR4_WORD0_FILES_AVAIL
222 | FATTR4_WORD0_FILES_FREE
223 | FATTR4_WORD0_FILES_TOTAL,
224 FATTR4_WORD1_SPACE_AVAIL
225 | FATTR4_WORD1_SPACE_FREE
226 | FATTR4_WORD1_SPACE_TOTAL
227 };
228
229 const u32 nfs4_pathconf_bitmap[3] = {
230 FATTR4_WORD0_MAXLINK
231 | FATTR4_WORD0_MAXNAME,
232 0
233 };
234
235 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
236 | FATTR4_WORD0_MAXREAD
237 | FATTR4_WORD0_MAXWRITE
238 | FATTR4_WORD0_LEASE_TIME,
239 FATTR4_WORD1_TIME_DELTA
240 | FATTR4_WORD1_FS_LAYOUT_TYPES,
241 FATTR4_WORD2_LAYOUT_BLKSIZE
242 };
243
244 const u32 nfs4_fs_locations_bitmap[3] = {
245 FATTR4_WORD0_TYPE
246 | FATTR4_WORD0_CHANGE
247 | FATTR4_WORD0_SIZE
248 | FATTR4_WORD0_FSID
249 | FATTR4_WORD0_FILEID
250 | FATTR4_WORD0_FS_LOCATIONS,
251 FATTR4_WORD1_MODE
252 | FATTR4_WORD1_NUMLINKS
253 | FATTR4_WORD1_OWNER
254 | FATTR4_WORD1_OWNER_GROUP
255 | FATTR4_WORD1_RAWDEV
256 | FATTR4_WORD1_SPACE_USED
257 | FATTR4_WORD1_TIME_ACCESS
258 | FATTR4_WORD1_TIME_METADATA
259 | FATTR4_WORD1_TIME_MODIFY
260 | FATTR4_WORD1_MOUNTED_ON_FILEID,
261 };
262
263 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
264 struct nfs4_readdir_arg *readdir)
265 {
266 __be32 *start, *p;
267
268 if (cookie > 2) {
269 readdir->cookie = cookie;
270 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
271 return;
272 }
273
274 readdir->cookie = 0;
275 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
276 if (cookie == 2)
277 return;
278
279 /*
280 * NFSv4 servers do not return entries for '.' and '..'.
281 * Therefore, we fake these entries here. We let '.'
282 * have cookie 0 and '..' have cookie 1. Note that
283 * when talking to the server, we always send cookie 0
284 * instead of 1 or 2.
285 */
286 start = p = kmap_atomic(*readdir->pages);
287
288 if (cookie == 0) {
289 *p++ = xdr_one; /* next */
290 *p++ = xdr_zero; /* cookie, first word */
291 *p++ = xdr_one; /* cookie, second word */
292 *p++ = xdr_one; /* entry len */
293 memcpy(p, ".\0\0\0", 4); /* entry */
294 p++;
295 *p++ = xdr_one; /* bitmap length */
296 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
297 *p++ = htonl(8); /* attribute buffer length */
298 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
299 }
300
301 *p++ = xdr_one; /* next */
302 *p++ = xdr_zero; /* cookie, first word */
303 *p++ = xdr_two; /* cookie, second word */
304 *p++ = xdr_two; /* entry len */
305 memcpy(p, "..\0\0", 4); /* entry */
306 p++;
307 *p++ = xdr_one; /* bitmap length */
308 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
309 *p++ = htonl(8); /* attribute buffer length */
310 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
311
312 readdir->pgbase = (char *)p - (char *)start;
313 readdir->count -= readdir->pgbase;
314 kunmap_atomic(start);
315 }
316
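/*
 * Compute the next retry delay for a polled NFSv4 request: start at
 * NFS4_POLL_RETRY_MIN and double the stored timeout on each call,
 * capping it at NFS4_POLL_RETRY_MAX (a simple exponential backoff).
 */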
317 static long nfs4_update_delay(long *timeout)
318 {
319 long ret;
320 if (!timeout)
321 return NFS4_POLL_RETRY_MAX;
322 if (*timeout <= 0)
323 *timeout = NFS4_POLL_RETRY_MIN;
324 if (*timeout > NFS4_POLL_RETRY_MAX)
325 *timeout = NFS4_POLL_RETRY_MAX;
326 ret = *timeout;
327 *timeout <<= 1;
328 return ret;
329 }
330
331 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
332 {
333 int res = 0;
334
335 might_sleep();
336
337 freezable_schedule_timeout_killable_unsafe(
338 nfs4_update_delay(timeout));
339 if (fatal_signal_pending(current))
340 res = -ERESTARTSYS;
341 return res;
342 }
343
344 /* This is the error handling routine for processes that are allowed
345 * to sleep.
346 */
347 int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
348 {
349 struct nfs_client *clp = server->nfs_client;
350 struct nfs4_state *state = exception->state;
351 struct inode *inode = exception->inode;
352 int ret = errorcode;
353
354 exception->retry = 0;
355 switch(errorcode) {
356 case 0:
357 return 0;
358 case -NFS4ERR_OPENMODE:
359 case -NFS4ERR_DELEG_REVOKED:
360 case -NFS4ERR_ADMIN_REVOKED:
361 case -NFS4ERR_BAD_STATEID:
362 if (inode && nfs4_have_delegation(inode, FMODE_READ)) {
363 nfs4_inode_return_delegation(inode);
364 exception->retry = 1;
365 return 0;
366 }
367 if (state == NULL)
368 break;
369 ret = nfs4_schedule_stateid_recovery(server, state);
370 if (ret < 0)
371 break;
372 goto wait_on_recovery;
373 case -NFS4ERR_EXPIRED:
374 if (state != NULL) {
375 ret = nfs4_schedule_stateid_recovery(server, state);
376 if (ret < 0)
377 break;
378 }
379 case -NFS4ERR_STALE_STATEID:
380 case -NFS4ERR_STALE_CLIENTID:
381 nfs4_schedule_lease_recovery(clp);
382 goto wait_on_recovery;
383 case -NFS4ERR_MOVED:
384 ret = nfs4_schedule_migration_recovery(server);
385 if (ret < 0)
386 break;
387 goto wait_on_recovery;
388 case -NFS4ERR_LEASE_MOVED:
389 nfs4_schedule_lease_moved_recovery(clp);
390 goto wait_on_recovery;
391 #if defined(CONFIG_NFS_V4_1)
392 case -NFS4ERR_BADSESSION:
393 case -NFS4ERR_BADSLOT:
394 case -NFS4ERR_BAD_HIGH_SLOT:
395 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
396 case -NFS4ERR_DEADSESSION:
397 case -NFS4ERR_SEQ_FALSE_RETRY:
398 case -NFS4ERR_SEQ_MISORDERED:
399 dprintk("%s ERROR: %d Reset session\n", __func__,
400 errorcode);
401 nfs4_schedule_session_recovery(clp->cl_session, errorcode);
402 goto wait_on_recovery;
403 #endif /* defined(CONFIG_NFS_V4_1) */
404 case -NFS4ERR_FILE_OPEN:
405 if (exception->timeout > HZ) {
406 /* We have retried a decent amount, time to
407 * fail
408 */
409 ret = -EBUSY;
410 break;
411 }
412 case -NFS4ERR_GRACE:
413 case -NFS4ERR_DELAY:
414 ret = nfs4_delay(server->client, &exception->timeout);
415 if (ret != 0)
416 break;
417 case -NFS4ERR_RETRY_UNCACHED_REP:
418 case -NFS4ERR_OLD_STATEID:
419 exception->retry = 1;
420 break;
421 case -NFS4ERR_BADOWNER:
422 /* The following works around a Linux server bug! */
423 case -NFS4ERR_BADNAME:
424 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
425 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
426 exception->retry = 1;
427 printk(KERN_WARNING "NFS: v4 server %s "
428 "does not accept raw "
429 "uid/gids. "
430 "Reenabling the idmapper.\n",
431 server->nfs_client->cl_hostname);
432 }
433 }
434 /* We failed to handle the error */
435 return nfs4_map_errors(ret);
436 wait_on_recovery:
437 ret = nfs4_wait_clnt_recover(clp);
438 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
439 return -EIO;
440 if (ret == 0)
441 exception->retry = 1;
442 return ret;
443 }
444
445 /*
446 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
447 * or 'false' otherwise.
448 */
449 static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
450 {
451 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
452
453 if (flavor == RPC_AUTH_GSS_KRB5I ||
454 flavor == RPC_AUTH_GSS_KRB5P)
455 return true;
456
457 return false;
458 }
459
460 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
461 {
462 spin_lock(&clp->cl_lock);
463 if (time_before(clp->cl_last_renewal,timestamp))
464 clp->cl_last_renewal = timestamp;
465 spin_unlock(&clp->cl_lock);
466 }
467
468 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
469 {
470 struct nfs_client *clp = server->nfs_client;
471
472 if (!nfs4_has_session(clp))
473 do_renew_lease(clp, timestamp);
474 }
475
476 struct nfs4_call_sync_data {
477 const struct nfs_server *seq_server;
478 struct nfs4_sequence_args *seq_args;
479 struct nfs4_sequence_res *seq_res;
480 };
481
482 void nfs4_init_sequence(struct nfs4_sequence_args *args,
483 struct nfs4_sequence_res *res, int cache_reply)
484 {
485 args->sa_slot = NULL;
486 args->sa_cache_this = cache_reply;
487 args->sa_privileged = 0;
488
489 res->sr_slot = NULL;
490 }
491
492 static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
493 {
494 args->sa_privileged = 1;
495 }
496
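/*
 * Reserve a slot in the NFSv4.0 slot table before starting an RPC call.
 * If the table is draining (and the task is not privileged) or no slot
 * is currently available, the task is put to sleep on the slot table
 * wait queue and -EAGAIN is returned so the call is retried later.
 */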
497 int nfs40_setup_sequence(struct nfs4_slot_table *tbl,
498 struct nfs4_sequence_args *args,
499 struct nfs4_sequence_res *res,
500 struct rpc_task *task)
501 {
502 struct nfs4_slot *slot;
503
504 /* slot already allocated? */
505 if (res->sr_slot != NULL)
506 goto out_start;
507
508 spin_lock(&tbl->slot_tbl_lock);
509 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
510 goto out_sleep;
511
512 slot = nfs4_alloc_slot(tbl);
513 if (IS_ERR(slot)) {
514 if (slot == ERR_PTR(-ENOMEM))
515 task->tk_timeout = HZ >> 2;
516 goto out_sleep;
517 }
518 spin_unlock(&tbl->slot_tbl_lock);
519
520 args->sa_slot = slot;
521 res->sr_slot = slot;
522
523 out_start:
524 rpc_call_start(task);
525 return 0;
526
527 out_sleep:
528 if (args->sa_privileged)
529 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
530 NULL, RPC_PRIORITY_PRIVILEGED);
531 else
532 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
533 spin_unlock(&tbl->slot_tbl_lock);
534 return -EAGAIN;
535 }
536 EXPORT_SYMBOL_GPL(nfs40_setup_sequence);
537
538 static int nfs40_sequence_done(struct rpc_task *task,
539 struct nfs4_sequence_res *res)
540 {
541 struct nfs4_slot *slot = res->sr_slot;
542 struct nfs4_slot_table *tbl;
543
544 if (slot == NULL)
545 goto out;
546
547 tbl = slot->table;
548 spin_lock(&tbl->slot_tbl_lock);
549 if (!nfs41_wake_and_assign_slot(tbl, slot))
550 nfs4_free_slot(tbl, slot);
551 spin_unlock(&tbl->slot_tbl_lock);
552
553 res->sr_slot = NULL;
554 out:
555 return 1;
556 }
557
558 #if defined(CONFIG_NFS_V4_1)
559
560 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
561 {
562 struct nfs4_session *session;
563 struct nfs4_slot_table *tbl;
564 struct nfs4_slot *slot = res->sr_slot;
565 bool send_new_highest_used_slotid = false;
566
567 tbl = slot->table;
568 session = tbl->session;
569
570 spin_lock(&tbl->slot_tbl_lock);
571 /* Be nice to the server: try to ensure that the last transmitted
572 * value for highest_used_slotid <= target_highest_slotid
573 */
574 if (tbl->highest_used_slotid > tbl->target_highest_slotid)
575 send_new_highest_used_slotid = true;
576
577 if (nfs41_wake_and_assign_slot(tbl, slot)) {
578 send_new_highest_used_slotid = false;
579 goto out_unlock;
580 }
581 nfs4_free_slot(tbl, slot);
582
583 if (tbl->highest_used_slotid != NFS4_NO_SLOT)
584 send_new_highest_used_slotid = false;
585 out_unlock:
586 spin_unlock(&tbl->slot_tbl_lock);
587 res->sr_slot = NULL;
588 if (send_new_highest_used_slotid)
589 nfs41_server_notify_highest_slotid_update(session->clp);
590 }
591
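/*
 * Process the result of the SEQUENCE operation in an NFSv4.1 compound:
 * on success, bump the slot's sequence number and renew the clientid
 * lease; on SEQUENCE errors such as NFS4ERR_DELAY, NFS4ERR_BADSLOT or
 * NFS4ERR_SEQ_MISORDERED, restart the RPC call with an adjusted slot
 * or sequence number.
 */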
592 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
593 {
594 struct nfs4_session *session;
595 struct nfs4_slot *slot = res->sr_slot;
596 struct nfs_client *clp;
597 bool interrupted = false;
598 int ret = 1;
599
600 if (slot == NULL)
601 goto out_noaction;
602 /* don't increment the sequence number if the task wasn't sent */
603 if (!RPC_WAS_SENT(task))
604 goto out;
605
606 session = slot->table->session;
607
608 if (slot->interrupted) {
609 slot->interrupted = 0;
610 interrupted = true;
611 }
612
613 trace_nfs4_sequence_done(session, res);
614 /* Check the SEQUENCE operation status */
615 switch (res->sr_status) {
616 case 0:
617 /* Update the slot's sequence and clientid lease timer */
618 ++slot->seq_nr;
619 clp = session->clp;
620 do_renew_lease(clp, res->sr_timestamp);
621 /* Check sequence flags */
622 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
623 nfs41_update_target_slotid(slot->table, slot, res);
624 break;
625 case 1:
626 /*
627 * sr_status remains 1 if an RPC level error occurred.
628 * The server may or may not have processed the sequence
629 * operation.
630 * Mark the slot as having hosted an interrupted RPC call.
631 */
632 slot->interrupted = 1;
633 goto out;
634 case -NFS4ERR_DELAY:
635 /* The server detected a resend of the RPC call and
636 * returned NFS4ERR_DELAY as per Section 2.10.6.2
637 * of RFC5661.
638 */
639 dprintk("%s: slot=%u seq=%u: Operation in progress\n",
640 __func__,
641 slot->slot_nr,
642 slot->seq_nr);
643 goto out_retry;
644 case -NFS4ERR_BADSLOT:
645 /*
646 * The slot id we used was probably retired. Try again
647 * using a different slot id.
648 */
649 goto retry_nowait;
650 case -NFS4ERR_SEQ_MISORDERED:
651 /*
652 * Was the last operation on this sequence interrupted?
653 * If so, retry after bumping the sequence number.
654 */
655 if (interrupted) {
656 ++slot->seq_nr;
657 goto retry_nowait;
658 }
659 /*
660 * Could this slot have been previously retired?
661 * If so, then the server may be expecting seq_nr = 1!
662 */
663 if (slot->seq_nr != 1) {
664 slot->seq_nr = 1;
665 goto retry_nowait;
666 }
667 break;
668 case -NFS4ERR_SEQ_FALSE_RETRY:
669 ++slot->seq_nr;
670 goto retry_nowait;
671 default:
672 /* Just update the slot sequence no. */
673 ++slot->seq_nr;
674 }
675 out:
676 /* The session may be reset by one of the error handlers. */
677 dprintk("%s: Error %d, freeing the slot\n", __func__, res->sr_status);
678 nfs41_sequence_free_slot(res);
679 out_noaction:
680 return ret;
681 retry_nowait:
682 if (rpc_restart_call_prepare(task)) {
683 task->tk_status = 0;
684 ret = 0;
685 }
686 goto out;
687 out_retry:
688 if (!rpc_restart_call(task))
689 goto out;
690 rpc_delay(task, NFS4_POLL_RETRY_MAX);
691 return 0;
692 }
693 EXPORT_SYMBOL_GPL(nfs41_sequence_done);
694
695 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
696 {
697 if (res->sr_slot == NULL)
698 return 1;
699 if (!res->sr_slot->table->session)
700 return nfs40_sequence_done(task, res);
701 return nfs41_sequence_done(task, res);
702 }
703 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
704
705 int nfs41_setup_sequence(struct nfs4_session *session,
706 struct nfs4_sequence_args *args,
707 struct nfs4_sequence_res *res,
708 struct rpc_task *task)
709 {
710 struct nfs4_slot *slot;
711 struct nfs4_slot_table *tbl;
712
713 dprintk("--> %s\n", __func__);
714 /* slot already allocated? */
715 if (res->sr_slot != NULL)
716 goto out_success;
717
718 tbl = &session->fc_slot_table;
719
720 task->tk_timeout = 0;
721
722 spin_lock(&tbl->slot_tbl_lock);
723 if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) &&
724 !args->sa_privileged) {
725 /* The state manager will wait until the slot table is empty */
726 dprintk("%s session is draining\n", __func__);
727 goto out_sleep;
728 }
729
730 slot = nfs4_alloc_slot(tbl);
731 if (IS_ERR(slot)) {
732 /* If out of memory, try again in 1/4 second */
733 if (slot == ERR_PTR(-ENOMEM))
734 task->tk_timeout = HZ >> 2;
735 dprintk("<-- %s: no free slots\n", __func__);
736 goto out_sleep;
737 }
738 spin_unlock(&tbl->slot_tbl_lock);
739
740 args->sa_slot = slot;
741
742 dprintk("<-- %s slotid=%u seqid=%u\n", __func__,
743 slot->slot_nr, slot->seq_nr);
744
745 res->sr_slot = slot;
746 res->sr_timestamp = jiffies;
747 res->sr_status_flags = 0;
748 /*
749 * sr_status is only set in decode_sequence, and so will remain
750 * set to 1 if an rpc level failure occurs.
751 */
752 res->sr_status = 1;
753 trace_nfs4_setup_sequence(session, args);
754 out_success:
755 rpc_call_start(task);
756 return 0;
757 out_sleep:
758 /* Privileged tasks are queued with top priority */
759 if (args->sa_privileged)
760 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
761 NULL, RPC_PRIORITY_PRIVILEGED);
762 else
763 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
764 spin_unlock(&tbl->slot_tbl_lock);
765 return -EAGAIN;
766 }
767 EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
768
769 static int nfs4_setup_sequence(const struct nfs_server *server,
770 struct nfs4_sequence_args *args,
771 struct nfs4_sequence_res *res,
772 struct rpc_task *task)
773 {
774 struct nfs4_session *session = nfs4_get_session(server);
775 int ret = 0;
776
777 if (!session)
778 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
779 args, res, task);
780
781 dprintk("--> %s clp %p session %p sr_slot %u\n",
782 __func__, session->clp, session, res->sr_slot ?
783 res->sr_slot->slot_nr : NFS4_NO_SLOT);
784
785 ret = nfs41_setup_sequence(session, args, res, task);
786
787 dprintk("<-- %s status=%d\n", __func__, ret);
788 return ret;
789 }
790
791 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
792 {
793 struct nfs4_call_sync_data *data = calldata;
794 struct nfs4_session *session = nfs4_get_session(data->seq_server);
795
796 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
797
798 nfs41_setup_sequence(session, data->seq_args, data->seq_res, task);
799 }
800
801 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
802 {
803 struct nfs4_call_sync_data *data = calldata;
804
805 nfs41_sequence_done(task, data->seq_res);
806 }
807
808 static const struct rpc_call_ops nfs41_call_sync_ops = {
809 .rpc_call_prepare = nfs41_call_sync_prepare,
810 .rpc_call_done = nfs41_call_sync_done,
811 };
812
813 #else /* !CONFIG_NFS_V4_1 */
814
815 static int nfs4_setup_sequence(const struct nfs_server *server,
816 struct nfs4_sequence_args *args,
817 struct nfs4_sequence_res *res,
818 struct rpc_task *task)
819 {
820 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
821 args, res, task);
822 }
823
824 int nfs4_sequence_done(struct rpc_task *task,
825 struct nfs4_sequence_res *res)
826 {
827 return nfs40_sequence_done(task, res);
828 }
829 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
830
831 #endif /* !CONFIG_NFS_V4_1 */
832
833 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
834 {
835 struct nfs4_call_sync_data *data = calldata;
836 nfs4_setup_sequence(data->seq_server,
837 data->seq_args, data->seq_res, task);
838 }
839
840 static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
841 {
842 struct nfs4_call_sync_data *data = calldata;
843 nfs4_sequence_done(task, data->seq_res);
844 }
845
846 static const struct rpc_call_ops nfs40_call_sync_ops = {
847 .rpc_call_prepare = nfs40_call_sync_prepare,
848 .rpc_call_done = nfs40_call_sync_done,
849 };
850
851 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
852 struct nfs_server *server,
853 struct rpc_message *msg,
854 struct nfs4_sequence_args *args,
855 struct nfs4_sequence_res *res)
856 {
857 int ret;
858 struct rpc_task *task;
859 struct nfs_client *clp = server->nfs_client;
860 struct nfs4_call_sync_data data = {
861 .seq_server = server,
862 .seq_args = args,
863 .seq_res = res,
864 };
865 struct rpc_task_setup task_setup = {
866 .rpc_client = clnt,
867 .rpc_message = msg,
868 .callback_ops = clp->cl_mvops->call_sync_ops,
869 .callback_data = &data
870 };
871
872 task = rpc_run_task(&task_setup);
873 if (IS_ERR(task))
874 ret = PTR_ERR(task);
875 else {
876 ret = task->tk_status;
877 rpc_put_task(task);
878 }
879 return ret;
880 }
881
882 int nfs4_call_sync(struct rpc_clnt *clnt,
883 struct nfs_server *server,
884 struct rpc_message *msg,
885 struct nfs4_sequence_args *args,
886 struct nfs4_sequence_res *res,
887 int cache_reply)
888 {
889 nfs4_init_sequence(args, res, cache_reply);
890 return nfs4_call_sync_sequence(clnt, server, msg, args, res);
891 }
892
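/*
 * Apply the change_info4 returned by a directory-modifying operation:
 * invalidate the directory's cached attributes and readdir data, and
 * force a lookup revalidation if the change was not applied atomically
 * or our cached change attribute no longer matches the 'before' value.
 */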
893 static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
894 {
895 struct nfs_inode *nfsi = NFS_I(dir);
896
897 spin_lock(&dir->i_lock);
898 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
899 if (!cinfo->atomic || cinfo->before != dir->i_version)
900 nfs_force_lookup_revalidate(dir);
901 dir->i_version = cinfo->after;
902 nfsi->attr_gencount = nfs_inc_attr_generation_counter();
903 nfs_fscache_invalidate(dir);
904 spin_unlock(&dir->i_lock);
905 }
906
907 struct nfs4_opendata {
908 struct kref kref;
909 struct nfs_openargs o_arg;
910 struct nfs_openres o_res;
911 struct nfs_open_confirmargs c_arg;
912 struct nfs_open_confirmres c_res;
913 struct nfs4_string owner_name;
914 struct nfs4_string group_name;
915 struct nfs_fattr f_attr;
916 struct nfs4_label *f_label;
917 struct dentry *dir;
918 struct dentry *dentry;
919 struct nfs4_state_owner *owner;
920 struct nfs4_state *state;
921 struct iattr attrs;
922 unsigned long timestamp;
923 unsigned int rpc_done : 1;
924 unsigned int file_created : 1;
925 unsigned int is_recover : 1;
926 int rpc_status;
927 int cancelled;
928 };
929
930 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
931 int err, struct nfs4_exception *exception)
932 {
933 if (err != -EINVAL)
934 return false;
935 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
936 return false;
937 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
938 exception->retry = 1;
939 return true;
940 }
941
942 static u32
943 nfs4_map_atomic_open_share(struct nfs_server *server,
944 fmode_t fmode, int openflags)
945 {
946 u32 res = 0;
947
948 switch (fmode & (FMODE_READ | FMODE_WRITE)) {
949 case FMODE_READ:
950 res = NFS4_SHARE_ACCESS_READ;
951 break;
952 case FMODE_WRITE:
953 res = NFS4_SHARE_ACCESS_WRITE;
954 break;
955 case FMODE_READ|FMODE_WRITE:
956 res = NFS4_SHARE_ACCESS_BOTH;
957 }
958 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
959 goto out;
960 /* Want no delegation if we're using O_DIRECT */
961 if (openflags & O_DIRECT)
962 res |= NFS4_SHARE_WANT_NO_DELEG;
963 out:
964 return res;
965 }
966
967 static enum open_claim_type4
968 nfs4_map_atomic_open_claim(struct nfs_server *server,
969 enum open_claim_type4 claim)
970 {
971 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
972 return claim;
973 switch (claim) {
974 default:
975 return claim;
976 case NFS4_OPEN_CLAIM_FH:
977 return NFS4_OPEN_CLAIM_NULL;
978 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
979 return NFS4_OPEN_CLAIM_DELEGATE_CUR;
980 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
981 return NFS4_OPEN_CLAIM_DELEGATE_PREV;
982 }
983 }
984
985 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
986 {
987 p->o_res.f_attr = &p->f_attr;
988 p->o_res.f_label = p->f_label;
989 p->o_res.seqid = p->o_arg.seqid;
990 p->c_res.seqid = p->c_arg.seqid;
991 p->o_res.server = p->o_arg.server;
992 p->o_res.access_request = p->o_arg.access;
993 nfs_fattr_init(&p->f_attr);
994 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
995 }
996
997 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
998 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
999 const struct iattr *attrs,
1000 struct nfs4_label *label,
1001 enum open_claim_type4 claim,
1002 gfp_t gfp_mask)
1003 {
1004 struct dentry *parent = dget_parent(dentry);
1005 struct inode *dir = d_inode(parent);
1006 struct nfs_server *server = NFS_SERVER(dir);
1007 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
1008 struct nfs4_opendata *p;
1009
1010 p = kzalloc(sizeof(*p), gfp_mask);
1011 if (p == NULL)
1012 goto err;
1013
1014 p->f_label = nfs4_label_alloc(server, gfp_mask);
1015 if (IS_ERR(p->f_label))
1016 goto err_free_p;
1017
1018 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1019 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1020 if (IS_ERR(p->o_arg.seqid))
1021 goto err_free_label;
1022 nfs_sb_active(dentry->d_sb);
1023 p->dentry = dget(dentry);
1024 p->dir = parent;
1025 p->owner = sp;
1026 atomic_inc(&sp->so_count);
1027 p->o_arg.open_flags = flags;
1028 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
1029 p->o_arg.share_access = nfs4_map_atomic_open_share(server,
1030 fmode, flags);
1031 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
1032 * will return permission denied for all bits until close */
1033 if (!(flags & O_EXCL)) {
1034 /* ask server to check for all possible rights as results
1035 * are cached */
1036 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
1037 NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
1038 }
1039 p->o_arg.clientid = server->nfs_client->cl_clientid;
1040 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
1041 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
1042 p->o_arg.name = &dentry->d_name;
1043 p->o_arg.server = server;
1044 p->o_arg.bitmask = nfs4_bitmask(server, label);
1045 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1046 p->o_arg.label = label;
1047 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1048 switch (p->o_arg.claim) {
1049 case NFS4_OPEN_CLAIM_NULL:
1050 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1051 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1052 p->o_arg.fh = NFS_FH(dir);
1053 break;
1054 case NFS4_OPEN_CLAIM_PREVIOUS:
1055 case NFS4_OPEN_CLAIM_FH:
1056 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1057 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1058 p->o_arg.fh = NFS_FH(d_inode(dentry));
1059 }
1060 if (attrs != NULL && attrs->ia_valid != 0) {
1061 __u32 verf[2];
1062
1063 p->o_arg.u.attrs = &p->attrs;
1064 memcpy(&p->attrs, attrs, sizeof(p->attrs));
1065
1066 verf[0] = jiffies;
1067 verf[1] = current->pid;
1068 memcpy(p->o_arg.u.verifier.data, verf,
1069 sizeof(p->o_arg.u.verifier.data));
1070 }
1071 p->c_arg.fh = &p->o_res.fh;
1072 p->c_arg.stateid = &p->o_res.stateid;
1073 p->c_arg.seqid = p->o_arg.seqid;
1074 nfs4_init_opendata_res(p);
1075 kref_init(&p->kref);
1076 return p;
1077
1078 err_free_label:
1079 nfs4_label_free(p->f_label);
1080 err_free_p:
1081 kfree(p);
1082 err:
1083 dput(parent);
1084 return NULL;
1085 }
1086
1087 static void nfs4_opendata_free(struct kref *kref)
1088 {
1089 struct nfs4_opendata *p = container_of(kref,
1090 struct nfs4_opendata, kref);
1091 struct super_block *sb = p->dentry->d_sb;
1092
1093 nfs_free_seqid(p->o_arg.seqid);
1094 if (p->state != NULL)
1095 nfs4_put_open_state(p->state);
1096 nfs4_put_state_owner(p->owner);
1097
1098 nfs4_label_free(p->f_label);
1099
1100 dput(p->dir);
1101 dput(p->dentry);
1102 nfs_sb_deactive(sb);
1103 nfs_fattr_free_names(&p->f_attr);
1104 kfree(p->f_attr.mdsthreshold);
1105 kfree(p);
1106 }
1107
1108 static void nfs4_opendata_put(struct nfs4_opendata *p)
1109 {
1110 if (p != NULL)
1111 kref_put(&p->kref, nfs4_opendata_free);
1112 }
1113
1114 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
1115 {
1116 int ret;
1117
1118 ret = rpc_wait_for_completion_task(task);
1119 return ret;
1120 }
1121
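/*
 * Return non-zero if an existing open stateid already covers the
 * requested open mode, so that no OPEN call needs to go on the wire.
 * O_EXCL and O_TRUNC opens always require a round trip to the server.
 */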
1122 static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
1123 {
1124 int ret = 0;
1125
1126 if (open_mode & (O_EXCL|O_TRUNC))
1127 goto out;
1128 switch (mode & (FMODE_READ|FMODE_WRITE)) {
1129 case FMODE_READ:
1130 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
1131 && state->n_rdonly != 0;
1132 break;
1133 case FMODE_WRITE:
1134 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
1135 && state->n_wronly != 0;
1136 break;
1137 case FMODE_READ|FMODE_WRITE:
1138 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
1139 && state->n_rdwr != 0;
1140 }
1141 out:
1142 return ret;
1143 }
1144
1145 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
1146 {
1147 if (delegation == NULL)
1148 return 0;
1149 if ((delegation->type & fmode) != fmode)
1150 return 0;
1151 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
1152 return 0;
1153 nfs_mark_delegation_referenced(delegation);
1154 return 1;
1155 }
1156
1157 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
1158 {
1159 switch (fmode) {
1160 case FMODE_WRITE:
1161 state->n_wronly++;
1162 break;
1163 case FMODE_READ:
1164 state->n_rdonly++;
1165 break;
1166 case FMODE_READ|FMODE_WRITE:
1167 state->n_rdwr++;
1168 }
1169 nfs4_state_set_mode_locked(state, state->state | fmode);
1170 }
1171
1172 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
1173 {
1174 struct nfs_client *clp = state->owner->so_server->nfs_client;
1175 bool need_recover = false;
1176
1177 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
1178 need_recover = true;
1179 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
1180 need_recover = true;
1181 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
1182 need_recover = true;
1183 if (need_recover)
1184 nfs4_state_mark_reclaim_nograce(clp, state);
1185 }
1186
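/*
 * Decide whether the stateid returned by the server should replace the
 * cached open stateid: always on the first OPEN (NFS_OPEN_STATE was not
 * yet set), when the "other" field differs, or when the returned seqid
 * is newer than the one we hold.
 */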
1187 static bool nfs_need_update_open_stateid(struct nfs4_state *state,
1188 nfs4_stateid *stateid)
1189 {
1190 if (test_and_set_bit(NFS_OPEN_STATE, &state->flags) == 0)
1191 return true;
1192 if (!nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1193 nfs_test_and_clear_all_open_stateid(state);
1194 return true;
1195 }
1196 if (nfs4_stateid_is_newer(stateid, &state->open_stateid))
1197 return true;
1198 return false;
1199 }
1200
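/*
 * Called from nfs_clear_open_stateid_locked() when a CLOSE or
 * OPEN_DOWNGRADE reply races with a newer OPEN: re-derive the open mode
 * bits from the counts of outstanding opens, and re-set NFS_OPEN_STATE,
 * since at least one open remains and the stateid is still valid.
 */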
1201 static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1202 {
1203 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
1204 return;
1205 if (state->n_wronly)
1206 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1207 if (state->n_rdonly)
1208 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1209 if (state->n_rdwr)
1210 set_bit(NFS_O_RDWR_STATE, &state->flags);
1211 set_bit(NFS_OPEN_STATE, &state->flags);
1212 }
1213
1214 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1215 nfs4_stateid *stateid, fmode_t fmode)
1216 {
1217 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1218 switch (fmode & (FMODE_READ|FMODE_WRITE)) {
1219 case FMODE_WRITE:
1220 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1221 break;
1222 case FMODE_READ:
1223 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1224 break;
1225 case 0:
1226 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1227 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1228 clear_bit(NFS_OPEN_STATE, &state->flags);
1229 }
1230 if (stateid == NULL)
1231 return;
1232 /* Handle races with OPEN */
1233 if (!nfs4_stateid_match_other(stateid, &state->open_stateid) ||
1234 !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
1235 nfs_resync_open_stateid_locked(state);
1236 return;
1237 }
1238 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1239 nfs4_stateid_copy(&state->stateid, stateid);
1240 nfs4_stateid_copy(&state->open_stateid, stateid);
1241 }
1242
1243 static void nfs_clear_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
1244 {
1245 write_seqlock(&state->seqlock);
1246 nfs_clear_open_stateid_locked(state, stateid, fmode);
1247 write_sequnlock(&state->seqlock);
1248 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1249 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1250 }
1251
1252 static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
1253 {
1254 switch (fmode) {
1255 case FMODE_READ:
1256 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1257 break;
1258 case FMODE_WRITE:
1259 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1260 break;
1261 case FMODE_READ|FMODE_WRITE:
1262 set_bit(NFS_O_RDWR_STATE, &state->flags);
1263 }
1264 if (!nfs_need_update_open_stateid(state, stateid))
1265 return;
1266 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1267 nfs4_stateid_copy(&state->stateid, stateid);
1268 nfs4_stateid_copy(&state->open_stateid, stateid);
1269 }
1270
1271 static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
1272 {
1273 /*
1274 * Protect the call to nfs4_state_set_mode_locked and
1275 * serialise the stateid update
1276 */
1277 write_seqlock(&state->seqlock);
1278 if (deleg_stateid != NULL) {
1279 nfs4_stateid_copy(&state->stateid, deleg_stateid);
1280 set_bit(NFS_DELEGATED_STATE, &state->flags);
1281 }
1282 if (open_stateid != NULL)
1283 nfs_set_open_stateid_locked(state, open_stateid, fmode);
1284 write_sequnlock(&state->seqlock);
1285 spin_lock(&state->owner->so_lock);
1286 update_open_stateflags(state, fmode);
1287 spin_unlock(&state->owner->so_lock);
1288 }
1289
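/*
 * Record the stateid(s) returned by an OPEN. If a valid delegation
 * covers the requested mode, its stateid is recorded alongside the
 * open stateid; otherwise only the open stateid is used. Returns 1 if
 * the nfs4_state was updated, 0 otherwise.
 */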
1290 static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
1291 {
1292 struct nfs_inode *nfsi = NFS_I(state->inode);
1293 struct nfs_delegation *deleg_cur;
1294 int ret = 0;
1295
1296 fmode &= (FMODE_READ|FMODE_WRITE);
1297
1298 rcu_read_lock();
1299 deleg_cur = rcu_dereference(nfsi->delegation);
1300 if (deleg_cur == NULL)
1301 goto no_delegation;
1302
1303 spin_lock(&deleg_cur->lock);
1304 if (rcu_dereference(nfsi->delegation) != deleg_cur ||
1305 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
1306 (deleg_cur->type & fmode) != fmode)
1307 goto no_delegation_unlock;
1308
1309 if (delegation == NULL)
1310 delegation = &deleg_cur->stateid;
1311 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
1312 goto no_delegation_unlock;
1313
1314 nfs_mark_delegation_referenced(deleg_cur);
1315 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
1316 ret = 1;
1317 no_delegation_unlock:
1318 spin_unlock(&deleg_cur->lock);
1319 no_delegation:
1320 rcu_read_unlock();
1321
1322 if (!ret && open_stateid != NULL) {
1323 __update_open_stateid(state, open_stateid, NULL, fmode);
1324 ret = 1;
1325 }
1326 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1327 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1328
1329 return ret;
1330 }
1331
1332 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
1333 const nfs4_stateid *stateid)
1334 {
1335 struct nfs4_state *state = lsp->ls_state;
1336 bool ret = false;
1337
1338 spin_lock(&state->state_lock);
1339 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
1340 goto out_noupdate;
1341 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
1342 goto out_noupdate;
1343 nfs4_stateid_copy(&lsp->ls_stateid, stateid);
1344 ret = true;
1345 out_noupdate:
1346 spin_unlock(&state->state_lock);
1347 return ret;
1348 }
1349
1350 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1351 {
1352 struct nfs_delegation *delegation;
1353
1354 rcu_read_lock();
1355 delegation = rcu_dereference(NFS_I(inode)->delegation);
1356 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1357 rcu_read_unlock();
1358 return;
1359 }
1360 rcu_read_unlock();
1361 nfs4_inode_return_delegation(inode);
1362 }
1363
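/*
 * Try to satisfy an OPEN from state we already hold: either an existing
 * open stateid that covers the requested mode, or a delegation for that
 * mode. Returns the nfs4_state with an extra reference on success, or
 * an ERR_PTR (typically -EAGAIN) if a full OPEN call is required.
 */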
1364 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1365 {
1366 struct nfs4_state *state = opendata->state;
1367 struct nfs_inode *nfsi = NFS_I(state->inode);
1368 struct nfs_delegation *delegation;
1369 int open_mode = opendata->o_arg.open_flags;
1370 fmode_t fmode = opendata->o_arg.fmode;
1371 nfs4_stateid stateid;
1372 int ret = -EAGAIN;
1373
1374 for (;;) {
1375 spin_lock(&state->owner->so_lock);
1376 if (can_open_cached(state, fmode, open_mode)) {
1377 update_open_stateflags(state, fmode);
1378 spin_unlock(&state->owner->so_lock);
1379 goto out_return_state;
1380 }
1381 spin_unlock(&state->owner->so_lock);
1382 rcu_read_lock();
1383 delegation = rcu_dereference(nfsi->delegation);
1384 if (!can_open_delegated(delegation, fmode)) {
1385 rcu_read_unlock();
1386 break;
1387 }
1388 /* Save the delegation */
1389 nfs4_stateid_copy(&stateid, &delegation->stateid);
1390 rcu_read_unlock();
1391 nfs_release_seqid(opendata->o_arg.seqid);
1392 if (!opendata->is_recover) {
1393 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1394 if (ret != 0)
1395 goto out;
1396 }
1397 ret = -EAGAIN;
1398
1399 /* Try to update the stateid using the delegation */
1400 if (update_open_stateid(state, NULL, &stateid, fmode))
1401 goto out_return_state;
1402 }
1403 out:
1404 return ERR_PTR(ret);
1405 out_return_state:
1406 atomic_inc(&state->count);
1407 return state;
1408 }
1409
1410 static void
1411 nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1412 {
1413 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
1414 struct nfs_delegation *delegation;
1415 int delegation_flags = 0;
1416
1417 rcu_read_lock();
1418 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1419 if (delegation)
1420 delegation_flags = delegation->flags;
1421 rcu_read_unlock();
1422 if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
1423 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1424 "returning a delegation for "
1425 "OPEN(CLAIM_DELEGATE_CUR)\n",
1426 clp->cl_hostname);
1427 } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1428 nfs_inode_set_delegation(state->inode,
1429 data->owner->so_cred,
1430 &data->o_res);
1431 else
1432 nfs_inode_reclaim_delegation(state->inode,
1433 data->owner->so_cred,
1434 &data->o_res);
1435 }
1436
1437 /*
1438 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
1439 * and update the nfs4_state.
1440 */
1441 static struct nfs4_state *
1442 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
1443 {
1444 struct inode *inode = data->state->inode;
1445 struct nfs4_state *state = data->state;
1446 int ret;
1447
1448 if (!data->rpc_done) {
1449 if (data->rpc_status) {
1450 ret = data->rpc_status;
1451 goto err;
1452 }
1453 /* cached opens have already been processed */
1454 goto update;
1455 }
1456
1457 ret = nfs_refresh_inode(inode, &data->f_attr);
1458 if (ret)
1459 goto err;
1460
1461 if (data->o_res.delegation_type != 0)
1462 nfs4_opendata_check_deleg(data, state);
1463 update:
1464 update_open_stateid(state, &data->o_res.stateid, NULL,
1465 data->o_arg.fmode);
1466 atomic_inc(&state->count);
1467
1468 return state;
1469 err:
1470 return ERR_PTR(ret);
1471
1472 }
1473
1474 static struct nfs4_state *
1475 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1476 {
1477 struct inode *inode;
1478 struct nfs4_state *state = NULL;
1479 int ret;
1480
1481 if (!data->rpc_done) {
1482 state = nfs4_try_open_cached(data);
1483 goto out;
1484 }
1485
1486 ret = -EAGAIN;
1487 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1488 goto err;
1489 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr, data->f_label);
1490 ret = PTR_ERR(inode);
1491 if (IS_ERR(inode))
1492 goto err;
1493 ret = -ENOMEM;
1494 state = nfs4_get_open_state(inode, data->owner);
1495 if (state == NULL)
1496 goto err_put_inode;
1497 if (data->o_res.delegation_type != 0)
1498 nfs4_opendata_check_deleg(data, state);
1499 update_open_stateid(state, &data->o_res.stateid, NULL,
1500 data->o_arg.fmode);
1501 iput(inode);
1502 out:
1503 nfs_release_seqid(data->o_arg.seqid);
1504 return state;
1505 err_put_inode:
1506 iput(inode);
1507 err:
1508 return ERR_PTR(ret);
1509 }
1510
1511 static struct nfs4_state *
1512 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1513 {
1514 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
1515 return _nfs4_opendata_reclaim_to_nfs4_state(data);
1516 return _nfs4_opendata_to_nfs4_state(data);
1517 }
1518
1519 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1520 {
1521 struct nfs_inode *nfsi = NFS_I(state->inode);
1522 struct nfs_open_context *ctx;
1523
1524 spin_lock(&state->inode->i_lock);
1525 list_for_each_entry(ctx, &nfsi->open_files, list) {
1526 if (ctx->state != state)
1527 continue;
1528 get_nfs_open_context(ctx);
1529 spin_unlock(&state->inode->i_lock);
1530 return ctx;
1531 }
1532 spin_unlock(&state->inode->i_lock);
1533 return ERR_PTR(-ENOENT);
1534 }
1535
1536 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
1537 struct nfs4_state *state, enum open_claim_type4 claim)
1538 {
1539 struct nfs4_opendata *opendata;
1540
1541 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
1542 NULL, NULL, claim, GFP_NOFS);
1543 if (opendata == NULL)
1544 return ERR_PTR(-ENOMEM);
1545 opendata->state = state;
1546 atomic_inc(&state->count);
1547 return opendata;
1548 }
1549
1550 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
1551 {
1552 struct nfs4_state *newstate;
1553 int ret;
1554
1555 if ((opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
1556 opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEG_CUR_FH) &&
1557 (opendata->o_arg.u.delegation_type & fmode) != fmode)
1558 /* This mode can't have been delegated, so we must have
1559 * a valid open_stateid to cover it - no need to reclaim.
1560 */
1561 return 0;
1562 opendata->o_arg.open_flags = 0;
1563 opendata->o_arg.fmode = fmode;
1564 opendata->o_arg.share_access = nfs4_map_atomic_open_share(
1565 NFS_SB(opendata->dentry->d_sb),
1566 fmode, 0);
1567 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1568 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1569 nfs4_init_opendata_res(opendata);
1570 ret = _nfs4_recover_proc_open(opendata);
1571 if (ret != 0)
1572 return ret;
1573 newstate = nfs4_opendata_to_nfs4_state(opendata);
1574 if (IS_ERR(newstate))
1575 return PTR_ERR(newstate);
1576 nfs4_close_state(newstate, fmode);
1577 *res = newstate;
1578 return 0;
1579 }
1580
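/*
 * Recover the open state by replaying an OPEN for every open mode that
 * is still in use (read/write, write-only, read-only). Each recovered
 * state must map back to the same nfs4_state, otherwise -ESTALE is
 * returned.
 */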
1581 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1582 {
1583 struct nfs4_state *newstate;
1584 int ret;
1585
1586 /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
1587 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1588 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1589 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1590 /* memory barrier prior to reading state->n_* */
1591 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1592 clear_bit(NFS_OPEN_STATE, &state->flags);
1593 smp_rmb();
1594 if (state->n_rdwr != 0) {
1595 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
1596 if (ret != 0)
1597 return ret;
1598 if (newstate != state)
1599 return -ESTALE;
1600 }
1601 if (state->n_wronly != 0) {
1602 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
1603 if (ret != 0)
1604 return ret;
1605 if (newstate != state)
1606 return -ESTALE;
1607 }
1608 if (state->n_rdonly != 0) {
1609 ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
1610 if (ret != 0)
1611 return ret;
1612 if (newstate != state)
1613 return -ESTALE;
1614 }
1615 /*
1616 * We may have performed cached opens for all three recoveries.
1617 * Check if we need to update the current stateid.
1618 */
1619 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1620 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
1621 write_seqlock(&state->seqlock);
1622 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1623 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1624 write_sequnlock(&state->seqlock);
1625 }
1626 return 0;
1627 }
1628
1629 /*
1630 * OPEN_RECLAIM:
1631 * reclaim state on the server after a reboot.
1632 */
1633 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1634 {
1635 struct nfs_delegation *delegation;
1636 struct nfs4_opendata *opendata;
1637 fmode_t delegation_type = 0;
1638 int status;
1639
1640 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1641 NFS4_OPEN_CLAIM_PREVIOUS);
1642 if (IS_ERR(opendata))
1643 return PTR_ERR(opendata);
1644 rcu_read_lock();
1645 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1646 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1647 delegation_type = delegation->type;
1648 rcu_read_unlock();
1649 opendata->o_arg.u.delegation_type = delegation_type;
1650 status = nfs4_open_recover(opendata, state);
1651 nfs4_opendata_put(opendata);
1652 return status;
1653 }
1654
1655 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1656 {
1657 struct nfs_server *server = NFS_SERVER(state->inode);
1658 struct nfs4_exception exception = { };
1659 int err;
1660 do {
1661 err = _nfs4_do_open_reclaim(ctx, state);
1662 trace_nfs4_open_reclaim(ctx, 0, err);
1663 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
1664 continue;
1665 if (err != -NFS4ERR_DELAY)
1666 break;
1667 nfs4_handle_exception(server, err, &exception);
1668 } while (exception.retry);
1669 return err;
1670 }
1671
1672 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
1673 {
1674 struct nfs_open_context *ctx;
1675 int ret;
1676
1677 ctx = nfs4_state_find_open_context(state);
1678 if (IS_ERR(ctx))
1679 return -EAGAIN;
1680 ret = nfs4_do_open_reclaim(ctx, state);
1681 put_nfs_open_context(ctx);
1682 return ret;
1683 }
1684
1685 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
1686 {
1687 switch (err) {
1688 default:
1689 printk(KERN_ERR "NFS: %s: unhandled error "
1690 "%d.\n", __func__, err);
1691 case 0:
1692 case -ENOENT:
1693 case -EAGAIN:
1694 case -ESTALE:
1695 break;
1696 case -NFS4ERR_BADSESSION:
1697 case -NFS4ERR_BADSLOT:
1698 case -NFS4ERR_BAD_HIGH_SLOT:
1699 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1700 case -NFS4ERR_DEADSESSION:
1701 set_bit(NFS_DELEGATED_STATE, &state->flags);
1702 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
1703 return -EAGAIN;
1704 case -NFS4ERR_STALE_CLIENTID:
1705 case -NFS4ERR_STALE_STATEID:
1706 set_bit(NFS_DELEGATED_STATE, &state->flags);
1707 case -NFS4ERR_EXPIRED:
1708 /* Don't recall a delegation if it was lost */
1709 nfs4_schedule_lease_recovery(server->nfs_client);
1710 return -EAGAIN;
1711 case -NFS4ERR_MOVED:
1712 nfs4_schedule_migration_recovery(server);
1713 return -EAGAIN;
1714 case -NFS4ERR_LEASE_MOVED:
1715 nfs4_schedule_lease_moved_recovery(server->nfs_client);
1716 return -EAGAIN;
1717 case -NFS4ERR_DELEG_REVOKED:
1718 case -NFS4ERR_ADMIN_REVOKED:
1719 case -NFS4ERR_BAD_STATEID:
1720 case -NFS4ERR_OPENMODE:
1721 nfs_inode_find_state_and_recover(state->inode,
1722 stateid);
1723 nfs4_schedule_stateid_recovery(server, state);
1724 return -EAGAIN;
1725 case -NFS4ERR_DELAY:
1726 case -NFS4ERR_GRACE:
1727 set_bit(NFS_DELEGATED_STATE, &state->flags);
1728 ssleep(1);
1729 return -EAGAIN;
1730 case -ENOMEM:
1731 case -NFS4ERR_DENIED:
1732 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
1733 return 0;
1734 }
1735 return err;
1736 }
1737
1738 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1739 {
1740 struct nfs_server *server = NFS_SERVER(state->inode);
1741 struct nfs4_opendata *opendata;
1742 int err;
1743
1744 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1745 NFS4_OPEN_CLAIM_DELEG_CUR_FH);
1746 if (IS_ERR(opendata))
1747 return PTR_ERR(opendata);
1748 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
1749 err = nfs4_open_recover(opendata, state);
1750 nfs4_opendata_put(opendata);
1751 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
1752 }
1753
1754 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
1755 {
1756 struct nfs4_opendata *data = calldata;
1757
1758 nfs40_setup_sequence(data->o_arg.server->nfs_client->cl_slot_tbl,
1759 &data->c_arg.seq_args, &data->c_res.seq_res, task);
1760 }
1761
1762 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1763 {
1764 struct nfs4_opendata *data = calldata;
1765
1766 nfs40_sequence_done(task, &data->c_res.seq_res);
1767
1768 data->rpc_status = task->tk_status;
1769 if (data->rpc_status == 0) {
1770 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
1771 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1772 renew_lease(data->o_res.server, data->timestamp);
1773 data->rpc_done = 1;
1774 }
1775 }
1776
1777 static void nfs4_open_confirm_release(void *calldata)
1778 {
1779 struct nfs4_opendata *data = calldata;
1780 struct nfs4_state *state = NULL;
1781
1782 /* If this request hasn't been cancelled, do nothing */
1783 if (data->cancelled == 0)
1784 goto out_free;
1785 /* In case of error, no cleanup! */
1786 if (!data->rpc_done)
1787 goto out_free;
1788 state = nfs4_opendata_to_nfs4_state(data);
1789 if (!IS_ERR(state))
1790 nfs4_close_state(state, data->o_arg.fmode);
1791 out_free:
1792 nfs4_opendata_put(data);
1793 }
1794
1795 static const struct rpc_call_ops nfs4_open_confirm_ops = {
1796 .rpc_call_prepare = nfs4_open_confirm_prepare,
1797 .rpc_call_done = nfs4_open_confirm_done,
1798 .rpc_release = nfs4_open_confirm_release,
1799 };
1800
1801 /*
1802 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
1803 */
1804 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
1805 {
1806 struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
1807 struct rpc_task *task;
1808 struct rpc_message msg = {
1809 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
1810 .rpc_argp = &data->c_arg,
1811 .rpc_resp = &data->c_res,
1812 .rpc_cred = data->owner->so_cred,
1813 };
1814 struct rpc_task_setup task_setup_data = {
1815 .rpc_client = server->client,
1816 .rpc_message = &msg,
1817 .callback_ops = &nfs4_open_confirm_ops,
1818 .callback_data = data,
1819 .workqueue = nfsiod_workqueue,
1820 .flags = RPC_TASK_ASYNC,
1821 };
1822 int status;
1823
1824 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1);
1825 kref_get(&data->kref);
1826 data->rpc_done = 0;
1827 data->rpc_status = 0;
1828 data->timestamp = jiffies;
1829 task = rpc_run_task(&task_setup_data);
1830 if (IS_ERR(task))
1831 return PTR_ERR(task);
1832 status = nfs4_wait_for_completion_rpc_task(task);
1833 if (status != 0) {
1834 data->cancelled = 1;
1835 smp_wmb();
1836 } else
1837 status = data->rpc_status;
1838 rpc_put_task(task);
1839 return status;
1840 }
1841
1842 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1843 {
1844 struct nfs4_opendata *data = calldata;
1845 struct nfs4_state_owner *sp = data->owner;
1846 struct nfs_client *clp = sp->so_server->nfs_client;
1847
1848 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
1849 goto out_wait;
1850 /*
1851 * Check if we still need to send an OPEN call, or if we can use
1852 * a delegation instead.
1853 */
1854 if (data->state != NULL) {
1855 struct nfs_delegation *delegation;
1856
1857 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
1858 goto out_no_action;
1859 rcu_read_lock();
1860 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
1861 if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
1862 data->o_arg.claim != NFS4_OPEN_CLAIM_DELEG_CUR_FH &&
1863 can_open_delegated(delegation, data->o_arg.fmode))
1864 goto unlock_no_action;
1865 rcu_read_unlock();
1866 }
1867 /* Update client id. */
1868 data->o_arg.clientid = clp->cl_clientid;
1869 switch (data->o_arg.claim) {
1870 case NFS4_OPEN_CLAIM_PREVIOUS:
1871 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1872 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1873 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
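		/* Fall through: these claim types also use OPEN_NOATTR */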
1874 case NFS4_OPEN_CLAIM_FH:
1875 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
1876 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
1877 }
1878 data->timestamp = jiffies;
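	/* Note (assumption): nfs4_setup_sequence() returns non-zero when the
	 * task must wait for a session slot; in that case drop the open seqid
	 * rather than hold it while the task sleeps. */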
1879 if (nfs4_setup_sequence(data->o_arg.server,
1880 &data->o_arg.seq_args,
1881 &data->o_res.seq_res,
1882 task) != 0)
1883 nfs_release_seqid(data->o_arg.seqid);
1884
1885 /* Set the create mode (note dependency on the session type) */
1886 data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
1887 if (data->o_arg.open_flags & O_EXCL) {
1888 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
1889 if (nfs4_has_persistent_session(clp))
1890 data->o_arg.createmode = NFS4_CREATE_GUARDED;
1891 else if (clp->cl_mvops->minor_version > 0)
1892 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
1893 }
1894 return;
1895 unlock_no_action:
1896 rcu_read_unlock();
1897 out_no_action:
1898 task->tk_action = NULL;
1899 out_wait:
1900 nfs4_sequence_done(task, &data->o_res.seq_res);
1901 }
1902
1903 static void nfs4_open_done(struct rpc_task *task, void *calldata)
1904 {
1905 struct nfs4_opendata *data = calldata;
1906
1907 data->rpc_status = task->tk_status;
1908
1909 if (!nfs4_sequence_done(task, &data->o_res.seq_res))
1910 return;
1911
1912 if (task->tk_status == 0) {
1913 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
1914 switch (data->o_res.f_attr->mode & S_IFMT) {
1915 case S_IFREG:
1916 break;
1917 case S_IFLNK:
1918 data->rpc_status = -ELOOP;
1919 break;
1920 case S_IFDIR:
1921 data->rpc_status = -EISDIR;
1922 break;
1923 default:
1924 data->rpc_status = -ENOTDIR;
1925 }
1926 }
1927 renew_lease(data->o_res.server, data->timestamp);
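		/* If the server did not ask for an OPEN_CONFIRM, the open
		 * seqid is confirmed now. */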
1928 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
1929 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1930 }
1931 data->rpc_done = 1;
1932 }
1933
1934 static void nfs4_open_release(void *calldata)
1935 {
1936 struct nfs4_opendata *data = calldata;
1937 struct nfs4_state *state = NULL;
1938
1939 /* If this request hasn't been cancelled, do nothing */
1940 if (data->cancelled == 0)
1941 goto out_free;
1942 /* In case of error, no cleanup! */
1943 if (data->rpc_status != 0 || !data->rpc_done)
1944 goto out_free;
1945 /* In case we need an open_confirm, no cleanup! */
1946 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
1947 goto out_free;
1948 state = nfs4_opendata_to_nfs4_state(data);
1949 if (!IS_ERR(state))
1950 nfs4_close_state(state, data->o_arg.fmode);
1951 out_free:
1952 nfs4_opendata_put(data);
1953 }
1954
1955 static const struct rpc_call_ops nfs4_open_ops = {
1956 .rpc_call_prepare = nfs4_open_prepare,
1957 .rpc_call_done = nfs4_open_done,
1958 .rpc_release = nfs4_open_release,
1959 };
1960
1961 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
1962 {
1963 struct inode *dir = d_inode(data->dir);
1964 struct nfs_server *server = NFS_SERVER(dir);
1965 struct nfs_openargs *o_arg = &data->o_arg;
1966 struct nfs_openres *o_res = &data->o_res;
1967 struct rpc_task *task;
1968 struct rpc_message msg = {
1969 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
1970 .rpc_argp = o_arg,
1971 .rpc_resp = o_res,
1972 .rpc_cred = data->owner->so_cred,
1973 };
1974 struct rpc_task_setup task_setup_data = {
1975 .rpc_client = server->client,
1976 .rpc_message = &msg,
1977 .callback_ops = &nfs4_open_ops,
1978 .callback_data = data,
1979 .workqueue = nfsiod_workqueue,
1980 .flags = RPC_TASK_ASYNC,
1981 };
1982 int status;
1983
1984 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
1985 kref_get(&data->kref);
1986 data->rpc_done = 0;
1987 data->rpc_status = 0;
1988 data->cancelled = 0;
1989 data->is_recover = 0;
1990 if (isrecover) {
1991 nfs4_set_sequence_privileged(&o_arg->seq_args);
1992 data->is_recover = 1;
1993 }
1994 task = rpc_run_task(&task_setup_data);
1995 if (IS_ERR(task))
1996 return PTR_ERR(task);
1997 status = nfs4_wait_for_completion_rpc_task(task);
1998 if (status != 0) {
1999 data->cancelled = 1;
2000 smp_wmb();
2001 } else
2002 status = data->rpc_status;
2003 rpc_put_task(task);
2004
2005 return status;
2006 }
2007
2008 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
2009 {
2010 struct inode *dir = d_inode(data->dir);
2011 struct nfs_openres *o_res = &data->o_res;
2012 int status;
2013
2014 status = nfs4_run_open_task(data, 1);
2015 if (status != 0 || !data->rpc_done)
2016 return status;
2017
2018 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
2019
2020 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2021 status = _nfs4_proc_open_confirm(data);
2022 if (status != 0)
2023 return status;
2024 }
2025
2026 return status;
2027 }
2028
2029 /*
2030 * Additional permission checks in order to distinguish between an
2031 * open for read, and an open for execute. This works around the
2032 * fact that NFSv4 OPEN treats read and execute permissions as being
2033 * the same.
2034 * Note that in the non-execute case, we want to turn off permission
2035 * checking if we just created a new file (POSIX open() semantics).
2036 */
2037 static int nfs4_opendata_access(struct rpc_cred *cred,
2038 struct nfs4_opendata *opendata,
2039 struct nfs4_state *state, fmode_t fmode,
2040 int openflags)
2041 {
2042 struct nfs_access_entry cache;
2043 u32 mask;
2044
2045 /* access call failed or for some reason the server doesn't
2046 * support any access modes -- defer access call until later */
2047 if (opendata->o_res.access_supported == 0)
2048 return 0;
2049
2050 mask = 0;
2051 /*
2052 * Use openflags to check for exec, because fmode won't
2053 * always have FMODE_EXEC set when the file is opened for execute.
2054 */
2055 if (openflags & __FMODE_EXEC) {
2056 /* ONLY check for exec rights */
2057 mask = MAY_EXEC;
2058 } else if ((fmode & FMODE_READ) && !opendata->file_created)
2059 mask = MAY_READ;
2060
2061 cache.cred = cred;
2062 cache.jiffies = jiffies;
2063 nfs_access_set_mask(&cache, opendata->o_res.access_result);
2064 nfs_access_add_cache(state->inode, &cache);
2065
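	/* Access is OK if none of the bits we need are missing from the
	 * server's ACCESS reply. */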
2066 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
2067 return 0;
2068
2069 /* even though OPEN succeeded, access is denied. Close the file */
2070 nfs4_close_state(state, fmode);
2071 return -EACCES;
2072 }
2073
2074 /*
2075 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
2076 */
2077 static int _nfs4_proc_open(struct nfs4_opendata *data)
2078 {
2079 struct inode *dir = d_inode(data->dir);
2080 struct nfs_server *server = NFS_SERVER(dir);
2081 struct nfs_openargs *o_arg = &data->o_arg;
2082 struct nfs_openres *o_res = &data->o_res;
2083 int status;
2084
2085 status = nfs4_run_open_task(data, 0);
2086 if (!data->rpc_done)
2087 return status;
2088 if (status != 0) {
2089 if (status == -NFS4ERR_BADNAME &&
2090 !(o_arg->open_flags & O_CREAT))
2091 return -ENOENT;
2092 return status;
2093 }
2094
2095 nfs_fattr_map_and_free_names(server, &data->f_attr);
2096
2097 if (o_arg->open_flags & O_CREAT) {
2098 update_changeattr(dir, &o_res->cinfo);
2099 if (o_arg->open_flags & O_EXCL)
2100 data->file_created = 1;
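		/* Without O_EXCL, infer creation from a change in the directory */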
2101 else if (o_res->cinfo.before != o_res->cinfo.after)
2102 data->file_created = 1;
2103 }
2104 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
2105 server->caps &= ~NFS_CAP_POSIX_LOCK;
2106 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2107 status = _nfs4_proc_open_confirm(data);
2108 if (status != 0)
2109 return status;
2110 }
2111 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
2112 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label);
2113 return 0;
2114 }
2115
2116 static int nfs4_recover_expired_lease(struct nfs_server *server)
2117 {
2118 return nfs4_client_recover_expired_lease(server->nfs_client);
2119 }
2120
2121 /*
2122 * OPEN_EXPIRED:
2123 * reclaim state on the server after a network partition.
2124 * Assumes caller holds the appropriate lock
2125 */
2126 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2127 {
2128 struct nfs4_opendata *opendata;
2129 int ret;
2130
2131 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2132 NFS4_OPEN_CLAIM_FH);
2133 if (IS_ERR(opendata))
2134 return PTR_ERR(opendata);
2135 ret = nfs4_open_recover(opendata, state);
2136 if (ret == -ESTALE)
2137 d_drop(ctx->dentry);
2138 nfs4_opendata_put(opendata);
2139 return ret;
2140 }
2141
2142 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2143 {
2144 struct nfs_server *server = NFS_SERVER(state->inode);
2145 struct nfs4_exception exception = { };
2146 int err;
2147
2148 do {
2149 err = _nfs4_open_expired(ctx, state);
2150 trace_nfs4_open_expired(ctx, 0, err);
2151 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2152 continue;
2153 switch (err) {
2154 default:
2155 goto out;
2156 case -NFS4ERR_GRACE:
2157 case -NFS4ERR_DELAY:
2158 nfs4_handle_exception(server, err, &exception);
2159 err = 0;
2160 }
2161 } while (exception.retry);
2162 out:
2163 return err;
2164 }
2165
2166 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2167 {
2168 struct nfs_open_context *ctx;
2169 int ret;
2170
2171 ctx = nfs4_state_find_open_context(state);
2172 if (IS_ERR(ctx))
2173 return -EAGAIN;
2174 ret = nfs4_do_open_expired(ctx, state);
2175 put_nfs_open_context(ctx);
2176 return ret;
2177 }
2178
2179 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state)
2180 {
2181 nfs_remove_bad_delegation(state->inode);
2182 write_seqlock(&state->seqlock);
2183 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2184 write_sequnlock(&state->seqlock);
2185 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2186 }
2187
2188 static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
2189 {
2190 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
2191 nfs_finish_clear_delegation_stateid(state);
2192 }
2193
2194 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2195 {
2196 /* NFSv4.0 doesn't allow for delegation recovery on open expire */
2197 nfs40_clear_delegation_stateid(state);
2198 return nfs4_open_expired(sp, state);
2199 }
2200
2201 #if defined(CONFIG_NFS_V4_1)
2202 static void nfs41_check_delegation_stateid(struct nfs4_state *state)
2203 {
2204 struct nfs_server *server = NFS_SERVER(state->inode);
2205 nfs4_stateid stateid;
2206 struct nfs_delegation *delegation;
2207 struct rpc_cred *cred;
2208 int status;
2209
2210 /* Get the delegation credential for use by test/free_stateid */
2211 rcu_read_lock();
2212 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2213 if (delegation == NULL) {
2214 rcu_read_unlock();
2215 return;
2216 }
2217
2218 nfs4_stateid_copy(&stateid, &delegation->stateid);
2219 cred = get_rpccred(delegation->cred);
2220 rcu_read_unlock();
2221 status = nfs41_test_stateid(server, &stateid, cred);
2222 trace_nfs4_test_delegation_stateid(state, NULL, status);
2223
2224 if (status != NFS_OK) {
2225 /* Free the stateid unless the server explicitly
2226 * informs us the stateid is unrecognized. */
2227 if (status != -NFS4ERR_BAD_STATEID)
2228 nfs41_free_stateid(server, &stateid, cred);
2229 nfs_finish_clear_delegation_stateid(state);
2230 }
2231
2232 put_rpccred(cred);
2233 }
2234
2235 /**
2236 * nfs41_check_open_stateid - possibly free an open stateid
2237 *
2238 * @state: NFSv4 state for an inode
2239 *
2240 * Returns NFS_OK if recovery for this stateid is now finished.
2241 * Otherwise a negative NFS4ERR value is returned.
2242 */
2243 static int nfs41_check_open_stateid(struct nfs4_state *state)
2244 {
2245 struct nfs_server *server = NFS_SERVER(state->inode);
2246 nfs4_stateid *stateid = &state->open_stateid;
2247 struct rpc_cred *cred = state->owner->so_cred;
2248 int status;
2249
2250 /* If a state reset has been done, test_stateid is unneeded */
2251 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) &&
2252 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) &&
2253 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0))
2254 return -NFS4ERR_BAD_STATEID;
2255
2256 status = nfs41_test_stateid(server, stateid, cred);
2257 trace_nfs4_test_open_stateid(state, NULL, status);
2258 if (status != NFS_OK) {
2259 /* Free the stateid unless the server explicitly
2260 * informs us the stateid is unrecognized. */
2261 if (status != -NFS4ERR_BAD_STATEID)
2262 nfs41_free_stateid(server, stateid, cred);
2263
2264 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2265 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2266 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2267 clear_bit(NFS_OPEN_STATE, &state->flags);
2268 }
2269 return status;
2270 }
2271
2272 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2273 {
2274 int status;
2275
2276 nfs41_check_delegation_stateid(state);
2277 status = nfs41_check_open_stateid(state);
2278 if (status != NFS_OK)
2279 status = nfs4_open_expired(sp, state);
2280 return status;
2281 }
2282 #endif
2283
2284 /*
2285 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
2286 * fields corresponding to attributes that were used to store the verifier.
2287 * Make sure we clobber those fields in the later setattr call
2288 */
2289 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
2290 {
2291 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
2292 !(sattr->ia_valid & ATTR_ATIME_SET))
2293 sattr->ia_valid |= ATTR_ATIME;
2294
2295 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
2296 !(sattr->ia_valid & ATTR_MTIME_SET))
2297 sattr->ia_valid |= ATTR_MTIME;
2298 }
2299
2300 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
2301 fmode_t fmode,
2302 int flags,
2303 struct nfs_open_context *ctx)
2304 {
2305 struct nfs4_state_owner *sp = opendata->owner;
2306 struct nfs_server *server = sp->so_server;
2307 struct dentry *dentry;
2308 struct nfs4_state *state;
2309 unsigned int seq;
2310 int ret;
2311
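	/* Sample the state owner's reclaim seqcount so that we can detect a
	 * concurrent state recovery pass below and, if one ran, schedule
	 * stateid recovery for the newly opened state. */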
2312 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
2313
2314 ret = _nfs4_proc_open(opendata);
2315 if (ret != 0)
2316 goto out;
2317
2318 state = nfs4_opendata_to_nfs4_state(opendata);
2319 ret = PTR_ERR(state);
2320 if (IS_ERR(state))
2321 goto out;
2322 if (server->caps & NFS_CAP_POSIX_LOCK)
2323 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
2324
2325 dentry = opendata->dentry;
2326 if (d_really_is_negative(dentry)) {
2327 /* FIXME: Is this d_drop() ever needed? */
2328 d_drop(dentry);
2329 dentry = d_add_unique(dentry, igrab(state->inode));
2330 if (dentry == NULL) {
2331 dentry = opendata->dentry;
2332 } else if (dentry != ctx->dentry) {
2333 dput(ctx->dentry);
2334 ctx->dentry = dget(dentry);
2335 }
2336 nfs_set_verifier(dentry,
2337 nfs_save_change_attribute(d_inode(opendata->dir)));
2338 }
2339
2340 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags);
2341 if (ret != 0)
2342 goto out;
2343
2344 ctx->state = state;
2345 if (d_inode(dentry) == state->inode) {
2346 nfs_inode_attach_open_context(ctx);
2347 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
2348 nfs4_schedule_stateid_recovery(server, state);
2349 }
2350 out:
2351 return ret;
2352 }
2353
2354 /*
2355 * Returns a referenced nfs4_state
2356 */
2357 static int _nfs4_do_open(struct inode *dir,
2358 struct nfs_open_context *ctx,
2359 int flags,
2360 struct iattr *sattr,
2361 struct nfs4_label *label,
2362 int *opened)
2363 {
2364 struct nfs4_state_owner *sp;
2365 struct nfs4_state *state = NULL;
2366 struct nfs_server *server = NFS_SERVER(dir);
2367 struct nfs4_opendata *opendata;
2368 struct dentry *dentry = ctx->dentry;
2369 struct rpc_cred *cred = ctx->cred;
2370 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
2371 fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
2372 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
2373 struct nfs4_label *olabel = NULL;
2374 int status;
2375
2376 /* Protect against reboot recovery conflicts */
2377 status = -ENOMEM;
2378 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
2379 if (sp == NULL) {
2380 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
2381 goto out_err;
2382 }
2383 status = nfs4_recover_expired_lease(server);
2384 if (status != 0)
2385 goto err_put_state_owner;
2386 if (d_really_is_positive(dentry))
2387 nfs4_return_incompatible_delegation(d_inode(dentry), fmode);
2388 status = -ENOMEM;
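	/* A positive dentry means the filehandle is already known, so open by
	 * filehandle (CLAIM_FH) rather than by name. */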
2389 if (d_really_is_positive(dentry))
2390 claim = NFS4_OPEN_CLAIM_FH;
2391 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr,
2392 label, claim, GFP_KERNEL);
2393 if (opendata == NULL)
2394 goto err_put_state_owner;
2395
2396 if (label) {
2397 olabel = nfs4_label_alloc(server, GFP_KERNEL);
2398 if (IS_ERR(olabel)) {
2399 status = PTR_ERR(olabel);
2400 goto err_opendata_put;
2401 }
2402 }
2403
2404 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
2405 if (!opendata->f_attr.mdsthreshold) {
2406 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
2407 if (!opendata->f_attr.mdsthreshold)
2408 goto err_free_label;
2409 }
2410 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
2411 }
2412 if (d_really_is_positive(dentry))
2413 opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
2414
2415 status = _nfs4_open_and_get_state(opendata, fmode, flags, ctx);
2416 if (status != 0)
2417 goto err_free_label;
2418 state = ctx->state;
2419
2420 if ((opendata->o_arg.open_flags & O_EXCL) &&
2421 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
2422 nfs4_exclusive_attrset(opendata, sattr);
2423
2424 nfs_fattr_init(opendata->o_res.f_attr);
2425 status = nfs4_do_setattr(state->inode, cred,
2426 opendata->o_res.f_attr, sattr,
2427 state, label, olabel);
2428 if (status == 0) {
2429 nfs_setattr_update_inode(state->inode, sattr,
2430 opendata->o_res.f_attr);
2431 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
2432 }
2433 }
2434 if (opendata->file_created)
2435 *opened |= FILE_CREATED;
2436
2437 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
2438 *ctx_th = opendata->f_attr.mdsthreshold;
2439 opendata->f_attr.mdsthreshold = NULL;
2440 }
2441
2442 nfs4_label_free(olabel);
2443
2444 nfs4_opendata_put(opendata);
2445 nfs4_put_state_owner(sp);
2446 return 0;
2447 err_free_label:
2448 nfs4_label_free(olabel);
2449 err_opendata_put:
2450 nfs4_opendata_put(opendata);
2451 err_put_state_owner:
2452 nfs4_put_state_owner(sp);
2453 out_err:
2454 return status;
2455 }
2456
2457
2458 static struct nfs4_state *nfs4_do_open(struct inode *dir,
2459 struct nfs_open_context *ctx,
2460 int flags,
2461 struct iattr *sattr,
2462 struct nfs4_label *label,
2463 int *opened)
2464 {
2465 struct nfs_server *server = NFS_SERVER(dir);
2466 struct nfs4_exception exception = { };
2467 struct nfs4_state *res;
2468 int status;
2469
2470 do {
2471 status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened);
2472 res = ctx->state;
2473 trace_nfs4_open_file(ctx, flags, status);
2474 if (status == 0)
2475 break;
2476 /* NOTE: BAD_SEQID means the server and client disagree about the
2477 * book-keeping w.r.t. state-changing operations
2478 * (OPEN/CLOSE/LOCK/LOCKU...)
2479 * It is actually a sign of a bug on the client or on the server.
2480 *
2481 * If we receive a BAD_SEQID error in the particular case of
2482 * doing an OPEN, we assume that nfs_increment_open_seqid() will
2483 * have unhashed the old state_owner for us, and that we can
2484 * therefore safely retry using a new one. We should still warn
2485 * the user though...
2486 */
2487 if (status == -NFS4ERR_BAD_SEQID) {
2488 pr_warn_ratelimited("NFS: v4 server %s "
2489 "returned a bad sequence-id error!\n",
2490 NFS_SERVER(dir)->nfs_client->cl_hostname);
2491 exception.retry = 1;
2492 continue;
2493 }
2494 /*
2495 * BAD_STATEID on OPEN means that the server cancelled our
2496 * state before it received the OPEN_CONFIRM.
2497 * Recover by retrying the request as per the discussion
2498 * on Page 181 of RFC3530.
2499 */
2500 if (status == -NFS4ERR_BAD_STATEID) {
2501 exception.retry = 1;
2502 continue;
2503 }
2504 if (status == -EAGAIN) {
2505 /* We must have found a delegation */
2506 exception.retry = 1;
2507 continue;
2508 }
2509 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
2510 continue;
2511 res = ERR_PTR(nfs4_handle_exception(server,
2512 status, &exception));
2513 } while (exception.retry);
2514 return res;
2515 }
2516
2517 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2518 struct nfs_fattr *fattr, struct iattr *sattr,
2519 struct nfs4_state *state, struct nfs4_label *ilabel,
2520 struct nfs4_label *olabel)
2521 {
2522 struct nfs_server *server = NFS_SERVER(inode);
2523 struct nfs_setattrargs arg = {
2524 .fh = NFS_FH(inode),
2525 .iap = sattr,
2526 .server = server,
2527 .bitmask = server->attr_bitmask,
2528 .label = ilabel,
2529 };
2530 struct nfs_setattrres res = {
2531 .fattr = fattr,
2532 .label = olabel,
2533 .server = server,
2534 };
2535 struct rpc_message msg = {
2536 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
2537 .rpc_argp = &arg,
2538 .rpc_resp = &res,
2539 .rpc_cred = cred,
2540 };
2541 unsigned long timestamp = jiffies;
2542 fmode_t fmode;
2543 bool truncate;
2544 int status;
2545
2546 arg.bitmask = nfs4_bitmask(server, ilabel);
2547 if (ilabel)
2548 arg.bitmask = nfs4_bitmask(server, olabel);
2549
2550 nfs_fattr_init(fattr);
2551
2552 /* Servers should only apply open mode checks for file size changes */
2553 truncate = (sattr->ia_valid & ATTR_SIZE) ? true : false;
2554 fmode = truncate ? FMODE_WRITE : FMODE_READ;
2555
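	/* Select a stateid for the SETATTR: prefer a matching delegation,
	 * fall back to the open/lock stateid when truncating, and otherwise
	 * use the anonymous (all-zero) stateid. */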
2556 if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) {
2557 /* Use that stateid */
2558 } else if (truncate && state != NULL) {
2559 struct nfs_lockowner lockowner = {
2560 .l_owner = current->files,
2561 .l_pid = current->tgid,
2562 };
2563 if (!nfs4_valid_open_stateid(state))
2564 return -EBADF;
2565 if (nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
2566 &lockowner) == -EIO)
2567 return -EBADF;
2568 } else
2569 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
2570
2571 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2572 if (status == 0 && state != NULL)
2573 renew_lease(server, timestamp);
2574 return status;
2575 }
2576
2577 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2578 struct nfs_fattr *fattr, struct iattr *sattr,
2579 struct nfs4_state *state, struct nfs4_label *ilabel,
2580 struct nfs4_label *olabel)
2581 {
2582 struct nfs_server *server = NFS_SERVER(inode);
2583 struct nfs4_exception exception = {
2584 .state = state,
2585 .inode = inode,
2586 };
2587 int err;
2588 do {
2589 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state, ilabel, olabel);
2590 trace_nfs4_setattr(inode, err);
2591 switch (err) {
2592 case -NFS4ERR_OPENMODE:
2593 if (!(sattr->ia_valid & ATTR_SIZE)) {
2594 pr_warn_once("NFSv4: server %s is incorrectly "
2595 "applying open mode checks to "
2596 "a SETATTR that is not "
2597 "changing file size.\n",
2598 server->nfs_client->cl_hostname);
2599 }
2600 if (state && !(state->state & FMODE_WRITE)) {
2601 err = -EBADF;
2602 if (sattr->ia_valid & ATTR_OPEN)
2603 err = -EACCES;
2604 goto out;
2605 }
2606 }
2607 err = nfs4_handle_exception(server, err, &exception);
2608 } while (exception.retry);
2609 out:
2610 return err;
2611 }
2612
2613 struct nfs4_closedata {
2614 struct inode *inode;
2615 struct nfs4_state *state;
2616 struct nfs_closeargs arg;
2617 struct nfs_closeres res;
2618 struct nfs_fattr fattr;
2619 unsigned long timestamp;
2620 bool roc;
2621 u32 roc_barrier;
2622 };
2623
2624 static void nfs4_free_closedata(void *data)
2625 {
2626 struct nfs4_closedata *calldata = data;
2627 struct nfs4_state_owner *sp = calldata->state->owner;
2628 struct super_block *sb = calldata->state->inode->i_sb;
2629
2630 if (calldata->roc)
2631 pnfs_roc_release(calldata->state->inode);
2632 nfs4_put_open_state(calldata->state);
2633 nfs_free_seqid(calldata->arg.seqid);
2634 nfs4_put_state_owner(sp);
2635 nfs_sb_deactive(sb);
2636 kfree(calldata);
2637 }
2638
2639 static void nfs4_close_done(struct rpc_task *task, void *data)
2640 {
2641 struct nfs4_closedata *calldata = data;
2642 struct nfs4_state *state = calldata->state;
2643 struct nfs_server *server = NFS_SERVER(calldata->inode);
2644 nfs4_stateid *res_stateid = NULL;
2645
2646 dprintk("%s: begin!\n", __func__);
2647 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
2648 return;
2649 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
2650 /* Note: we may be done with the inode and in the process of freeing
2651 * the state_owner; keep this data around so we can process errors.
2652 */
2653 switch (task->tk_status) {
2654 case 0:
2655 res_stateid = &calldata->res.stateid;
2656 if (calldata->arg.fmode == 0 && calldata->roc)
2657 pnfs_roc_set_barrier(state->inode,
2658 calldata->roc_barrier);
2659 renew_lease(server, calldata->timestamp);
2660 break;
2661 case -NFS4ERR_ADMIN_REVOKED:
2662 case -NFS4ERR_STALE_STATEID:
2663 case -NFS4ERR_OLD_STATEID:
2664 case -NFS4ERR_BAD_STATEID:
2665 case -NFS4ERR_EXPIRED:
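		/* If the stateid we sent no longer matches the current open
		 * stateid, the CLOSE raced with a newer OPEN; restart so the
		 * call is retried with the up-to-date stateid. */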
2666 if (!nfs4_stateid_match(&calldata->arg.stateid,
2667 &state->open_stateid)) {
2668 rpc_restart_call_prepare(task);
2669 goto out_release;
2670 }
2671 if (calldata->arg.fmode == 0)
2672 break;
2673 default:
2674 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) {
2675 rpc_restart_call_prepare(task);
2676 goto out_release;
2677 }
2678 }
2679 nfs_clear_open_stateid(state, res_stateid, calldata->arg.fmode);
2680 out_release:
2681 nfs_release_seqid(calldata->arg.seqid);
2682 nfs_refresh_inode(calldata->inode, calldata->res.fattr);
2683 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
2684 }
2685
2686 static void nfs4_close_prepare(struct rpc_task *task, void *data)
2687 {
2688 struct nfs4_closedata *calldata = data;
2689 struct nfs4_state *state = calldata->state;
2690 struct inode *inode = calldata->inode;
2691 bool is_rdonly, is_wronly, is_rdwr;
2692 int call_close = 0;
2693
2694 dprintk("%s: begin!\n", __func__);
2695 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
2696 goto out_wait;
2697
2698 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
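	/* Default to OPEN_DOWNGRADE; switched to CLOSE below once we know
	 * that no open share modes remain. */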
2699 spin_lock(&state->owner->so_lock);
2700 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
2701 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
2702 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
2703 nfs4_stateid_copy(&calldata->arg.stateid, &state->open_stateid);
2704 /* Calculate the change in open mode */
2705 calldata->arg.fmode = 0;
2706 if (state->n_rdwr == 0) {
2707 if (state->n_rdonly == 0)
2708 call_close |= is_rdonly;
2709 else if (is_rdonly)
2710 calldata->arg.fmode |= FMODE_READ;
2711 if (state->n_wronly == 0)
2712 call_close |= is_wronly;
2713 else if (is_wronly)
2714 calldata->arg.fmode |= FMODE_WRITE;
2715 } else if (is_rdwr)
2716 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
2717
2718 if (calldata->arg.fmode == 0)
2719 call_close |= is_rdwr;
2720
2721 if (!nfs4_valid_open_stateid(state))
2722 call_close = 0;
2723 spin_unlock(&state->owner->so_lock);
2724
2725 if (!call_close) {
2726 /* Note: exit _without_ calling nfs4_close_done */
2727 goto out_no_action;
2728 }
2729
2730 if (calldata->arg.fmode == 0) {
2731 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
2732 if (calldata->roc &&
2733 pnfs_roc_drain(inode, &calldata->roc_barrier, task)) {
2734 nfs_release_seqid(calldata->arg.seqid);
2735 goto out_wait;
2736 }
2737 }
2738 calldata->arg.share_access =
2739 nfs4_map_atomic_open_share(NFS_SERVER(inode),
2740 calldata->arg.fmode, 0);
2741
2742 nfs_fattr_init(calldata->res.fattr);
2743 calldata->timestamp = jiffies;
2744 if (nfs4_setup_sequence(NFS_SERVER(inode),
2745 &calldata->arg.seq_args,
2746 &calldata->res.seq_res,
2747 task) != 0)
2748 nfs_release_seqid(calldata->arg.seqid);
2749 dprintk("%s: done!\n", __func__);
2750 return;
2751 out_no_action:
2752 task->tk_action = NULL;
2753 out_wait:
2754 nfs4_sequence_done(task, &calldata->res.seq_res);
2755 }
2756
2757 static const struct rpc_call_ops nfs4_close_ops = {
2758 .rpc_call_prepare = nfs4_close_prepare,
2759 .rpc_call_done = nfs4_close_done,
2760 .rpc_release = nfs4_free_closedata,
2761 };
2762
2763 static bool nfs4_roc(struct inode *inode)
2764 {
2765 if (!nfs_have_layout(inode))
2766 return false;
2767 return pnfs_roc(inode);
2768 }
2769
2770 /*
2771 * It is possible for data to be read/written from a mem-mapped file
2772 * after the sys_close call (which hits the vfs layer as a flush).
2773 * This means that we can't safely call nfsv4 close on a file until
2774 * the inode is cleared. This in turn means that we are not good
2775 * NFSv4 citizens - we do not tell the server to update the file's
2776 * share state even when we are done with one of the three share
2777 * stateids in the inode.
2778 *
2779 * NOTE: Caller must be holding the sp->so_owner semaphore!
2780 */
2781 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
2782 {
2783 struct nfs_server *server = NFS_SERVER(state->inode);
2784 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
2785 struct nfs4_closedata *calldata;
2786 struct nfs4_state_owner *sp = state->owner;
2787 struct rpc_task *task;
2788 struct rpc_message msg = {
2789 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
2790 .rpc_cred = state->owner->so_cred,
2791 };
2792 struct rpc_task_setup task_setup_data = {
2793 .rpc_client = server->client,
2794 .rpc_message = &msg,
2795 .callback_ops = &nfs4_close_ops,
2796 .workqueue = nfsiod_workqueue,
2797 .flags = RPC_TASK_ASYNC,
2798 };
2799 int status = -ENOMEM;
2800
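	/* If the NFSv4.1 server negotiated SP4_MACH_CRED protection for
	 * cleanup operations, send this CLOSE using the machine credential. */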
2801 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
2802 &task_setup_data.rpc_client, &msg);
2803
2804 calldata = kzalloc(sizeof(*calldata), gfp_mask);
2805 if (calldata == NULL)
2806 goto out;
2807 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
2808 calldata->inode = state->inode;
2809 calldata->state = state;
2810 calldata->arg.fh = NFS_FH(state->inode);
2811 /* Serialization for the sequence id */
2812 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
2813 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
2814 if (IS_ERR(calldata->arg.seqid))
2815 goto out_free_calldata;
2816 calldata->arg.fmode = 0;
2817 calldata->arg.bitmask = server->cache_consistency_bitmask;
2818 calldata->res.fattr = &calldata->fattr;
2819 calldata->res.seqid = calldata->arg.seqid;
2820 calldata->res.server = server;
2821 calldata->roc = nfs4_roc(state->inode);
2822 nfs_sb_active(calldata->inode->i_sb);
2823
2824 msg.rpc_argp = &calldata->arg;
2825 msg.rpc_resp = &calldata->res;
2826 task_setup_data.callback_data = calldata;
2827 task = rpc_run_task(&task_setup_data);
2828 if (IS_ERR(task))
2829 return PTR_ERR(task);
2830 status = 0;
2831 if (wait)
2832 status = rpc_wait_for_completion_task(task);
2833 rpc_put_task(task);
2834 return status;
2835 out_free_calldata:
2836 kfree(calldata);
2837 out:
2838 nfs4_put_open_state(state);
2839 nfs4_put_state_owner(sp);
2840 return status;
2841 }
2842
2843 static struct inode *
2844 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
2845 int open_flags, struct iattr *attr, int *opened)
2846 {
2847 struct nfs4_state *state;
2848 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
2849
2850 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
2851
2852 /* Protect against concurrent sillydeletes */
2853 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
2854
2855 nfs4_label_release_security(label);
2856
2857 if (IS_ERR(state))
2858 return ERR_CAST(state);
2859 return state->inode;
2860 }
2861
2862 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
2863 {
2864 if (ctx->state == NULL)
2865 return;
2866 if (is_sync)
2867 nfs4_close_sync(ctx->state, ctx->mode);
2868 else
2869 nfs4_close_state(ctx->state, ctx->mode);
2870 }
2871
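/* Masks covering every attribute word bit up to and including the highest
 * attribute that each minor version defines. */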
2872 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
2873 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
2874 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_SECURITY_LABEL - 1UL)
2875
2876 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2877 {
2878 struct nfs4_server_caps_arg args = {
2879 .fhandle = fhandle,
2880 };
2881 struct nfs4_server_caps_res res = {};
2882 struct rpc_message msg = {
2883 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
2884 .rpc_argp = &args,
2885 .rpc_resp = &res,
2886 };
2887 int status;
2888
2889 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2890 if (status == 0) {
2891 /* Sanity check the server answers */
2892 switch (server->nfs_client->cl_minorversion) {
2893 case 0:
2894 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
2895 res.attr_bitmask[2] = 0;
2896 break;
2897 case 1:
2898 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
2899 break;
2900 case 2:
2901 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
2902 }
2903 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
2904 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
2905 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
2906 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
2907 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
2908 NFS_CAP_CTIME|NFS_CAP_MTIME|
2909 NFS_CAP_SECURITY_LABEL);
2910 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
2911 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
2912 server->caps |= NFS_CAP_ACLS;
2913 if (res.has_links != 0)
2914 server->caps |= NFS_CAP_HARDLINKS;
2915 if (res.has_symlinks != 0)
2916 server->caps |= NFS_CAP_SYMLINKS;
2917 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
2918 server->caps |= NFS_CAP_FILEID;
2919 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
2920 server->caps |= NFS_CAP_MODE;
2921 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
2922 server->caps |= NFS_CAP_NLINK;
2923 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
2924 server->caps |= NFS_CAP_OWNER;
2925 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
2926 server->caps |= NFS_CAP_OWNER_GROUP;
2927 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
2928 server->caps |= NFS_CAP_ATIME;
2929 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
2930 server->caps |= NFS_CAP_CTIME;
2931 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
2932 server->caps |= NFS_CAP_MTIME;
2933 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
2934 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
2935 server->caps |= NFS_CAP_SECURITY_LABEL;
2936 #endif
2937 memcpy(server->attr_bitmask_nl, res.attr_bitmask,
2938 sizeof(server->attr_bitmask));
2939 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
2940
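		/* The cache consistency bitmask is the minimal attribute set
		 * (change, size, ctime, mtime) requested on operations that
		 * only need to revalidate the client's caches. */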
2941 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
2942 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
2943 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
2944 server->cache_consistency_bitmask[2] = 0;
2945 server->acl_bitmask = res.acl_bitmask;
2946 server->fh_expire_type = res.fh_expire_type;
2947 }
2948
2949 return status;
2950 }
2951
2952 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2953 {
2954 struct nfs4_exception exception = { };
2955 int err;
2956 do {
2957 err = nfs4_handle_exception(server,
2958 _nfs4_server_capabilities(server, fhandle),
2959 &exception);
2960 } while (exception.retry);
2961 return err;
2962 }
2963
2964 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2965 struct nfs_fsinfo *info)
2966 {
2967 u32 bitmask[3];
2968 struct nfs4_lookup_root_arg args = {
2969 .bitmask = bitmask,
2970 };
2971 struct nfs4_lookup_res res = {
2972 .server = server,
2973 .fattr = info->fattr,
2974 .fh = fhandle,
2975 };
2976 struct rpc_message msg = {
2977 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
2978 .rpc_argp = &args,
2979 .rpc_resp = &res,
2980 };
2981
2982 bitmask[0] = nfs4_fattr_bitmap[0];
2983 bitmask[1] = nfs4_fattr_bitmap[1];
2984 /*
2985 * Process the label in the upcoming getfattr
2986 */
2987 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
2988
2989 nfs_fattr_init(info->fattr);
2990 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2991 }
2992
2993 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2994 struct nfs_fsinfo *info)
2995 {
2996 struct nfs4_exception exception = { };
2997 int err;
2998 do {
2999 err = _nfs4_lookup_root(server, fhandle, info);
3000 trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
3001 switch (err) {
3002 case 0:
3003 case -NFS4ERR_WRONGSEC:
3004 goto out;
3005 default:
3006 err = nfs4_handle_exception(server, err, &exception);
3007 }
3008 } while (exception.retry);
3009 out:
3010 return err;
3011 }
3012
3013 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3014 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
3015 {
3016 struct rpc_auth_create_args auth_args = {
3017 .pseudoflavor = flavor,
3018 };
3019 struct rpc_auth *auth;
3020 int ret;
3021
3022 auth = rpcauth_create(&auth_args, server->client);
3023 if (IS_ERR(auth)) {
3024 ret = -EACCES;
3025 goto out;
3026 }
3027 ret = nfs4_lookup_root(server, fhandle, info);
3028 out:
3029 return ret;
3030 }
3031
3032 /*
3033 * Retry pseudoroot lookup with various security flavors. We do this when:
3034 *
3035 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
3036 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation
3037 *
3038 * Returns zero on success, or a negative NFS4ERR value, or a
3039 * negative errno value.
3040 */
3041 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3042 struct nfs_fsinfo *info)
3043 {
3044 /* Per 3530bis 15.33.5 */
3045 static const rpc_authflavor_t flav_array[] = {
3046 RPC_AUTH_GSS_KRB5P,
3047 RPC_AUTH_GSS_KRB5I,
3048 RPC_AUTH_GSS_KRB5,
3049 RPC_AUTH_UNIX, /* courtesy */
3050 RPC_AUTH_NULL,
3051 };
3052 int status = -EPERM;
3053 size_t i;
3054
3055 if (server->auth_info.flavor_len > 0) {
3056 /* try each flavor specified by user */
3057 for (i = 0; i < server->auth_info.flavor_len; i++) {
3058 status = nfs4_lookup_root_sec(server, fhandle, info,
3059 server->auth_info.flavors[i]);
3060 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3061 continue;
3062 break;
3063 }
3064 } else {
3065 /* no flavors specified by user, try default list */
3066 for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
3067 status = nfs4_lookup_root_sec(server, fhandle, info,
3068 flav_array[i]);
3069 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3070 continue;
3071 break;
3072 }
3073 }
3074
3075 /*
3076 * -EACCES could mean that the user doesn't have correct permissions
3077 * to access the mount. It could also mean that we tried to mount
3078 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
3079 * existing mount programs don't handle -EACCES very well so it should
3080 * be mapped to -EPERM instead.
3081 */
3082 if (status == -EACCES)
3083 status = -EPERM;
3084 return status;
3085 }
3086
3087 static int nfs4_do_find_root_sec(struct nfs_server *server,
3088 struct nfs_fh *fhandle, struct nfs_fsinfo *info)
3089 {
3090 int mv = server->nfs_client->cl_minorversion;
3091 return nfs_v4_minor_ops[mv]->find_root_sec(server, fhandle, info);
3092 }
3093
3094 /**
3095 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
3096 * @server: initialized nfs_server handle
3097 * @fhandle: we fill in the pseudo-fs root file handle
3098 * @info: we fill in an FSINFO struct
3099 * @auth_probe: probe the auth flavours
3100 *
3101 * Returns zero on success, or a negative errno.
3102 */
3103 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
3104 struct nfs_fsinfo *info,
3105 bool auth_probe)
3106 {
3107 int status = 0;
3108
3109 if (!auth_probe)
3110 status = nfs4_lookup_root(server, fhandle, info);
3111
3112 if (auth_probe || status == -NFS4ERR_WRONGSEC)
3113 status = nfs4_do_find_root_sec(server, fhandle, info);
3114
3115 if (status == 0)
3116 status = nfs4_server_capabilities(server, fhandle);
3117 if (status == 0)
3118 status = nfs4_do_fsinfo(server, fhandle, info);
3119
3120 return nfs4_map_errors(status);
3121 }
3122
3123 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
3124 struct nfs_fsinfo *info)
3125 {
3126 int error;
3127 struct nfs_fattr *fattr = info->fattr;
3128 struct nfs4_label *label = NULL;
3129
3130 error = nfs4_server_capabilities(server, mntfh);
3131 if (error < 0) {
3132 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
3133 return error;
3134 }
3135
3136 label = nfs4_label_alloc(server, GFP_KERNEL);
3137 if (IS_ERR(label))
3138 return PTR_ERR(label);
3139
3140 error = nfs4_proc_getattr(server, mntfh, fattr, label);
3141 if (error < 0) {
3142 dprintk("nfs4_get_root: getattr error = %d\n", -error);
3143 goto err_free_label;
3144 }
3145
3146 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
3147 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
3148 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
3149
3150 err_free_label:
3151 nfs4_label_free(label);
3152
3153 return error;
3154 }
3155
3156 /*
3157 * Get locations and (maybe) other attributes of a referral.
3158 * Note that we'll actually follow the referral later when
3159 * we detect fsid mismatch in inode revalidation
3160 */
3161 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
3162 const struct qstr *name, struct nfs_fattr *fattr,
3163 struct nfs_fh *fhandle)
3164 {
3165 int status = -ENOMEM;
3166 struct page *page = NULL;
3167 struct nfs4_fs_locations *locations = NULL;
3168
3169 page = alloc_page(GFP_KERNEL);
3170 if (page == NULL)
3171 goto out;
3172 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
3173 if (locations == NULL)
3174 goto out;
3175
3176 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
3177 if (status != 0)
3178 goto out;
3179
3180 /*
3181 * If the fsid didn't change, this is a migration event, not a
3182 * referral. Cause us to drop into the exception handler, which
3183 * will kick off migration recovery.
3184 */
3185 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
3186 dprintk("%s: server did not return a different fsid for"
3187 " a referral at %s\n", __func__, name->name);
3188 status = -NFS4ERR_MOVED;
3189 goto out;
3190 }
3191 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
3192 nfs_fixup_referral_attributes(&locations->fattr);
3193
3194 /* replace the lookup nfs_fattr with the locations nfs_fattr */
3195 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
3196 memset(fhandle, 0, sizeof(struct nfs_fh));
3197 out:
3198 if (page)
3199 __free_page(page);
3200 kfree(locations);
3201 return status;
3202 }
3203
3204 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3205 struct nfs_fattr *fattr, struct nfs4_label *label)
3206 {
3207 struct nfs4_getattr_arg args = {
3208 .fh = fhandle,
3209 .bitmask = server->attr_bitmask,
3210 };
3211 struct nfs4_getattr_res res = {
3212 .fattr = fattr,
3213 .label = label,
3214 .server = server,
3215 };
3216 struct rpc_message msg = {
3217 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
3218 .rpc_argp = &args,
3219 .rpc_resp = &res,
3220 };
3221
3222 args.bitmask = nfs4_bitmask(server, label);
3223
3224 nfs_fattr_init(fattr);
3225 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3226 }
3227
3228 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3229 struct nfs_fattr *fattr, struct nfs4_label *label)
3230 {
3231 struct nfs4_exception exception = { };
3232 int err;
3233 do {
3234 err = _nfs4_proc_getattr(server, fhandle, fattr, label);
3235 trace_nfs4_getattr(server, fhandle, fattr, err);
3236 err = nfs4_handle_exception(server, err,
3237 &exception);
3238 } while (exception.retry);
3239 return err;
3240 }
3241
3242 /*
3243 * The file is not closed if it is opened due to a request to change
3244 * the size of the file. The open call will not be needed once the
3245 * VFS layer lookup-intents are implemented.
3246 *
3247 * Close is called when the inode is destroyed.
3248 * If we haven't opened the file for O_WRONLY, we
3249 * need to do so in the size_change case to obtain a stateid.
3250 *
3251 * Got race?
3252 * Because OPEN is always done by name in nfsv4, it is
3253 * possible that we opened a different file by the same
3254 * name. We can recognize this race condition, but we
3255 * can't do anything about it besides returning an error.
3256 *
3257 * This will be fixed with VFS changes (lookup-intent).
3258 */
3259 static int
3260 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
3261 struct iattr *sattr)
3262 {
3263 struct inode *inode = d_inode(dentry);
3264 struct rpc_cred *cred = NULL;
3265 struct nfs4_state *state = NULL;
3266 struct nfs4_label *label = NULL;
3267 int status;
3268
3269 if (pnfs_ld_layoutret_on_setattr(inode) &&
3270 sattr->ia_valid & ATTR_SIZE &&
3271 sattr->ia_size < i_size_read(inode))
3272 pnfs_commit_and_return_layout(inode);
3273
3274 nfs_fattr_init(fattr);
3275
3276 /* Deal with open(O_TRUNC) */
3277 if (sattr->ia_valid & ATTR_OPEN)
3278 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
3279
3280 /* Optimization: if the end result is no change, don't RPC */
3281 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
3282 return 0;
3283
3284 /* Search for an existing open(O_WRITE) file */
3285 if (sattr->ia_valid & ATTR_FILE) {
3286 struct nfs_open_context *ctx;
3287
3288 ctx = nfs_file_open_context(sattr->ia_file);
3289 if (ctx) {
3290 cred = ctx->cred;
3291 state = ctx->state;
3292 }
3293 }
3294
3295 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
3296 if (IS_ERR(label))
3297 return PTR_ERR(label);
3298
3299 status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label);
3300 if (status == 0) {
3301 nfs_setattr_update_inode(inode, sattr, fattr);
3302 nfs_setsecurity(inode, fattr, label);
3303 }
3304 nfs4_label_free(label);
3305 return status;
3306 }
3307
3308 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
3309 const struct qstr *name, struct nfs_fh *fhandle,
3310 struct nfs_fattr *fattr, struct nfs4_label *label)
3311 {
3312 struct nfs_server *server = NFS_SERVER(dir);
3313 int status;
3314 struct nfs4_lookup_arg args = {
3315 .bitmask = server->attr_bitmask,
3316 .dir_fh = NFS_FH(dir),
3317 .name = name,
3318 };
3319 struct nfs4_lookup_res res = {
3320 .server = server,
3321 .fattr = fattr,
3322 .label = label,
3323 .fh = fhandle,
3324 };
3325 struct rpc_message msg = {
3326 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
3327 .rpc_argp = &args,
3328 .rpc_resp = &res,
3329 };
3330
3331 args.bitmask = nfs4_bitmask(server, label);
3332
3333 nfs_fattr_init(fattr);
3334
3335 dprintk("NFS call lookup %s\n", name->name);
3336 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
3337 dprintk("NFS reply lookup: %d\n", status);
3338 return status;
3339 }
3340
3341 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
3342 {
3343 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
3344 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
3345 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
3346 fattr->nlink = 2;
3347 }
3348
3349 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
3350 struct qstr *name, struct nfs_fh *fhandle,
3351 struct nfs_fattr *fattr, struct nfs4_label *label)
3352 {
3353 struct nfs4_exception exception = { };
3354 struct rpc_clnt *client = *clnt;
3355 int err;
3356 do {
3357 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr, label);
3358 trace_nfs4_lookup(dir, name, err);
3359 switch (err) {
3360 case -NFS4ERR_BADNAME:
3361 err = -ENOENT;
3362 goto out;
3363 case -NFS4ERR_MOVED:
3364 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
3365 if (err == -NFS4ERR_MOVED)
3366 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3367 goto out;
3368 case -NFS4ERR_WRONGSEC:
3369 err = -EPERM;
3370 if (client != *clnt)
3371 goto out;
3372 client = nfs4_negotiate_security(client, dir, name);
3373 if (IS_ERR(client))
3374 return PTR_ERR(client);
3375
3376 exception.retry = 1;
3377 break;
3378 default:
3379 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3380 }
3381 } while (exception.retry);
3382
3383 out:
3384 if (err == 0)
3385 *clnt = client;
3386 else if (client != *clnt)
3387 rpc_shutdown_client(client);
3388
3389 return err;
3390 }
3391
3392 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
3393 struct nfs_fh *fhandle, struct nfs_fattr *fattr,
3394 struct nfs4_label *label)
3395 {
3396 int status;
3397 struct rpc_clnt *client = NFS_CLIENT(dir);
3398
3399 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, label);
3400 if (client != NFS_CLIENT(dir)) {
3401 rpc_shutdown_client(client);
3402 nfs_fixup_secinfo_attributes(fattr);
3403 }
3404 return status;
3405 }
3406
3407 struct rpc_clnt *
3408 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
3409 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
3410 {
3411 struct rpc_clnt *client = NFS_CLIENT(dir);
3412 int status;
3413
3414 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL);
3415 if (status < 0)
3416 return ERR_PTR(status);
3417 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
3418 }
3419
3420 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3421 {
3422 struct nfs_server *server = NFS_SERVER(inode);
3423 struct nfs4_accessargs args = {
3424 .fh = NFS_FH(inode),
3425 .bitmask = server->cache_consistency_bitmask,
3426 };
3427 struct nfs4_accessres res = {
3428 .server = server,
3429 };
3430 struct rpc_message msg = {
3431 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
3432 .rpc_argp = &args,
3433 .rpc_resp = &res,
3434 .rpc_cred = entry->cred,
3435 };
3436 int mode = entry->mask;
3437 int status = 0;
3438
3439 /*
3440 * Determine which access bits we want to ask for...
3441 */
3442 if (mode & MAY_READ)
3443 args.access |= NFS4_ACCESS_READ;
3444 if (S_ISDIR(inode->i_mode)) {
3445 if (mode & MAY_WRITE)
3446 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
3447 if (mode & MAY_EXEC)
3448 args.access |= NFS4_ACCESS_LOOKUP;
3449 } else {
3450 if (mode & MAY_WRITE)
3451 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
3452 if (mode & MAY_EXEC)
3453 args.access |= NFS4_ACCESS_EXECUTE;
3454 }
3455
3456 res.fattr = nfs_alloc_fattr();
3457 if (res.fattr == NULL)
3458 return -ENOMEM;
3459
3460 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3461 if (!status) {
3462 nfs_access_set_mask(entry, res.access);
3463 nfs_refresh_inode(inode, res.fattr);
3464 }
3465 nfs_free_fattr(res.fattr);
3466 return status;
3467 }
3468
3469 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3470 {
3471 struct nfs4_exception exception = { };
3472 int err;
3473 do {
3474 err = _nfs4_proc_access(inode, entry);
3475 trace_nfs4_access(inode, err);
3476 err = nfs4_handle_exception(NFS_SERVER(inode), err,
3477 &exception);
3478 } while (exception.retry);
3479 return err;
3480 }
3481
3482 /*
3483 * TODO: For the time being, we don't try to get any attributes
3484 * along with any of the zero-copy operations READ, READDIR,
3485 * READLINK, WRITE.
3486 *
3487 * In the case of the first three, we want to put the GETATTR
3488 * after the read-type operation -- this is because it is hard
3489 * to predict the length of a GETATTR response in v4, and thus
3490 * align the READ data correctly. This means that the GETATTR
3491 * may end up partially falling into the page cache, and we should
3492 * shift it into the 'tail' of the xdr_buf before processing.
3493 * To do this efficiently, we need to know the total length
3494 * of data received, which doesn't seem to be available outside
3495 * of the RPC layer.
3496 *
3497 * In the case of WRITE, we also want to put the GETATTR after
3498 * the operation -- in this case because we want to make sure
3499 * we get the post-operation mtime and size.
3500 *
3501 * Both of these changes to the XDR layer would in fact be quite
3502 * minor, but I decided to leave them for a subsequent patch.
3503 */
3504 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
3505 unsigned int pgbase, unsigned int pglen)
3506 {
3507 struct nfs4_readlink args = {
3508 .fh = NFS_FH(inode),
3509 .pgbase = pgbase,
3510 .pglen = pglen,
3511 .pages = &page,
3512 };
3513 struct nfs4_readlink_res res;
3514 struct rpc_message msg = {
3515 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
3516 .rpc_argp = &args,
3517 .rpc_resp = &res,
3518 };
3519
3520 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
3521 }
3522
3523 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
3524 unsigned int pgbase, unsigned int pglen)
3525 {
3526 struct nfs4_exception exception = { };
3527 int err;
3528 do {
3529 err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
3530 trace_nfs4_readlink(inode, err);
3531 err = nfs4_handle_exception(NFS_SERVER(inode), err,
3532 &exception);
3533 } while (exception.retry);
3534 return err;
3535 }
3536
3537 /*
3538 * This is just for mknod. open(O_CREAT) will always do ->open_context().
3539 */
3540 static int
3541 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
3542 int flags)
3543 {
3544 struct nfs4_label l, *ilabel = NULL;
3545 struct nfs_open_context *ctx;
3546 struct nfs4_state *state;
3547 int opened = 0;
3548 int status = 0;
3549
3550 ctx = alloc_nfs_open_context(dentry, FMODE_READ);
3551 if (IS_ERR(ctx))
3552 return PTR_ERR(ctx);
3553
3554 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
3555
3556 sattr->ia_mode &= ~current_umask();
3557 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, &opened);
3558 if (IS_ERR(state)) {
3559 status = PTR_ERR(state);
3560 goto out;
3561 }
3562 out:
3563 nfs4_label_release_security(ilabel);
3564 put_nfs_open_context(ctx);
3565 return status;
3566 }
3567
3568 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
3569 {
3570 struct nfs_server *server = NFS_SERVER(dir);
3571 struct nfs_removeargs args = {
3572 .fh = NFS_FH(dir),
3573 .name = *name,
3574 };
3575 struct nfs_removeres res = {
3576 .server = server,
3577 };
3578 struct rpc_message msg = {
3579 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
3580 .rpc_argp = &args,
3581 .rpc_resp = &res,
3582 };
3583 int status;
3584
3585 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
3586 if (status == 0)
3587 update_changeattr(dir, &res.cinfo);
3588 return status;
3589 }
3590
3591 static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
3592 {
3593 struct nfs4_exception exception = { };
3594 int err;
3595 do {
3596 err = _nfs4_proc_remove(dir, name);
3597 trace_nfs4_remove(dir, name, err);
3598 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3599 &exception);
3600 } while (exception.retry);
3601 return err;
3602 }
3603
3604 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
3605 {
3606 struct nfs_server *server = NFS_SERVER(dir);
3607 struct nfs_removeargs *args = msg->rpc_argp;
3608 struct nfs_removeres *res = msg->rpc_resp;
3609
3610 res->server = server;
3611 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
3612 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1);
3613
3614 nfs_fattr_init(res->dir_attr);
3615 }
3616
3617 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
3618 {
3619 nfs4_setup_sequence(NFS_SERVER(data->dir),
3620 &data->args.seq_args,
3621 &data->res.seq_res,
3622 task);
3623 }
3624
3625 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
3626 {
3627 struct nfs_unlinkdata *data = task->tk_calldata;
3628 struct nfs_removeres *res = &data->res;
3629
3630 if (!nfs4_sequence_done(task, &res->seq_res))
3631 return 0;
3632 if (nfs4_async_handle_error(task, res->server, NULL,
3633 &data->timeout) == -EAGAIN)
3634 return 0;
3635 update_changeattr(dir, &res->cinfo);
3636 return 1;
3637 }
3638
3639 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
3640 {
3641 struct nfs_server *server = NFS_SERVER(dir);
3642 struct nfs_renameargs *arg = msg->rpc_argp;
3643 struct nfs_renameres *res = msg->rpc_resp;
3644
3645 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
3646 res->server = server;
3647 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1);
3648 }
3649
3650 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
3651 {
3652 nfs4_setup_sequence(NFS_SERVER(data->old_dir),
3653 &data->args.seq_args,
3654 &data->res.seq_res,
3655 task);
3656 }
3657
3658 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
3659 struct inode *new_dir)
3660 {
3661 struct nfs_renamedata *data = task->tk_calldata;
3662 struct nfs_renameres *res = &data->res;
3663
3664 if (!nfs4_sequence_done(task, &res->seq_res))
3665 return 0;
3666 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
3667 return 0;
3668
3669 update_changeattr(old_dir, &res->old_cinfo);
3670 update_changeattr(new_dir, &res->new_cinfo);
3671 return 1;
3672 }
3673
3674 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3675 {
3676 struct nfs_server *server = NFS_SERVER(inode);
3677 struct nfs4_link_arg arg = {
3678 .fh = NFS_FH(inode),
3679 .dir_fh = NFS_FH(dir),
3680 .name = name,
3681 .bitmask = server->attr_bitmask,
3682 };
3683 struct nfs4_link_res res = {
3684 .server = server,
3685 .label = NULL,
3686 };
3687 struct rpc_message msg = {
3688 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
3689 .rpc_argp = &arg,
3690 .rpc_resp = &res,
3691 };
3692 int status = -ENOMEM;
3693
3694 res.fattr = nfs_alloc_fattr();
3695 if (res.fattr == NULL)
3696 goto out;
3697
3698 res.label = nfs4_label_alloc(server, GFP_KERNEL);
3699 if (IS_ERR(res.label)) {
3700 status = PTR_ERR(res.label);
3701 goto out;
3702 }
3703 arg.bitmask = nfs4_bitmask(server, res.label);
3704
3705 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3706 if (!status) {
3707 update_changeattr(dir, &res.cinfo);
3708 status = nfs_post_op_update_inode(inode, res.fattr);
3709 if (!status)
3710 nfs_setsecurity(inode, res.fattr, res.label);
3711 }
3712
3713
3714 nfs4_label_free(res.label);
3715
3716 out:
3717 nfs_free_fattr(res.fattr);
3718 return status;
3719 }
3720
3721 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3722 {
3723 struct nfs4_exception exception = { };
3724 int err;
3725 do {
3726 err = nfs4_handle_exception(NFS_SERVER(inode),
3727 _nfs4_proc_link(inode, dir, name),
3728 &exception);
3729 } while (exception.retry);
3730 return err;
3731 }
3732
3733 struct nfs4_createdata {
3734 struct rpc_message msg;
3735 struct nfs4_create_arg arg;
3736 struct nfs4_create_res res;
3737 struct nfs_fh fh;
3738 struct nfs_fattr fattr;
3739 struct nfs4_label *label;
3740 };
3741
3742 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
3743 struct qstr *name, struct iattr *sattr, u32 ftype)
3744 {
3745 struct nfs4_createdata *data;
3746
3747 data = kzalloc(sizeof(*data), GFP_KERNEL);
3748 if (data != NULL) {
3749 struct nfs_server *server = NFS_SERVER(dir);
3750
3751 data->label = nfs4_label_alloc(server, GFP_KERNEL);
3752 if (IS_ERR(data->label))
3753 goto out_free;
3754
3755 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
3756 data->msg.rpc_argp = &data->arg;
3757 data->msg.rpc_resp = &data->res;
3758 data->arg.dir_fh = NFS_FH(dir);
3759 data->arg.server = server;
3760 data->arg.name = name;
3761 data->arg.attrs = sattr;
3762 data->arg.ftype = ftype;
3763 data->arg.bitmask = nfs4_bitmask(server, data->label);
3764 data->res.server = server;
3765 data->res.fh = &data->fh;
3766 data->res.fattr = &data->fattr;
3767 data->res.label = data->label;
3768 nfs_fattr_init(data->res.fattr);
3769 }
3770 return data;
3771 out_free:
3772 kfree(data);
3773 return NULL;
3774 }
3775
3776 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
3777 {
3778 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
3779 &data->arg.seq_args, &data->res.seq_res, 1);
3780 if (status == 0) {
3781 update_changeattr(dir, &data->res.dir_cinfo);
3782 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label);
3783 }
3784 return status;
3785 }
3786
3787 static void nfs4_free_createdata(struct nfs4_createdata *data)
3788 {
3789 nfs4_label_free(data->label);
3790 kfree(data);
3791 }
3792
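/*
 * Symlink, mkdir and mknod below all share the nfs4_createdata helpers:
 * nfs4_alloc_createdata() fills in the common CREATE arguments,
 * nfs4_do_create() issues the RPC and instantiates the new dentry, and
 * nfs4_free_createdata() releases the label and the structure.  A minimal
 * sketch of a caller, condensed from _nfs4_proc_mkdir() below:
 *
 *	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
 *	if (data == NULL)
 *		return -ENOMEM;
 *	data->arg.label = label;
 *	status = nfs4_do_create(dir, dentry, data);
 *	nfs4_free_createdata(data);
 */
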
3793 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3794 struct page *page, unsigned int len, struct iattr *sattr,
3795 struct nfs4_label *label)
3796 {
3797 struct nfs4_createdata *data;
3798 int status = -ENAMETOOLONG;
3799
3800 if (len > NFS4_MAXPATHLEN)
3801 goto out;
3802
3803 status = -ENOMEM;
3804 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
3805 if (data == NULL)
3806 goto out;
3807
3808 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
3809 data->arg.u.symlink.pages = &page;
3810 data->arg.u.symlink.len = len;
3811 data->arg.label = label;
3812
3813 status = nfs4_do_create(dir, dentry, data);
3814
3815 nfs4_free_createdata(data);
3816 out:
3817 return status;
3818 }
3819
3820 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3821 struct page *page, unsigned int len, struct iattr *sattr)
3822 {
3823 struct nfs4_exception exception = { };
3824 struct nfs4_label l, *label = NULL;
3825 int err;
3826
3827 label = nfs4_label_init_security(dir, dentry, sattr, &l);
3828
3829 do {
3830 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label);
3831 trace_nfs4_symlink(dir, &dentry->d_name, err);
3832 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3833 &exception);
3834 } while (exception.retry);
3835
3836 nfs4_label_release_security(label);
3837 return err;
3838 }
3839
3840 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3841 struct iattr *sattr, struct nfs4_label *label)
3842 {
3843 struct nfs4_createdata *data;
3844 int status = -ENOMEM;
3845
3846 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
3847 if (data == NULL)
3848 goto out;
3849
3850 data->arg.label = label;
3851 status = nfs4_do_create(dir, dentry, data);
3852
3853 nfs4_free_createdata(data);
3854 out:
3855 return status;
3856 }
3857
3858 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3859 struct iattr *sattr)
3860 {
3861 struct nfs4_exception exception = { };
3862 struct nfs4_label l, *label = NULL;
3863 int err;
3864
3865 label = nfs4_label_init_security(dir, dentry, sattr, &l);
3866
3867 sattr->ia_mode &= ~current_umask();
3868 do {
3869 err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
3870 trace_nfs4_mkdir(dir, &dentry->d_name, err);
3871 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3872 &exception);
3873 } while (exception.retry);
3874 nfs4_label_release_security(label);
3875
3876 return err;
3877 }
3878
3879 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3880 u64 cookie, struct page **pages, unsigned int count, int plus)
3881 {
3882 struct inode *dir = d_inode(dentry);
3883 struct nfs4_readdir_arg args = {
3884 .fh = NFS_FH(dir),
3885 .pages = pages,
3886 .pgbase = 0,
3887 .count = count,
3888 .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask,
3889 .plus = plus,
3890 };
3891 struct nfs4_readdir_res res;
3892 struct rpc_message msg = {
3893 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
3894 .rpc_argp = &args,
3895 .rpc_resp = &res,
3896 .rpc_cred = cred,
3897 };
3898 int status;
3899
3900 dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__,
3901 dentry,
3902 (unsigned long long)cookie);
3903 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
3904 res.pgbase = args.pgbase;
3905 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
3906 if (status >= 0) {
3907 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
3908 status += args.pgbase;
3909 }
3910
3911 nfs_invalidate_atime(dir);
3912
3913 dprintk("%s: returns %d\n", __func__, status);
3914 return status;
3915 }
3916
3917 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3918 u64 cookie, struct page **pages, unsigned int count, int plus)
3919 {
3920 struct nfs4_exception exception = { };
3921 int err;
3922 do {
3923 err = _nfs4_proc_readdir(dentry, cred, cookie,
3924 pages, count, plus);
3925 trace_nfs4_readdir(d_inode(dentry), err);
3926 err = nfs4_handle_exception(NFS_SERVER(d_inode(dentry)), err,
3927 &exception);
3928 } while (exception.retry);
3929 return err;
3930 }
3931
3932 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3933 struct iattr *sattr, struct nfs4_label *label, dev_t rdev)
3934 {
3935 struct nfs4_createdata *data;
3936 int mode = sattr->ia_mode;
3937 int status = -ENOMEM;
3938
3939 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
3940 if (data == NULL)
3941 goto out;
3942
3943 if (S_ISFIFO(mode))
3944 data->arg.ftype = NF4FIFO;
3945 else if (S_ISBLK(mode)) {
3946 data->arg.ftype = NF4BLK;
3947 data->arg.u.device.specdata1 = MAJOR(rdev);
3948 data->arg.u.device.specdata2 = MINOR(rdev);
3949 }
3950 else if (S_ISCHR(mode)) {
3951 data->arg.ftype = NF4CHR;
3952 data->arg.u.device.specdata1 = MAJOR(rdev);
3953 data->arg.u.device.specdata2 = MINOR(rdev);
3954 } else if (!S_ISSOCK(mode)) {
3955 status = -EINVAL;
3956 goto out_free;
3957 }
3958
3959 data->arg.label = label;
3960 status = nfs4_do_create(dir, dentry, data);
3961 out_free:
3962 nfs4_free_createdata(data);
3963 out:
3964 return status;
3965 }
3966
3967 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3968 struct iattr *sattr, dev_t rdev)
3969 {
3970 struct nfs4_exception exception = { };
3971 struct nfs4_label l, *label = NULL;
3972 int err;
3973
3974 label = nfs4_label_init_security(dir, dentry, sattr, &l);
3975
3976 sattr->ia_mode &= ~current_umask();
3977 do {
3978 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
3979 trace_nfs4_mknod(dir, &dentry->d_name, err);
3980 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3981 &exception);
3982 } while (exception.retry);
3983
3984 nfs4_label_release_security(label);
3985
3986 return err;
3987 }
3988
3989 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
3990 struct nfs_fsstat *fsstat)
3991 {
3992 struct nfs4_statfs_arg args = {
3993 .fh = fhandle,
3994 .bitmask = server->attr_bitmask,
3995 };
3996 struct nfs4_statfs_res res = {
3997 .fsstat = fsstat,
3998 };
3999 struct rpc_message msg = {
4000 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
4001 .rpc_argp = &args,
4002 .rpc_resp = &res,
4003 };
4004
4005 nfs_fattr_init(fsstat->fattr);
4006 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4007 }
4008
4009 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
4010 {
4011 struct nfs4_exception exception = { };
4012 int err;
4013 do {
4014 err = nfs4_handle_exception(server,
4015 _nfs4_proc_statfs(server, fhandle, fsstat),
4016 &exception);
4017 } while (exception.retry);
4018 return err;
4019 }
4020
4021 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
4022 struct nfs_fsinfo *fsinfo)
4023 {
4024 struct nfs4_fsinfo_arg args = {
4025 .fh = fhandle,
4026 .bitmask = server->attr_bitmask,
4027 };
4028 struct nfs4_fsinfo_res res = {
4029 .fsinfo = fsinfo,
4030 };
4031 struct rpc_message msg = {
4032 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
4033 .rpc_argp = &args,
4034 .rpc_resp = &res,
4035 };
4036
4037 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4038 }
4039
4040 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4041 {
4042 struct nfs4_exception exception = { };
4043 unsigned long now = jiffies;
4044 int err;
4045
4046 do {
4047 err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
4048 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
4049 if (err == 0) {
4050 struct nfs_client *clp = server->nfs_client;
4051
4052 spin_lock(&clp->cl_lock);
4053 clp->cl_lease_time = fsinfo->lease_time * HZ;
4054 clp->cl_last_renewal = now;
4055 spin_unlock(&clp->cl_lock);
4056 break;
4057 }
4058 err = nfs4_handle_exception(server, err, &exception);
4059 } while (exception.retry);
4060 return err;
4061 }
4062
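/*
 * Note that a successful FSINFO also refreshes the local lease bookkeeping
 * above (cl_lease_time and cl_last_renewal), and that nfs4_proc_fsinfo()
 * below additionally records the server's preferred block size (checked by
 * the pNFS block layout) and sets the pNFS layout driver.
 */
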
4063 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4064 {
4065 int error;
4066
4067 nfs_fattr_init(fsinfo->fattr);
4068 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
4069 if (error == 0) {
4070 /* block layout checks this! */
4071 server->pnfs_blksize = fsinfo->blksize;
4072 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype);
4073 }
4074
4075 return error;
4076 }
4077
4078 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4079 struct nfs_pathconf *pathconf)
4080 {
4081 struct nfs4_pathconf_arg args = {
4082 .fh = fhandle,
4083 .bitmask = server->attr_bitmask,
4084 };
4085 struct nfs4_pathconf_res res = {
4086 .pathconf = pathconf,
4087 };
4088 struct rpc_message msg = {
4089 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
4090 .rpc_argp = &args,
4091 .rpc_resp = &res,
4092 };
4093
4094 /* None of the pathconf attributes are mandatory to implement */
4095 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
4096 memset(pathconf, 0, sizeof(*pathconf));
4097 return 0;
4098 }
4099
4100 nfs_fattr_init(pathconf->fattr);
4101 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4102 }
4103
4104 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4105 struct nfs_pathconf *pathconf)
4106 {
4107 struct nfs4_exception exception = { };
4108 int err;
4109
4110 do {
4111 err = nfs4_handle_exception(server,
4112 _nfs4_proc_pathconf(server, fhandle, pathconf),
4113 &exception);
4114 } while (exception.retry);
4115 return err;
4116 }
4117
4118 int nfs4_set_rw_stateid(nfs4_stateid *stateid,
4119 const struct nfs_open_context *ctx,
4120 const struct nfs_lock_context *l_ctx,
4121 fmode_t fmode)
4122 {
4123 const struct nfs_lockowner *lockowner = NULL;
4124
4125 if (l_ctx != NULL)
4126 lockowner = &l_ctx->lockowner;
4127 return nfs4_select_rw_stateid(stateid, ctx->state, fmode, lockowner);
4128 }
4129 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
4130
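/*
 * nfs4_set_rw_stateid() above picks the stateid to send with a READ or
 * WRITE: it extracts the lockowner from the lock context (if any) and lets
 * nfs4_select_rw_stateid() choose an appropriate stateid for that owner and
 * open mode.  Callers in the I/O paths treat an -EIO return as a lost lock,
 * as nfs4_stateid_is_current() below illustrates.
 */
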
4131 static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
4132 const struct nfs_open_context *ctx,
4133 const struct nfs_lock_context *l_ctx,
4134 fmode_t fmode)
4135 {
4136 nfs4_stateid current_stateid;
4137
4138 /* If the current stateid represents a lost lock, then exit */
4139 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
4140 return true;
4141 return nfs4_stateid_match(stateid, &current_stateid);
4142 }
4143
4144 static bool nfs4_error_stateid_expired(int err)
4145 {
4146 switch (err) {
4147 case -NFS4ERR_DELEG_REVOKED:
4148 case -NFS4ERR_ADMIN_REVOKED:
4149 case -NFS4ERR_BAD_STATEID:
4150 case -NFS4ERR_STALE_STATEID:
4151 case -NFS4ERR_OLD_STATEID:
4152 case -NFS4ERR_OPENMODE:
4153 case -NFS4ERR_EXPIRED:
4154 return true;
4155 }
4156 return false;
4157 }
4158
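/*
 * nfs4_error_stateid_expired() above lists the errors that indicate the
 * stateid sent with an I/O request is no longer usable.  The read and write
 * completion paths below combine it with nfs4_stateid_is_current(): if the
 * stateid has since been replaced by a newer one, the RPC is silently
 * restarted (and re-encoded with the current stateid by the rpc_prepare
 * callback) instead of being failed back to the caller.
 */
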
4159 void __nfs4_read_done_cb(struct nfs_pgio_header *hdr)
4160 {
4161 nfs_invalidate_atime(hdr->inode);
4162 }
4163
4164 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
4165 {
4166 struct nfs_server *server = NFS_SERVER(hdr->inode);
4167
4168 trace_nfs4_read(hdr, task->tk_status);
4169 if (nfs4_async_handle_error(task, server,
4170 hdr->args.context->state,
4171 NULL) == -EAGAIN) {
4172 rpc_restart_call_prepare(task);
4173 return -EAGAIN;
4174 }
4175
4176 __nfs4_read_done_cb(hdr);
4177 if (task->tk_status > 0)
4178 renew_lease(server, hdr->timestamp);
4179 return 0;
4180 }
4181
4182 static bool nfs4_read_stateid_changed(struct rpc_task *task,
4183 struct nfs_pgio_args *args)
4184 {
4185
4186 if (!nfs4_error_stateid_expired(task->tk_status) ||
4187 nfs4_stateid_is_current(&args->stateid,
4188 args->context,
4189 args->lock_context,
4190 FMODE_READ))
4191 return false;
4192 rpc_restart_call_prepare(task);
4193 return true;
4194 }
4195
4196 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4197 {
4198
4199 dprintk("--> %s\n", __func__);
4200
4201 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4202 return -EAGAIN;
4203 if (nfs4_read_stateid_changed(task, &hdr->args))
4204 return -EAGAIN;
4205 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4206 nfs4_read_done_cb(task, hdr);
4207 }
4208
4209 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
4210 struct rpc_message *msg)
4211 {
4212 hdr->timestamp = jiffies;
4213 hdr->pgio_done_cb = nfs4_read_done_cb;
4214 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
4215 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0);
4216 }
4217
4218 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
4219 struct nfs_pgio_header *hdr)
4220 {
4221 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode),
4222 &hdr->args.seq_args,
4223 &hdr->res.seq_res,
4224 task))
4225 return 0;
4226 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
4227 hdr->args.lock_context,
4228 hdr->rw_ops->rw_mode) == -EIO)
4229 return -EIO;
4230 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags)))
4231 return -EIO;
4232 return 0;
4233 }
4234
4235 static int nfs4_write_done_cb(struct rpc_task *task,
4236 struct nfs_pgio_header *hdr)
4237 {
4238 struct inode *inode = hdr->inode;
4239
4240 trace_nfs4_write(hdr, task->tk_status);
4241 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
4242 hdr->args.context->state,
4243 NULL) == -EAGAIN) {
4244 rpc_restart_call_prepare(task);
4245 return -EAGAIN;
4246 }
4247 if (task->tk_status >= 0) {
4248 renew_lease(NFS_SERVER(inode), hdr->timestamp);
4249 nfs_writeback_update_inode(hdr);
4250 }
4251 return 0;
4252 }
4253
4254 static bool nfs4_write_stateid_changed(struct rpc_task *task,
4255 struct nfs_pgio_args *args)
4256 {
4257
4258 if (!nfs4_error_stateid_expired(task->tk_status) ||
4259 nfs4_stateid_is_current(&args->stateid,
4260 args->context,
4261 args->lock_context,
4262 FMODE_WRITE))
4263 return false;
4264 rpc_restart_call_prepare(task);
4265 return true;
4266 }
4267
4268 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4269 {
4270 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4271 return -EAGAIN;
4272 if (nfs4_write_stateid_changed(task, &hdr->args))
4273 return -EAGAIN;
4274 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4275 nfs4_write_done_cb(task, hdr);
4276 }
4277
4278 static
4279 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
4280 {
4281 /* Don't request attributes for pNFS or O_DIRECT writes */
4282 if (hdr->ds_clp != NULL || hdr->dreq != NULL)
4283 return false;
4284 /* Otherwise, request attributes if and only if we don't hold
4285 * a delegation
4286 */
4287 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
4288 }
4289
4290 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
4291 struct rpc_message *msg)
4292 {
4293 struct nfs_server *server = NFS_SERVER(hdr->inode);
4294
4295 if (!nfs4_write_need_cache_consistency_data(hdr)) {
4296 hdr->args.bitmask = NULL;
4297 hdr->res.fattr = NULL;
4298 } else
4299 hdr->args.bitmask = server->cache_consistency_bitmask;
4300
4301 if (!hdr->pgio_done_cb)
4302 hdr->pgio_done_cb = nfs4_write_done_cb;
4303 hdr->res.server = server;
4304 hdr->timestamp = jiffies;
4305
4306 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
4307 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1);
4308 }
4309
4310 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
4311 {
4312 nfs4_setup_sequence(NFS_SERVER(data->inode),
4313 &data->args.seq_args,
4314 &data->res.seq_res,
4315 task);
4316 }
4317
4318 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
4319 {
4320 struct inode *inode = data->inode;
4321
4322 trace_nfs4_commit(data, task->tk_status);
4323 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
4324 NULL, NULL) == -EAGAIN) {
4325 rpc_restart_call_prepare(task);
4326 return -EAGAIN;
4327 }
4328 return 0;
4329 }
4330
4331 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
4332 {
4333 if (!nfs4_sequence_done(task, &data->res.seq_res))
4334 return -EAGAIN;
4335 return data->commit_done_cb(task, data);
4336 }
4337
4338 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
4339 {
4340 struct nfs_server *server = NFS_SERVER(data->inode);
4341
4342 if (data->commit_done_cb == NULL)
4343 data->commit_done_cb = nfs4_commit_done_cb;
4344 data->res.server = server;
4345 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
4346 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
4347 }
4348
4349 struct nfs4_renewdata {
4350 struct nfs_client *client;
4351 unsigned long timestamp;
4352 };
4353
4354 /*
4355 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
4356 * standalone procedure for queueing an asynchronous RENEW.
4357 */
4358 static void nfs4_renew_release(void *calldata)
4359 {
4360 struct nfs4_renewdata *data = calldata;
4361 struct nfs_client *clp = data->client;
4362
4363 if (atomic_read(&clp->cl_count) > 1)
4364 nfs4_schedule_state_renewal(clp);
4365 nfs_put_client(clp);
4366 kfree(data);
4367 }
4368
4369 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
4370 {
4371 struct nfs4_renewdata *data = calldata;
4372 struct nfs_client *clp = data->client;
4373 unsigned long timestamp = data->timestamp;
4374
4375 trace_nfs4_renew_async(clp, task->tk_status);
4376 switch (task->tk_status) {
4377 case 0:
4378 break;
4379 case -NFS4ERR_LEASE_MOVED:
4380 nfs4_schedule_lease_moved_recovery(clp);
4381 break;
4382 default:
4383 /* Unless we're shutting down, schedule state recovery! */
4384 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
4385 return;
4386 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
4387 nfs4_schedule_lease_recovery(clp);
4388 return;
4389 }
4390 nfs4_schedule_path_down_recovery(clp);
4391 }
4392 do_renew_lease(clp, timestamp);
4393 }
4394
4395 static const struct rpc_call_ops nfs4_renew_ops = {
4396 .rpc_call_done = nfs4_renew_done,
4397 .rpc_release = nfs4_renew_release,
4398 };
4399
4400 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
4401 {
4402 struct rpc_message msg = {
4403 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
4404 .rpc_argp = clp,
4405 .rpc_cred = cred,
4406 };
4407 struct nfs4_renewdata *data;
4408
4409 if (renew_flags == 0)
4410 return 0;
4411 if (!atomic_inc_not_zero(&clp->cl_count))
4412 return -EIO;
4413 data = kmalloc(sizeof(*data), GFP_NOFS);
4414 if (data == NULL)
4415 return -ENOMEM;
4416 data->client = clp;
4417 data->timestamp = jiffies;
4418 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
4419 &nfs4_renew_ops, data);
4420 }
4421
4422 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
4423 {
4424 struct rpc_message msg = {
4425 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
4426 .rpc_argp = clp,
4427 .rpc_cred = cred,
4428 };
4429 unsigned long now = jiffies;
4430 int status;
4431
4432 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4433 if (status < 0)
4434 return status;
4435 do_renew_lease(clp, now);
4436 return 0;
4437 }
4438
4439 static inline int nfs4_server_supports_acls(struct nfs_server *server)
4440 {
4441 return server->caps & NFS_CAP_ACLS;
4442 }
4443
4444 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
4445 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
4446 * the stack.
4447 */
4448 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
4449
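/*
 * A worked example, assuming the usual XATTR_SIZE_MAX of 64KB and a 4KB
 * PAGE_SIZE: NFS4ACL_MAXPAGES is 16, so the on-stack page-pointer arrays
 * used below cost 16 * sizeof(struct page *) = 128 bytes on a 64-bit build.
 * buf_to_pages_noslab() copies a caller-supplied ACL buffer into freshly
 * allocated pages, one PAGE_SIZE chunk at a time, and returns the number of
 * pages filled (or -ENOMEM after freeing the pages it had already allocated).
 */
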
4450 static int buf_to_pages_noslab(const void *buf, size_t buflen,
4451 struct page **pages, unsigned int *pgbase)
4452 {
4453 struct page *newpage, **spages;
4454 int rc = 0;
4455 size_t len;
4456 spages = pages;
4457
4458 do {
4459 len = min_t(size_t, PAGE_SIZE, buflen);
4460 newpage = alloc_page(GFP_KERNEL);
4461
4462 if (newpage == NULL)
4463 goto unwind;
4464 memcpy(page_address(newpage), buf, len);
4465 buf += len;
4466 buflen -= len;
4467 *pages++ = newpage;
4468 rc++;
4469 } while (buflen != 0);
4470
4471 return rc;
4472
4473 unwind:
4474 for(; rc > 0; rc--)
4475 __free_page(spages[rc-1]);
4476 return -ENOMEM;
4477 }
4478
4479 struct nfs4_cached_acl {
4480 int cached;
4481 size_t len;
4482 char data[0];
4483 };
4484
4485 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
4486 {
4487 struct nfs_inode *nfsi = NFS_I(inode);
4488
4489 spin_lock(&inode->i_lock);
4490 kfree(nfsi->nfs4_acl);
4491 nfsi->nfs4_acl = acl;
4492 spin_unlock(&inode->i_lock);
4493 }
4494
4495 static void nfs4_zap_acl_attr(struct inode *inode)
4496 {
4497 nfs4_set_cached_acl(inode, NULL);
4498 }
4499
4500 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
4501 {
4502 struct nfs_inode *nfsi = NFS_I(inode);
4503 struct nfs4_cached_acl *acl;
4504 int ret = -ENOENT;
4505
4506 spin_lock(&inode->i_lock);
4507 acl = nfsi->nfs4_acl;
4508 if (acl == NULL)
4509 goto out;
4510 if (buf == NULL) /* user is just asking for length */
4511 goto out_len;
4512 if (acl->cached == 0)
4513 goto out;
4514 ret = -ERANGE; /* see getxattr(2) man page */
4515 if (acl->len > buflen)
4516 goto out;
4517 memcpy(buf, acl->data, acl->len);
4518 out_len:
4519 ret = acl->len;
4520 out:
4521 spin_unlock(&inode->i_lock);
4522 return ret;
4523 }
4524
4525 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
4526 {
4527 struct nfs4_cached_acl *acl;
4528 size_t buflen = sizeof(*acl) + acl_len;
4529
4530 if (buflen <= PAGE_SIZE) {
4531 acl = kmalloc(buflen, GFP_KERNEL);
4532 if (acl == NULL)
4533 goto out;
4534 acl->cached = 1;
4535 _copy_from_pages(acl->data, pages, pgbase, acl_len);
4536 } else {
4537 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
4538 if (acl == NULL)
4539 goto out;
4540 acl->cached = 0;
4541 }
4542 acl->len = acl_len;
4543 out:
4544 nfs4_set_cached_acl(inode, acl);
4545 }
4546
4547 /*
4548 * The getxattr API returns the required buffer length when called with a
4549 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
4550 * the required buf. On a NULL buf, we send a page of data to the server
4551 * guessing that the ACL request can be serviced by a page. If so, we cache
4552 * up to a page of ACL data, and the second call to getxattr is serviced by
4553 * the cache. If not, we throw away the page and cache only the required
4554 * length. The next getxattr call will then produce another round trip to
4555 * the server, this time with an input buf of the required size.
4556 */
4557 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4558 {
4559 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
4560 struct nfs_getaclargs args = {
4561 .fh = NFS_FH(inode),
4562 .acl_pages = pages,
4563 .acl_len = buflen,
4564 };
4565 struct nfs_getaclres res = {
4566 .acl_len = buflen,
4567 };
4568 struct rpc_message msg = {
4569 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
4570 .rpc_argp = &args,
4571 .rpc_resp = &res,
4572 };
4573 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4574 int ret = -ENOMEM, i;
4575
4576 /* As long as we're doing a round trip to the server anyway,
4577 * let's be prepared for a page of acl data. */
4578 if (npages == 0)
4579 npages = 1;
4580 if (npages > ARRAY_SIZE(pages))
4581 return -ERANGE;
4582
4583 for (i = 0; i < npages; i++) {
4584 pages[i] = alloc_page(GFP_KERNEL);
4585 if (!pages[i])
4586 goto out_free;
4587 }
4588
4589 /* for decoding across pages */
4590 res.acl_scratch = alloc_page(GFP_KERNEL);
4591 if (!res.acl_scratch)
4592 goto out_free;
4593
4594 args.acl_len = npages * PAGE_SIZE;
4595 args.acl_pgbase = 0;
4596
4597 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
4598 __func__, buf, buflen, npages, args.acl_len);
4599 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
4600 &msg, &args.seq_args, &res.seq_res, 0);
4601 if (ret)
4602 goto out_free;
4603
4604 /* Handle the case where the passed-in buffer is too short */
4605 if (res.acl_flags & NFS4_ACL_TRUNC) {
4606 /* Did the user only issue a request for the acl length? */
4607 if (buf == NULL)
4608 goto out_ok;
4609 ret = -ERANGE;
4610 goto out_free;
4611 }
4612 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
4613 if (buf) {
4614 if (res.acl_len > buflen) {
4615 ret = -ERANGE;
4616 goto out_free;
4617 }
4618 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
4619 }
4620 out_ok:
4621 ret = res.acl_len;
4622 out_free:
4623 for (i = 0; i < npages; i++)
4624 if (pages[i])
4625 __free_page(pages[i]);
4626 if (res.acl_scratch)
4627 __free_page(res.acl_scratch);
4628 return ret;
4629 }
4630
4631 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4632 {
4633 struct nfs4_exception exception = { };
4634 ssize_t ret;
4635 do {
4636 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
4637 trace_nfs4_get_acl(inode, ret);
4638 if (ret >= 0)
4639 break;
4640 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
4641 } while (exception.retry);
4642 return ret;
4643 }
4644
4645 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
4646 {
4647 struct nfs_server *server = NFS_SERVER(inode);
4648 int ret;
4649
4650 if (!nfs4_server_supports_acls(server))
4651 return -EOPNOTSUPP;
4652 ret = nfs_revalidate_inode(server, inode);
4653 if (ret < 0)
4654 return ret;
4655 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
4656 nfs_zap_acl_cache(inode);
4657 ret = nfs4_read_cached_acl(inode, buf, buflen);
4658 if (ret != -ENOENT)
4659 /* -ENOENT is returned if there is no ACL, or if an ACL exists
4660 * but only its length (not its data) is cached */
4661 return ret;
4662 return nfs4_get_acl_uncached(inode, buf, buflen);
4663 }
4664
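/*
 * For reference, the two-step userspace pattern the ACL cache above is tuned
 * for looks roughly like this (illustrative only; "system.nfs4_acl" is the
 * xattr name used by the NFSv4 ACL tools):
 *
 *	len = getxattr(path, "system.nfs4_acl", NULL, 0);  // probe the length
 *	buf = malloc(len);
 *	getxattr(path, "system.nfs4_acl", buf, len);       // fetch the data
 */
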
4665 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4666 {
4667 struct nfs_server *server = NFS_SERVER(inode);
4668 struct page *pages[NFS4ACL_MAXPAGES];
4669 struct nfs_setaclargs arg = {
4670 .fh = NFS_FH(inode),
4671 .acl_pages = pages,
4672 .acl_len = buflen,
4673 };
4674 struct nfs_setaclres res;
4675 struct rpc_message msg = {
4676 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
4677 .rpc_argp = &arg,
4678 .rpc_resp = &res,
4679 };
4680 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4681 int ret, i;
4682
4683 if (!nfs4_server_supports_acls(server))
4684 return -EOPNOTSUPP;
4685 if (npages > ARRAY_SIZE(pages))
4686 return -ERANGE;
4687 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
4688 if (i < 0)
4689 return i;
4690 nfs4_inode_return_delegation(inode);
4691 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4692
4693 /*
4694 * Free each page after tx, so the only ref left is
4695 * held by the network stack
4696 */
4697 for (; i > 0; i--)
4698 put_page(pages[i-1]);
4699
4700 /*
4701 * An ACL update can result in an inode attribute update,
4702 * so mark the attribute cache invalid.
4703 */
4704 spin_lock(&inode->i_lock);
4705 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
4706 spin_unlock(&inode->i_lock);
4707 nfs_access_zap_cache(inode);
4708 nfs_zap_acl_cache(inode);
4709 return ret;
4710 }
4711
4712 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4713 {
4714 struct nfs4_exception exception = { };
4715 int err;
4716 do {
4717 err = __nfs4_proc_set_acl(inode, buf, buflen);
4718 trace_nfs4_set_acl(inode, err);
4719 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4720 &exception);
4721 } while (exception.retry);
4722 return err;
4723 }
4724
4725 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
4726 static int _nfs4_get_security_label(struct inode *inode, void *buf,
4727 size_t buflen)
4728 {
4729 struct nfs_server *server = NFS_SERVER(inode);
4730 struct nfs_fattr fattr;
4731 struct nfs4_label label = {0, 0, buflen, buf};
4732
4733 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
4734 struct nfs4_getattr_arg arg = {
4735 .fh = NFS_FH(inode),
4736 .bitmask = bitmask,
4737 };
4738 struct nfs4_getattr_res res = {
4739 .fattr = &fattr,
4740 .label = &label,
4741 .server = server,
4742 };
4743 struct rpc_message msg = {
4744 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
4745 .rpc_argp = &arg,
4746 .rpc_resp = &res,
4747 };
4748 int ret;
4749
4750 nfs_fattr_init(&fattr);
4751
4752 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
4753 if (ret)
4754 return ret;
4755 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
4756 return -ENOENT;
4757 if (buflen < label.len)
4758 return -ERANGE;
4759 return 0;
4760 }
4761
4762 static int nfs4_get_security_label(struct inode *inode, void *buf,
4763 size_t buflen)
4764 {
4765 struct nfs4_exception exception = { };
4766 int err;
4767
4768 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
4769 return -EOPNOTSUPP;
4770
4771 do {
4772 err = _nfs4_get_security_label(inode, buf, buflen);
4773 trace_nfs4_get_security_label(inode, err);
4774 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4775 &exception);
4776 } while (exception.retry);
4777 return err;
4778 }
4779
4780 static int _nfs4_do_set_security_label(struct inode *inode,
4781 struct nfs4_label *ilabel,
4782 struct nfs_fattr *fattr,
4783 struct nfs4_label *olabel)
4784 {
4785
4786 struct iattr sattr = {0};
4787 struct nfs_server *server = NFS_SERVER(inode);
4788 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
4789 struct nfs_setattrargs arg = {
4790 .fh = NFS_FH(inode),
4791 .iap = &sattr,
4792 .server = server,
4793 .bitmask = bitmask,
4794 .label = ilabel,
4795 };
4796 struct nfs_setattrres res = {
4797 .fattr = fattr,
4798 .label = olabel,
4799 .server = server,
4800 };
4801 struct rpc_message msg = {
4802 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
4803 .rpc_argp = &arg,
4804 .rpc_resp = &res,
4805 };
4806 int status;
4807
4808 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
4809
4810 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4811 if (status)
4812 dprintk("%s failed: %d\n", __func__, status);
4813
4814 return status;
4815 }
4816
4817 static int nfs4_do_set_security_label(struct inode *inode,
4818 struct nfs4_label *ilabel,
4819 struct nfs_fattr *fattr,
4820 struct nfs4_label *olabel)
4821 {
4822 struct nfs4_exception exception = { };
4823 int err;
4824
4825 do {
4826 err = _nfs4_do_set_security_label(inode, ilabel,
4827 fattr, olabel);
4828 trace_nfs4_set_security_label(inode, err);
4829 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4830 &exception);
4831 } while (exception.retry);
4832 return err;
4833 }
4834
4835 static int
4836 nfs4_set_security_label(struct dentry *dentry, const void *buf, size_t buflen)
4837 {
4838 struct nfs4_label ilabel, *olabel = NULL;
4839 struct nfs_fattr fattr;
4840 struct rpc_cred *cred;
4841 struct inode *inode = d_inode(dentry);
4842 int status;
4843
4844 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
4845 return -EOPNOTSUPP;
4846
4847 nfs_fattr_init(&fattr);
4848
4849 ilabel.pi = 0;
4850 ilabel.lfs = 0;
4851 ilabel.label = (char *)buf;
4852 ilabel.len = buflen;
4853
4854 cred = rpc_lookup_cred();
4855 if (IS_ERR(cred))
4856 return PTR_ERR(cred);
4857
4858 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
4859 if (IS_ERR(olabel)) {
4860 status = PTR_ERR(olabel);
4861 goto out;
4862 }
4863
4864 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel);
4865 if (status == 0)
4866 nfs_setsecurity(inode, &fattr, olabel);
4867
4868 nfs4_label_free(olabel);
4869 out:
4870 put_rpccred(cred);
4871 return status;
4872 }
4873 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */
4874
4875
4876 static int
4877 nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
4878 struct nfs4_state *state, long *timeout)
4879 {
4880 struct nfs_client *clp = server->nfs_client;
4881
4882 if (task->tk_status >= 0)
4883 return 0;
4884 switch(task->tk_status) {
4885 case -NFS4ERR_DELEG_REVOKED:
4886 case -NFS4ERR_ADMIN_REVOKED:
4887 case -NFS4ERR_BAD_STATEID:
4888 case -NFS4ERR_OPENMODE:
4889 if (state == NULL)
4890 break;
4891 if (nfs4_schedule_stateid_recovery(server, state) < 0)
4892 goto recovery_failed;
4893 goto wait_on_recovery;
4894 case -NFS4ERR_EXPIRED:
4895 if (state != NULL) {
4896 if (nfs4_schedule_stateid_recovery(server, state) < 0)
4897 goto recovery_failed;
4898 }
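/* Fall through: an expired state also requires lease recovery */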
4899 case -NFS4ERR_STALE_STATEID:
4900 case -NFS4ERR_STALE_CLIENTID:
4901 nfs4_schedule_lease_recovery(clp);
4902 goto wait_on_recovery;
4903 case -NFS4ERR_MOVED:
4904 if (nfs4_schedule_migration_recovery(server) < 0)
4905 goto recovery_failed;
4906 goto wait_on_recovery;
4907 case -NFS4ERR_LEASE_MOVED:
4908 nfs4_schedule_lease_moved_recovery(clp);
4909 goto wait_on_recovery;
4910 #if defined(CONFIG_NFS_V4_1)
4911 case -NFS4ERR_BADSESSION:
4912 case -NFS4ERR_BADSLOT:
4913 case -NFS4ERR_BAD_HIGH_SLOT:
4914 case -NFS4ERR_DEADSESSION:
4915 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
4916 case -NFS4ERR_SEQ_FALSE_RETRY:
4917 case -NFS4ERR_SEQ_MISORDERED:
4918 dprintk("%s ERROR %d, Reset session\n", __func__,
4919 task->tk_status);
4920 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
4921 goto wait_on_recovery;
4922 #endif /* CONFIG_NFS_V4_1 */
4923 case -NFS4ERR_DELAY:
4924 nfs_inc_server_stats(server, NFSIOS_DELAY);
4925 rpc_delay(task, nfs4_update_delay(timeout));
4926 goto restart_call;
4927 case -NFS4ERR_GRACE:
4928 rpc_delay(task, NFS4_POLL_RETRY_MAX);
4929 case -NFS4ERR_RETRY_UNCACHED_REP:
4930 case -NFS4ERR_OLD_STATEID:
4931 goto restart_call;
4932 }
4933 task->tk_status = nfs4_map_errors(task->tk_status);
4934 return 0;
4935 recovery_failed:
4936 task->tk_status = -EIO;
4937 return 0;
4938 wait_on_recovery:
4939 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
4940 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
4941 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
4942 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
4943 goto recovery_failed;
4944 restart_call:
4945 task->tk_status = 0;
4946 return -EAGAIN;
4947 }
4948
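/*
 * Return convention for nfs4_async_handle_error(): 0 means the error is
 * final (task->tk_status has been mapped, or set to -EIO when recovery could
 * not even be scheduled); -EAGAIN means the caller should restart the RPC
 * with tk_status cleared, typically after the task has slept on the client's
 * state-recovery wait queue or on an NFS4ERR_DELAY/GRACE back-off.
 */
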
4949 static void nfs4_init_boot_verifier(const struct nfs_client *clp,
4950 nfs4_verifier *bootverf)
4951 {
4952 __be32 verf[2];
4953
4954 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
4955 /* An impossible timestamp guarantees this value
4956 * will never match a generated boot time. */
4957 verf[0] = 0;
4958 verf[1] = cpu_to_be32(NSEC_PER_SEC + 1);
4959 } else {
4960 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
4961 verf[0] = cpu_to_be32(nn->boot_time.tv_sec);
4962 verf[1] = cpu_to_be32(nn->boot_time.tv_nsec);
4963 }
4964 memcpy(bootverf->data, verf, sizeof(bootverf->data));
4965 }
4966
4967 static int
4968 nfs4_init_nonuniform_client_string(struct nfs_client *clp)
4969 {
4970 int result;
4971 size_t len;
4972 char *str;
4973 bool retried = false;
4974
4975 if (clp->cl_owner_id != NULL)
4976 return 0;
4977 retry:
4978 rcu_read_lock();
4979 len = 10 + strlen(clp->cl_ipaddr) + 1 +
4980 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
4981 1 +
4982 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)) +
4983 1;
4984 rcu_read_unlock();
4985
4986 if (len > NFS4_OPAQUE_LIMIT + 1)
4987 return -EINVAL;
4988
4989 /*
4990 * Since this string is allocated at mount time, and held until the
4991 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
4992 * about a memory-reclaim deadlock.
4993 */
4994 str = kmalloc(len, GFP_KERNEL);
4995 if (!str)
4996 return -ENOMEM;
4997
4998 rcu_read_lock();
4999 result = scnprintf(str, len, "Linux NFSv4.0 %s/%s %s",
5000 clp->cl_ipaddr,
5001 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
5002 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO));
5003 rcu_read_unlock();
5004
5005 /* Did the address or protocol strings grow while we dropped the RCU lock? */
5006 if (result >= len) {
5007 kfree(str);
5008 if (retried)
5009 return -EINVAL;
5010 retried = true;
5011 goto retry;
5012 }
5013 clp->cl_owner_id = str;
5014 return 0;
5015 }
5016
5017 static int
5018 nfs4_init_uniquifier_client_string(struct nfs_client *clp)
5019 {
5020 int result;
5021 size_t len;
5022 char *str;
5023
5024 len = 10 + 10 + 1 + 10 + 1 +
5025 strlen(nfs4_client_id_uniquifier) + 1 +
5026 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5027
5028 if (len > NFS4_OPAQUE_LIMIT + 1)
5029 return -EINVAL;
5030
5031 /*
5032 * Since this string is allocated at mount time, and held until the
5033 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5034 * about a memory-reclaim deadlock.
5035 */
5036 str = kmalloc(len, GFP_KERNEL);
5037 if (!str)
5038 return -ENOMEM;
5039
5040 result = scnprintf(str, len, "Linux NFSv%u.%u %s/%s",
5041 clp->rpc_ops->version, clp->cl_minorversion,
5042 nfs4_client_id_uniquifier,
5043 clp->cl_rpcclient->cl_nodename);
5044 if (result >= len) {
5045 kfree(str);
5046 return -EINVAL;
5047 }
5048 clp->cl_owner_id = str;
5049 return 0;
5050 }
5051
5052 static int
5053 nfs4_init_uniform_client_string(struct nfs_client *clp)
5054 {
5055 int result;
5056 size_t len;
5057 char *str;
5058
5059 if (clp->cl_owner_id != NULL)
5060 return 0;
5061
5062 if (nfs4_client_id_uniquifier[0] != '\0')
5063 return nfs4_init_uniquifier_client_string(clp);
5064
5065 len = 10 + 10 + 1 + 10 + 1 +
5066 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5067
5068 if (len > NFS4_OPAQUE_LIMIT + 1)
5069 return -EINVAL;
5070
5071 /*
5072 * Since this string is allocated at mount time, and held until the
5073 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5074 * about a memory-reclaim deadlock.
5075 */
5076 str = kmalloc(len, GFP_KERNEL);
5077 if (!str)
5078 return -ENOMEM;
5079
5080 result = scnprintf(str, len, "Linux NFSv%u.%u %s",
5081 clp->rpc_ops->version, clp->cl_minorversion,
5082 clp->cl_rpcclient->cl_nodename);
5083 if (result >= len) {
5084 kfree(str);
5085 return -EINVAL;
5086 }
5087 clp->cl_owner_id = str;
5088 return 0;
5089 }
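
/*
 * Purely illustrative examples of the client identifier strings built above
 * (the host names and addresses here are made up):
 *
 *	non-uniform:  "Linux NFSv4.0 192.0.2.10/192.0.2.20 tcp"
 *	uniquifier:   "Linux NFSv4.1 site-wide-id/client.example.net"
 *	uniform:      "Linux NFSv4.1 client.example.net"
 */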
5090
5091 /*
5092 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
5093 * services. Advertise one based on the address family of the
5094 * clientaddr.
5095 */
5096 static unsigned int
5097 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
5098 {
5099 if (strchr(clp->cl_ipaddr, ':') != NULL)
5100 return scnprintf(buf, len, "tcp6");
5101 else
5102 return scnprintf(buf, len, "tcp");
5103 }
5104
5105 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
5106 {
5107 struct nfs4_setclientid *sc = calldata;
5108
5109 if (task->tk_status == 0)
5110 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
5111 }
5112
5113 static const struct rpc_call_ops nfs4_setclientid_ops = {
5114 .rpc_call_done = nfs4_setclientid_done,
5115 };
5116
5117 /**
5118 * nfs4_proc_setclientid - Negotiate client ID
5119 * @clp: state data structure
5120 * @program: RPC program for NFSv4 callback service
5121 * @port: IP port number for NFS4 callback service
5122 * @cred: RPC credential to use for this call
5123 * @res: where to place the result
5124 *
5125 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5126 */
5127 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
5128 unsigned short port, struct rpc_cred *cred,
5129 struct nfs4_setclientid_res *res)
5130 {
5131 nfs4_verifier sc_verifier;
5132 struct nfs4_setclientid setclientid = {
5133 .sc_verifier = &sc_verifier,
5134 .sc_prog = program,
5135 .sc_clnt = clp,
5136 };
5137 struct rpc_message msg = {
5138 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
5139 .rpc_argp = &setclientid,
5140 .rpc_resp = res,
5141 .rpc_cred = cred,
5142 };
5143 struct rpc_task *task;
5144 struct rpc_task_setup task_setup_data = {
5145 .rpc_client = clp->cl_rpcclient,
5146 .rpc_message = &msg,
5147 .callback_ops = &nfs4_setclientid_ops,
5148 .callback_data = &setclientid,
5149 .flags = RPC_TASK_TIMEOUT,
5150 };
5151 int status;
5152
5153 /* nfs_client_id4 */
5154 nfs4_init_boot_verifier(clp, &sc_verifier);
5155
5156 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
5157 status = nfs4_init_uniform_client_string(clp);
5158 else
5159 status = nfs4_init_nonuniform_client_string(clp);
5160
5161 if (status)
5162 goto out;
5163
5164 /* cb_client4 */
5165 setclientid.sc_netid_len =
5166 nfs4_init_callback_netid(clp,
5167 setclientid.sc_netid,
5168 sizeof(setclientid.sc_netid));
5169 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
5170 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
5171 clp->cl_ipaddr, port >> 8, port & 255);
5172
5173 dprintk("NFS call setclientid auth=%s, '%s'\n",
5174 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5175 clp->cl_owner_id);
5176 task = rpc_run_task(&task_setup_data);
5177 if (IS_ERR(task)) {
5178 status = PTR_ERR(task);
5179 goto out;
5180 }
5181 status = task->tk_status;
5182 if (setclientid.sc_cred) {
5183 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
5184 put_rpccred(setclientid.sc_cred);
5185 }
5186 rpc_put_task(task);
5187 out:
5188 trace_nfs4_setclientid(clp, status);
5189 dprintk("NFS reply setclientid: %d\n", status);
5190 return status;
5191 }
5192
5193 /**
5194 * nfs4_proc_setclientid_confirm - Confirm client ID
5195 * @clp: state data structure
5196 * @res: result of a previous SETCLIENTID
5197 * @cred: RPC credential to use for this call
5198 *
5199 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5200 */
5201 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
5202 struct nfs4_setclientid_res *arg,
5203 struct rpc_cred *cred)
5204 {
5205 struct rpc_message msg = {
5206 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
5207 .rpc_argp = arg,
5208 .rpc_cred = cred,
5209 };
5210 int status;
5211
5212 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
5213 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5214 clp->cl_clientid);
5215 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5216 trace_nfs4_setclientid_confirm(clp, status);
5217 dprintk("NFS reply setclientid_confirm: %d\n", status);
5218 return status;
5219 }
5220
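/*
 * The NFSv4.0 client establishment handshake is a two-step exchange, so a
 * caller roughly does the following (error handling omitted, and the
 * surrounding state-manager serialization is not shown):
 *
 *	struct nfs4_setclientid_res clid;
 *
 *	status = nfs4_proc_setclientid(clp, program, port, cred, &clid);
 *	if (status == 0)
 *		status = nfs4_proc_setclientid_confirm(clp, &clid, cred);
 */
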
5221 struct nfs4_delegreturndata {
5222 struct nfs4_delegreturnargs args;
5223 struct nfs4_delegreturnres res;
5224 struct nfs_fh fh;
5225 nfs4_stateid stateid;
5226 unsigned long timestamp;
5227 struct nfs_fattr fattr;
5228 int rpc_status;
5229 struct inode *inode;
5230 bool roc;
5231 u32 roc_barrier;
5232 };
5233
5234 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
5235 {
5236 struct nfs4_delegreturndata *data = calldata;
5237
5238 if (!nfs4_sequence_done(task, &data->res.seq_res))
5239 return;
5240
5241 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
5242 switch (task->tk_status) {
5243 case 0:
5244 renew_lease(data->res.server, data->timestamp);
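/* Fall through: success and already-forgotten stateids share the same completion */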
5245 case -NFS4ERR_ADMIN_REVOKED:
5246 case -NFS4ERR_DELEG_REVOKED:
5247 case -NFS4ERR_BAD_STATEID:
5248 case -NFS4ERR_OLD_STATEID:
5249 case -NFS4ERR_STALE_STATEID:
5250 case -NFS4ERR_EXPIRED:
5251 task->tk_status = 0;
5252 if (data->roc)
5253 pnfs_roc_set_barrier(data->inode, data->roc_barrier);
5254 break;
5255 default:
5256 if (nfs4_async_handle_error(task, data->res.server,
5257 NULL, NULL) == -EAGAIN) {
5258 rpc_restart_call_prepare(task);
5259 return;
5260 }
5261 }
5262 data->rpc_status = task->tk_status;
5263 }
5264
5265 static void nfs4_delegreturn_release(void *calldata)
5266 {
5267 struct nfs4_delegreturndata *data = calldata;
5268 struct inode *inode = data->inode;
5269
5270 if (inode) {
5271 if (data->roc)
5272 pnfs_roc_release(inode);
5273 nfs_iput_and_deactive(inode);
5274 }
5275 kfree(calldata);
5276 }
5277
5278 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
5279 {
5280 struct nfs4_delegreturndata *d_data;
5281
5282 d_data = (struct nfs4_delegreturndata *)data;
5283
5284 if (d_data->roc &&
5285 pnfs_roc_drain(d_data->inode, &d_data->roc_barrier, task))
5286 return;
5287
5288 nfs4_setup_sequence(d_data->res.server,
5289 &d_data->args.seq_args,
5290 &d_data->res.seq_res,
5291 task);
5292 }
5293
5294 static const struct rpc_call_ops nfs4_delegreturn_ops = {
5295 .rpc_call_prepare = nfs4_delegreturn_prepare,
5296 .rpc_call_done = nfs4_delegreturn_done,
5297 .rpc_release = nfs4_delegreturn_release,
5298 };
5299
5300 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5301 {
5302 struct nfs4_delegreturndata *data;
5303 struct nfs_server *server = NFS_SERVER(inode);
5304 struct rpc_task *task;
5305 struct rpc_message msg = {
5306 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
5307 .rpc_cred = cred,
5308 };
5309 struct rpc_task_setup task_setup_data = {
5310 .rpc_client = server->client,
5311 .rpc_message = &msg,
5312 .callback_ops = &nfs4_delegreturn_ops,
5313 .flags = RPC_TASK_ASYNC,
5314 };
5315 int status = 0;
5316
5317 data = kzalloc(sizeof(*data), GFP_NOFS);
5318 if (data == NULL)
5319 return -ENOMEM;
5320 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
5321 data->args.fhandle = &data->fh;
5322 data->args.stateid = &data->stateid;
5323 data->args.bitmask = server->cache_consistency_bitmask;
5324 nfs_copy_fh(&data->fh, NFS_FH(inode));
5325 nfs4_stateid_copy(&data->stateid, stateid);
5326 data->res.fattr = &data->fattr;
5327 data->res.server = server;
5328 nfs_fattr_init(data->res.fattr);
5329 data->timestamp = jiffies;
5330 data->rpc_status = 0;
5331 data->inode = nfs_igrab_and_active(inode);
5332 if (data->inode)
5333 data->roc = nfs4_roc(inode);
5334
5335 task_setup_data.callback_data = data;
5336 msg.rpc_argp = &data->args;
5337 msg.rpc_resp = &data->res;
5338 task = rpc_run_task(&task_setup_data);
5339 if (IS_ERR(task))
5340 return PTR_ERR(task);
5341 if (!issync)
5342 goto out;
5343 status = nfs4_wait_for_completion_rpc_task(task);
5344 if (status != 0)
5345 goto out;
5346 status = data->rpc_status;
5347 if (status == 0)
5348 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
5349 else
5350 nfs_refresh_inode(inode, &data->fattr);
5351 out:
5352 rpc_put_task(task);
5353 return status;
5354 }
5355
5356 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5357 {
5358 struct nfs_server *server = NFS_SERVER(inode);
5359 struct nfs4_exception exception = { };
5360 int err;
5361 do {
5362 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
5363 trace_nfs4_delegreturn(inode, err);
5364 switch (err) {
5365 case -NFS4ERR_STALE_STATEID:
5366 case -NFS4ERR_EXPIRED:
5367 case 0:
5368 return 0;
5369 }
5370 err = nfs4_handle_exception(server, err, &exception);
5371 } while (exception.retry);
5372 return err;
5373 }
5374
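/*
 * Note that nfs4_proc_delegreturn() above treats NFS4ERR_STALE_STATEID and
 * NFS4ERR_EXPIRED as success: in both cases the server no longer knows about
 * the delegation, so there is nothing left to return.
 */
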
5375 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
5376 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
5377
5378 /*
5379 * sleep, with exponential backoff, and retry the LOCK operation.
5380 */
5381 static unsigned long
5382 nfs4_set_lock_task_retry(unsigned long timeout)
5383 {
5384 freezable_schedule_timeout_killable_unsafe(timeout);
5385 timeout <<= 1;
5386 if (timeout > NFS4_LOCK_MAXTIMEOUT)
5387 return NFS4_LOCK_MAXTIMEOUT;
5388 return timeout;
5389 }
5390
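/*
 * With NFS4_LOCK_MINTIMEOUT of 1s and NFS4_LOCK_MAXTIMEOUT of 30s, a
 * blocking lock request that keeps being denied therefore sleeps for
 * 1s, 2s, 4s, 8s, 16s, 30s, 30s, ... between LOCK retries.
 */
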
5391 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5392 {
5393 struct inode *inode = state->inode;
5394 struct nfs_server *server = NFS_SERVER(inode);
5395 struct nfs_client *clp = server->nfs_client;
5396 struct nfs_lockt_args arg = {
5397 .fh = NFS_FH(inode),
5398 .fl = request,
5399 };
5400 struct nfs_lockt_res res = {
5401 .denied = request,
5402 };
5403 struct rpc_message msg = {
5404 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
5405 .rpc_argp = &arg,
5406 .rpc_resp = &res,
5407 .rpc_cred = state->owner->so_cred,
5408 };
5409 struct nfs4_lock_state *lsp;
5410 int status;
5411
5412 arg.lock_owner.clientid = clp->cl_clientid;
5413 status = nfs4_set_lock_state(state, request);
5414 if (status != 0)
5415 goto out;
5416 lsp = request->fl_u.nfs4_fl.owner;
5417 arg.lock_owner.id = lsp->ls_seqid.owner_id;
5418 arg.lock_owner.s_dev = server->s_dev;
5419 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
5420 switch (status) {
5421 case 0:
5422 request->fl_type = F_UNLCK;
5423 break;
5424 case -NFS4ERR_DENIED:
5425 status = 0;
5426 }
5427 request->fl_ops->fl_release_private(request);
5428 request->fl_ops = NULL;
5429 out:
5430 return status;
5431 }
5432
5433 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5434 {
5435 struct nfs4_exception exception = { };
5436 int err;
5437
5438 do {
5439 err = _nfs4_proc_getlk(state, cmd, request);
5440 trace_nfs4_get_lock(request, state, cmd, err);
5441 err = nfs4_handle_exception(NFS_SERVER(state->inode), err,
5442 &exception);
5443 } while (exception.retry);
5444 return err;
5445 }
5446
5447 static int do_vfs_lock(struct file *file, struct file_lock *fl)
5448 {
5449 int res = 0;
5450 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
5451 case FL_POSIX:
5452 res = posix_lock_file_wait(file, fl);
5453 break;
5454 case FL_FLOCK:
5455 res = flock_lock_file_wait(file, fl);
5456 break;
5457 default:
5458 BUG();
5459 }
5460 return res;
5461 }
5462
5463 struct nfs4_unlockdata {
5464 struct nfs_locku_args arg;
5465 struct nfs_locku_res res;
5466 struct nfs4_lock_state *lsp;
5467 struct nfs_open_context *ctx;
5468 struct file_lock fl;
5469 const struct nfs_server *server;
5470 unsigned long timestamp;
5471 };
5472
5473 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
5474 struct nfs_open_context *ctx,
5475 struct nfs4_lock_state *lsp,
5476 struct nfs_seqid *seqid)
5477 {
5478 struct nfs4_unlockdata *p;
5479 struct inode *inode = lsp->ls_state->inode;
5480
5481 p = kzalloc(sizeof(*p), GFP_NOFS);
5482 if (p == NULL)
5483 return NULL;
5484 p->arg.fh = NFS_FH(inode);
5485 p->arg.fl = &p->fl;
5486 p->arg.seqid = seqid;
5487 p->res.seqid = seqid;
5488 p->lsp = lsp;
5489 atomic_inc(&lsp->ls_count);
5490 /* Ensure we don't close file until we're done freeing locks! */
5491 p->ctx = get_nfs_open_context(ctx);
5492 get_file(fl->fl_file);
5493 memcpy(&p->fl, fl, sizeof(p->fl));
5494 p->server = NFS_SERVER(inode);
5495 return p;
5496 }
5497
5498 static void nfs4_locku_release_calldata(void *data)
5499 {
5500 struct nfs4_unlockdata *calldata = data;
5501 nfs_free_seqid(calldata->arg.seqid);
5502 nfs4_put_lock_state(calldata->lsp);
5503 put_nfs_open_context(calldata->ctx);
5504 fput(calldata->fl.fl_file);
5505 kfree(calldata);
5506 }
5507
5508 static void nfs4_locku_done(struct rpc_task *task, void *data)
5509 {
5510 struct nfs4_unlockdata *calldata = data;
5511
5512 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
5513 return;
5514 switch (task->tk_status) {
5515 case 0:
5516 renew_lease(calldata->server, calldata->timestamp);
5517 do_vfs_lock(calldata->fl.fl_file, &calldata->fl);
5518 if (nfs4_update_lock_stateid(calldata->lsp,
5519 &calldata->res.stateid))
5520 break;
5521 case -NFS4ERR_BAD_STATEID:
5522 case -NFS4ERR_OLD_STATEID:
5523 case -NFS4ERR_STALE_STATEID:
5524 case -NFS4ERR_EXPIRED:
5525 if (!nfs4_stateid_match(&calldata->arg.stateid,
5526 &calldata->lsp->ls_stateid))
5527 rpc_restart_call_prepare(task);
5528 break;
5529 default:
5530 if (nfs4_async_handle_error(task, calldata->server,
5531 NULL, NULL) == -EAGAIN)
5532 rpc_restart_call_prepare(task);
5533 }
5534 nfs_release_seqid(calldata->arg.seqid);
5535 }
5536
5537 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
5538 {
5539 struct nfs4_unlockdata *calldata = data;
5540
5541 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
5542 goto out_wait;
5543 nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid);
5544 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
5545 /* Note: exit _without_ running nfs4_locku_done */
5546 goto out_no_action;
5547 }
5548 calldata->timestamp = jiffies;
5549 if (nfs4_setup_sequence(calldata->server,
5550 &calldata->arg.seq_args,
5551 &calldata->res.seq_res,
5552 task) != 0)
5553 nfs_release_seqid(calldata->arg.seqid);
5554 return;
5555 out_no_action:
5556 task->tk_action = NULL;
5557 out_wait:
5558 nfs4_sequence_done(task, &calldata->res.seq_res);
5559 }
5560
5561 static const struct rpc_call_ops nfs4_locku_ops = {
5562 .rpc_call_prepare = nfs4_locku_prepare,
5563 .rpc_call_done = nfs4_locku_done,
5564 .rpc_release = nfs4_locku_release_calldata,
5565 };
5566
5567 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
5568 struct nfs_open_context *ctx,
5569 struct nfs4_lock_state *lsp,
5570 struct nfs_seqid *seqid)
5571 {
5572 struct nfs4_unlockdata *data;
5573 struct rpc_message msg = {
5574 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
5575 .rpc_cred = ctx->cred,
5576 };
5577 struct rpc_task_setup task_setup_data = {
5578 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
5579 .rpc_message = &msg,
5580 .callback_ops = &nfs4_locku_ops,
5581 .workqueue = nfsiod_workqueue,
5582 .flags = RPC_TASK_ASYNC,
5583 };
5584
5585 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
5586 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
5587
5588 /* Ensure this is an unlock - when canceling a lock, the
5589 * canceled lock is passed in, and it won't be an unlock.
5590 */
5591 fl->fl_type = F_UNLCK;
5592
5593 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
5594 if (data == NULL) {
5595 nfs_free_seqid(seqid);
5596 return ERR_PTR(-ENOMEM);
5597 }
5598
5599 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
5600 msg.rpc_argp = &data->arg;
5601 msg.rpc_resp = &data->res;
5602 task_setup_data.callback_data = data;
5603 return rpc_run_task(&task_setup_data);
5604 }
5605
5606 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
5607 {
5608 struct inode *inode = state->inode;
5609 struct nfs4_state_owner *sp = state->owner;
5610 struct nfs_inode *nfsi = NFS_I(inode);
5611 struct nfs_seqid *seqid;
5612 struct nfs4_lock_state *lsp;
5613 struct rpc_task *task;
5614 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
5615 int status = 0;
5616 unsigned char fl_flags = request->fl_flags;
5617
5618 status = nfs4_set_lock_state(state, request);
5619 /* Unlock _before_ we do the RPC call */
5620 request->fl_flags |= FL_EXISTS;
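/* FL_EXISTS makes do_vfs_lock() below return -ENOENT when no
 * matching lock is held locally, letting us skip the LOCKU RPC. */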
5621 /* Exclude nfs_delegation_claim_locks() */
5622 mutex_lock(&sp->so_delegreturn_mutex);
5623 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
5624 down_read(&nfsi->rwsem);
5625 if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
5626 up_read(&nfsi->rwsem);
5627 mutex_unlock(&sp->so_delegreturn_mutex);
5628 goto out;
5629 }
5630 up_read(&nfsi->rwsem);
5631 mutex_unlock(&sp->so_delegreturn_mutex);
5632 if (status != 0)
5633 goto out;
5634 /* Is this a delegated lock? */
5635 lsp = request->fl_u.nfs4_fl.owner;
5636 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
5637 goto out;
5638 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
5639 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
5640 status = -ENOMEM;
5641 if (IS_ERR(seqid))
5642 goto out;
5643 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
5644 status = PTR_ERR(task);
5645 if (IS_ERR(task))
5646 goto out;
5647 status = nfs4_wait_for_completion_rpc_task(task);
5648 rpc_put_task(task);
5649 out:
5650 request->fl_flags = fl_flags;
5651 trace_nfs4_unlock(request, state, F_SETLK, status);
5652 return status;
5653 }
5654
5655 struct nfs4_lockdata {
5656 struct nfs_lock_args arg;
5657 struct nfs_lock_res res;
5658 struct nfs4_lock_state *lsp;
5659 struct nfs_open_context *ctx;
5660 struct file_lock fl;
5661 unsigned long timestamp;
5662 int rpc_status;
5663 int cancelled;
5664 struct nfs_server *server;
5665 };
5666
5667 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
5668 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
5669 gfp_t gfp_mask)
5670 {
5671 struct nfs4_lockdata *p;
5672 struct inode *inode = lsp->ls_state->inode;
5673 struct nfs_server *server = NFS_SERVER(inode);
5674 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
5675
5676 p = kzalloc(sizeof(*p), gfp_mask);
5677 if (p == NULL)
5678 return NULL;
5679
5680 p->arg.fh = NFS_FH(inode);
5681 p->arg.fl = &p->fl;
5682 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
5683 if (IS_ERR(p->arg.open_seqid))
5684 goto out_free;
5685 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
5686 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
5687 if (IS_ERR(p->arg.lock_seqid))
5688 goto out_free_seqid;
5689 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
5690 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
5691 p->arg.lock_owner.s_dev = server->s_dev;
5692 p->res.lock_seqid = p->arg.lock_seqid;
5693 p->lsp = lsp;
5694 p->server = server;
5695 atomic_inc(&lsp->ls_count);
5696 p->ctx = get_nfs_open_context(ctx);
5697 get_file(fl->fl_file);
5698 memcpy(&p->fl, fl, sizeof(p->fl));
5699 return p;
5700 out_free_seqid:
5701 nfs_free_seqid(p->arg.open_seqid);
5702 out_free:
5703 kfree(p);
5704 return NULL;
5705 }
5706
5707 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
5708 {
5709 struct nfs4_lockdata *data = calldata;
5710 struct nfs4_state *state = data->lsp->ls_state;
5711
5712 dprintk("%s: begin!\n", __func__);
5713 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
5714 goto out_wait;
5715 /* Do we need to do an open_to_lock_owner? */
5716 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
5717 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
5718 goto out_release_lock_seqid;
5719 }
5720 nfs4_stateid_copy(&data->arg.open_stateid,
5721 &state->open_stateid);
5722 data->arg.new_lock_owner = 1;
5723 data->res.open_seqid = data->arg.open_seqid;
5724 } else {
5725 data->arg.new_lock_owner = 0;
5726 nfs4_stateid_copy(&data->arg.lock_stateid,
5727 &data->lsp->ls_stateid);
5728 }
5729 if (!nfs4_valid_open_stateid(state)) {
5730 data->rpc_status = -EBADF;
5731 task->tk_action = NULL;
5732 goto out_release_open_seqid;
5733 }
5734 data->timestamp = jiffies;
5735 if (nfs4_setup_sequence(data->server,
5736 &data->arg.seq_args,
5737 &data->res.seq_res,
5738 task) == 0)
5739 return;
5740 out_release_open_seqid:
5741 nfs_release_seqid(data->arg.open_seqid);
5742 out_release_lock_seqid:
5743 nfs_release_seqid(data->arg.lock_seqid);
5744 out_wait:
5745 nfs4_sequence_done(task, &data->res.seq_res);
5746 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
5747 }
5748
5749 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
5750 {
5751 struct nfs4_lockdata *data = calldata;
5752 struct nfs4_lock_state *lsp = data->lsp;
5753
5754 dprintk("%s: begin!\n", __func__);
5755
5756 if (!nfs4_sequence_done(task, &data->res.seq_res))
5757 return;
5758
5759 data->rpc_status = task->tk_status;
5760 switch (task->tk_status) {
5761 case 0:
5762 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
5763 data->timestamp);
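/* A brand new lock was granted: record it locally too. FL_SLEEP and
 * FL_ACCESS are cleared because the lock is already held on the
 * server, so there is nothing to block on or merely test for. */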
5764 if (data->arg.new_lock) {
5765 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
5766 if (do_vfs_lock(data->fl.fl_file, &data->fl) < 0) {
5767 rpc_restart_call_prepare(task);
5768 break;
5769 }
5770 }
5771 if (data->arg.new_lock_owner != 0) {
5772 nfs_confirm_seqid(&lsp->ls_seqid, 0);
5773 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
5774 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
5775 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
5776 rpc_restart_call_prepare(task);
5777 break;
5778 case -NFS4ERR_BAD_STATEID:
5779 case -NFS4ERR_OLD_STATEID:
5780 case -NFS4ERR_STALE_STATEID:
5781 case -NFS4ERR_EXPIRED:
5782 if (data->arg.new_lock_owner != 0) {
5783 if (!nfs4_stateid_match(&data->arg.open_stateid,
5784 &lsp->ls_state->open_stateid))
5785 rpc_restart_call_prepare(task);
5786 } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
5787 &lsp->ls_stateid))
5788 rpc_restart_call_prepare(task);
5789 }
5790 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
5791 }
5792
5793 static void nfs4_lock_release(void *calldata)
5794 {
5795 struct nfs4_lockdata *data = calldata;
5796
5797 dprintk("%s: begin!\n", __func__);
5798 nfs_free_seqid(data->arg.open_seqid);
5799 if (data->cancelled != 0) {
5800 struct rpc_task *task;
5801 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
5802 data->arg.lock_seqid);
5803 if (!IS_ERR(task))
5804 rpc_put_task_async(task);
5805 dprintk("%s: cancelling lock!\n", __func__);
5806 } else
5807 nfs_free_seqid(data->arg.lock_seqid);
5808 nfs4_put_lock_state(data->lsp);
5809 put_nfs_open_context(data->ctx);
5810 fput(data->fl.fl_file);
5811 kfree(data);
5812 dprintk("%s: done!\n", __func__);
5813 }
5814
5815 static const struct rpc_call_ops nfs4_lock_ops = {
5816 .rpc_call_prepare = nfs4_lock_prepare,
5817 .rpc_call_done = nfs4_lock_done,
5818 .rpc_release = nfs4_lock_release,
5819 };
5820
5821 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
5822 {
5823 switch (error) {
5824 case -NFS4ERR_ADMIN_REVOKED:
5825 case -NFS4ERR_BAD_STATEID:
5826 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
5827 if (new_lock_owner != 0 ||
5828 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
5829 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
5830 break;
5831 case -NFS4ERR_STALE_STATEID:
5832 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
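/* Fall through: a stale stateid also means the lease needs recovery */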
5833 case -NFS4ERR_EXPIRED:
5834 nfs4_schedule_lease_recovery(server->nfs_client);
5835 }
5836 }
5837
5838 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
5839 {
5840 struct nfs4_lockdata *data;
5841 struct rpc_task *task;
5842 struct rpc_message msg = {
5843 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
5844 .rpc_cred = state->owner->so_cred,
5845 };
5846 struct rpc_task_setup task_setup_data = {
5847 .rpc_client = NFS_CLIENT(state->inode),
5848 .rpc_message = &msg,
5849 .callback_ops = &nfs4_lock_ops,
5850 .workqueue = nfsiod_workqueue,
5851 .flags = RPC_TASK_ASYNC,
5852 };
5853 int ret;
5854
5855 dprintk("%s: begin!\n", __func__);
5856 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
5857 fl->fl_u.nfs4_fl.owner,
5858 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
5859 if (data == NULL)
5860 return -ENOMEM;
5861 if (IS_SETLKW(cmd))
5862 data->arg.block = 1;
5863 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
5864 msg.rpc_argp = &data->arg;
5865 msg.rpc_resp = &data->res;
5866 task_setup_data.callback_data = data;
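/* Reclaims and expired-lock recovery run as privileged sequence
 * operations so they can make progress while state recovery is in
 * progress; only a genuinely new lock is later registered with the
 * VFS in nfs4_lock_done(). */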
5867 if (recovery_type > NFS_LOCK_NEW) {
5868 if (recovery_type == NFS_LOCK_RECLAIM)
5869 data->arg.reclaim = NFS_LOCK_RECLAIM;
5870 nfs4_set_sequence_privileged(&data->arg.seq_args);
5871 } else
5872 data->arg.new_lock = 1;
5873 task = rpc_run_task(&task_setup_data);
5874 if (IS_ERR(task))
5875 return PTR_ERR(task);
5876 ret = nfs4_wait_for_completion_rpc_task(task);
5877 if (ret == 0) {
5878 ret = data->rpc_status;
5879 if (ret)
5880 nfs4_handle_setlk_error(data->server, data->lsp,
5881 data->arg.new_lock_owner, ret);
5882 } else
5883 data->cancelled = 1;
5884 rpc_put_task(task);
5885 dprintk("%s: done, ret = %d!\n", __func__, ret);
5886 return ret;
5887 }
5888
5889 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
5890 {
5891 struct nfs_server *server = NFS_SERVER(state->inode);
5892 struct nfs4_exception exception = {
5893 .inode = state->inode,
5894 };
5895 int err;
5896
5897 do {
5898 /* Cache the lock if possible... */
5899 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
5900 return 0;
5901 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
5902 trace_nfs4_lock_reclaim(request, state, F_SETLK, err);
5903 if (err != -NFS4ERR_DELAY)
5904 break;
5905 nfs4_handle_exception(server, err, &exception);
5906 } while (exception.retry);
5907 return err;
5908 }
5909
5910 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
5911 {
5912 struct nfs_server *server = NFS_SERVER(state->inode);
5913 struct nfs4_exception exception = {
5914 .inode = state->inode,
5915 };
5916 int err;
5917
5918 err = nfs4_set_lock_state(state, request);
5919 if (err != 0)
5920 return err;
5921 if (!recover_lost_locks) {
5922 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
5923 return 0;
5924 }
5925 do {
5926 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
5927 return 0;
5928 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
5929 trace_nfs4_lock_expired(request, state, F_SETLK, err);
5930 switch (err) {
5931 default:
5932 goto out;
5933 case -NFS4ERR_GRACE:
5934 case -NFS4ERR_DELAY:
5935 nfs4_handle_exception(server, err, &exception);
5936 err = 0;
5937 }
5938 } while (exception.retry);
5939 out:
5940 return err;
5941 }
5942
5943 #if defined(CONFIG_NFS_V4_1)
5944 /**
5945 * nfs41_check_expired_locks - possibly free a lock stateid
5946 *
5947 * @state: NFSv4 state for an inode
5948 *
5949 * Returns NFS_OK if recovery for this stateid is now finished.
5950 * Otherwise a negative NFS4ERR value is returned.
5951 */
5952 static int nfs41_check_expired_locks(struct nfs4_state *state)
5953 {
5954 int status, ret = -NFS4ERR_BAD_STATEID;
5955 struct nfs4_lock_state *lsp;
5956 struct nfs_server *server = NFS_SERVER(state->inode);
5957
5958 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
5959 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
5960 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
5961
5962 status = nfs41_test_stateid(server,
5963 &lsp->ls_stateid,
5964 cred);
5965 trace_nfs4_test_lock_stateid(state, lsp, status);
5966 if (status != NFS_OK) {
5967 /* Free the stateid unless the server
5968 * informs us the stateid is unrecognized. */
5969 if (status != -NFS4ERR_BAD_STATEID)
5970 nfs41_free_stateid(server,
5971 &lsp->ls_stateid,
5972 cred);
5973 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
5974 ret = status;
5975 }
5976 }
5977 }
5978
5979 return ret;
5980 }
5981
5982 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
5983 {
5984 int status = NFS_OK;
5985
5986 if (test_bit(LK_STATE_IN_USE, &state->flags))
5987 status = nfs41_check_expired_locks(state);
5988 if (status != NFS_OK)
5989 status = nfs4_lock_expired(state, request);
5990 return status;
5991 }
5992 #endif
5993
5994 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5995 {
5996 struct nfs_inode *nfsi = NFS_I(state->inode);
5997 unsigned char fl_flags = request->fl_flags;
5998 int status = -ENOLCK;
5999
6000 if ((fl_flags & FL_POSIX) &&
6001 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
6002 goto out;
6003 /* Is this a delegated open? */
6004 status = nfs4_set_lock_state(state, request);
6005 if (status != 0)
6006 goto out;
6007 request->fl_flags |= FL_ACCESS;
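/* FL_ACCESS asks the VFS only to test for local conflicts; no lock
 * is actually recorded by this call. */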
6008 status = do_vfs_lock(request->fl_file, request);
6009 if (status < 0)
6010 goto out;
6011 down_read(&nfsi->rwsem);
6012 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
6013 /* Yes: cache locks! */
6014 /* ...but avoid races with delegation recall... */
6015 request->fl_flags = fl_flags & ~FL_SLEEP;
6016 status = do_vfs_lock(request->fl_file, request);
6017 up_read(&nfsi->rwsem);
6018 goto out;
6019 }
6020 up_read(&nfsi->rwsem);
6021 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
6022 out:
6023 request->fl_flags = fl_flags;
6024 return status;
6025 }
6026
6027 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6028 {
6029 struct nfs4_exception exception = {
6030 .state = state,
6031 .inode = state->inode,
6032 };
6033 int err;
6034
6035 do {
6036 err = _nfs4_proc_setlk(state, cmd, request);
6037 trace_nfs4_set_lock(request, state, cmd, err);
6038 if (err == -NFS4ERR_DENIED)
6039 err = -EAGAIN;
6040 err = nfs4_handle_exception(NFS_SERVER(state->inode),
6041 err, &exception);
6042 } while (exception.retry);
6043 return err;
6044 }
6045
6046 static int
6047 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
6048 {
6049 struct nfs_open_context *ctx;
6050 struct nfs4_state *state;
6051 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
6052 int status;
6053
6054 /* verify open state */
6055 ctx = nfs_file_open_context(filp);
6056 state = ctx->state;
6057
6058 if (request->fl_start < 0 || request->fl_end < 0)
6059 return -EINVAL;
6060
6061 if (IS_GETLK(cmd)) {
6062 if (state != NULL)
6063 return nfs4_proc_getlk(state, F_GETLK, request);
6064 return 0;
6065 }
6066
6067 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
6068 return -EINVAL;
6069
6070 if (request->fl_type == F_UNLCK) {
6071 if (state != NULL)
6072 return nfs4_proc_unlck(state, cmd, request);
6073 return 0;
6074 }
6075
6076 if (state == NULL)
6077 return -ENOLCK;
6078 /*
6079 * Don't rely on the VFS having checked the file open mode,
6080 * since it won't do this for flock() locks.
6081 */
6082 switch (request->fl_type) {
6083 case F_RDLCK:
6084 if (!(filp->f_mode & FMODE_READ))
6085 return -EBADF;
6086 break;
6087 case F_WRLCK:
6088 if (!(filp->f_mode & FMODE_WRITE))
6089 return -EBADF;
6090 }
6091
6092 do {
6093 status = nfs4_proc_setlk(state, cmd, request);
6094 if ((status != -EAGAIN) || IS_SETLK(cmd))
6095 break;
6096 timeout = nfs4_set_lock_task_retry(timeout);
6097 status = -ERESTARTSYS;
6098 if (signalled())
6099 break;
6100 } while (status < 0);
6101 return status;
6102 }
6103
6104 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
6105 {
6106 struct nfs_server *server = NFS_SERVER(state->inode);
6107 int err;
6108
6109 err = nfs4_set_lock_state(state, fl);
6110 if (err != 0)
6111 return err;
6112 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
6113 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
6114 }
6115
6116 struct nfs_release_lockowner_data {
6117 struct nfs4_lock_state *lsp;
6118 struct nfs_server *server;
6119 struct nfs_release_lockowner_args args;
6120 struct nfs_release_lockowner_res res;
6121 unsigned long timestamp;
6122 };
6123
6124 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
6125 {
6126 struct nfs_release_lockowner_data *data = calldata;
6127 struct nfs_server *server = data->server;
6128 nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
6129 &data->args.seq_args, &data->res.seq_res, task);
6130 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6131 data->timestamp = jiffies;
6132 }
6133
6134 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
6135 {
6136 struct nfs_release_lockowner_data *data = calldata;
6137 struct nfs_server *server = data->server;
6138
6139 nfs40_sequence_done(task, &data->res.seq_res);
6140
6141 switch (task->tk_status) {
6142 case 0:
6143 renew_lease(server, data->timestamp);
6144 break;
6145 case -NFS4ERR_STALE_CLIENTID:
6146 case -NFS4ERR_EXPIRED:
6147 nfs4_schedule_lease_recovery(server->nfs_client);
6148 break;
6149 case -NFS4ERR_LEASE_MOVED:
6150 case -NFS4ERR_DELAY:
6151 if (nfs4_async_handle_error(task, server,
6152 NULL, NULL) == -EAGAIN)
6153 rpc_restart_call_prepare(task);
6154 }
6155 }
6156
6157 static void nfs4_release_lockowner_release(void *calldata)
6158 {
6159 struct nfs_release_lockowner_data *data = calldata;
6160 nfs4_free_lock_state(data->server, data->lsp);
6161 kfree(calldata);
6162 }
6163
6164 static const struct rpc_call_ops nfs4_release_lockowner_ops = {
6165 .rpc_call_prepare = nfs4_release_lockowner_prepare,
6166 .rpc_call_done = nfs4_release_lockowner_done,
6167 .rpc_release = nfs4_release_lockowner_release,
6168 };
6169
6170 static void
6171 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
6172 {
6173 struct nfs_release_lockowner_data *data;
6174 struct rpc_message msg = {
6175 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
6176 };
6177
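/* RELEASE_LOCKOWNER only exists in NFSv4.0; later minor versions
 * retire lock owners by other means. */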
6178 if (server->nfs_client->cl_mvops->minor_version != 0)
6179 return;
6180
6181 data = kmalloc(sizeof(*data), GFP_NOFS);
6182 if (!data)
6183 return;
6184 data->lsp = lsp;
6185 data->server = server;
6186 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6187 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
6188 data->args.lock_owner.s_dev = server->s_dev;
6189
6190 msg.rpc_argp = &data->args;
6191 msg.rpc_resp = &data->res;
6192 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
6193 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
6194 }
6195
6196 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
6197
6198 static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
6199 const void *buf, size_t buflen,
6200 int flags, int type)
6201 {
6202 if (strcmp(key, "") != 0)
6203 return -EINVAL;
6204
6205 return nfs4_proc_set_acl(d_inode(dentry), buf, buflen);
6206 }
6207
6208 static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
6209 void *buf, size_t buflen, int type)
6210 {
6211 if (strcmp(key, "") != 0)
6212 return -EINVAL;
6213
6214 return nfs4_proc_get_acl(d_inode(dentry), buf, buflen);
6215 }
6216
6217 static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
6218 size_t list_len, const char *name,
6219 size_t name_len, int type)
6220 {
6221 size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
6222
6223 if (!nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry))))
6224 return 0;
6225
6226 if (list && len <= list_len)
6227 memcpy(list, XATTR_NAME_NFSV4_ACL, len);
6228 return len;
6229 }
6230
6231 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
6232 static inline int nfs4_server_supports_labels(struct nfs_server *server)
6233 {
6234 return server->caps & NFS_CAP_SECURITY_LABEL;
6235 }
6236
6237 static int nfs4_xattr_set_nfs4_label(struct dentry *dentry, const char *key,
6238 const void *buf, size_t buflen,
6239 int flags, int type)
6240 {
6241 if (security_ismaclabel(key))
6242 return nfs4_set_security_label(dentry, buf, buflen);
6243
6244 return -EOPNOTSUPP;
6245 }
6246
6247 static int nfs4_xattr_get_nfs4_label(struct dentry *dentry, const char *key,
6248 void *buf, size_t buflen, int type)
6249 {
6250 if (security_ismaclabel(key))
6251 return nfs4_get_security_label(d_inode(dentry), buf, buflen);
6252 return -EOPNOTSUPP;
6253 }
6254
6255 static size_t nfs4_xattr_list_nfs4_label(struct dentry *dentry, char *list,
6256 size_t list_len, const char *name,
6257 size_t name_len, int type)
6258 {
6259 size_t len = 0;
6260
6261 if (nfs_server_capable(d_inode(dentry), NFS_CAP_SECURITY_LABEL)) {
6262 len = security_inode_listsecurity(d_inode(dentry), NULL, 0);
6263 if (list && len <= list_len)
6264 security_inode_listsecurity(d_inode(dentry), list, len);
6265 }
6266 return len;
6267 }
6268
6269 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = {
6270 .prefix = XATTR_SECURITY_PREFIX,
6271 .list = nfs4_xattr_list_nfs4_label,
6272 .get = nfs4_xattr_get_nfs4_label,
6273 .set = nfs4_xattr_set_nfs4_label,
6274 };
6275 #endif
6276
6277
6278 /*
6279 * nfs_fhget will use either the mounted_on_fileid or the fileid
6280 */
6281 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
6282 {
6283 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
6284 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
6285 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
6286 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
6287 return;
6288
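/* The referral point has no real attributes of its own, so fake up
 * enough of a directory for nfs_fhget() to instantiate an inode. */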
6289 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
6290 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
6291 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
6292 fattr->nlink = 2;
6293 }
6294
6295 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6296 const struct qstr *name,
6297 struct nfs4_fs_locations *fs_locations,
6298 struct page *page)
6299 {
6300 struct nfs_server *server = NFS_SERVER(dir);
6301 u32 bitmask[3] = {
6302 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6303 };
6304 struct nfs4_fs_locations_arg args = {
6305 .dir_fh = NFS_FH(dir),
6306 .name = name,
6307 .page = page,
6308 .bitmask = bitmask,
6309 };
6310 struct nfs4_fs_locations_res res = {
6311 .fs_locations = fs_locations,
6312 };
6313 struct rpc_message msg = {
6314 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6315 .rpc_argp = &args,
6316 .rpc_resp = &res,
6317 };
6318 int status;
6319
6320 dprintk("%s: start\n", __func__);
6321
6322 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
6323 * is not supported */
6324 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
6325 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
6326 else
6327 bitmask[0] |= FATTR4_WORD0_FILEID;
6328
6329 nfs_fattr_init(&fs_locations->fattr);
6330 fs_locations->server = server;
6331 fs_locations->nlocations = 0;
6332 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
6333 dprintk("%s: returned status = %d\n", __func__, status);
6334 return status;
6335 }
6336
6337 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6338 const struct qstr *name,
6339 struct nfs4_fs_locations *fs_locations,
6340 struct page *page)
6341 {
6342 struct nfs4_exception exception = { };
6343 int err;
6344 do {
6345 err = _nfs4_proc_fs_locations(client, dir, name,
6346 fs_locations, page);
6347 trace_nfs4_get_fs_locations(dir, name, err);
6348 err = nfs4_handle_exception(NFS_SERVER(dir), err,
6349 &exception);
6350 } while (exception.retry);
6351 return err;
6352 }
6353
6354 /*
6355 * This operation also signals the server that this client is
6356 * performing migration recovery. The server can stop returning
6357 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is
6358 * appended to this compound to identify the client ID which is
6359 * performing recovery.
6360 */
6361 static int _nfs40_proc_get_locations(struct inode *inode,
6362 struct nfs4_fs_locations *locations,
6363 struct page *page, struct rpc_cred *cred)
6364 {
6365 struct nfs_server *server = NFS_SERVER(inode);
6366 struct rpc_clnt *clnt = server->client;
6367 u32 bitmask[2] = {
6368 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6369 };
6370 struct nfs4_fs_locations_arg args = {
6371 .clientid = server->nfs_client->cl_clientid,
6372 .fh = NFS_FH(inode),
6373 .page = page,
6374 .bitmask = bitmask,
6375 .migration = 1, /* skip LOOKUP */
6376 .renew = 1, /* append RENEW */
6377 };
6378 struct nfs4_fs_locations_res res = {
6379 .fs_locations = locations,
6380 .migration = 1,
6381 .renew = 1,
6382 };
6383 struct rpc_message msg = {
6384 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6385 .rpc_argp = &args,
6386 .rpc_resp = &res,
6387 .rpc_cred = cred,
6388 };
6389 unsigned long now = jiffies;
6390 int status;
6391
6392 nfs_fattr_init(&locations->fattr);
6393 locations->server = server;
6394 locations->nlocations = 0;
6395
6396 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6397 nfs4_set_sequence_privileged(&args.seq_args);
6398 status = nfs4_call_sync_sequence(clnt, server, &msg,
6399 &args.seq_args, &res.seq_res);
6400 if (status)
6401 return status;
6402
6403 renew_lease(server, now);
6404 return 0;
6405 }
6406
6407 #ifdef CONFIG_NFS_V4_1
6408
6409 /*
6410 * This operation also signals the server that this client is
6411 * performing migration recovery. The server can stop asserting
6412 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID
6413 * performing this operation is identified in the SEQUENCE
6414 * operation in this compound.
6415 *
6416 * When the client supports GETATTR(fs_locations_info), it can
6417 * be plumbed in here.
6418 */
6419 static int _nfs41_proc_get_locations(struct inode *inode,
6420 struct nfs4_fs_locations *locations,
6421 struct page *page, struct rpc_cred *cred)
6422 {
6423 struct nfs_server *server = NFS_SERVER(inode);
6424 struct rpc_clnt *clnt = server->client;
6425 u32 bitmask[2] = {
6426 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6427 };
6428 struct nfs4_fs_locations_arg args = {
6429 .fh = NFS_FH(inode),
6430 .page = page,
6431 .bitmask = bitmask,
6432 .migration = 1, /* skip LOOKUP */
6433 };
6434 struct nfs4_fs_locations_res res = {
6435 .fs_locations = locations,
6436 .migration = 1,
6437 };
6438 struct rpc_message msg = {
6439 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6440 .rpc_argp = &args,
6441 .rpc_resp = &res,
6442 .rpc_cred = cred,
6443 };
6444 int status;
6445
6446 nfs_fattr_init(&locations->fattr);
6447 locations->server = server;
6448 locations->nlocations = 0;
6449
6450 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6451 nfs4_set_sequence_privileged(&args.seq_args);
6452 status = nfs4_call_sync_sequence(clnt, server, &msg,
6453 &args.seq_args, &res.seq_res);
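/* If the server is still asserting SEQ4_STATUS_LEASE_MOVED, more
 * migration recovery is needed; report that to the caller. */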
6454 if (status == NFS4_OK &&
6455 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
6456 status = -NFS4ERR_LEASE_MOVED;
6457 return status;
6458 }
6459
6460 #endif /* CONFIG_NFS_V4_1 */
6461
6462 /**
6463 * nfs4_proc_get_locations - discover locations for a migrated FSID
6464 * @inode: inode on FSID that is migrating
6465 * @locations: result of query
6466 * @page: buffer
6467 * @cred: credential to use for this operation
6468 *
6469 * Returns NFS4_OK on success, a negative NFS4ERR status code if the
6470 * operation failed, or a negative errno if a local error occurred.
6471 *
6472 * On success, "locations" is filled in, but if the server has
6473 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
6474 * asserted.
6475 *
6476 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases
6477 * from this client that require migration recovery.
6478 */
6479 int nfs4_proc_get_locations(struct inode *inode,
6480 struct nfs4_fs_locations *locations,
6481 struct page *page, struct rpc_cred *cred)
6482 {
6483 struct nfs_server *server = NFS_SERVER(inode);
6484 struct nfs_client *clp = server->nfs_client;
6485 const struct nfs4_mig_recovery_ops *ops =
6486 clp->cl_mvops->mig_recovery_ops;
6487 struct nfs4_exception exception = { };
6488 int status;
6489
6490 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
6491 (unsigned long long)server->fsid.major,
6492 (unsigned long long)server->fsid.minor,
6493 clp->cl_hostname);
6494 nfs_display_fhandle(NFS_FH(inode), __func__);
6495
6496 do {
6497 status = ops->get_locations(inode, locations, page, cred);
6498 if (status != -NFS4ERR_DELAY)
6499 break;
6500 nfs4_handle_exception(server, status, &exception);
6501 } while (exception.retry);
6502 return status;
6503 }
6504
6505 /*
6506 * This operation also signals the server that this client is
6507 * performing "lease moved" recovery. The server can stop
6508 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation
6509 * is appended to this compound to identify the client ID which is
6510 * performing recovery.
6511 */
6512 static int _nfs40_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6513 {
6514 struct nfs_server *server = NFS_SERVER(inode);
6515 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
6516 struct rpc_clnt *clnt = server->client;
6517 struct nfs4_fsid_present_arg args = {
6518 .fh = NFS_FH(inode),
6519 .clientid = clp->cl_clientid,
6520 .renew = 1, /* append RENEW */
6521 };
6522 struct nfs4_fsid_present_res res = {
6523 .renew = 1,
6524 };
6525 struct rpc_message msg = {
6526 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
6527 .rpc_argp = &args,
6528 .rpc_resp = &res,
6529 .rpc_cred = cred,
6530 };
6531 unsigned long now = jiffies;
6532 int status;
6533
6534 res.fh = nfs_alloc_fhandle();
6535 if (res.fh == NULL)
6536 return -ENOMEM;
6537
6538 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6539 nfs4_set_sequence_privileged(&args.seq_args);
6540 status = nfs4_call_sync_sequence(clnt, server, &msg,
6541 &args.seq_args, &res.seq_res);
6542 nfs_free_fhandle(res.fh);
6543 if (status)
6544 return status;
6545
6546 do_renew_lease(clp, now);
6547 return 0;
6548 }
6549
6550 #ifdef CONFIG_NFS_V4_1
6551
6552 /*
6553 * This operation also signals the server that this client is
6554 * performing "lease moved" recovery. The server can stop asserting
6555 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing
6556 * this operation is identified in the SEQUENCE operation in this
6557 * compound.
6558 */
6559 static int _nfs41_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6560 {
6561 struct nfs_server *server = NFS_SERVER(inode);
6562 struct rpc_clnt *clnt = server->client;
6563 struct nfs4_fsid_present_arg args = {
6564 .fh = NFS_FH(inode),
6565 };
6566 struct nfs4_fsid_present_res res = {
6567 };
6568 struct rpc_message msg = {
6569 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
6570 .rpc_argp = &args,
6571 .rpc_resp = &res,
6572 .rpc_cred = cred,
6573 };
6574 int status;
6575
6576 res.fh = nfs_alloc_fhandle();
6577 if (res.fh == NULL)
6578 return -ENOMEM;
6579
6580 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6581 nfs4_set_sequence_privileged(&args.seq_args);
6582 status = nfs4_call_sync_sequence(clnt, server, &msg,
6583 &args.seq_args, &res.seq_res);
6584 nfs_free_fhandle(res.fh);
6585 if (status == NFS4_OK &&
6586 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
6587 status = -NFS4ERR_LEASE_MOVED;
6588 return status;
6589 }
6590
6591 #endif /* CONFIG_NFS_V4_1 */
6592
6593 /**
6594 * nfs4_proc_fsid_present - Is this FSID present or absent on server?
6595 * @inode: inode on FSID to check
6596 * @cred: credential to use for this operation
6597 *
6598 * Server indicates whether the FSID is present, moved, or not
6599 * recognized. This operation is necessary to clear a LEASE_MOVED
6600 * condition for this client ID.
6601 *
6602 * Returns NFS4_OK if the FSID is present on this server,
6603 * -NFS4ERR_MOVED if the FSID is no longer present, a negative
6604 * NFS4ERR code if some error occurred on the server, or a
6605 * negative errno if a local failure occurred.
6606 */
6607 int nfs4_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6608 {
6609 struct nfs_server *server = NFS_SERVER(inode);
6610 struct nfs_client *clp = server->nfs_client;
6611 const struct nfs4_mig_recovery_ops *ops =
6612 clp->cl_mvops->mig_recovery_ops;
6613 struct nfs4_exception exception = { };
6614 int status;
6615
6616 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
6617 (unsigned long long)server->fsid.major,
6618 (unsigned long long)server->fsid.minor,
6619 clp->cl_hostname);
6620 nfs_display_fhandle(NFS_FH(inode), __func__);
6621
6622 do {
6623 status = ops->fsid_present(inode, cred);
6624 if (status != -NFS4ERR_DELAY)
6625 break;
6626 nfs4_handle_exception(server, status, &exception);
6627 } while (exception.retry);
6628 return status;
6629 }
6630
6631 /*
6632 * If 'use_integrity' is true and the state management nfs_client
6633 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
6634 * and the machine credential as per RFC3530bis and RFC5661 Security
6635 * Considerations sections. Otherwise, just use the user cred with the
6636 * filesystem's rpc_client.
6637 */
6638 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
6639 {
6640 int status;
6641 struct nfs4_secinfo_arg args = {
6642 .dir_fh = NFS_FH(dir),
6643 .name = name,
6644 };
6645 struct nfs4_secinfo_res res = {
6646 .flavors = flavors,
6647 };
6648 struct rpc_message msg = {
6649 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
6650 .rpc_argp = &args,
6651 .rpc_resp = &res,
6652 };
6653 struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
6654 struct rpc_cred *cred = NULL;
6655
6656 if (use_integrity) {
6657 clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient;
6658 cred = nfs4_get_clid_cred(NFS_SERVER(dir)->nfs_client);
6659 msg.rpc_cred = cred;
6660 }
6661
6662 dprintk("NFS call secinfo %s\n", name->name);
6663
6664 nfs4_state_protect(NFS_SERVER(dir)->nfs_client,
6665 NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
6666
6667 status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args,
6668 &res.seq_res, 0);
6669 dprintk("NFS reply secinfo: %d\n", status);
6670
6671 if (cred)
6672 put_rpccred(cred);
6673
6674 return status;
6675 }
6676
6677 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
6678 struct nfs4_secinfo_flavors *flavors)
6679 {
6680 struct nfs4_exception exception = { };
6681 int err;
6682 do {
6683 err = -NFS4ERR_WRONGSEC;
6684
6685 /* try to use integrity protection with machine cred */
6686 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client))
6687 err = _nfs4_proc_secinfo(dir, name, flavors, true);
6688
6689 /*
6690 * if unable to use integrity protection, or SECINFO with
6691 * integrity protection returns NFS4ERR_WRONGSEC (which is
6692 * disallowed by spec, but exists in deployed servers), use
6693 * the current filesystem's rpc_client and the user cred.
6694 */
6695 if (err == -NFS4ERR_WRONGSEC)
6696 err = _nfs4_proc_secinfo(dir, name, flavors, false);
6697
6698 trace_nfs4_secinfo(dir, name, err);
6699 err = nfs4_handle_exception(NFS_SERVER(dir), err,
6700 &exception);
6701 } while (exception.retry);
6702 return err;
6703 }
6704
6705 #ifdef CONFIG_NFS_V4_1
6706 /*
6707 * Check the exchange flags returned by the server for invalid flags: having
6708 * both the PNFS and NON_PNFS flags set, or not having at least one of the
6709 * NON_PNFS, PNFS, or DS flags set.
6710 */
6711 static int nfs4_check_cl_exchange_flags(u32 flags)
6712 {
6713 if (flags & ~EXCHGID4_FLAG_MASK_R)
6714 goto out_inval;
6715 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
6716 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
6717 goto out_inval;
6718 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
6719 goto out_inval;
6720 return NFS_OK;
6721 out_inval:
6722 return -NFS4ERR_INVAL;
6723 }
6724
6725 static bool
6726 nfs41_same_server_scope(struct nfs41_server_scope *a,
6727 struct nfs41_server_scope *b)
6728 {
6729 if (a->server_scope_sz == b->server_scope_sz &&
6730 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
6731 return true;
6732
6733 return false;
6734 }
6735
6736 /*
6737 * nfs4_proc_bind_conn_to_session()
6738 *
6739 * The 4.1 client currently uses the same TCP connection for the
6740 * fore and backchannel.
6741 */
6742 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
6743 {
6744 int status;
6745 struct nfs41_bind_conn_to_session_args args = {
6746 .client = clp,
6747 .dir = NFS4_CDFC4_FORE_OR_BOTH,
6748 };
6749 struct nfs41_bind_conn_to_session_res res;
6750 struct rpc_message msg = {
6751 .rpc_proc =
6752 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
6753 .rpc_argp = &args,
6754 .rpc_resp = &res,
6755 .rpc_cred = cred,
6756 };
6757
6758 dprintk("--> %s\n", __func__);
6759
6760 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
6761 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
6762 args.dir = NFS4_CDFC4_FORE;
6763
6764 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
6765 trace_nfs4_bind_conn_to_session(clp, status);
6766 if (status == 0) {
6767 if (memcmp(res.sessionid.data,
6768 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
6769 dprintk("NFS: %s: Session ID mismatch\n", __func__);
6770 status = -EIO;
6771 goto out;
6772 }
6773 if ((res.dir & args.dir) != res.dir || res.dir == 0) {
6774 dprintk("NFS: %s: Unexpected direction from server\n",
6775 __func__);
6776 status = -EIO;
6777 goto out;
6778 }
6779 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
6780 dprintk("NFS: %s: Server returned RDMA mode = true\n",
6781 __func__);
6782 status = -EIO;
6783 goto out;
6784 }
6785 }
6786 out:
6787 dprintk("<-- %s status= %d\n", __func__, status);
6788 return status;
6789 }
6790
6791 /*
6792 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map
6793 * and operations we'd like to see to enable certain features in the allow map
6794 */
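/*
 * Each u32 in the op map covers 32 operation numbers, so operations
 * numbered 32 and above are encoded in word [1] with a "- 32" offset.
 */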
6795 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = {
6796 .how = SP4_MACH_CRED,
6797 .enforce.u.words = {
6798 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
6799 1 << (OP_EXCHANGE_ID - 32) |
6800 1 << (OP_CREATE_SESSION - 32) |
6801 1 << (OP_DESTROY_SESSION - 32) |
6802 1 << (OP_DESTROY_CLIENTID - 32)
6803 },
6804 .allow.u.words = {
6805 [0] = 1 << (OP_CLOSE) |
6806 1 << (OP_LOCKU) |
6807 1 << (OP_COMMIT),
6808 [1] = 1 << (OP_SECINFO - 32) |
6809 1 << (OP_SECINFO_NO_NAME - 32) |
6810 1 << (OP_TEST_STATEID - 32) |
6811 1 << (OP_FREE_STATEID - 32) |
6812 1 << (OP_WRITE - 32)
6813 }
6814 };
6815
6816 /*
6817 * Select the state protection mode for client `clp' given the server results
6818 * from exchange_id in `sp'.
6819 *
6820 * Returns 0 on success, negative errno otherwise.
6821 */
6822 static int nfs4_sp4_select_mode(struct nfs_client *clp,
6823 struct nfs41_state_protection *sp)
6824 {
6825 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = {
6826 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
6827 1 << (OP_EXCHANGE_ID - 32) |
6828 1 << (OP_CREATE_SESSION - 32) |
6829 1 << (OP_DESTROY_SESSION - 32) |
6830 1 << (OP_DESTROY_CLIENTID - 32)
6831 };
6832 unsigned int i;
6833
6834 if (sp->how == SP4_MACH_CRED) {
6835 /* Print state protect result */
6836 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n");
6837 for (i = 0; i <= LAST_NFS4_OP; i++) {
6838 if (test_bit(i, sp->enforce.u.longs))
6839 dfprintk(MOUNT, " enforce op %d\n", i);
6840 if (test_bit(i, sp->allow.u.longs))
6841 dfprintk(MOUNT, " allow op %d\n", i);
6842 }
6843
6844 /* make sure nothing is on the enforce list that isn't supported */
6845 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) {
6846 if (sp->enforce.u.words[i] & ~supported_enforce[i]) {
6847 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
6848 return -EINVAL;
6849 }
6850 }
6851
6852 /*
6853 * Minimal mode - state operations are allowed to use machine
6854 * credential. Note this already happens by default, so the
6855 * client doesn't have to do anything more than the negotiation.
6856 *
6857 * NOTE: we don't care if EXCHANGE_ID is in the list -
6858 * we're already using the machine cred for exchange_id
6859 * and will never use a different cred.
6860 */
6861 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) &&
6862 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) &&
6863 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) &&
6864 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) {
6865 dfprintk(MOUNT, "sp4_mach_cred:\n");
6866 dfprintk(MOUNT, " minimal mode enabled\n");
6867 set_bit(NFS_SP4_MACH_CRED_MINIMAL, &clp->cl_sp4_flags);
6868 } else {
6869 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
6870 return -EINVAL;
6871 }
6872
6873 if (test_bit(OP_CLOSE, sp->allow.u.longs) &&
6874 test_bit(OP_LOCKU, sp->allow.u.longs)) {
6875 dfprintk(MOUNT, " cleanup mode enabled\n");
6876 set_bit(NFS_SP4_MACH_CRED_CLEANUP, &clp->cl_sp4_flags);
6877 }
6878
6879 if (test_bit(OP_SECINFO, sp->allow.u.longs) &&
6880 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) {
6881 dfprintk(MOUNT, " secinfo mode enabled\n");
6882 set_bit(NFS_SP4_MACH_CRED_SECINFO, &clp->cl_sp4_flags);
6883 }
6884
6885 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) &&
6886 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) {
6887 dfprintk(MOUNT, " stateid mode enabled\n");
6888 set_bit(NFS_SP4_MACH_CRED_STATEID, &clp->cl_sp4_flags);
6889 }
6890
6891 if (test_bit(OP_WRITE, sp->allow.u.longs)) {
6892 dfprintk(MOUNT, " write mode enabled\n");
6893 set_bit(NFS_SP4_MACH_CRED_WRITE, &clp->cl_sp4_flags);
6894 }
6895
6896 if (test_bit(OP_COMMIT, sp->allow.u.longs)) {
6897 dfprintk(MOUNT, " commit mode enabled\n");
6898 set_bit(NFS_SP4_MACH_CRED_COMMIT, &clp->cl_sp4_flags);
6899 }
6900 }
6901
6902 return 0;
6903 }
6904
6905 /*
6906 * _nfs4_proc_exchange_id()
6907 *
6908 * Wrapper for EXCHANGE_ID operation.
6909 */
6910 static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
6911 u32 sp4_how)
6912 {
6913 nfs4_verifier verifier;
6914 struct nfs41_exchange_id_args args = {
6915 .verifier = &verifier,
6916 .client = clp,
6917 #ifdef CONFIG_NFS_V4_1_MIGRATION
6918 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
6919 EXCHGID4_FLAG_BIND_PRINC_STATEID |
6920 EXCHGID4_FLAG_SUPP_MOVED_MIGR,
6921 #else
6922 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
6923 EXCHGID4_FLAG_BIND_PRINC_STATEID,
6924 #endif
6925 };
6926 struct nfs41_exchange_id_res res = {
6927 0
6928 };
6929 int status;
6930 struct rpc_message msg = {
6931 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
6932 .rpc_argp = &args,
6933 .rpc_resp = &res,
6934 .rpc_cred = cred,
6935 };
6936
6937 nfs4_init_boot_verifier(clp, &verifier);
6938
6939 status = nfs4_init_uniform_client_string(clp);
6940 if (status)
6941 goto out;
6942
6943 dprintk("NFS call exchange_id auth=%s, '%s'\n",
6944 clp->cl_rpcclient->cl_auth->au_ops->au_name,
6945 clp->cl_owner_id);
6946
6947 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
6948 GFP_NOFS);
6949 if (unlikely(res.server_owner == NULL)) {
6950 status = -ENOMEM;
6951 goto out;
6952 }
6953
6954 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
6955 GFP_NOFS);
6956 if (unlikely(res.server_scope == NULL)) {
6957 status = -ENOMEM;
6958 goto out_server_owner;
6959 }
6960
6961 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
6962 if (unlikely(res.impl_id == NULL)) {
6963 status = -ENOMEM;
6964 goto out_server_scope;
6965 }
6966
6967 switch (sp4_how) {
6968 case SP4_NONE:
6969 args.state_protect.how = SP4_NONE;
6970 break;
6971
6972 case SP4_MACH_CRED:
6973 args.state_protect = nfs4_sp4_mach_cred_request;
6974 break;
6975
6976 default:
6977 /* unsupported! */
6978 WARN_ON_ONCE(1);
6979 status = -EINVAL;
6980 goto out_impl_id;
6981 }
6982
6983 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
6984 trace_nfs4_exchange_id(clp, status);
6985 if (status == 0)
6986 status = nfs4_check_cl_exchange_flags(res.flags);
6987
6988 if (status == 0)
6989 status = nfs4_sp4_select_mode(clp, &res.state_protect);
6990
6991 if (status == 0) {
6992 clp->cl_clientid = res.clientid;
6993 clp->cl_exchange_flags = res.flags;
6994 /* Client ID is not confirmed */
6995 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) {
6996 clear_bit(NFS4_SESSION_ESTABLISHED,
6997 &clp->cl_session->session_state);
6998 clp->cl_seqid = res.seqid;
6999 }
7000
7001 kfree(clp->cl_serverowner);
7002 clp->cl_serverowner = res.server_owner;
7003 res.server_owner = NULL;
7004
7005 /* use the most recent implementation id */
7006 kfree(clp->cl_implid);
7007 clp->cl_implid = res.impl_id;
7008 res.impl_id = NULL;
7009
7010 if (clp->cl_serverscope != NULL &&
7011 !nfs41_same_server_scope(clp->cl_serverscope,
7012 res.server_scope)) {
7013 dprintk("%s: server_scope mismatch detected\n",
7014 __func__);
7015 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
7016 kfree(clp->cl_serverscope);
7017 clp->cl_serverscope = NULL;
7018 }
7019
7020 if (clp->cl_serverscope == NULL) {
7021 clp->cl_serverscope = res.server_scope;
7022 res.server_scope = NULL;
7023 }
7024 }
7025
7026 out_impl_id:
7027 kfree(res.impl_id);
7028 out_server_scope:
7029 kfree(res.server_scope);
7030 out_server_owner:
7031 kfree(res.server_owner);
7032 out:
7033 if (clp->cl_implid != NULL)
7034 dprintk("NFS reply exchange_id: Server Implementation ID: "
7035 "domain: %s, name: %s, date: %llu,%u\n",
7036 clp->cl_implid->domain, clp->cl_implid->name,
7037 clp->cl_implid->date.seconds,
7038 clp->cl_implid->date.nseconds);
7039 dprintk("NFS reply exchange_id: %d\n", status);
7040 return status;
7041 }
7042
7043 /*
7044 * nfs4_proc_exchange_id()
7045 *
7046 * Returns zero, a negative errno, or a negative NFS4ERR status code.
7047 *
7048 * Since the clientid has expired, all compounds using sessions
7049 * associated with the stale clientid will be returning
7050 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
7051 * be in some phase of session reset.
7052 *
7053 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
7054 */
7055 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
7056 {
7057 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
7058 int status;
7059
7060 /* try SP4_MACH_CRED if krb5i/p */
7061 if (authflavor == RPC_AUTH_GSS_KRB5I ||
7062 authflavor == RPC_AUTH_GSS_KRB5P) {
7063 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
7064 if (!status)
7065 return 0;
7066 }
7067
7068 /* try SP4_NONE */
7069 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
7070 }
7071
7072 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
7073 struct rpc_cred *cred)
7074 {
7075 struct rpc_message msg = {
7076 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
7077 .rpc_argp = clp,
7078 .rpc_cred = cred,
7079 };
7080 int status;
7081
7082 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7083 trace_nfs4_destroy_clientid(clp, status);
7084 if (status)
7085 dprintk("NFS: Got error %d from the server %s on "
7086 "DESTROY_CLIENTID.", status, clp->cl_hostname);
7087 return status;
7088 }
7089
7090 static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
7091 struct rpc_cred *cred)
7092 {
7093 unsigned int loop;
7094 int ret;
7095
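/* Retry a bounded number of times while the server asks us to back
 * off or reports that the clientid still has state in use. */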
7096 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
7097 ret = _nfs4_proc_destroy_clientid(clp, cred);
7098 switch (ret) {
7099 case -NFS4ERR_DELAY:
7100 case -NFS4ERR_CLIENTID_BUSY:
7101 ssleep(1);
7102 break;
7103 default:
7104 return ret;
7105 }
7106 }
7107 return 0;
7108 }
7109
7110 int nfs4_destroy_clientid(struct nfs_client *clp)
7111 {
7112 struct rpc_cred *cred;
7113 int ret = 0;
7114
7115 if (clp->cl_mvops->minor_version < 1)
7116 goto out;
7117 if (clp->cl_exchange_flags == 0)
7118 goto out;
7119 if (clp->cl_preserve_clid)
7120 goto out;
7121 cred = nfs4_get_clid_cred(clp);
7122 ret = nfs4_proc_destroy_clientid(clp, cred);
7123 if (cred)
7124 put_rpccred(cred);
7125 switch (ret) {
7126 case 0:
7127 case -NFS4ERR_STALE_CLIENTID:
7128 clp->cl_exchange_flags = 0;
7129 }
7130 out:
7131 return ret;
7132 }
7133
7134 struct nfs4_get_lease_time_data {
7135 struct nfs4_get_lease_time_args *args;
7136 struct nfs4_get_lease_time_res *res;
7137 struct nfs_client *clp;
7138 };
7139
7140 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
7141 void *calldata)
7142 {
7143 struct nfs4_get_lease_time_data *data =
7144 (struct nfs4_get_lease_time_data *)calldata;
7145
7146 dprintk("--> %s\n", __func__);
7147 /* just set up the sequence; do not trigger session recovery,
7148 since we're invoked from within one */
7149 nfs41_setup_sequence(data->clp->cl_session,
7150 &data->args->la_seq_args,
7151 &data->res->lr_seq_res,
7152 task);
7153 dprintk("<-- %s\n", __func__);
7154 }
7155
7156 /*
7157 * Called from nfs4_state_manager thread for session setup, so don't recover
7158 * from sequence operation or clientid errors.
7159 */
7160 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
7161 {
7162 struct nfs4_get_lease_time_data *data =
7163 (struct nfs4_get_lease_time_data *)calldata;
7164
7165 dprintk("--> %s\n", __func__);
7166 if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
7167 return;
7168 switch (task->tk_status) {
7169 case -NFS4ERR_DELAY:
7170 case -NFS4ERR_GRACE:
7171 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
7172 rpc_delay(task, NFS4_POLL_RETRY_MIN);
7173 task->tk_status = 0;
7174 /* fall through */
7175 case -NFS4ERR_RETRY_UNCACHED_REP:
7176 rpc_restart_call_prepare(task);
7177 return;
7178 }
7179 dprintk("<-- %s\n", __func__);
7180 }
7181
7182 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
7183 .rpc_call_prepare = nfs4_get_lease_time_prepare,
7184 .rpc_call_done = nfs4_get_lease_time_done,
7185 };
7186
7187 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
7188 {
7189 struct rpc_task *task;
7190 struct nfs4_get_lease_time_args args;
7191 struct nfs4_get_lease_time_res res = {
7192 .lr_fsinfo = fsinfo,
7193 };
7194 struct nfs4_get_lease_time_data data = {
7195 .args = &args,
7196 .res = &res,
7197 .clp = clp,
7198 };
7199 struct rpc_message msg = {
7200 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
7201 .rpc_argp = &args,
7202 .rpc_resp = &res,
7203 };
7204 struct rpc_task_setup task_setup = {
7205 .rpc_client = clp->cl_rpcclient,
7206 .rpc_message = &msg,
7207 .callback_ops = &nfs4_get_lease_time_ops,
7208 .callback_data = &data,
7209 .flags = RPC_TASK_TIMEOUT,
7210 };
7211 int status;
7212
7213 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
7214 nfs4_set_sequence_privileged(&args.la_seq_args);
7215 dprintk("--> %s\n", __func__);
7216 task = rpc_run_task(&task_setup);
7217
7218 if (IS_ERR(task))
7219 status = PTR_ERR(task);
7220 else {
7221 status = task->tk_status;
7222 rpc_put_task(task);
7223 }
7224 dprintk("<-- %s return %d\n", __func__, status);
7225
7226 return status;
7227 }
7228
7229 /*
7230 * Initialize the values to be used by the client in CREATE_SESSION.
7231 * If nfs4_init_session set the fore channel request and response sizes,
7232 * use them.
7233 *
7234 * Set the back channel max_resp_sz_cached to zero to force the client to
7235 * always set csa_cachethis to FALSE because the current implementation
7236 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
7237 */
7238 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
7239 {
7240 unsigned int max_rqst_sz, max_resp_sz;
7241
7242 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
7243 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
7244
7245 /* Fore channel attributes */
7246 args->fc_attrs.max_rqst_sz = max_rqst_sz;
7247 args->fc_attrs.max_resp_sz = max_resp_sz;
7248 args->fc_attrs.max_ops = NFS4_MAX_OPS;
7249 args->fc_attrs.max_reqs = max_session_slots;
7250
7251 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
7252 "max_ops=%u max_reqs=%u\n",
7253 __func__,
7254 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
7255 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
7256
7257 /* Back channel attributes */
7258 args->bc_attrs.max_rqst_sz = PAGE_SIZE;
7259 args->bc_attrs.max_resp_sz = PAGE_SIZE;
7260 args->bc_attrs.max_resp_sz_cached = 0;
7261 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
7262 args->bc_attrs.max_reqs = 1;
7263
7264 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
7265 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
7266 __func__,
7267 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
7268 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
7269 args->bc_attrs.max_reqs);
7270 }
7271
7272 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
7273 struct nfs41_create_session_res *res)
7274 {
7275 struct nfs4_channel_attrs *sent = &args->fc_attrs;
7276 struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
7277
7278 if (rcvd->max_resp_sz > sent->max_resp_sz)
7279 return -EINVAL;
7280 /*
7281 * Our requested max_ops is the minimum we need; we're not
7282 * prepared to break up compounds into smaller pieces than that.
7283 * So, no point even trying to continue if the server won't
7284 * cooperate:
7285 */
7286 if (rcvd->max_ops < sent->max_ops)
7287 return -EINVAL;
7288 if (rcvd->max_reqs == 0)
7289 return -EINVAL;
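/* The server may offer more slots than we are prepared to use;
 * clamp to the client's maximum slot table size. */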
7290 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
7291 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
7292 return 0;
7293 }
7294
7295 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
7296 struct nfs41_create_session_res *res)
7297 {
7298 struct nfs4_channel_attrs *sent = &args->bc_attrs;
7299 struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
7300
7301 if (!(res->flags & SESSION4_BACK_CHAN))
7302 goto out;
7303 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
7304 return -EINVAL;
7305 if (rcvd->max_resp_sz < sent->max_resp_sz)
7306 return -EINVAL;
7307 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
7308 return -EINVAL;
7309 /* These would render the backchannel useless: */
7310 if (rcvd->max_ops != sent->max_ops)
7311 return -EINVAL;
7312 if (rcvd->max_reqs != sent->max_reqs)
7313 return -EINVAL;
7314 out:
7315 return 0;
7316 }
7317
7318 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
7319 struct nfs41_create_session_res *res)
7320 {
7321 int ret;
7322
7323 ret = nfs4_verify_fore_channel_attrs(args, res);
7324 if (ret)
7325 return ret;
7326 return nfs4_verify_back_channel_attrs(args, res);
7327 }
7328
7329 static void nfs4_update_session(struct nfs4_session *session,
7330 struct nfs41_create_session_res *res)
7331 {
7332 nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
7333 /* Mark client id and session as being confirmed */
7334 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
7335 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
7336 session->flags = res->flags;
7337 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
7338 if (res->flags & SESSION4_BACK_CHAN)
7339 memcpy(&session->bc_attrs, &res->bc_attrs,
7340 sizeof(session->bc_attrs));
7341 }
7342
7343 static int _nfs4_proc_create_session(struct nfs_client *clp,
7344 struct rpc_cred *cred)
7345 {
7346 struct nfs4_session *session = clp->cl_session;
7347 struct nfs41_create_session_args args = {
7348 .client = clp,
7349 .clientid = clp->cl_clientid,
7350 .seqid = clp->cl_seqid,
7351 .cb_program = NFS4_CALLBACK,
7352 };
7353 struct nfs41_create_session_res res;
7354
7355 struct rpc_message msg = {
7356 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
7357 .rpc_argp = &args,
7358 .rpc_resp = &res,
7359 .rpc_cred = cred,
7360 };
7361 int status;
7362
7363 nfs4_init_channel_attrs(&args);
7364 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
7365
7366 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7367 trace_nfs4_create_session(clp, status);
7368
7369 if (!status) {
7370 /* Verify the session's negotiated channel_attrs values */
7371 status = nfs4_verify_channel_attrs(&args, &res);
7372 /* Increment the clientid slot sequence id */
7373 if (clp->cl_seqid == res.seqid)
7374 clp->cl_seqid++;
7375 if (status)
7376 goto out;
7377 nfs4_update_session(session, &res);
7378 }
7379 out:
7380 return status;
7381 }
7382
7383 /*
7384 * Issues a CREATE_SESSION operation to the server.
7385 * It is the responsibility of the caller to verify the session is
7386 * expired before calling this routine.
7387 */
7388 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
7389 {
7390 int status;
7391 unsigned *ptr;
7392 struct nfs4_session *session = clp->cl_session;
7393
7394 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
7395
7396 status = _nfs4_proc_create_session(clp, cred);
7397 if (status)
7398 goto out;
7399
7400 /* Init or reset the session slot tables */
7401 status = nfs4_setup_session_slot_tables(session);
7402 dprintk("slot table setup returned %d\n", status);
7403 if (status)
7404 goto out;
7405
7406 ptr = (unsigned *)&session->sess_id.data[0];
7407 	dprintk("%s clp->cl_seqid %d sessionid %u:%u:%u:%u\n", __func__,
7408 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
7409 out:
7410 dprintk("<-- %s\n", __func__);
7411 return status;
7412 }
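/*
 * A minimal sketch of how a caller might drive session establishment
 * (illustrative only; the credential source and error handling are
 * assumptions, not taken from this file):
 *
 *	cred = nfs4_get_clid_cred(clp);
 *	status = nfs4_proc_create_session(clp, cred);
 *	if (cred != NULL)
 *		put_rpccred(cred);
 *	if (status != 0)
 *		nfs4_schedule_lease_recovery(clp);
 */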
7413
7414 /*
7415 * Issue the over-the-wire RPC DESTROY_SESSION.
7416 * The caller must serialize access to this routine.
7417 */
7418 int nfs4_proc_destroy_session(struct nfs4_session *session,
7419 struct rpc_cred *cred)
7420 {
7421 struct rpc_message msg = {
7422 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
7423 .rpc_argp = session,
7424 .rpc_cred = cred,
7425 };
7426 int status = 0;
7427
7428 dprintk("--> nfs4_proc_destroy_session\n");
7429
7430 	/* session is still being set up */
7431 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
7432 return 0;
7433
7434 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7435 trace_nfs4_destroy_session(session->clp, status);
7436
7437 if (status)
7438 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
7439 "Session has been destroyed regardless...\n", status);
7440
7441 dprintk("<-- nfs4_proc_destroy_session\n");
7442 return status;
7443 }
7444
7445 /*
7446 * Renew the cl_session lease.
7447 */
7448 struct nfs4_sequence_data {
7449 struct nfs_client *clp;
7450 struct nfs4_sequence_args args;
7451 struct nfs4_sequence_res res;
7452 };
7453
7454 static void nfs41_sequence_release(void *data)
7455 {
7456 struct nfs4_sequence_data *calldata = data;
7457 struct nfs_client *clp = calldata->clp;
7458
7459 if (atomic_read(&clp->cl_count) > 1)
7460 nfs4_schedule_state_renewal(clp);
7461 nfs_put_client(clp);
7462 kfree(calldata);
7463 }
7464
7465 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
7466 {
7467 switch(task->tk_status) {
7468 case -NFS4ERR_DELAY:
7469 rpc_delay(task, NFS4_POLL_RETRY_MAX);
7470 return -EAGAIN;
7471 default:
7472 nfs4_schedule_lease_recovery(clp);
7473 }
7474 return 0;
7475 }
7476
7477 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
7478 {
7479 struct nfs4_sequence_data *calldata = data;
7480 struct nfs_client *clp = calldata->clp;
7481
7482 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
7483 return;
7484
7485 trace_nfs4_sequence(clp, task->tk_status);
7486 if (task->tk_status < 0) {
7487 dprintk("%s ERROR %d\n", __func__, task->tk_status);
7488 if (atomic_read(&clp->cl_count) == 1)
7489 goto out;
7490
7491 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
7492 rpc_restart_call_prepare(task);
7493 return;
7494 }
7495 }
7496 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
7497 out:
7498 dprintk("<-- %s\n", __func__);
7499 }
7500
7501 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
7502 {
7503 struct nfs4_sequence_data *calldata = data;
7504 struct nfs_client *clp = calldata->clp;
7505 struct nfs4_sequence_args *args;
7506 struct nfs4_sequence_res *res;
7507
7508 args = task->tk_msg.rpc_argp;
7509 res = task->tk_msg.rpc_resp;
7510
7511 nfs41_setup_sequence(clp->cl_session, args, res, task);
7512 }
7513
7514 static const struct rpc_call_ops nfs41_sequence_ops = {
7515 .rpc_call_done = nfs41_sequence_call_done,
7516 .rpc_call_prepare = nfs41_sequence_prepare,
7517 .rpc_release = nfs41_sequence_release,
7518 };
7519
7520 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
7521 struct rpc_cred *cred,
7522 bool is_privileged)
7523 {
7524 struct nfs4_sequence_data *calldata;
7525 struct rpc_message msg = {
7526 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
7527 .rpc_cred = cred,
7528 };
7529 struct rpc_task_setup task_setup_data = {
7530 .rpc_client = clp->cl_rpcclient,
7531 .rpc_message = &msg,
7532 .callback_ops = &nfs41_sequence_ops,
7533 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
7534 };
7535
7536 if (!atomic_inc_not_zero(&clp->cl_count))
7537 return ERR_PTR(-EIO);
7538 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7539 if (calldata == NULL) {
7540 nfs_put_client(clp);
7541 return ERR_PTR(-ENOMEM);
7542 }
7543 nfs4_init_sequence(&calldata->args, &calldata->res, 0);
7544 if (is_privileged)
7545 nfs4_set_sequence_privileged(&calldata->args);
7546 msg.rpc_argp = &calldata->args;
7547 msg.rpc_resp = &calldata->res;
7548 calldata->clp = clp;
7549 task_setup_data.callback_data = calldata;
7550
7551 return rpc_run_task(&task_setup_data);
7552 }
7553
7554 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
7555 {
7556 struct rpc_task *task;
7557 int ret = 0;
7558
7559 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
7560 return -EAGAIN;
7561 task = _nfs41_proc_sequence(clp, cred, false);
7562 if (IS_ERR(task))
7563 ret = PTR_ERR(task);
7564 else
7565 rpc_put_task_async(task);
7566 dprintk("<-- %s status=%d\n", __func__, ret);
7567 return ret;
7568 }
7569
7570 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
7571 {
7572 struct rpc_task *task;
7573 int ret;
7574
7575 task = _nfs41_proc_sequence(clp, cred, true);
7576 if (IS_ERR(task)) {
7577 ret = PTR_ERR(task);
7578 goto out;
7579 }
7580 ret = rpc_wait_for_completion_task(task);
7581 if (!ret)
7582 ret = task->tk_status;
7583 rpc_put_task(task);
7584 out:
7585 dprintk("<-- %s status=%d\n", __func__, ret);
7586 return ret;
7587 }
7588
7589 struct nfs4_reclaim_complete_data {
7590 struct nfs_client *clp;
7591 struct nfs41_reclaim_complete_args arg;
7592 struct nfs41_reclaim_complete_res res;
7593 };
7594
7595 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
7596 {
7597 struct nfs4_reclaim_complete_data *calldata = data;
7598
7599 nfs41_setup_sequence(calldata->clp->cl_session,
7600 &calldata->arg.seq_args,
7601 &calldata->res.seq_res,
7602 task);
7603 }
7604
7605 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
7606 {
7607 switch(task->tk_status) {
7608 case 0:
7609 case -NFS4ERR_COMPLETE_ALREADY:
7610 case -NFS4ERR_WRONG_CRED: /* What to do here? */
7611 break;
7612 case -NFS4ERR_DELAY:
7613 rpc_delay(task, NFS4_POLL_RETRY_MAX);
7614 /* fall through */
7615 case -NFS4ERR_RETRY_UNCACHED_REP:
7616 return -EAGAIN;
7617 default:
7618 nfs4_schedule_lease_recovery(clp);
7619 }
7620 return 0;
7621 }
7622
7623 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
7624 {
7625 struct nfs4_reclaim_complete_data *calldata = data;
7626 struct nfs_client *clp = calldata->clp;
7627 struct nfs4_sequence_res *res = &calldata->res.seq_res;
7628
7629 dprintk("--> %s\n", __func__);
7630 if (!nfs41_sequence_done(task, res))
7631 return;
7632
7633 trace_nfs4_reclaim_complete(clp, task->tk_status);
7634 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
7635 rpc_restart_call_prepare(task);
7636 return;
7637 }
7638 dprintk("<-- %s\n", __func__);
7639 }
7640
7641 static void nfs4_free_reclaim_complete_data(void *data)
7642 {
7643 struct nfs4_reclaim_complete_data *calldata = data;
7644
7645 kfree(calldata);
7646 }
7647
7648 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
7649 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
7650 .rpc_call_done = nfs4_reclaim_complete_done,
7651 .rpc_release = nfs4_free_reclaim_complete_data,
7652 };
7653
7654 /*
7655 * Issue a global reclaim complete.
7656 */
7657 static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
7658 struct rpc_cred *cred)
7659 {
7660 struct nfs4_reclaim_complete_data *calldata;
7661 struct rpc_task *task;
7662 struct rpc_message msg = {
7663 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
7664 .rpc_cred = cred,
7665 };
7666 struct rpc_task_setup task_setup_data = {
7667 .rpc_client = clp->cl_rpcclient,
7668 .rpc_message = &msg,
7669 .callback_ops = &nfs4_reclaim_complete_call_ops,
7670 .flags = RPC_TASK_ASYNC,
7671 };
7672 int status = -ENOMEM;
7673
7674 dprintk("--> %s\n", __func__);
7675 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7676 if (calldata == NULL)
7677 goto out;
7678 calldata->clp = clp;
7679 calldata->arg.one_fs = 0;
7680
7681 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
7682 nfs4_set_sequence_privileged(&calldata->arg.seq_args);
7683 msg.rpc_argp = &calldata->arg;
7684 msg.rpc_resp = &calldata->res;
7685 task_setup_data.callback_data = calldata;
7686 task = rpc_run_task(&task_setup_data);
7687 if (IS_ERR(task)) {
7688 status = PTR_ERR(task);
7689 goto out;
7690 }
7691 status = nfs4_wait_for_completion_rpc_task(task);
7692 if (status == 0)
7693 status = task->tk_status;
7694 rpc_put_task(task);
7695 return 0;
7696 out:
7697 dprintk("<-- %s status=%d\n", __func__, status);
7698 return status;
7699 }
7700
7701 static void
7702 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
7703 {
7704 struct nfs4_layoutget *lgp = calldata;
7705 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
7706 struct nfs4_session *session = nfs4_get_session(server);
7707
7708 dprintk("--> %s\n", __func__);
7709 	/* Note there is a race here, where a CB_LAYOUTRECALL can come in
7710 * right now covering the LAYOUTGET we are about to send.
7711 * However, that is not so catastrophic, and there seems
7712 * to be no way to prevent it completely.
7713 */
7714 if (nfs41_setup_sequence(session, &lgp->args.seq_args,
7715 &lgp->res.seq_res, task))
7716 return;
7717 if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
7718 NFS_I(lgp->args.inode)->layout,
7719 &lgp->args.range,
7720 lgp->args.ctx->state)) {
7721 rpc_exit(task, NFS4_OK);
7722 }
7723 }
7724
7725 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
7726 {
7727 struct nfs4_layoutget *lgp = calldata;
7728 struct inode *inode = lgp->args.inode;
7729 struct nfs_server *server = NFS_SERVER(inode);
7730 struct pnfs_layout_hdr *lo;
7731 struct nfs4_state *state = NULL;
7732 unsigned long timeo, now, giveup;
7733
7734 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
7735
7736 if (!nfs41_sequence_done(task, &lgp->res.seq_res))
7737 goto out;
7738
7739 switch (task->tk_status) {
7740 case 0:
7741 goto out;
7742 /*
7743 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
7744 * (or clients) writing to the same RAID stripe
7745 */
7746 case -NFS4ERR_LAYOUTTRYLATER:
7747 /*
7748 	 * NFS4ERR_RECALLCONFLICT indicates a conflict with ourselves (the
7749 	 * existing layout must be recalled before a new one can be granted).
7750 */
7751 case -NFS4ERR_RECALLCONFLICT:
7752 timeo = rpc_get_timeout(task->tk_client);
7753 giveup = lgp->args.timestamp + timeo;
7754 now = jiffies;
7755 if (time_after(giveup, now)) {
7756 unsigned long delay;
7757
7758 /* Delay for:
7759 		 * - Not less than NFS4_POLL_RETRY_MIN.
7760 		 * - Ending no later than one jiffy before we give up
7761 * - exponential backoff (time_now minus start_attempt)
7762 */
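/*
 * Worked example, assuming HZ = 1000 (so NFS4_POLL_RETRY_MIN is 100
 * jiffies): if 2000 jiffies have elapsed since the LAYOUTGET was first
 * sent and 50000 jiffies remain until giveup, the delay becomes
 * max(100, min(49999, 2000)) = 2000 jiffies, so the wait roughly
 * doubles on each retry until the giveup deadline caps it.
 */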
7763 delay = max_t(unsigned long, NFS4_POLL_RETRY_MIN,
7764 min((giveup - now - 1),
7765 now - lgp->args.timestamp));
7766
7767 dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n",
7768 __func__, delay);
7769 rpc_delay(task, delay);
7770 task->tk_status = 0;
7771 rpc_restart_call_prepare(task);
7772 goto out; /* Do not call nfs4_async_handle_error() */
7773 }
7774 break;
7775 case -NFS4ERR_EXPIRED:
7776 case -NFS4ERR_BAD_STATEID:
7777 spin_lock(&inode->i_lock);
7778 lo = NFS_I(inode)->layout;
7779 if (!lo || list_empty(&lo->plh_segs)) {
7780 spin_unlock(&inode->i_lock);
7781 /* If the open stateid was bad, then recover it. */
7782 state = lgp->args.ctx->state;
7783 } else {
7784 LIST_HEAD(head);
7785
7786 /*
7787 * Mark the bad layout state as invalid, then retry
7788 * with the current stateid.
7789 */
7790 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
7791 spin_unlock(&inode->i_lock);
7792 pnfs_free_lseg_list(&head);
7793
7794 task->tk_status = 0;
7795 rpc_restart_call_prepare(task);
7796 }
7797 }
7798 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN)
7799 rpc_restart_call_prepare(task);
7800 out:
7801 dprintk("<-- %s\n", __func__);
7802 }
7803
7804 static size_t max_response_pages(struct nfs_server *server)
7805 {
7806 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
7807 return nfs_page_array_len(0, max_resp_sz);
7808 }
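/*
 * For example, a negotiated fore-channel max_resp_sz of 1 MB with 4 KB
 * pages works out to 256 reply pages; nfs_page_array_len() rounds the
 * byte count up to whole pages, so this is an upper bound on what a
 * LAYOUTGET reply can occupy.
 */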
7809
7810 static void nfs4_free_pages(struct page **pages, size_t size)
7811 {
7812 int i;
7813
7814 if (!pages)
7815 return;
7816
7817 for (i = 0; i < size; i++) {
7818 if (!pages[i])
7819 break;
7820 __free_page(pages[i]);
7821 }
7822 kfree(pages);
7823 }
7824
7825 static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
7826 {
7827 struct page **pages;
7828 int i;
7829
7830 pages = kcalloc(size, sizeof(struct page *), gfp_flags);
7831 if (!pages) {
7832 dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
7833 return NULL;
7834 }
7835
7836 for (i = 0; i < size; i++) {
7837 pages[i] = alloc_page(gfp_flags);
7838 if (!pages[i]) {
7839 dprintk("%s: failed to allocate page\n", __func__);
7840 nfs4_free_pages(pages, size);
7841 return NULL;
7842 }
7843 }
7844
7845 return pages;
7846 }
7847
7848 static void nfs4_layoutget_release(void *calldata)
7849 {
7850 struct nfs4_layoutget *lgp = calldata;
7851 struct inode *inode = lgp->args.inode;
7852 struct nfs_server *server = NFS_SERVER(inode);
7853 size_t max_pages = max_response_pages(server);
7854
7855 dprintk("--> %s\n", __func__);
7856 nfs4_free_pages(lgp->args.layout.pages, max_pages);
7857 pnfs_put_layout_hdr(NFS_I(inode)->layout);
7858 put_nfs_open_context(lgp->args.ctx);
7859 kfree(calldata);
7860 dprintk("<-- %s\n", __func__);
7861 }
7862
7863 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
7864 .rpc_call_prepare = nfs4_layoutget_prepare,
7865 .rpc_call_done = nfs4_layoutget_done,
7866 .rpc_release = nfs4_layoutget_release,
7867 };
7868
7869 struct pnfs_layout_segment *
7870 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
7871 {
7872 struct inode *inode = lgp->args.inode;
7873 struct nfs_server *server = NFS_SERVER(inode);
7874 size_t max_pages = max_response_pages(server);
7875 struct rpc_task *task;
7876 struct rpc_message msg = {
7877 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
7878 .rpc_argp = &lgp->args,
7879 .rpc_resp = &lgp->res,
7880 .rpc_cred = lgp->cred,
7881 };
7882 struct rpc_task_setup task_setup_data = {
7883 .rpc_client = server->client,
7884 .rpc_message = &msg,
7885 .callback_ops = &nfs4_layoutget_call_ops,
7886 .callback_data = lgp,
7887 .flags = RPC_TASK_ASYNC,
7888 };
7889 struct pnfs_layout_segment *lseg = NULL;
7890 int status = 0;
7891
7892 dprintk("--> %s\n", __func__);
7893
7894 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
7895 pnfs_get_layout_hdr(NFS_I(inode)->layout);
7896
7897 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
7898 if (!lgp->args.layout.pages) {
7899 nfs4_layoutget_release(lgp);
7900 return ERR_PTR(-ENOMEM);
7901 }
7902 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
7903 lgp->args.timestamp = jiffies;
7904
7905 lgp->res.layoutp = &lgp->args.layout;
7906 lgp->res.seq_res.sr_slot = NULL;
7907 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
7908
7909 task = rpc_run_task(&task_setup_data);
7910 if (IS_ERR(task))
7911 return ERR_CAST(task);
7912 status = nfs4_wait_for_completion_rpc_task(task);
7913 if (status == 0)
7914 status = task->tk_status;
7915 trace_nfs4_layoutget(lgp->args.ctx,
7916 &lgp->args.range,
7917 &lgp->res.range,
7918 status);
7919 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
7920 if (status == 0 && lgp->res.layoutp->len)
7921 lseg = pnfs_layout_process(lgp);
7922 rpc_put_task(task);
7923 dprintk("<-- %s status=%d\n", __func__, status);
7924 if (status)
7925 return ERR_PTR(status);
7926 return lseg;
7927 }
7928
7929 static void
7930 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
7931 {
7932 struct nfs4_layoutreturn *lrp = calldata;
7933
7934 dprintk("--> %s\n", __func__);
7935 nfs41_setup_sequence(lrp->clp->cl_session,
7936 &lrp->args.seq_args,
7937 &lrp->res.seq_res,
7938 task);
7939 }
7940
7941 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
7942 {
7943 struct nfs4_layoutreturn *lrp = calldata;
7944 struct nfs_server *server;
7945
7946 dprintk("--> %s\n", __func__);
7947
7948 if (!nfs41_sequence_done(task, &lrp->res.seq_res))
7949 return;
7950
7951 server = NFS_SERVER(lrp->args.inode);
7952 switch (task->tk_status) {
7953 default:
7954 task->tk_status = 0;
7955 case 0:
7956 break;
7957 case -NFS4ERR_DELAY:
7958 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
7959 break;
7960 rpc_restart_call_prepare(task);
7961 return;
7962 }
7963 dprintk("<-- %s\n", __func__);
7964 }
7965
7966 static void nfs4_layoutreturn_release(void *calldata)
7967 {
7968 struct nfs4_layoutreturn *lrp = calldata;
7969 struct pnfs_layout_hdr *lo = lrp->args.layout;
7970 LIST_HEAD(freeme);
7971
7972 dprintk("--> %s\n", __func__);
7973 spin_lock(&lo->plh_inode->i_lock);
7974 if (lrp->res.lrs_present)
7975 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
7976 pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range);
7977 pnfs_clear_layoutreturn_waitbit(lo);
7978 lo->plh_block_lgets--;
7979 spin_unlock(&lo->plh_inode->i_lock);
7980 pnfs_free_lseg_list(&freeme);
7981 pnfs_put_layout_hdr(lrp->args.layout);
7982 nfs_iput_and_deactive(lrp->inode);
7983 kfree(calldata);
7984 dprintk("<-- %s\n", __func__);
7985 }
7986
7987 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
7988 .rpc_call_prepare = nfs4_layoutreturn_prepare,
7989 .rpc_call_done = nfs4_layoutreturn_done,
7990 .rpc_release = nfs4_layoutreturn_release,
7991 };
7992
7993 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
7994 {
7995 struct rpc_task *task;
7996 struct rpc_message msg = {
7997 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
7998 .rpc_argp = &lrp->args,
7999 .rpc_resp = &lrp->res,
8000 .rpc_cred = lrp->cred,
8001 };
8002 struct rpc_task_setup task_setup_data = {
8003 .rpc_client = NFS_SERVER(lrp->args.inode)->client,
8004 .rpc_message = &msg,
8005 .callback_ops = &nfs4_layoutreturn_call_ops,
8006 .callback_data = lrp,
8007 };
8008 int status = 0;
8009
8010 dprintk("--> %s\n", __func__);
8011 if (!sync) {
8012 lrp->inode = nfs_igrab_and_active(lrp->args.inode);
8013 if (!lrp->inode) {
8014 nfs4_layoutreturn_release(lrp);
8015 return -EAGAIN;
8016 }
8017 task_setup_data.flags |= RPC_TASK_ASYNC;
8018 }
8019 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
8020 task = rpc_run_task(&task_setup_data);
8021 if (IS_ERR(task))
8022 return PTR_ERR(task);
8023 if (sync)
8024 status = task->tk_status;
8025 trace_nfs4_layoutreturn(lrp->args.inode, status);
8026 dprintk("<-- %s status=%d\n", __func__, status);
8027 rpc_put_task(task);
8028 return status;
8029 }
8030
8031 static int
8032 _nfs4_proc_getdeviceinfo(struct nfs_server *server,
8033 struct pnfs_device *pdev,
8034 struct rpc_cred *cred)
8035 {
8036 struct nfs4_getdeviceinfo_args args = {
8037 .pdev = pdev,
8038 .notify_types = NOTIFY_DEVICEID4_CHANGE |
8039 NOTIFY_DEVICEID4_DELETE,
8040 };
8041 struct nfs4_getdeviceinfo_res res = {
8042 .pdev = pdev,
8043 };
8044 struct rpc_message msg = {
8045 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
8046 .rpc_argp = &args,
8047 .rpc_resp = &res,
8048 .rpc_cred = cred,
8049 };
8050 int status;
8051
8052 dprintk("--> %s\n", __func__);
8053 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
8054 if (res.notification & ~args.notify_types)
8055 dprintk("%s: unsupported notification\n", __func__);
8056 if (res.notification != args.notify_types)
8057 pdev->nocache = 1;
8058
8059 dprintk("<-- %s status=%d\n", __func__, status);
8060
8061 return status;
8062 }
8063
8064 int nfs4_proc_getdeviceinfo(struct nfs_server *server,
8065 struct pnfs_device *pdev,
8066 struct rpc_cred *cred)
8067 {
8068 struct nfs4_exception exception = { };
8069 int err;
8070
8071 do {
8072 err = nfs4_handle_exception(server,
8073 _nfs4_proc_getdeviceinfo(server, pdev, cred),
8074 &exception);
8075 } while (exception.retry);
8076 return err;
8077 }
8078 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
8079
8080 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
8081 {
8082 struct nfs4_layoutcommit_data *data = calldata;
8083 struct nfs_server *server = NFS_SERVER(data->args.inode);
8084 struct nfs4_session *session = nfs4_get_session(server);
8085
8086 nfs41_setup_sequence(session,
8087 &data->args.seq_args,
8088 &data->res.seq_res,
8089 task);
8090 }
8091
8092 static void
8093 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
8094 {
8095 struct nfs4_layoutcommit_data *data = calldata;
8096 struct nfs_server *server = NFS_SERVER(data->args.inode);
8097
8098 if (!nfs41_sequence_done(task, &data->res.seq_res))
8099 return;
8100
8101 switch (task->tk_status) { /* Just ignore these failures */
8102 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
8103 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
8104 case -NFS4ERR_BADLAYOUT: /* no layout */
8105 	case -NFS4ERR_GRACE:	    /* loca_reclaim is always false */
8106 task->tk_status = 0;
8107 case 0:
8108 break;
8109 default:
8110 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
8111 rpc_restart_call_prepare(task);
8112 return;
8113 }
8114 }
8115 }
8116
8117 static void nfs4_layoutcommit_release(void *calldata)
8118 {
8119 struct nfs4_layoutcommit_data *data = calldata;
8120
8121 pnfs_cleanup_layoutcommit(data);
8122 nfs_post_op_update_inode_force_wcc(data->args.inode,
8123 data->res.fattr);
8124 put_rpccred(data->cred);
8125 nfs_iput_and_deactive(data->inode);
8126 kfree(data);
8127 }
8128
8129 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
8130 .rpc_call_prepare = nfs4_layoutcommit_prepare,
8131 .rpc_call_done = nfs4_layoutcommit_done,
8132 .rpc_release = nfs4_layoutcommit_release,
8133 };
8134
8135 int
8136 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
8137 {
8138 struct rpc_message msg = {
8139 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
8140 .rpc_argp = &data->args,
8141 .rpc_resp = &data->res,
8142 .rpc_cred = data->cred,
8143 };
8144 struct rpc_task_setup task_setup_data = {
8145 .task = &data->task,
8146 .rpc_client = NFS_CLIENT(data->args.inode),
8147 .rpc_message = &msg,
8148 .callback_ops = &nfs4_layoutcommit_ops,
8149 .callback_data = data,
8150 };
8151 struct rpc_task *task;
8152 int status = 0;
8153
8154 dprintk("NFS: initiating layoutcommit call. sync %d "
8155 "lbw: %llu inode %lu\n", sync,
8156 data->args.lastbytewritten,
8157 data->args.inode->i_ino);
8158
8159 if (!sync) {
8160 data->inode = nfs_igrab_and_active(data->args.inode);
8161 if (data->inode == NULL) {
8162 nfs4_layoutcommit_release(data);
8163 return -EAGAIN;
8164 }
8165 task_setup_data.flags = RPC_TASK_ASYNC;
8166 }
8167 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
8168 task = rpc_run_task(&task_setup_data);
8169 if (IS_ERR(task))
8170 return PTR_ERR(task);
8171 if (sync)
8172 status = task->tk_status;
8173 trace_nfs4_layoutcommit(data->args.inode, status);
8174 dprintk("%s: status %d\n", __func__, status);
8175 rpc_put_task(task);
8176 return status;
8177 }
8178
8179 /**
8180  * Use the state management nfs_client cl_rpcclient, which uses krb5i (if
8181 * possible) as per RFC3530bis and RFC5661 Security Considerations sections
8182 */
8183 static int
8184 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8185 struct nfs_fsinfo *info,
8186 struct nfs4_secinfo_flavors *flavors, bool use_integrity)
8187 {
8188 struct nfs41_secinfo_no_name_args args = {
8189 .style = SECINFO_STYLE_CURRENT_FH,
8190 };
8191 struct nfs4_secinfo_res res = {
8192 .flavors = flavors,
8193 };
8194 struct rpc_message msg = {
8195 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
8196 .rpc_argp = &args,
8197 .rpc_resp = &res,
8198 };
8199 struct rpc_clnt *clnt = server->client;
8200 struct rpc_cred *cred = NULL;
8201 int status;
8202
8203 if (use_integrity) {
8204 clnt = server->nfs_client->cl_rpcclient;
8205 cred = nfs4_get_clid_cred(server->nfs_client);
8206 msg.rpc_cred = cred;
8207 }
8208
8209 dprintk("--> %s\n", __func__);
8210 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
8211 &res.seq_res, 0);
8212 dprintk("<-- %s status=%d\n", __func__, status);
8213
8214 if (cred)
8215 put_rpccred(cred);
8216
8217 return status;
8218 }
8219
8220 static int
8221 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8222 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
8223 {
8224 struct nfs4_exception exception = { };
8225 int err;
8226 do {
8227 /* first try using integrity protection */
8228 err = -NFS4ERR_WRONGSEC;
8229
8230 /* try to use integrity protection with machine cred */
8231 if (_nfs4_is_integrity_protected(server->nfs_client))
8232 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
8233 flavors, true);
8234
8235 /*
8236 * if unable to use integrity protection, or SECINFO with
8237 * integrity protection returns NFS4ERR_WRONGSEC (which is
8238 * disallowed by spec, but exists in deployed servers) use
8239 * the current filesystem's rpc_client and the user cred.
8240 */
8241 if (err == -NFS4ERR_WRONGSEC)
8242 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
8243 flavors, false);
8244
8245 switch (err) {
8246 case 0:
8247 case -NFS4ERR_WRONGSEC:
8248 case -ENOTSUPP:
8249 goto out;
8250 default:
8251 err = nfs4_handle_exception(server, err, &exception);
8252 }
8253 } while (exception.retry);
8254 out:
8255 return err;
8256 }
8257
8258 static int
8259 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
8260 struct nfs_fsinfo *info)
8261 {
8262 int err;
8263 struct page *page;
8264 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
8265 struct nfs4_secinfo_flavors *flavors;
8266 struct nfs4_secinfo4 *secinfo;
8267 int i;
8268
8269 page = alloc_page(GFP_KERNEL);
8270 if (!page) {
8271 err = -ENOMEM;
8272 goto out;
8273 }
8274
8275 flavors = page_address(page);
8276 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
8277
8278 /*
8279 * Fall back on "guess and check" method if
8280 * the server doesn't support SECINFO_NO_NAME
8281 */
8282 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
8283 err = nfs4_find_root_sec(server, fhandle, info);
8284 goto out_freepage;
8285 }
8286 if (err)
8287 goto out_freepage;
8288
8289 for (i = 0; i < flavors->num_flavors; i++) {
8290 secinfo = &flavors->flavors[i];
8291
8292 switch (secinfo->flavor) {
8293 case RPC_AUTH_NULL:
8294 case RPC_AUTH_UNIX:
8295 case RPC_AUTH_GSS:
8296 flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
8297 &secinfo->flavor_info);
8298 break;
8299 default:
8300 flavor = RPC_AUTH_MAXFLAVOR;
8301 break;
8302 }
8303
8304 if (!nfs_auth_info_match(&server->auth_info, flavor))
8305 flavor = RPC_AUTH_MAXFLAVOR;
8306
8307 if (flavor != RPC_AUTH_MAXFLAVOR) {
8308 err = nfs4_lookup_root_sec(server, fhandle,
8309 info, flavor);
8310 if (!err)
8311 break;
8312 }
8313 }
8314
8315 if (flavor == RPC_AUTH_MAXFLAVOR)
8316 err = -EPERM;
8317
8318 out_freepage:
8319 put_page(page);
8320 if (err == -EACCES)
8321 return -EPERM;
8322 out:
8323 return err;
8324 }
8325
8326 static int _nfs41_test_stateid(struct nfs_server *server,
8327 nfs4_stateid *stateid,
8328 struct rpc_cred *cred)
8329 {
8330 int status;
8331 struct nfs41_test_stateid_args args = {
8332 .stateid = stateid,
8333 };
8334 struct nfs41_test_stateid_res res;
8335 struct rpc_message msg = {
8336 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
8337 .rpc_argp = &args,
8338 .rpc_resp = &res,
8339 .rpc_cred = cred,
8340 };
8341 struct rpc_clnt *rpc_client = server->client;
8342
8343 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
8344 &rpc_client, &msg);
8345
8346 dprintk("NFS call test_stateid %p\n", stateid);
8347 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
8348 nfs4_set_sequence_privileged(&args.seq_args);
8349 status = nfs4_call_sync_sequence(rpc_client, server, &msg,
8350 &args.seq_args, &res.seq_res);
8351 if (status != NFS_OK) {
8352 dprintk("NFS reply test_stateid: failed, %d\n", status);
8353 return status;
8354 }
8355 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
8356 return -res.status;
8357 }
8358
8359 /**
8360 * nfs41_test_stateid - perform a TEST_STATEID operation
8361 *
8362 * @server: server / transport on which to perform the operation
8363 * @stateid: state ID to test
8364 * @cred: credential
8365 *
8366 * Returns NFS_OK if the server recognizes that "stateid" is valid.
8367 * Otherwise a negative NFS4ERR value is returned if the operation
8368 * failed or the state ID is not currently valid.
8369 */
8370 static int nfs41_test_stateid(struct nfs_server *server,
8371 nfs4_stateid *stateid,
8372 struct rpc_cred *cred)
8373 {
8374 struct nfs4_exception exception = { };
8375 int err;
8376 do {
8377 err = _nfs41_test_stateid(server, stateid, cred);
8378 if (err != -NFS4ERR_DELAY)
8379 break;
8380 nfs4_handle_exception(server, err, &exception);
8381 } while (exception.retry);
8382 return err;
8383 }
8384
8385 struct nfs_free_stateid_data {
8386 struct nfs_server *server;
8387 struct nfs41_free_stateid_args args;
8388 struct nfs41_free_stateid_res res;
8389 };
8390
8391 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
8392 {
8393 struct nfs_free_stateid_data *data = calldata;
8394 nfs41_setup_sequence(nfs4_get_session(data->server),
8395 &data->args.seq_args,
8396 &data->res.seq_res,
8397 task);
8398 }
8399
8400 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
8401 {
8402 struct nfs_free_stateid_data *data = calldata;
8403
8404 nfs41_sequence_done(task, &data->res.seq_res);
8405
8406 switch (task->tk_status) {
8407 case -NFS4ERR_DELAY:
8408 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
8409 rpc_restart_call_prepare(task);
8410 }
8411 }
8412
8413 static void nfs41_free_stateid_release(void *calldata)
8414 {
8415 kfree(calldata);
8416 }
8417
8418 static const struct rpc_call_ops nfs41_free_stateid_ops = {
8419 .rpc_call_prepare = nfs41_free_stateid_prepare,
8420 .rpc_call_done = nfs41_free_stateid_done,
8421 .rpc_release = nfs41_free_stateid_release,
8422 };
8423
8424 static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server,
8425 nfs4_stateid *stateid,
8426 struct rpc_cred *cred,
8427 bool privileged)
8428 {
8429 struct rpc_message msg = {
8430 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
8431 .rpc_cred = cred,
8432 };
8433 struct rpc_task_setup task_setup = {
8434 .rpc_client = server->client,
8435 .rpc_message = &msg,
8436 .callback_ops = &nfs41_free_stateid_ops,
8437 .flags = RPC_TASK_ASYNC,
8438 };
8439 struct nfs_free_stateid_data *data;
8440
8441 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
8442 &task_setup.rpc_client, &msg);
8443
8444 dprintk("NFS call free_stateid %p\n", stateid);
8445 data = kmalloc(sizeof(*data), GFP_NOFS);
8446 if (!data)
8447 return ERR_PTR(-ENOMEM);
8448 data->server = server;
8449 nfs4_stateid_copy(&data->args.stateid, stateid);
8450
8451 task_setup.callback_data = data;
8452
8453 msg.rpc_argp = &data->args;
8454 msg.rpc_resp = &data->res;
8455 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
8456 if (privileged)
8457 nfs4_set_sequence_privileged(&data->args.seq_args);
8458
8459 return rpc_run_task(&task_setup);
8460 }
8461
8462 /**
8463 * nfs41_free_stateid - perform a FREE_STATEID operation
8464 *
8465 * @server: server / transport on which to perform the operation
8466 * @stateid: state ID to release
8467 * @cred: credential
8468 *
8469 * Returns NFS_OK if the server freed "stateid". Otherwise a
8470 * negative NFS4ERR value is returned.
8471 */
8472 static int nfs41_free_stateid(struct nfs_server *server,
8473 nfs4_stateid *stateid,
8474 struct rpc_cred *cred)
8475 {
8476 struct rpc_task *task;
8477 int ret;
8478
8479 task = _nfs41_free_stateid(server, stateid, cred, true);
8480 if (IS_ERR(task))
8481 return PTR_ERR(task);
8482 ret = rpc_wait_for_completion_task(task);
8483 if (!ret)
8484 ret = task->tk_status;
8485 rpc_put_task(task);
8486 return ret;
8487 }
8488
8489 static void
8490 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
8491 {
8492 struct rpc_task *task;
8493 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
8494
8495 task = _nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
8496 nfs4_free_lock_state(server, lsp);
8497 if (IS_ERR(task))
8498 return;
8499 rpc_put_task(task);
8500 }
8501
8502 static bool nfs41_match_stateid(const nfs4_stateid *s1,
8503 const nfs4_stateid *s2)
8504 {
8505 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
8506 return false;
8507
8508 if (s1->seqid == s2->seqid)
8509 return true;
8510 if (s1->seqid == 0 || s2->seqid == 0)
8511 return true;
8512
8513 return false;
8514 }
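/*
 * Example of the special case above: a cached stateid carrying seqid 3
 * still matches an incoming stateid with seqid 0, since NFSv4.1 treats
 * a zero seqid as "the current version" of the stateid; two non-zero
 * seqids match only when they are equal.
 */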
8515
8516 #endif /* CONFIG_NFS_V4_1 */
8517
8518 static bool nfs4_match_stateid(const nfs4_stateid *s1,
8519 const nfs4_stateid *s2)
8520 {
8521 return nfs4_stateid_match(s1, s2);
8522 }
8523
8524
8525 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
8526 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
8527 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
8528 .recover_open = nfs4_open_reclaim,
8529 .recover_lock = nfs4_lock_reclaim,
8530 .establish_clid = nfs4_init_clientid,
8531 .detect_trunking = nfs40_discover_server_trunking,
8532 };
8533
8534 #if defined(CONFIG_NFS_V4_1)
8535 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
8536 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
8537 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
8538 .recover_open = nfs4_open_reclaim,
8539 .recover_lock = nfs4_lock_reclaim,
8540 .establish_clid = nfs41_init_clientid,
8541 .reclaim_complete = nfs41_proc_reclaim_complete,
8542 .detect_trunking = nfs41_discover_server_trunking,
8543 };
8544 #endif /* CONFIG_NFS_V4_1 */
8545
8546 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
8547 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
8548 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
8549 .recover_open = nfs40_open_expired,
8550 .recover_lock = nfs4_lock_expired,
8551 .establish_clid = nfs4_init_clientid,
8552 };
8553
8554 #if defined(CONFIG_NFS_V4_1)
8555 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
8556 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
8557 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
8558 .recover_open = nfs41_open_expired,
8559 .recover_lock = nfs41_lock_expired,
8560 .establish_clid = nfs41_init_clientid,
8561 };
8562 #endif /* CONFIG_NFS_V4_1 */
8563
8564 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
8565 .sched_state_renewal = nfs4_proc_async_renew,
8566 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
8567 .renew_lease = nfs4_proc_renew,
8568 };
8569
8570 #if defined(CONFIG_NFS_V4_1)
8571 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
8572 .sched_state_renewal = nfs41_proc_async_sequence,
8573 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
8574 .renew_lease = nfs4_proc_sequence,
8575 };
8576 #endif
8577
8578 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
8579 .get_locations = _nfs40_proc_get_locations,
8580 .fsid_present = _nfs40_proc_fsid_present,
8581 };
8582
8583 #if defined(CONFIG_NFS_V4_1)
8584 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
8585 .get_locations = _nfs41_proc_get_locations,
8586 .fsid_present = _nfs41_proc_fsid_present,
8587 };
8588 #endif /* CONFIG_NFS_V4_1 */
8589
8590 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
8591 .minor_version = 0,
8592 .init_caps = NFS_CAP_READDIRPLUS
8593 | NFS_CAP_ATOMIC_OPEN
8594 | NFS_CAP_CHANGE_ATTR
8595 | NFS_CAP_POSIX_LOCK,
8596 .init_client = nfs40_init_client,
8597 .shutdown_client = nfs40_shutdown_client,
8598 .match_stateid = nfs4_match_stateid,
8599 .find_root_sec = nfs4_find_root_sec,
8600 .free_lock_state = nfs4_release_lockowner,
8601 .alloc_seqid = nfs_alloc_seqid,
8602 .call_sync_ops = &nfs40_call_sync_ops,
8603 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
8604 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
8605 .state_renewal_ops = &nfs40_state_renewal_ops,
8606 .mig_recovery_ops = &nfs40_mig_recovery_ops,
8607 };
8608
8609 #if defined(CONFIG_NFS_V4_1)
8610 static struct nfs_seqid *
8611 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
8612 {
8613 return NULL;
8614 }
8615
8616 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
8617 .minor_version = 1,
8618 .init_caps = NFS_CAP_READDIRPLUS
8619 | NFS_CAP_ATOMIC_OPEN
8620 | NFS_CAP_CHANGE_ATTR
8621 | NFS_CAP_POSIX_LOCK
8622 | NFS_CAP_STATEID_NFSV41
8623 | NFS_CAP_ATOMIC_OPEN_V1,
8624 .init_client = nfs41_init_client,
8625 .shutdown_client = nfs41_shutdown_client,
8626 .match_stateid = nfs41_match_stateid,
8627 .find_root_sec = nfs41_find_root_sec,
8628 .free_lock_state = nfs41_free_lock_state,
8629 .alloc_seqid = nfs_alloc_no_seqid,
8630 .call_sync_ops = &nfs41_call_sync_ops,
8631 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
8632 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
8633 .state_renewal_ops = &nfs41_state_renewal_ops,
8634 .mig_recovery_ops = &nfs41_mig_recovery_ops,
8635 };
8636 #endif
8637
8638 #if defined(CONFIG_NFS_V4_2)
8639 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
8640 .minor_version = 2,
8641 .init_caps = NFS_CAP_READDIRPLUS
8642 | NFS_CAP_ATOMIC_OPEN
8643 | NFS_CAP_CHANGE_ATTR
8644 | NFS_CAP_POSIX_LOCK
8645 | NFS_CAP_STATEID_NFSV41
8646 | NFS_CAP_ATOMIC_OPEN_V1
8647 | NFS_CAP_ALLOCATE
8648 | NFS_CAP_DEALLOCATE
8649 | NFS_CAP_SEEK
8650 | NFS_CAP_LAYOUTSTATS,
8651 .init_client = nfs41_init_client,
8652 .shutdown_client = nfs41_shutdown_client,
8653 .match_stateid = nfs41_match_stateid,
8654 .find_root_sec = nfs41_find_root_sec,
8655 .free_lock_state = nfs41_free_lock_state,
8656 .call_sync_ops = &nfs41_call_sync_ops,
8657 .alloc_seqid = nfs_alloc_no_seqid,
8658 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
8659 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
8660 .state_renewal_ops = &nfs41_state_renewal_ops,
8661 };
8662 #endif
8663
8664 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
8665 [0] = &nfs_v4_0_minor_ops,
8666 #if defined(CONFIG_NFS_V4_1)
8667 [1] = &nfs_v4_1_minor_ops,
8668 #endif
8669 #if defined(CONFIG_NFS_V4_2)
8670 [2] = &nfs_v4_2_minor_ops,
8671 #endif
8672 };
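/*
 * The table is indexed by the minor version in use, so, for example, a
 * mount negotiated as NFSv4.1 ends up using nfs_v4_1_minor_ops when
 * CONFIG_NFS_V4_1 is enabled (assuming the usual mapping of the
 * "vers"/"minorversion" mount options to the minor version number).
 */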
8673
8674 static const struct inode_operations nfs4_dir_inode_operations = {
8675 .create = nfs_create,
8676 .lookup = nfs_lookup,
8677 .atomic_open = nfs_atomic_open,
8678 .link = nfs_link,
8679 .unlink = nfs_unlink,
8680 .symlink = nfs_symlink,
8681 .mkdir = nfs_mkdir,
8682 .rmdir = nfs_rmdir,
8683 .mknod = nfs_mknod,
8684 .rename = nfs_rename,
8685 .permission = nfs_permission,
8686 .getattr = nfs_getattr,
8687 .setattr = nfs_setattr,
8688 .getxattr = generic_getxattr,
8689 .setxattr = generic_setxattr,
8690 .listxattr = generic_listxattr,
8691 .removexattr = generic_removexattr,
8692 };
8693
8694 static const struct inode_operations nfs4_file_inode_operations = {
8695 .permission = nfs_permission,
8696 .getattr = nfs_getattr,
8697 .setattr = nfs_setattr,
8698 .getxattr = generic_getxattr,
8699 .setxattr = generic_setxattr,
8700 .listxattr = generic_listxattr,
8701 .removexattr = generic_removexattr,
8702 };
8703
8704 const struct nfs_rpc_ops nfs_v4_clientops = {
8705 .version = 4, /* protocol version */
8706 .dentry_ops = &nfs4_dentry_operations,
8707 .dir_inode_ops = &nfs4_dir_inode_operations,
8708 .file_inode_ops = &nfs4_file_inode_operations,
8709 .file_ops = &nfs4_file_operations,
8710 .getroot = nfs4_proc_get_root,
8711 .submount = nfs4_submount,
8712 .try_mount = nfs4_try_mount,
8713 .getattr = nfs4_proc_getattr,
8714 .setattr = nfs4_proc_setattr,
8715 .lookup = nfs4_proc_lookup,
8716 .access = nfs4_proc_access,
8717 .readlink = nfs4_proc_readlink,
8718 .create = nfs4_proc_create,
8719 .remove = nfs4_proc_remove,
8720 .unlink_setup = nfs4_proc_unlink_setup,
8721 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
8722 .unlink_done = nfs4_proc_unlink_done,
8723 .rename_setup = nfs4_proc_rename_setup,
8724 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
8725 .rename_done = nfs4_proc_rename_done,
8726 .link = nfs4_proc_link,
8727 .symlink = nfs4_proc_symlink,
8728 .mkdir = nfs4_proc_mkdir,
8729 .rmdir = nfs4_proc_remove,
8730 .readdir = nfs4_proc_readdir,
8731 .mknod = nfs4_proc_mknod,
8732 .statfs = nfs4_proc_statfs,
8733 .fsinfo = nfs4_proc_fsinfo,
8734 .pathconf = nfs4_proc_pathconf,
8735 .set_capabilities = nfs4_server_capabilities,
8736 .decode_dirent = nfs4_decode_dirent,
8737 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
8738 .read_setup = nfs4_proc_read_setup,
8739 .read_done = nfs4_read_done,
8740 .write_setup = nfs4_proc_write_setup,
8741 .write_done = nfs4_write_done,
8742 .commit_setup = nfs4_proc_commit_setup,
8743 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
8744 .commit_done = nfs4_commit_done,
8745 .lock = nfs4_proc_lock,
8746 .clear_acl_cache = nfs4_zap_acl_attr,
8747 .close_context = nfs4_close_context,
8748 .open_context = nfs4_atomic_open,
8749 .have_delegation = nfs4_have_delegation,
8750 .return_delegation = nfs4_inode_return_delegation,
8751 .alloc_client = nfs4_alloc_client,
8752 .init_client = nfs4_init_client,
8753 .free_client = nfs4_free_client,
8754 .create_server = nfs4_create_server,
8755 .clone_server = nfs_clone_server,
8756 };
8757
8758 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
8759 .prefix = XATTR_NAME_NFSV4_ACL,
8760 .list = nfs4_xattr_list_nfs4_acl,
8761 .get = nfs4_xattr_get_nfs4_acl,
8762 .set = nfs4_xattr_set_nfs4_acl,
8763 };
8764
8765 const struct xattr_handler *nfs4_xattr_handlers[] = {
8766 &nfs4_xattr_nfs4_acl_handler,
8767 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
8768 &nfs4_xattr_nfs4_label_handler,
8769 #endif
8770 NULL
8771 };
8772
8773 /*
8774 * Local variables:
8775 * c-basic-offset: 8
8776 * End:
8777 */