NFSv4: Fix another open/open_recovery deadlock
1 /*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/string.h>
42 #include <linux/ratelimit.h>
43 #include <linux/printk.h>
44 #include <linux/slab.h>
45 #include <linux/sunrpc/clnt.h>
46 #include <linux/nfs.h>
47 #include <linux/nfs4.h>
48 #include <linux/nfs_fs.h>
49 #include <linux/nfs_page.h>
50 #include <linux/nfs_mount.h>
51 #include <linux/namei.h>
52 #include <linux/mount.h>
53 #include <linux/module.h>
54 #include <linux/nfs_idmap.h>
55 #include <linux/xattr.h>
56 #include <linux/utsname.h>
57 #include <linux/freezer.h>
58
59 #include "nfs4_fs.h"
60 #include "delegation.h"
61 #include "internal.h"
62 #include "iostat.h"
63 #include "callback.h"
64 #include "pnfs.h"
65 #include "netns.h"
66 #include "nfs4session.h"
67 #include "fscache.h"
68
69 #define NFSDBG_FACILITY NFSDBG_PROC
70
71 #define NFS4_POLL_RETRY_MIN (HZ/10)
72 #define NFS4_POLL_RETRY_MAX (15*HZ)
73
74 struct nfs4_opendata;
75 static int _nfs4_proc_open(struct nfs4_opendata *data);
76 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
77 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
78 static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
79 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
80 static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *);
81 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
82 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
83 struct nfs_fattr *fattr, struct iattr *sattr,
84 struct nfs4_state *state);
85 #ifdef CONFIG_NFS_V4_1
86 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *);
87 static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *);
88 #endif
89 /* Prevent leaks of NFSv4 errors into userland */
90 static int nfs4_map_errors(int err)
91 {
92 if (err >= -1000)
93 return err;
94 switch (err) {
95 case -NFS4ERR_RESOURCE:
96 return -EREMOTEIO;
97 case -NFS4ERR_WRONGSEC:
98 return -EPERM;
99 case -NFS4ERR_BADOWNER:
100 case -NFS4ERR_BADNAME:
101 return -EINVAL;
102 case -NFS4ERR_SHARE_DENIED:
103 return -EACCES;
104 case -NFS4ERR_MINOR_VERS_MISMATCH:
105 return -EPROTONOSUPPORT;
106 case -NFS4ERR_ACCESS:
107 return -EACCES;
108 default:
109 dprintk("%s could not handle NFSv4 error %d\n",
110 __func__, -err);
111 break;
112 }
113 return -EIO;
114 }
115
116 /*
117 * This is our standard bitmap for GETATTR requests.
118 */
119 const u32 nfs4_fattr_bitmap[3] = {
120 FATTR4_WORD0_TYPE
121 | FATTR4_WORD0_CHANGE
122 | FATTR4_WORD0_SIZE
123 | FATTR4_WORD0_FSID
124 | FATTR4_WORD0_FILEID,
125 FATTR4_WORD1_MODE
126 | FATTR4_WORD1_NUMLINKS
127 | FATTR4_WORD1_OWNER
128 | FATTR4_WORD1_OWNER_GROUP
129 | FATTR4_WORD1_RAWDEV
130 | FATTR4_WORD1_SPACE_USED
131 | FATTR4_WORD1_TIME_ACCESS
132 | FATTR4_WORD1_TIME_METADATA
133 | FATTR4_WORD1_TIME_MODIFY
134 };
135
136 static const u32 nfs4_pnfs_open_bitmap[3] = {
137 FATTR4_WORD0_TYPE
138 | FATTR4_WORD0_CHANGE
139 | FATTR4_WORD0_SIZE
140 | FATTR4_WORD0_FSID
141 | FATTR4_WORD0_FILEID,
142 FATTR4_WORD1_MODE
143 | FATTR4_WORD1_NUMLINKS
144 | FATTR4_WORD1_OWNER
145 | FATTR4_WORD1_OWNER_GROUP
146 | FATTR4_WORD1_RAWDEV
147 | FATTR4_WORD1_SPACE_USED
148 | FATTR4_WORD1_TIME_ACCESS
149 | FATTR4_WORD1_TIME_METADATA
150 | FATTR4_WORD1_TIME_MODIFY,
151 FATTR4_WORD2_MDSTHRESHOLD
152 };
153
154 static const u32 nfs4_open_noattr_bitmap[3] = {
155 FATTR4_WORD0_TYPE
156 | FATTR4_WORD0_CHANGE
157 | FATTR4_WORD0_FILEID,
158 };
159
160 const u32 nfs4_statfs_bitmap[2] = {
161 FATTR4_WORD0_FILES_AVAIL
162 | FATTR4_WORD0_FILES_FREE
163 | FATTR4_WORD0_FILES_TOTAL,
164 FATTR4_WORD1_SPACE_AVAIL
165 | FATTR4_WORD1_SPACE_FREE
166 | FATTR4_WORD1_SPACE_TOTAL
167 };
168
169 const u32 nfs4_pathconf_bitmap[2] = {
170 FATTR4_WORD0_MAXLINK
171 | FATTR4_WORD0_MAXNAME,
172 0
173 };
174
175 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
176 | FATTR4_WORD0_MAXREAD
177 | FATTR4_WORD0_MAXWRITE
178 | FATTR4_WORD0_LEASE_TIME,
179 FATTR4_WORD1_TIME_DELTA
180 | FATTR4_WORD1_FS_LAYOUT_TYPES,
181 FATTR4_WORD2_LAYOUT_BLKSIZE
182 };
183
184 const u32 nfs4_fs_locations_bitmap[2] = {
185 FATTR4_WORD0_TYPE
186 | FATTR4_WORD0_CHANGE
187 | FATTR4_WORD0_SIZE
188 | FATTR4_WORD0_FSID
189 | FATTR4_WORD0_FILEID
190 | FATTR4_WORD0_FS_LOCATIONS,
191 FATTR4_WORD1_MODE
192 | FATTR4_WORD1_NUMLINKS
193 | FATTR4_WORD1_OWNER
194 | FATTR4_WORD1_OWNER_GROUP
195 | FATTR4_WORD1_RAWDEV
196 | FATTR4_WORD1_SPACE_USED
197 | FATTR4_WORD1_TIME_ACCESS
198 | FATTR4_WORD1_TIME_METADATA
199 | FATTR4_WORD1_TIME_MODIFY
200 | FATTR4_WORD1_MOUNTED_ON_FILEID
201 };
202
203 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
204 struct nfs4_readdir_arg *readdir)
205 {
206 __be32 *start, *p;
207
208 if (cookie > 2) {
209 readdir->cookie = cookie;
210 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
211 return;
212 }
213
214 readdir->cookie = 0;
215 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
216 if (cookie == 2)
217 return;
218
219 /*
220 * NFSv4 servers do not return entries for '.' and '..'
221 * Therefore, we fake these entries here. We let '.'
222 * have cookie 0 and '..' have cookie 1. Note that
223 * when talking to the server, we always send cookie 0
224 * instead of 1 or 2.
225 */
226 start = p = kmap_atomic(*readdir->pages);
227
228 if (cookie == 0) {
229 *p++ = xdr_one; /* next */
230 *p++ = xdr_zero; /* cookie, first word */
231 *p++ = xdr_one; /* cookie, second word */
232 *p++ = xdr_one; /* entry len */
233 memcpy(p, ".\0\0\0", 4); /* entry */
234 p++;
235 *p++ = xdr_one; /* bitmap length */
236 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
237 *p++ = htonl(8); /* attribute buffer length */
238 p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
239 }
240
241 *p++ = xdr_one; /* next */
242 *p++ = xdr_zero; /* cookie, first word */
243 *p++ = xdr_two; /* cookie, second word */
244 *p++ = xdr_two; /* entry len */
245 memcpy(p, "..\0\0", 4); /* entry */
246 p++;
247 *p++ = xdr_one; /* bitmap length */
248 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
249 *p++ = htonl(8); /* attribute buffer length */
250 p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));
251
252 readdir->pgbase = (char *)p - (char *)start;
253 readdir->count -= readdir->pgbase;
254 kunmap_atomic(start);
255 }
256
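/*
 * Wait before retrying an NFSv4 request.  The delay is clamped to the
 * range [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX] and is doubled for
 * the next retry.  Returns -ERESTARTSYS if a fatal signal is pending.
 */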
257 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
258 {
259 int res = 0;
260
261 might_sleep();
262
263 if (*timeout <= 0)
264 *timeout = NFS4_POLL_RETRY_MIN;
265 if (*timeout > NFS4_POLL_RETRY_MAX)
266 *timeout = NFS4_POLL_RETRY_MAX;
267 freezable_schedule_timeout_killable(*timeout);
268 if (fatal_signal_pending(current))
269 res = -ERESTARTSYS;
270 *timeout <<= 1;
271 return res;
272 }
273
274 /* This is the error handling routine for processes that are allowed
275 * to sleep.
276 */
277 static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
278 {
279 struct nfs_client *clp = server->nfs_client;
280 struct nfs4_state *state = exception->state;
281 struct inode *inode = exception->inode;
282 int ret = errorcode;
283
284 exception->retry = 0;
285 switch(errorcode) {
286 case 0:
287 return 0;
288 case -NFS4ERR_OPENMODE:
289 if (inode && nfs4_have_delegation(inode, FMODE_READ)) {
290 nfs4_inode_return_delegation(inode);
291 exception->retry = 1;
292 return 0;
293 }
294 if (state == NULL)
295 break;
296 nfs4_schedule_stateid_recovery(server, state);
297 goto wait_on_recovery;
298 case -NFS4ERR_DELEG_REVOKED:
299 case -NFS4ERR_ADMIN_REVOKED:
300 case -NFS4ERR_BAD_STATEID:
301 if (state == NULL)
302 break;
303 nfs_remove_bad_delegation(state->inode);
304 nfs4_schedule_stateid_recovery(server, state);
305 goto wait_on_recovery;
306 case -NFS4ERR_EXPIRED:
307 if (state != NULL)
308 nfs4_schedule_stateid_recovery(server, state);
309 case -NFS4ERR_STALE_STATEID:
310 case -NFS4ERR_STALE_CLIENTID:
311 nfs4_schedule_lease_recovery(clp);
312 goto wait_on_recovery;
313 #if defined(CONFIG_NFS_V4_1)
314 case -NFS4ERR_BADSESSION:
315 case -NFS4ERR_BADSLOT:
316 case -NFS4ERR_BAD_HIGH_SLOT:
317 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
318 case -NFS4ERR_DEADSESSION:
319 case -NFS4ERR_SEQ_FALSE_RETRY:
320 case -NFS4ERR_SEQ_MISORDERED:
321 dprintk("%s ERROR: %d Reset session\n", __func__,
322 errorcode);
323 nfs4_schedule_session_recovery(clp->cl_session, errorcode);
324 goto wait_on_recovery;
325 #endif /* defined(CONFIG_NFS_V4_1) */
326 case -NFS4ERR_FILE_OPEN:
327 if (exception->timeout > HZ) {
328 /* We have retried a decent amount, time to
329 * fail
330 */
331 ret = -EBUSY;
332 break;
333 }
334 case -NFS4ERR_GRACE:
335 case -NFS4ERR_DELAY:
336 ret = nfs4_delay(server->client, &exception->timeout);
337 if (ret != 0)
338 break;
339 case -NFS4ERR_RETRY_UNCACHED_REP:
340 case -NFS4ERR_OLD_STATEID:
341 exception->retry = 1;
342 break;
343 case -NFS4ERR_BADOWNER:
344 /* The following works around a Linux server bug! */
345 case -NFS4ERR_BADNAME:
346 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
347 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
348 exception->retry = 1;
349 printk(KERN_WARNING "NFS: v4 server %s "
350 "does not accept raw "
351 "uid/gids. "
352 "Reenabling the idmapper.\n",
353 server->nfs_client->cl_hostname);
354 }
355 }
356 /* We failed to handle the error */
357 return nfs4_map_errors(ret);
358 wait_on_recovery:
359 ret = nfs4_wait_clnt_recover(clp);
360 if (ret == 0)
361 exception->retry = 1;
362 return ret;
363 }
364
365
366 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
367 {
368 spin_lock(&clp->cl_lock);
369 if (time_before(clp->cl_last_renewal,timestamp))
370 clp->cl_last_renewal = timestamp;
371 spin_unlock(&clp->cl_lock);
372 }
373
374 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
375 {
376 do_renew_lease(server->nfs_client, timestamp);
377 }
378
379 #if defined(CONFIG_NFS_V4_1)
380
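/*
 * Release the session slot held by a completed request: either hand it
 * straight to a waiting task or return it to the slot table.  If the
 * table ends up empty while the last transmitted highest_used_slotid
 * exceeded the server's target, notify the server of the update.
 */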
381 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
382 {
383 struct nfs4_session *session;
384 struct nfs4_slot_table *tbl;
385 bool send_new_highest_used_slotid = false;
386
387 if (!res->sr_slot) {
388 /* just wake up the next guy waiting since
389 * we may not have consumed a slot after all */
390 dprintk("%s: No slot\n", __func__);
391 return;
392 }
393 tbl = res->sr_slot->table;
394 session = tbl->session;
395
396 spin_lock(&tbl->slot_tbl_lock);
397 /* Be nice to the server: try to ensure that the last transmitted
398 * value for highest_used_slotid <= target_highest_slotid
399 */
400 if (tbl->highest_used_slotid > tbl->target_highest_slotid)
401 send_new_highest_used_slotid = true;
402
403 if (nfs41_wake_and_assign_slot(tbl, res->sr_slot)) {
404 send_new_highest_used_slotid = false;
405 goto out_unlock;
406 }
407 nfs4_free_slot(tbl, res->sr_slot);
408
409 if (tbl->highest_used_slotid != NFS4_NO_SLOT)
410 send_new_highest_used_slotid = false;
411 out_unlock:
412 spin_unlock(&tbl->slot_tbl_lock);
413 res->sr_slot = NULL;
414 if (send_new_highest_used_slotid)
415 nfs41_server_notify_highest_slotid_update(session->clp);
416 }
417
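/*
 * Process the result of the SEQUENCE operation in a v4.1 compound:
 * on success renew the lease and update the slot, otherwise handle
 * slot-related errors by retrying with an adjusted slot or sequence
 * number.  Returns 0 if the RPC call has been restarted, 1 otherwise.
 */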
418 static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
419 {
420 struct nfs4_session *session;
421 struct nfs4_slot *slot;
422 struct nfs_client *clp;
423 bool interrupted = false;
424 int ret = 1;
425
426 /* don't increment the sequence number if the task wasn't sent */
427 if (!RPC_WAS_SENT(task))
428 goto out;
429
430 slot = res->sr_slot;
431 session = slot->table->session;
432
433 if (slot->interrupted) {
434 slot->interrupted = 0;
435 interrupted = true;
436 }
437
438 /* Check the SEQUENCE operation status */
439 switch (res->sr_status) {
440 case 0:
441 /* Update the slot's sequence and clientid lease timer */
442 ++slot->seq_nr;
443 clp = session->clp;
444 do_renew_lease(clp, res->sr_timestamp);
445 /* Check sequence flags */
446 if (res->sr_status_flags != 0)
447 nfs4_schedule_lease_recovery(clp);
448 nfs41_update_target_slotid(slot->table, slot, res);
449 break;
450 case 1:
451 /*
452 * sr_status remains 1 if an RPC level error occurred.
453 * The server may or may not have processed the sequence
454 * operation.
455 * Mark the slot as having hosted an interrupted RPC call.
456 */
457 slot->interrupted = 1;
458 goto out;
459 case -NFS4ERR_DELAY:
460 /* The server detected a resend of the RPC call and
461 * returned NFS4ERR_DELAY as per Section 2.10.6.2
462 * of RFC5661.
463 */
464 dprintk("%s: slot=%u seq=%u: Operation in progress\n",
465 __func__,
466 slot->slot_nr,
467 slot->seq_nr);
468 goto out_retry;
469 case -NFS4ERR_BADSLOT:
470 /*
471 * The slot id we used was probably retired. Try again
472 * using a different slot id.
473 */
474 goto retry_nowait;
475 case -NFS4ERR_SEQ_MISORDERED:
476 /*
477 * Was the last operation on this sequence interrupted?
478 * If so, retry after bumping the sequence number.
479 */
480 if (interrupted) {
481 ++slot->seq_nr;
482 goto retry_nowait;
483 }
484 /*
485 * Could this slot have been previously retired?
486 * If so, then the server may be expecting seq_nr = 1!
487 */
488 if (slot->seq_nr != 1) {
489 slot->seq_nr = 1;
490 goto retry_nowait;
491 }
492 break;
493 case -NFS4ERR_SEQ_FALSE_RETRY:
494 ++slot->seq_nr;
495 goto retry_nowait;
496 default:
497 /* Just update the slot sequence no. */
498 ++slot->seq_nr;
499 }
500 out:
501 /* The session may be reset by one of the error handlers. */
502 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
503 nfs41_sequence_free_slot(res);
504 return ret;
505 retry_nowait:
506 if (rpc_restart_call_prepare(task)) {
507 task->tk_status = 0;
508 ret = 0;
509 }
510 goto out;
511 out_retry:
512 if (!rpc_restart_call(task))
513 goto out;
514 rpc_delay(task, NFS4_POLL_RETRY_MAX);
515 return 0;
516 }
517
518 static int nfs4_sequence_done(struct rpc_task *task,
519 struct nfs4_sequence_res *res)
520 {
521 if (res->sr_slot == NULL)
522 return 1;
523 return nfs41_sequence_done(task, res);
524 }
525
526 static void nfs41_init_sequence(struct nfs4_sequence_args *args,
527 struct nfs4_sequence_res *res, int cache_reply)
528 {
529 args->sa_slot = NULL;
530 args->sa_cache_this = 0;
531 args->sa_privileged = 0;
532 if (cache_reply)
533 args->sa_cache_this = 1;
534 res->sr_slot = NULL;
535 }
536
537 static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
538 {
539 args->sa_privileged = 1;
540 }
541
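/*
 * Attach a forechannel session slot to the request; on success the RPC
 * call is started and 0 is returned.  Returns -EAGAIN, after queueing
 * the task on the slot table waitqueue, if the session is draining
 * (for non-privileged requests) or no slot is currently free.
 */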
542 int nfs41_setup_sequence(struct nfs4_session *session,
543 struct nfs4_sequence_args *args,
544 struct nfs4_sequence_res *res,
545 struct rpc_task *task)
546 {
547 struct nfs4_slot *slot;
548 struct nfs4_slot_table *tbl;
549
550 dprintk("--> %s\n", __func__);
551 /* slot already allocated? */
552 if (res->sr_slot != NULL)
553 goto out_success;
554
555 tbl = &session->fc_slot_table;
556
557 task->tk_timeout = 0;
558
559 spin_lock(&tbl->slot_tbl_lock);
560 if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
561 !args->sa_privileged) {
562 /* The state manager will wait until the slot table is empty */
563 dprintk("%s session is draining\n", __func__);
564 goto out_sleep;
565 }
566
567 slot = nfs4_alloc_slot(tbl);
568 if (IS_ERR(slot)) {
569 /* If out of memory, try again in 1/4 second */
570 if (slot == ERR_PTR(-ENOMEM))
571 task->tk_timeout = HZ >> 2;
572 dprintk("<-- %s: no free slots\n", __func__);
573 goto out_sleep;
574 }
575 spin_unlock(&tbl->slot_tbl_lock);
576
577 args->sa_slot = slot;
578
579 dprintk("<-- %s slotid=%d seqid=%d\n", __func__,
580 slot->slot_nr, slot->seq_nr);
581
582 res->sr_slot = slot;
583 res->sr_timestamp = jiffies;
584 res->sr_status_flags = 0;
585 /*
586 * sr_status is only set in decode_sequence, and so will remain
587 * set to 1 if an rpc level failure occurs.
588 */
589 res->sr_status = 1;
590 out_success:
591 rpc_call_start(task);
592 return 0;
593 out_sleep:
594 /* Privileged tasks are queued with top priority */
595 if (args->sa_privileged)
596 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
597 NULL, RPC_PRIORITY_PRIVILEGED);
598 else
599 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
600 spin_unlock(&tbl->slot_tbl_lock);
601 return -EAGAIN;
602 }
603 EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
604
605 int nfs4_setup_sequence(const struct nfs_server *server,
606 struct nfs4_sequence_args *args,
607 struct nfs4_sequence_res *res,
608 struct rpc_task *task)
609 {
610 struct nfs4_session *session = nfs4_get_session(server);
611 int ret = 0;
612
613 if (session == NULL) {
614 rpc_call_start(task);
615 goto out;
616 }
617
618 dprintk("--> %s clp %p session %p sr_slot %d\n",
619 __func__, session->clp, session, res->sr_slot ?
620 res->sr_slot->slot_nr : -1);
621
622 ret = nfs41_setup_sequence(session, args, res, task);
623 out:
624 dprintk("<-- %s status=%d\n", __func__, ret);
625 return ret;
626 }
627
628 struct nfs41_call_sync_data {
629 const struct nfs_server *seq_server;
630 struct nfs4_sequence_args *seq_args;
631 struct nfs4_sequence_res *seq_res;
632 };
633
634 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
635 {
636 struct nfs41_call_sync_data *data = calldata;
637 struct nfs4_session *session = nfs4_get_session(data->seq_server);
638
639 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
640
641 nfs41_setup_sequence(session, data->seq_args, data->seq_res, task);
642 }
643
644 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
645 {
646 struct nfs41_call_sync_data *data = calldata;
647
648 nfs41_sequence_done(task, data->seq_res);
649 }
650
651 static const struct rpc_call_ops nfs41_call_sync_ops = {
652 .rpc_call_prepare = nfs41_call_sync_prepare,
653 .rpc_call_done = nfs41_call_sync_done,
654 };
655
656 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
657 struct nfs_server *server,
658 struct rpc_message *msg,
659 struct nfs4_sequence_args *args,
660 struct nfs4_sequence_res *res)
661 {
662 int ret;
663 struct rpc_task *task;
664 struct nfs41_call_sync_data data = {
665 .seq_server = server,
666 .seq_args = args,
667 .seq_res = res,
668 };
669 struct rpc_task_setup task_setup = {
670 .rpc_client = clnt,
671 .rpc_message = msg,
672 .callback_ops = &nfs41_call_sync_ops,
673 .callback_data = &data
674 };
675
676 task = rpc_run_task(&task_setup);
677 if (IS_ERR(task))
678 ret = PTR_ERR(task);
679 else {
680 ret = task->tk_status;
681 rpc_put_task(task);
682 }
683 return ret;
684 }
685
686 #else
687 static
688 void nfs41_init_sequence(struct nfs4_sequence_args *args,
689 struct nfs4_sequence_res *res, int cache_reply)
690 {
691 }
692
693 static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
694 {
695 }
696
697
698 static int nfs4_sequence_done(struct rpc_task *task,
699 struct nfs4_sequence_res *res)
700 {
701 return 1;
702 }
703 #endif /* CONFIG_NFS_V4_1 */
704
705 static
706 int _nfs4_call_sync(struct rpc_clnt *clnt,
707 struct nfs_server *server,
708 struct rpc_message *msg,
709 struct nfs4_sequence_args *args,
710 struct nfs4_sequence_res *res)
711 {
712 return rpc_call_sync(clnt, msg, 0);
713 }
714
715 static
716 int nfs4_call_sync(struct rpc_clnt *clnt,
717 struct nfs_server *server,
718 struct rpc_message *msg,
719 struct nfs4_sequence_args *args,
720 struct nfs4_sequence_res *res,
721 int cache_reply)
722 {
723 nfs41_init_sequence(args, res, cache_reply);
724 return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
725 args, res);
726 }
727
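/*
 * Apply the change_info returned by a directory-modifying operation:
 * invalidate the directory's cached attributes and data, and force a
 * lookup revalidation if the change was not atomic or our cached
 * change attribute is out of date.
 */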
728 static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
729 {
730 struct nfs_inode *nfsi = NFS_I(dir);
731
732 spin_lock(&dir->i_lock);
733 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
734 if (!cinfo->atomic || cinfo->before != dir->i_version)
735 nfs_force_lookup_revalidate(dir);
736 dir->i_version = cinfo->after;
737 nfs_fscache_invalidate(dir);
738 spin_unlock(&dir->i_lock);
739 }
740
741 struct nfs4_opendata {
742 struct kref kref;
743 struct nfs_openargs o_arg;
744 struct nfs_openres o_res;
745 struct nfs_open_confirmargs c_arg;
746 struct nfs_open_confirmres c_res;
747 struct nfs4_string owner_name;
748 struct nfs4_string group_name;
749 struct nfs_fattr f_attr;
750 struct dentry *dir;
751 struct dentry *dentry;
752 struct nfs4_state_owner *owner;
753 struct nfs4_state *state;
754 struct iattr attrs;
755 unsigned long timestamp;
756 unsigned int rpc_done : 1;
757 int rpc_status;
758 int cancelled;
759 };
760
761
762 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
763 {
764 p->o_res.f_attr = &p->f_attr;
765 p->o_res.seqid = p->o_arg.seqid;
766 p->c_res.seqid = p->c_arg.seqid;
767 p->o_res.server = p->o_arg.server;
768 p->o_res.access_request = p->o_arg.access;
769 nfs_fattr_init(&p->f_attr);
770 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
771 }
772
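/*
 * Allocate the nfs4_opendata that carries the arguments and results of
 * an OPEN (and, if required, OPEN_CONFIRM) call.  Takes references to
 * the dentry, its parent directory and the state owner; if attributes
 * are supplied, they are copied and an exclusive-create verifier is
 * generated.
 */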
773 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
774 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
775 const struct iattr *attrs,
776 gfp_t gfp_mask)
777 {
778 struct dentry *parent = dget_parent(dentry);
779 struct inode *dir = parent->d_inode;
780 struct nfs_server *server = NFS_SERVER(dir);
781 struct nfs4_opendata *p;
782
783 p = kzalloc(sizeof(*p), gfp_mask);
784 if (p == NULL)
785 goto err;
786 p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
787 if (p->o_arg.seqid == NULL)
788 goto err_free;
789 nfs_sb_active(dentry->d_sb);
790 p->dentry = dget(dentry);
791 p->dir = parent;
792 p->owner = sp;
793 atomic_inc(&sp->so_count);
794 p->o_arg.fh = NFS_FH(dir);
795 p->o_arg.open_flags = flags;
796 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
797 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
798 * will return permission denied for all bits until close */
799 if (!(flags & O_EXCL)) {
800 /* ask server to check for all possible rights as results
801 * are cached */
802 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
803 NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
804 }
805 p->o_arg.clientid = server->nfs_client->cl_clientid;
806 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
807 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
808 p->o_arg.name = &dentry->d_name;
809 p->o_arg.server = server;
810 p->o_arg.bitmask = server->attr_bitmask;
811 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
812 p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
813 if (attrs != NULL && attrs->ia_valid != 0) {
814 __be32 verf[2];
815
816 p->o_arg.u.attrs = &p->attrs;
817 memcpy(&p->attrs, attrs, sizeof(p->attrs));
818
819 verf[0] = jiffies;
820 verf[1] = current->pid;
821 memcpy(p->o_arg.u.verifier.data, verf,
822 sizeof(p->o_arg.u.verifier.data));
823 }
824 p->c_arg.fh = &p->o_res.fh;
825 p->c_arg.stateid = &p->o_res.stateid;
826 p->c_arg.seqid = p->o_arg.seqid;
827 nfs4_init_opendata_res(p);
828 kref_init(&p->kref);
829 return p;
830 err_free:
831 kfree(p);
832 err:
833 dput(parent);
834 return NULL;
835 }
836
837 static void nfs4_opendata_free(struct kref *kref)
838 {
839 struct nfs4_opendata *p = container_of(kref,
840 struct nfs4_opendata, kref);
841 struct super_block *sb = p->dentry->d_sb;
842
843 nfs_free_seqid(p->o_arg.seqid);
844 if (p->state != NULL)
845 nfs4_put_open_state(p->state);
846 nfs4_put_state_owner(p->owner);
847 dput(p->dir);
848 dput(p->dentry);
849 nfs_sb_deactive(sb);
850 nfs_fattr_free_names(&p->f_attr);
851 kfree(p);
852 }
853
854 static void nfs4_opendata_put(struct nfs4_opendata *p)
855 {
856 if (p != NULL)
857 kref_put(&p->kref, nfs4_opendata_free);
858 }
859
860 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
861 {
862 int ret;
863
864 ret = rpc_wait_for_completion_task(task);
865 return ret;
866 }
867
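/*
 * Check whether an existing open of this state already covers the
 * requested open mode, so that no new OPEN call is needed.  Never true
 * for O_EXCL or O_TRUNC opens.
 */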
868 static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
869 {
870 int ret = 0;
871
872 if (open_mode & (O_EXCL|O_TRUNC))
873 goto out;
874 switch (mode & (FMODE_READ|FMODE_WRITE)) {
875 case FMODE_READ:
876 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
877 && state->n_rdonly != 0;
878 break;
879 case FMODE_WRITE:
880 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
881 && state->n_wronly != 0;
882 break;
883 case FMODE_READ|FMODE_WRITE:
884 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
885 && state->n_rdwr != 0;
886 }
887 out:
888 return ret;
889 }
890
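/*
 * Check whether the delegation covers the requested open mode and is
 * neither being reclaimed nor returned.  Marks the delegation as
 * referenced if it can be used.
 */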
891 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
892 {
893 if (delegation == NULL)
894 return 0;
895 if ((delegation->type & fmode) != fmode)
896 return 0;
897 if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
898 return 0;
899 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
900 return 0;
901 nfs_mark_delegation_referenced(delegation);
902 return 1;
903 }
904
905 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
906 {
907 switch (fmode) {
908 case FMODE_WRITE:
909 state->n_wronly++;
910 break;
911 case FMODE_READ:
912 state->n_rdonly++;
913 break;
914 case FMODE_READ|FMODE_WRITE:
915 state->n_rdwr++;
916 }
917 nfs4_state_set_mode_locked(state, state->state | fmode);
918 }
919
920 static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
921 {
922 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
923 nfs4_stateid_copy(&state->stateid, stateid);
924 nfs4_stateid_copy(&state->open_stateid, stateid);
925 switch (fmode) {
926 case FMODE_READ:
927 set_bit(NFS_O_RDONLY_STATE, &state->flags);
928 break;
929 case FMODE_WRITE:
930 set_bit(NFS_O_WRONLY_STATE, &state->flags);
931 break;
932 case FMODE_READ|FMODE_WRITE:
933 set_bit(NFS_O_RDWR_STATE, &state->flags);
934 }
935 }
936
937 static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
938 {
939 write_seqlock(&state->seqlock);
940 nfs_set_open_stateid_locked(state, stateid, fmode);
941 write_sequnlock(&state->seqlock);
942 }
943
944 static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
945 {
946 /*
947 * Protect the call to nfs4_state_set_mode_locked and
948 * serialise the stateid update
949 */
950 write_seqlock(&state->seqlock);
951 if (deleg_stateid != NULL) {
952 nfs4_stateid_copy(&state->stateid, deleg_stateid);
953 set_bit(NFS_DELEGATED_STATE, &state->flags);
954 }
955 if (open_stateid != NULL)
956 nfs_set_open_stateid_locked(state, open_stateid, fmode);
957 write_sequnlock(&state->seqlock);
958 spin_lock(&state->owner->so_lock);
959 update_open_stateflags(state, fmode);
960 spin_unlock(&state->owner->so_lock);
961 }
962
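/*
 * Record the new open stateid in @state.  If a matching delegation is
 * held for the open mode, the delegation stateid is recorded as well;
 * otherwise only the open stateid is used.  Returns 1 if the state was
 * updated, 0 otherwise.
 */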
963 static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
964 {
965 struct nfs_inode *nfsi = NFS_I(state->inode);
966 struct nfs_delegation *deleg_cur;
967 int ret = 0;
968
969 fmode &= (FMODE_READ|FMODE_WRITE);
970
971 rcu_read_lock();
972 deleg_cur = rcu_dereference(nfsi->delegation);
973 if (deleg_cur == NULL)
974 goto no_delegation;
975
976 spin_lock(&deleg_cur->lock);
977 if (nfsi->delegation != deleg_cur ||
978 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
979 (deleg_cur->type & fmode) != fmode)
980 goto no_delegation_unlock;
981
982 if (delegation == NULL)
983 delegation = &deleg_cur->stateid;
984 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
985 goto no_delegation_unlock;
986
987 nfs_mark_delegation_referenced(deleg_cur);
988 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
989 ret = 1;
990 no_delegation_unlock:
991 spin_unlock(&deleg_cur->lock);
992 no_delegation:
993 rcu_read_unlock();
994
995 if (!ret && open_stateid != NULL) {
996 __update_open_stateid(state, open_stateid, NULL, fmode);
997 ret = 1;
998 }
999
1000 return ret;
1001 }
1002
1003
1004 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1005 {
1006 struct nfs_delegation *delegation;
1007
1008 rcu_read_lock();
1009 delegation = rcu_dereference(NFS_I(inode)->delegation);
1010 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1011 rcu_read_unlock();
1012 return;
1013 }
1014 rcu_read_unlock();
1015 nfs4_inode_return_delegation(inode);
1016 }
1017
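/*
 * Try to satisfy an open without sending an OPEN call: either reuse an
 * existing open of a compatible mode, or claim the open against a
 * delegation held on the inode.  Returns a referenced nfs4_state on
 * success, or an ERR_PTR (-EAGAIN means a real OPEN is required).
 */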
1018 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1019 {
1020 struct nfs4_state *state = opendata->state;
1021 struct nfs_inode *nfsi = NFS_I(state->inode);
1022 struct nfs_delegation *delegation;
1023 int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC);
1024 fmode_t fmode = opendata->o_arg.fmode;
1025 nfs4_stateid stateid;
1026 int ret = -EAGAIN;
1027
1028 for (;;) {
1029 if (can_open_cached(state, fmode, open_mode)) {
1030 spin_lock(&state->owner->so_lock);
1031 if (can_open_cached(state, fmode, open_mode)) {
1032 update_open_stateflags(state, fmode);
1033 spin_unlock(&state->owner->so_lock);
1034 goto out_return_state;
1035 }
1036 spin_unlock(&state->owner->so_lock);
1037 }
1038 rcu_read_lock();
1039 delegation = rcu_dereference(nfsi->delegation);
1040 if (!can_open_delegated(delegation, fmode)) {
1041 rcu_read_unlock();
1042 break;
1043 }
1044 /* Save the delegation */
1045 nfs4_stateid_copy(&stateid, &delegation->stateid);
1046 rcu_read_unlock();
1047 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1048 if (ret != 0)
1049 goto out;
1050 ret = -EAGAIN;
1051
1052 /* Try to update the stateid using the delegation */
1053 if (update_open_stateid(state, NULL, &stateid, fmode))
1054 goto out_return_state;
1055 }
1056 out:
1057 return ERR_PTR(ret);
1058 out_return_state:
1059 atomic_inc(&state->count);
1060 return state;
1061 }
1062
1063 static void
1064 nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1065 {
1066 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
1067 struct nfs_delegation *delegation;
1068 int delegation_flags = 0;
1069
1070 rcu_read_lock();
1071 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1072 if (delegation)
1073 delegation_flags = delegation->flags;
1074 rcu_read_unlock();
1075 if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
1076 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1077 "returning a delegation for "
1078 "OPEN(CLAIM_DELEGATE_CUR)\n",
1079 clp->cl_hostname);
1080 } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1081 nfs_inode_set_delegation(state->inode,
1082 data->owner->so_cred,
1083 &data->o_res);
1084 else
1085 nfs_inode_reclaim_delegation(state->inode,
1086 data->owner->so_cred,
1087 &data->o_res);
1088 }
1089
1090 /*
1091 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
1092 * and update the nfs4_state.
1093 */
1094 static struct nfs4_state *
1095 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
1096 {
1097 struct inode *inode = data->state->inode;
1098 struct nfs4_state *state = data->state;
1099 int ret;
1100
1101 if (!data->rpc_done) {
1102 ret = data->rpc_status;
1103 goto err;
1104 }
1105
1106 ret = -ESTALE;
1107 if (!(data->f_attr.valid & NFS_ATTR_FATTR_TYPE) ||
1108 !(data->f_attr.valid & NFS_ATTR_FATTR_FILEID) ||
1109 !(data->f_attr.valid & NFS_ATTR_FATTR_CHANGE))
1110 goto err;
1111
1112 ret = -ENOMEM;
1113 state = nfs4_get_open_state(inode, data->owner);
1114 if (state == NULL)
1115 goto err;
1116
1117 ret = nfs_refresh_inode(inode, &data->f_attr);
1118 if (ret)
1119 goto err;
1120
1121 if (data->o_res.delegation_type != 0)
1122 nfs4_opendata_check_deleg(data, state);
1123 update_open_stateid(state, &data->o_res.stateid, NULL,
1124 data->o_arg.fmode);
1125
1126 return state;
1127 err:
1128 return ERR_PTR(ret);
1129
1130 }
1131
1132 static struct nfs4_state *
1133 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1134 {
1135 struct inode *inode;
1136 struct nfs4_state *state = NULL;
1137 int ret;
1138
1139 if (!data->rpc_done) {
1140 state = nfs4_try_open_cached(data);
1141 goto out;
1142 }
1143
1144 ret = -EAGAIN;
1145 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1146 goto err;
1147 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr);
1148 ret = PTR_ERR(inode);
1149 if (IS_ERR(inode))
1150 goto err;
1151 ret = -ENOMEM;
1152 state = nfs4_get_open_state(inode, data->owner);
1153 if (state == NULL)
1154 goto err_put_inode;
1155 if (data->o_res.delegation_type != 0)
1156 nfs4_opendata_check_deleg(data, state);
1157 update_open_stateid(state, &data->o_res.stateid, NULL,
1158 data->o_arg.fmode);
1159 iput(inode);
1160 out:
1161 nfs_release_seqid(data->o_arg.seqid);
1162 return state;
1163 err_put_inode:
1164 iput(inode);
1165 err:
1166 return ERR_PTR(ret);
1167 }
1168
1169 static struct nfs4_state *
1170 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1171 {
1172 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
1173 return _nfs4_opendata_reclaim_to_nfs4_state(data);
1174 return _nfs4_opendata_to_nfs4_state(data);
1175 }
1176
1177 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1178 {
1179 struct nfs_inode *nfsi = NFS_I(state->inode);
1180 struct nfs_open_context *ctx;
1181
1182 spin_lock(&state->inode->i_lock);
1183 list_for_each_entry(ctx, &nfsi->open_files, list) {
1184 if (ctx->state != state)
1185 continue;
1186 get_nfs_open_context(ctx);
1187 spin_unlock(&state->inode->i_lock);
1188 return ctx;
1189 }
1190 spin_unlock(&state->inode->i_lock);
1191 return ERR_PTR(-ENOENT);
1192 }
1193
1194 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state)
1195 {
1196 struct nfs4_opendata *opendata;
1197
1198 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS);
1199 if (opendata == NULL)
1200 return ERR_PTR(-ENOMEM);
1201 opendata->state = state;
1202 atomic_inc(&state->count);
1203 return opendata;
1204 }
1205
1206 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
1207 {
1208 struct nfs4_state *newstate;
1209 int ret;
1210
1211 opendata->o_arg.open_flags = 0;
1212 opendata->o_arg.fmode = fmode;
1213 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1214 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1215 nfs4_init_opendata_res(opendata);
1216 ret = _nfs4_recover_proc_open(opendata);
1217 if (ret != 0)
1218 return ret;
1219 newstate = nfs4_opendata_to_nfs4_state(opendata);
1220 if (IS_ERR(newstate))
1221 return PTR_ERR(newstate);
1222 nfs4_close_state(newstate, fmode);
1223 *res = newstate;
1224 return 0;
1225 }
1226
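/*
 * Recover every open mode (read, write and read/write) that is
 * currently in use on @state by replaying OPEN calls, then
 * resynchronise the stateid.  Returns -ESTALE if the server hands back
 * a different state.
 */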
1227 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1228 {
1229 struct nfs4_state *newstate;
1230 int ret;
1231
1232 /* memory barrier prior to reading state->n_* */
1233 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1234 smp_rmb();
1235 if (state->n_rdwr != 0) {
1236 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1237 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
1238 if (ret != 0)
1239 return ret;
1240 if (newstate != state)
1241 return -ESTALE;
1242 }
1243 if (state->n_wronly != 0) {
1244 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1245 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
1246 if (ret != 0)
1247 return ret;
1248 if (newstate != state)
1249 return -ESTALE;
1250 }
1251 if (state->n_rdonly != 0) {
1252 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1253 ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
1254 if (ret != 0)
1255 return ret;
1256 if (newstate != state)
1257 return -ESTALE;
1258 }
1259 /*
1260 * We may have performed cached opens for all three recoveries.
1261 * Check if we need to update the current stateid.
1262 */
1263 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1264 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
1265 write_seqlock(&state->seqlock);
1266 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1267 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1268 write_sequnlock(&state->seqlock);
1269 }
1270 return 0;
1271 }
1272
1273 /*
1274 * OPEN_RECLAIM:
1275 * reclaim state on the server after a reboot.
1276 */
1277 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1278 {
1279 struct nfs_delegation *delegation;
1280 struct nfs4_opendata *opendata;
1281 fmode_t delegation_type = 0;
1282 int status;
1283
1284 opendata = nfs4_open_recoverdata_alloc(ctx, state);
1285 if (IS_ERR(opendata))
1286 return PTR_ERR(opendata);
1287 opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS;
1288 opendata->o_arg.fh = NFS_FH(state->inode);
1289 rcu_read_lock();
1290 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1291 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1292 delegation_type = delegation->type;
1293 rcu_read_unlock();
1294 opendata->o_arg.u.delegation_type = delegation_type;
1295 status = nfs4_open_recover(opendata, state);
1296 nfs4_opendata_put(opendata);
1297 return status;
1298 }
1299
1300 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1301 {
1302 struct nfs_server *server = NFS_SERVER(state->inode);
1303 struct nfs4_exception exception = { };
1304 int err;
1305 do {
1306 err = _nfs4_do_open_reclaim(ctx, state);
1307 if (err != -NFS4ERR_DELAY)
1308 break;
1309 nfs4_handle_exception(server, err, &exception);
1310 } while (exception.retry);
1311 return err;
1312 }
1313
1314 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
1315 {
1316 struct nfs_open_context *ctx;
1317 int ret;
1318
1319 ctx = nfs4_state_find_open_context(state);
1320 if (IS_ERR(ctx))
1321 return PTR_ERR(ctx);
1322 ret = nfs4_do_open_reclaim(ctx, state);
1323 put_nfs_open_context(ctx);
1324 return ret;
1325 }
1326
1327 static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1328 {
1329 struct nfs4_opendata *opendata;
1330 int ret;
1331
1332 opendata = nfs4_open_recoverdata_alloc(ctx, state);
1333 if (IS_ERR(opendata))
1334 return PTR_ERR(opendata);
1335 opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR;
1336 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
1337 ret = nfs4_open_recover(opendata, state);
1338 nfs4_opendata_put(opendata);
1339 return ret;
1340 }
1341
1342 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1343 {
1344 struct nfs4_exception exception = { };
1345 struct nfs_server *server = NFS_SERVER(state->inode);
1346 int err;
1347 do {
1348 err = _nfs4_open_delegation_recall(ctx, state, stateid);
1349 switch (err) {
1350 case 0:
1351 case -ENOENT:
1352 case -ESTALE:
1353 goto out;
1354 case -NFS4ERR_BADSESSION:
1355 case -NFS4ERR_BADSLOT:
1356 case -NFS4ERR_BAD_HIGH_SLOT:
1357 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1358 case -NFS4ERR_DEADSESSION:
1359 set_bit(NFS_DELEGATED_STATE, &state->flags);
1360 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
1361 err = -EAGAIN;
1362 goto out;
1363 case -NFS4ERR_STALE_CLIENTID:
1364 case -NFS4ERR_STALE_STATEID:
1365 set_bit(NFS_DELEGATED_STATE, &state->flags);
1366 case -NFS4ERR_EXPIRED:
1367 /* Don't recall a delegation if it was lost */
1368 nfs4_schedule_lease_recovery(server->nfs_client);
1369 err = -EAGAIN;
1370 goto out;
1371 case -NFS4ERR_DELEG_REVOKED:
1372 case -NFS4ERR_ADMIN_REVOKED:
1373 case -NFS4ERR_BAD_STATEID:
1374 nfs_inode_find_state_and_recover(state->inode,
1375 stateid);
1376 nfs4_schedule_stateid_recovery(server, state);
1377 case -ENOMEM:
1378 err = 0;
1379 goto out;
1380 }
1381 set_bit(NFS_DELEGATED_STATE, &state->flags);
1382 err = nfs4_handle_exception(server, err, &exception);
1383 } while (exception.retry);
1384 out:
1385 return err;
1386 }
1387
1388 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1389 {
1390 struct nfs4_opendata *data = calldata;
1391
1392 data->rpc_status = task->tk_status;
1393 if (data->rpc_status == 0) {
1394 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
1395 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1396 renew_lease(data->o_res.server, data->timestamp);
1397 data->rpc_done = 1;
1398 }
1399 }
1400
1401 static void nfs4_open_confirm_release(void *calldata)
1402 {
1403 struct nfs4_opendata *data = calldata;
1404 struct nfs4_state *state = NULL;
1405
1406 /* If this request hasn't been cancelled, do nothing */
1407 if (data->cancelled == 0)
1408 goto out_free;
1409 /* In case of error, no cleanup! */
1410 if (!data->rpc_done)
1411 goto out_free;
1412 state = nfs4_opendata_to_nfs4_state(data);
1413 if (!IS_ERR(state))
1414 nfs4_close_state(state, data->o_arg.fmode);
1415 out_free:
1416 nfs4_opendata_put(data);
1417 }
1418
1419 static const struct rpc_call_ops nfs4_open_confirm_ops = {
1420 .rpc_call_done = nfs4_open_confirm_done,
1421 .rpc_release = nfs4_open_confirm_release,
1422 };
1423
1424 /*
1425 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
1426 */
1427 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
1428 {
1429 struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
1430 struct rpc_task *task;
1431 struct rpc_message msg = {
1432 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
1433 .rpc_argp = &data->c_arg,
1434 .rpc_resp = &data->c_res,
1435 .rpc_cred = data->owner->so_cred,
1436 };
1437 struct rpc_task_setup task_setup_data = {
1438 .rpc_client = server->client,
1439 .rpc_message = &msg,
1440 .callback_ops = &nfs4_open_confirm_ops,
1441 .callback_data = data,
1442 .workqueue = nfsiod_workqueue,
1443 .flags = RPC_TASK_ASYNC,
1444 };
1445 int status;
1446
1447 kref_get(&data->kref);
1448 data->rpc_done = 0;
1449 data->rpc_status = 0;
1450 data->timestamp = jiffies;
1451 task = rpc_run_task(&task_setup_data);
1452 if (IS_ERR(task))
1453 return PTR_ERR(task);
1454 status = nfs4_wait_for_completion_rpc_task(task);
1455 if (status != 0) {
1456 data->cancelled = 1;
1457 smp_wmb();
1458 } else
1459 status = data->rpc_status;
1460 rpc_put_task(task);
1461 return status;
1462 }
1463
1464 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1465 {
1466 struct nfs4_opendata *data = calldata;
1467 struct nfs4_state_owner *sp = data->owner;
1468
1469 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
1470 goto out_wait;
1471 /*
1472 * Check if we still need to send an OPEN call, or if we can use
1473 * a delegation instead.
1474 */
1475 if (data->state != NULL) {
1476 struct nfs_delegation *delegation;
1477
1478 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
1479 goto out_no_action;
1480 rcu_read_lock();
1481 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
1482 if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
1483 can_open_delegated(delegation, data->o_arg.fmode))
1484 goto unlock_no_action;
1485 rcu_read_unlock();
1486 }
1487 /* Update client id. */
1488 data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
1489 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
1490 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
1491 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
1492 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
1493 }
1494 data->timestamp = jiffies;
1495 if (nfs4_setup_sequence(data->o_arg.server,
1496 &data->o_arg.seq_args,
1497 &data->o_res.seq_res,
1498 task) != 0)
1499 nfs_release_seqid(data->o_arg.seqid);
1500 return;
1501 unlock_no_action:
1502 rcu_read_unlock();
1503 out_no_action:
1504 task->tk_action = NULL;
1505 out_wait:
1506 nfs4_sequence_done(task, &data->o_res.seq_res);
1507 }
1508
1509 static void nfs4_open_done(struct rpc_task *task, void *calldata)
1510 {
1511 struct nfs4_opendata *data = calldata;
1512
1513 data->rpc_status = task->tk_status;
1514
1515 if (!nfs4_sequence_done(task, &data->o_res.seq_res))
1516 return;
1517
1518 if (task->tk_status == 0) {
1519 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
1520 switch (data->o_res.f_attr->mode & S_IFMT) {
1521 case S_IFREG:
1522 break;
1523 case S_IFLNK:
1524 data->rpc_status = -ELOOP;
1525 break;
1526 case S_IFDIR:
1527 data->rpc_status = -EISDIR;
1528 break;
1529 default:
1530 data->rpc_status = -ENOTDIR;
1531 }
1532 }
1533 renew_lease(data->o_res.server, data->timestamp);
1534 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
1535 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1536 }
1537 data->rpc_done = 1;
1538 }
1539
1540 static void nfs4_open_release(void *calldata)
1541 {
1542 struct nfs4_opendata *data = calldata;
1543 struct nfs4_state *state = NULL;
1544
1545 /* If this request hasn't been cancelled, do nothing */
1546 if (data->cancelled == 0)
1547 goto out_free;
1548 /* In case of error, no cleanup! */
1549 if (data->rpc_status != 0 || !data->rpc_done)
1550 goto out_free;
1551 /* In case we need an open_confirm, no cleanup! */
1552 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
1553 goto out_free;
1554 state = nfs4_opendata_to_nfs4_state(data);
1555 if (!IS_ERR(state))
1556 nfs4_close_state(state, data->o_arg.fmode);
1557 out_free:
1558 nfs4_opendata_put(data);
1559 }
1560
1561 static const struct rpc_call_ops nfs4_open_ops = {
1562 .rpc_call_prepare = nfs4_open_prepare,
1563 .rpc_call_done = nfs4_open_done,
1564 .rpc_release = nfs4_open_release,
1565 };
1566
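/*
 * Send the OPEN compound asynchronously and wait for completion.
 * Recovery opens are marked privileged so that they may proceed while
 * the session is draining.  If the wait is interrupted, the request is
 * flagged as cancelled and the rpc_release callback cleans up any
 * state that the server may still have established.
 */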
1567 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
1568 {
1569 struct inode *dir = data->dir->d_inode;
1570 struct nfs_server *server = NFS_SERVER(dir);
1571 struct nfs_openargs *o_arg = &data->o_arg;
1572 struct nfs_openres *o_res = &data->o_res;
1573 struct rpc_task *task;
1574 struct rpc_message msg = {
1575 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
1576 .rpc_argp = o_arg,
1577 .rpc_resp = o_res,
1578 .rpc_cred = data->owner->so_cred,
1579 };
1580 struct rpc_task_setup task_setup_data = {
1581 .rpc_client = server->client,
1582 .rpc_message = &msg,
1583 .callback_ops = &nfs4_open_ops,
1584 .callback_data = data,
1585 .workqueue = nfsiod_workqueue,
1586 .flags = RPC_TASK_ASYNC,
1587 };
1588 int status;
1589
1590 nfs41_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
1591 kref_get(&data->kref);
1592 data->rpc_done = 0;
1593 data->rpc_status = 0;
1594 data->cancelled = 0;
1595 if (isrecover)
1596 nfs4_set_sequence_privileged(&o_arg->seq_args);
1597 task = rpc_run_task(&task_setup_data);
1598 if (IS_ERR(task))
1599 return PTR_ERR(task);
1600 status = nfs4_wait_for_completion_rpc_task(task);
1601 if (status != 0) {
1602 data->cancelled = 1;
1603 smp_wmb();
1604 } else
1605 status = data->rpc_status;
1606 rpc_put_task(task);
1607
1608 return status;
1609 }
1610
1611 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
1612 {
1613 struct inode *dir = data->dir->d_inode;
1614 struct nfs_openres *o_res = &data->o_res;
1615 int status;
1616
1617 status = nfs4_run_open_task(data, 1);
1618 if (status != 0 || !data->rpc_done)
1619 return status;
1620
1621 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
1622
1623 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1624 status = _nfs4_proc_open_confirm(data);
1625 if (status != 0)
1626 return status;
1627 }
1628
1629 return status;
1630 }
1631
1632 static int nfs4_opendata_access(struct rpc_cred *cred,
1633 struct nfs4_opendata *opendata,
1634 struct nfs4_state *state, fmode_t fmode,
1635 int openflags)
1636 {
1637 struct nfs_access_entry cache;
1638 u32 mask;
1639
1640 /* access call failed or for some reason the server doesn't
1641 * support any access modes -- defer access call until later */
1642 if (opendata->o_res.access_supported == 0)
1643 return 0;
1644
1645 mask = 0;
1646 /* don't check MAY_WRITE - a newly created file may not have
1647 * write mode bits, but POSIX allows the creating process to write.
1648 * Use openflags to check for exec, because fmode won't
1649 * always have FMODE_EXEC set when the file is opened for exec. */
1650 if (openflags & __FMODE_EXEC) {
1651 /* ONLY check for exec rights */
1652 mask = MAY_EXEC;
1653 } else if (fmode & FMODE_READ)
1654 mask = MAY_READ;
1655
1656 cache.cred = cred;
1657 cache.jiffies = jiffies;
1658 nfs_access_set_mask(&cache, opendata->o_res.access_result);
1659 nfs_access_add_cache(state->inode, &cache);
1660
1661 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
1662 return 0;
1663
1664 /* even though OPEN succeeded, access is denied. Close the file */
1665 nfs4_close_state(state, fmode);
1666 return -EACCES;
1667 }
1668
1669 /*
1670 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
1671 */
1672 static int _nfs4_proc_open(struct nfs4_opendata *data)
1673 {
1674 struct inode *dir = data->dir->d_inode;
1675 struct nfs_server *server = NFS_SERVER(dir);
1676 struct nfs_openargs *o_arg = &data->o_arg;
1677 struct nfs_openres *o_res = &data->o_res;
1678 int status;
1679
1680 status = nfs4_run_open_task(data, 0);
1681 if (!data->rpc_done)
1682 return status;
1683 if (status != 0) {
1684 if (status == -NFS4ERR_BADNAME &&
1685 !(o_arg->open_flags & O_CREAT))
1686 return -ENOENT;
1687 return status;
1688 }
1689
1690 nfs_fattr_map_and_free_names(server, &data->f_attr);
1691
1692 if (o_arg->open_flags & O_CREAT)
1693 update_changeattr(dir, &o_res->cinfo);
1694 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
1695 server->caps &= ~NFS_CAP_POSIX_LOCK;
1696 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1697 status = _nfs4_proc_open_confirm(data);
1698 if (status != 0)
1699 return status;
1700 }
1701 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
1702 _nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr);
1703 return 0;
1704 }
1705
1706 static int nfs4_recover_expired_lease(struct nfs_server *server)
1707 {
1708 return nfs4_client_recover_expired_lease(server->nfs_client);
1709 }
1710
1711 /*
1712 * OPEN_EXPIRED:
1713 * reclaim state on the server after a network partition.
1714 * Assumes caller holds the appropriate lock
1715 */
1716 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1717 {
1718 struct nfs4_opendata *opendata;
1719 int ret;
1720
1721 opendata = nfs4_open_recoverdata_alloc(ctx, state);
1722 if (IS_ERR(opendata))
1723 return PTR_ERR(opendata);
1724 ret = nfs4_open_recover(opendata, state);
1725 if (ret == -ESTALE)
1726 d_drop(ctx->dentry);
1727 nfs4_opendata_put(opendata);
1728 return ret;
1729 }
1730
1731 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1732 {
1733 struct nfs_server *server = NFS_SERVER(state->inode);
1734 struct nfs4_exception exception = { };
1735 int err;
1736
1737 do {
1738 err = _nfs4_open_expired(ctx, state);
1739 switch (err) {
1740 default:
1741 goto out;
1742 case -NFS4ERR_GRACE:
1743 case -NFS4ERR_DELAY:
1744 nfs4_handle_exception(server, err, &exception);
1745 err = 0;
1746 }
1747 } while (exception.retry);
1748 out:
1749 return err;
1750 }
1751
1752 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
1753 {
1754 struct nfs_open_context *ctx;
1755 int ret;
1756
1757 ctx = nfs4_state_find_open_context(state);
1758 if (IS_ERR(ctx))
1759 return PTR_ERR(ctx);
1760 ret = nfs4_do_open_expired(ctx, state);
1761 put_nfs_open_context(ctx);
1762 return ret;
1763 }
1764
1765 #if defined(CONFIG_NFS_V4_1)
1766 static void nfs41_clear_delegation_stateid(struct nfs4_state *state)
1767 {
1768 struct nfs_server *server = NFS_SERVER(state->inode);
1769 nfs4_stateid *stateid = &state->stateid;
1770 int status;
1771
1772 /* If a state reset has been done, test_stateid is unneeded */
1773 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1774 return;
1775
1776 status = nfs41_test_stateid(server, stateid);
1777 if (status != NFS_OK) {
1778 /* Free the stateid unless the server explicitly
1779 * informs us the stateid is unrecognized. */
1780 if (status != -NFS4ERR_BAD_STATEID)
1781 nfs41_free_stateid(server, stateid);
1782 nfs_remove_bad_delegation(state->inode);
1783
1784 write_seqlock(&state->seqlock);
1785 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1786 write_sequnlock(&state->seqlock);
1787 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1788 }
1789 }
1790
1791 /**
1792 * nfs41_check_open_stateid - possibly free an open stateid
1793 *
1794 * @state: NFSv4 state for an inode
1795 *
1796 * Returns NFS_OK if recovery for this stateid is now finished.
1797 * Otherwise a negative NFS4ERR value is returned.
1798 */
1799 static int nfs41_check_open_stateid(struct nfs4_state *state)
1800 {
1801 struct nfs_server *server = NFS_SERVER(state->inode);
1802 nfs4_stateid *stateid = &state->open_stateid;
1803 int status;
1804
1805 /* If a state reset has been done, test_stateid is unneeded */
1806 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) &&
1807 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) &&
1808 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0))
1809 return -NFS4ERR_BAD_STATEID;
1810
1811 status = nfs41_test_stateid(server, stateid);
1812 if (status != NFS_OK) {
1813 /* Free the stateid unless the server explicitly
1814 * informs us the stateid is unrecognized. */
1815 if (status != -NFS4ERR_BAD_STATEID)
1816 nfs41_free_stateid(server, stateid);
1817
1818 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1819 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1820 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1821 }
1822 return status;
1823 }
1824
1825 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
1826 {
1827 int status;
1828
1829 nfs41_clear_delegation_stateid(state);
1830 status = nfs41_check_open_stateid(state);
1831 if (status != NFS_OK)
1832 status = nfs4_open_expired(sp, state);
1833 return status;
1834 }
1835 #endif
1836
1837 /*
1838 * On an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
1839 * fields corresponding to the attributes that were used to store the verifier.
1840 * Make sure we clobber those fields in the later setattr call.
1841 */
1842 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
1843 {
1844 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
1845 !(sattr->ia_valid & ATTR_ATIME_SET))
1846 sattr->ia_valid |= ATTR_ATIME;
1847
1848 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
1849 !(sattr->ia_valid & ATTR_MTIME_SET))
1850 sattr->ia_valid |= ATTR_MTIME;
1851 }
1852
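/*
 * Issue the OPEN call and convert the result into a referenced
 * nfs4_state.  The state owner's reclaim seqcount is sampled before
 * the call so that, if reboot recovery ran concurrently, the freshly
 * opened state is scheduled for recovery before being returned.
 */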
1853 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
1854 fmode_t fmode,
1855 int flags,
1856 struct nfs4_state **res)
1857 {
1858 struct nfs4_state_owner *sp = opendata->owner;
1859 struct nfs_server *server = sp->so_server;
1860 struct nfs4_state *state;
1861 unsigned int seq;
1862 int ret;
1863
1864 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
1865
1866 ret = _nfs4_proc_open(opendata);
1867 if (ret != 0)
1868 goto out;
1869
1870 state = nfs4_opendata_to_nfs4_state(opendata);
1871 ret = PTR_ERR(state);
1872 if (IS_ERR(state))
1873 goto out;
1874 if (server->caps & NFS_CAP_POSIX_LOCK)
1875 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
1876
1877 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags);
1878 if (ret != 0)
1879 goto out;
1880
1881 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) {
1882 nfs4_schedule_stateid_recovery(server, state);
1883 nfs4_wait_clnt_recover(server->nfs_client);
1884 }
1885 *res = state;
1886 out:
1887 return ret;
1888 }
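/*
 * Illustrative sketch (not part of the original file): _nfs4_open_and_get_state()
 * above uses the seqcount read/retry pattern from <linux/seqlock.h> to detect
 * whether a state-reclaim pass ran while the OPEN was in flight.  A minimal,
 * stand-alone version of that pattern - with hypothetical names - is shown
 * below, disabled so it does not affect the build.
 */
#if 0
static bool example_read_is_stable(seqcount_t *sc, const int *shared, int *snapshot)
{
	unsigned int seq;

	/* Snapshot the sequence counter before reading the shared data. */
	seq = raw_seqcount_begin(sc);
	*snapshot = *shared;
	/* True if no writer (e.g. a reclaim pass) ran while we were reading. */
	return !read_seqcount_retry(sc, seq);
}
#endif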
1889
1890 /*
1891 * Returns a referenced nfs4_state
1892 */
1893 static int _nfs4_do_open(struct inode *dir,
1894 struct dentry *dentry,
1895 fmode_t fmode,
1896 int flags,
1897 struct iattr *sattr,
1898 struct rpc_cred *cred,
1899 struct nfs4_state **res,
1900 struct nfs4_threshold **ctx_th)
1901 {
1902 struct nfs4_state_owner *sp;
1903 struct nfs4_state *state = NULL;
1904 struct nfs_server *server = NFS_SERVER(dir);
1905 struct nfs4_opendata *opendata;
1906 int status;
1907
1908 /* Protect against reboot recovery conflicts */
1909 status = -ENOMEM;
1910 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
1911 if (sp == NULL) {
1912 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
1913 goto out_err;
1914 }
1915 status = nfs4_recover_expired_lease(server);
1916 if (status != 0)
1917 goto err_put_state_owner;
1918 if (dentry->d_inode != NULL)
1919 nfs4_return_incompatible_delegation(dentry->d_inode, fmode);
1920 status = -ENOMEM;
1921 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL);
1922 if (opendata == NULL)
1923 goto err_put_state_owner;
1924
1925 if (ctx_th && server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
1926 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
1927 if (!opendata->f_attr.mdsthreshold)
1928 goto err_opendata_put;
1929 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
1930 }
1931 if (dentry->d_inode != NULL)
1932 opendata->state = nfs4_get_open_state(dentry->d_inode, sp);
1933
1934 status = _nfs4_open_and_get_state(opendata, fmode, flags, &state);
1935 if (status != 0)
1936 goto err_opendata_put;
1937
1938 if (opendata->o_arg.open_flags & O_EXCL) {
1939 nfs4_exclusive_attrset(opendata, sattr);
1940
1941 nfs_fattr_init(opendata->o_res.f_attr);
1942 status = nfs4_do_setattr(state->inode, cred,
1943 opendata->o_res.f_attr, sattr,
1944 state);
1945 if (status == 0)
1946 nfs_setattr_update_inode(state->inode, sattr);
1947 nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
1948 }
1949
1950 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server))
1951 *ctx_th = opendata->f_attr.mdsthreshold;
1952 else
1953 kfree(opendata->f_attr.mdsthreshold);
1954 opendata->f_attr.mdsthreshold = NULL;
1955
1956 nfs4_opendata_put(opendata);
1957 nfs4_put_state_owner(sp);
1958 *res = state;
1959 return 0;
1960 err_opendata_put:
1961 kfree(opendata->f_attr.mdsthreshold);
1962 nfs4_opendata_put(opendata);
1963 err_put_state_owner:
1964 nfs4_put_state_owner(sp);
1965 out_err:
1966 *res = NULL;
1967 return status;
1968 }
1969
1970
1971 static struct nfs4_state *nfs4_do_open(struct inode *dir,
1972 struct dentry *dentry,
1973 fmode_t fmode,
1974 int flags,
1975 struct iattr *sattr,
1976 struct rpc_cred *cred,
1977 struct nfs4_threshold **ctx_th)
1978 {
1979 struct nfs4_exception exception = { };
1980 struct nfs4_state *res;
1981 int status;
1982
1983 fmode &= FMODE_READ|FMODE_WRITE|FMODE_EXEC;
1984 do {
1985 status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred,
1986 &res, ctx_th);
1987 if (status == 0)
1988 break;
1989 /* NOTE: BAD_SEQID means the server and client disagree about the
1990 * book-keeping w.r.t. state-changing operations
1991 * (OPEN/CLOSE/LOCK/LOCKU...)
1992 * It is actually a sign of a bug on the client or on the server.
1993 *
1994 * If we receive a BAD_SEQID error in the particular case of
1995 * doing an OPEN, we assume that nfs_increment_open_seqid() will
1996 * have unhashed the old state_owner for us, and that we can
1997 * therefore safely retry using a new one. We should still warn
1998 * the user though...
1999 */
2000 if (status == -NFS4ERR_BAD_SEQID) {
2001 pr_warn_ratelimited("NFS: v4 server %s "
2002 					"returned a bad sequence-id error!\n",
2003 NFS_SERVER(dir)->nfs_client->cl_hostname);
2004 exception.retry = 1;
2005 continue;
2006 }
2007 /*
2008 * BAD_STATEID on OPEN means that the server cancelled our
2009 * state before it received the OPEN_CONFIRM.
2010 * Recover by retrying the request as per the discussion
2011 * on Page 181 of RFC3530.
2012 */
2013 if (status == -NFS4ERR_BAD_STATEID) {
2014 exception.retry = 1;
2015 continue;
2016 }
2017 if (status == -EAGAIN) {
2018 /* We must have found a delegation */
2019 exception.retry = 1;
2020 continue;
2021 }
2022 res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
2023 status, &exception));
2024 } while (exception.retry);
2025 return res;
2026 }
2027
2028 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2029 struct nfs_fattr *fattr, struct iattr *sattr,
2030 struct nfs4_state *state)
2031 {
2032 struct nfs_server *server = NFS_SERVER(inode);
2033 struct nfs_setattrargs arg = {
2034 .fh = NFS_FH(inode),
2035 .iap = sattr,
2036 .server = server,
2037 .bitmask = server->attr_bitmask,
2038 };
2039 struct nfs_setattrres res = {
2040 .fattr = fattr,
2041 .server = server,
2042 };
2043 struct rpc_message msg = {
2044 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
2045 .rpc_argp = &arg,
2046 .rpc_resp = &res,
2047 .rpc_cred = cred,
2048 };
2049 unsigned long timestamp = jiffies;
2050 int status;
2051
2052 nfs_fattr_init(fattr);
2053
2054 if (state != NULL) {
2055 struct nfs_lockowner lockowner = {
2056 .l_owner = current->files,
2057 .l_pid = current->tgid,
2058 };
2059 nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
2060 &lockowner);
2061 } else if (nfs4_copy_delegation_stateid(&arg.stateid, inode,
2062 FMODE_WRITE)) {
2063 /* Use that stateid */
2064 } else
2065 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
2066
2067 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2068 if (status == 0 && state != NULL)
2069 renew_lease(server, timestamp);
2070 return status;
2071 }
2072
2073 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2074 struct nfs_fattr *fattr, struct iattr *sattr,
2075 struct nfs4_state *state)
2076 {
2077 struct nfs_server *server = NFS_SERVER(inode);
2078 struct nfs4_exception exception = {
2079 .state = state,
2080 .inode = inode,
2081 };
2082 int err;
2083 do {
2084 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state);
2085 switch (err) {
2086 case -NFS4ERR_OPENMODE:
2087 if (state && !(state->state & FMODE_WRITE)) {
2088 err = -EBADF;
2089 if (sattr->ia_valid & ATTR_OPEN)
2090 err = -EACCES;
2091 goto out;
2092 }
2093 }
2094 err = nfs4_handle_exception(server, err, &exception);
2095 } while (exception.retry);
2096 out:
2097 return err;
2098 }
2099
2100 struct nfs4_closedata {
2101 struct inode *inode;
2102 struct nfs4_state *state;
2103 struct nfs_closeargs arg;
2104 struct nfs_closeres res;
2105 struct nfs_fattr fattr;
2106 unsigned long timestamp;
2107 bool roc;
2108 u32 roc_barrier;
2109 };
2110
2111 static void nfs4_free_closedata(void *data)
2112 {
2113 struct nfs4_closedata *calldata = data;
2114 struct nfs4_state_owner *sp = calldata->state->owner;
2115 struct super_block *sb = calldata->state->inode->i_sb;
2116
2117 if (calldata->roc)
2118 pnfs_roc_release(calldata->state->inode);
2119 nfs4_put_open_state(calldata->state);
2120 nfs_free_seqid(calldata->arg.seqid);
2121 nfs4_put_state_owner(sp);
2122 nfs_sb_deactive(sb);
2123 kfree(calldata);
2124 }
2125
2126 static void nfs4_close_clear_stateid_flags(struct nfs4_state *state,
2127 fmode_t fmode)
2128 {
2129 spin_lock(&state->owner->so_lock);
2130 if (!(fmode & FMODE_READ))
2131 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2132 if (!(fmode & FMODE_WRITE))
2133 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2134 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2135 spin_unlock(&state->owner->so_lock);
2136 }
2137
2138 static void nfs4_close_done(struct rpc_task *task, void *data)
2139 {
2140 struct nfs4_closedata *calldata = data;
2141 struct nfs4_state *state = calldata->state;
2142 struct nfs_server *server = NFS_SERVER(calldata->inode);
2143
2144 dprintk("%s: begin!\n", __func__);
2145 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
2146 return;
2147 /* hmm. we are done with the inode, and in the process of freeing
2148 * the state_owner. we keep this around to process errors
2149 */
2150 switch (task->tk_status) {
2151 case 0:
2152 if (calldata->roc)
2153 pnfs_roc_set_barrier(state->inode,
2154 calldata->roc_barrier);
2155 nfs_set_open_stateid(state, &calldata->res.stateid, 0);
2156 renew_lease(server, calldata->timestamp);
2157 nfs4_close_clear_stateid_flags(state,
2158 calldata->arg.fmode);
2159 break;
2160 case -NFS4ERR_STALE_STATEID:
2161 case -NFS4ERR_OLD_STATEID:
2162 case -NFS4ERR_BAD_STATEID:
2163 case -NFS4ERR_EXPIRED:
2164 if (calldata->arg.fmode == 0)
2165 break;
2166 default:
2167 if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
2168 rpc_restart_call_prepare(task);
2169 }
2170 nfs_release_seqid(calldata->arg.seqid);
2171 nfs_refresh_inode(calldata->inode, calldata->res.fattr);
2172 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
2173 }
2174
2175 static void nfs4_close_prepare(struct rpc_task *task, void *data)
2176 {
2177 struct nfs4_closedata *calldata = data;
2178 struct nfs4_state *state = calldata->state;
2179 struct inode *inode = calldata->inode;
2180 int call_close = 0;
2181
2182 dprintk("%s: begin!\n", __func__);
2183 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
2184 goto out_wait;
2185
2186 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
2187 calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
2188 spin_lock(&state->owner->so_lock);
2189 /* Calculate the change in open mode */
2190 if (state->n_rdwr == 0) {
2191 if (state->n_rdonly == 0) {
2192 call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
2193 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
2194 calldata->arg.fmode &= ~FMODE_READ;
2195 }
2196 if (state->n_wronly == 0) {
2197 call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
2198 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
2199 calldata->arg.fmode &= ~FMODE_WRITE;
2200 }
2201 }
2202 spin_unlock(&state->owner->so_lock);
2203
2204 if (!call_close) {
2205 /* Note: exit _without_ calling nfs4_close_done */
2206 goto out_no_action;
2207 }
2208
2209 if (calldata->arg.fmode == 0) {
2210 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
2211 if (calldata->roc &&
2212 pnfs_roc_drain(inode, &calldata->roc_barrier, task))
2213 goto out_wait;
2214 }
2215
2216 nfs_fattr_init(calldata->res.fattr);
2217 calldata->timestamp = jiffies;
2218 if (nfs4_setup_sequence(NFS_SERVER(inode),
2219 &calldata->arg.seq_args,
2220 &calldata->res.seq_res,
2221 task) != 0)
2222 nfs_release_seqid(calldata->arg.seqid);
2223 dprintk("%s: done!\n", __func__);
2224 return;
2225 out_no_action:
2226 task->tk_action = NULL;
2227 out_wait:
2228 nfs4_sequence_done(task, &calldata->res.seq_res);
2229 }
2230
2231 static const struct rpc_call_ops nfs4_close_ops = {
2232 .rpc_call_prepare = nfs4_close_prepare,
2233 .rpc_call_done = nfs4_close_done,
2234 .rpc_release = nfs4_free_closedata,
2235 };
2236
2237 /*
2238 * It is possible for data to be read/written from a mem-mapped file
2239 * after the sys_close call (which hits the vfs layer as a flush).
2240 * This means that we can't safely call nfsv4 close on a file until
2241 * the inode is cleared. This in turn means that we are not good
2242  * NFSv4 citizens - we do not tell the server to update the file's
2243  * share state even when we are done with one of the three share
2244  * stateids in the inode.
2245 *
2246 * NOTE: Caller must be holding the sp->so_owner semaphore!
2247 */
2248 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
2249 {
2250 struct nfs_server *server = NFS_SERVER(state->inode);
2251 struct nfs4_closedata *calldata;
2252 struct nfs4_state_owner *sp = state->owner;
2253 struct rpc_task *task;
2254 struct rpc_message msg = {
2255 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
2256 .rpc_cred = state->owner->so_cred,
2257 };
2258 struct rpc_task_setup task_setup_data = {
2259 .rpc_client = server->client,
2260 .rpc_message = &msg,
2261 .callback_ops = &nfs4_close_ops,
2262 .workqueue = nfsiod_workqueue,
2263 .flags = RPC_TASK_ASYNC,
2264 };
2265 int status = -ENOMEM;
2266
2267 calldata = kzalloc(sizeof(*calldata), gfp_mask);
2268 if (calldata == NULL)
2269 goto out;
2270 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
2271 calldata->inode = state->inode;
2272 calldata->state = state;
2273 calldata->arg.fh = NFS_FH(state->inode);
2274 calldata->arg.stateid = &state->open_stateid;
2275 /* Serialization for the sequence id */
2276 calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask);
2277 if (calldata->arg.seqid == NULL)
2278 goto out_free_calldata;
2279 calldata->arg.fmode = 0;
2280 calldata->arg.bitmask = server->cache_consistency_bitmask;
2281 calldata->res.fattr = &calldata->fattr;
2282 calldata->res.seqid = calldata->arg.seqid;
2283 calldata->res.server = server;
2284 calldata->roc = pnfs_roc(state->inode);
2285 nfs_sb_active(calldata->inode->i_sb);
2286
2287 msg.rpc_argp = &calldata->arg;
2288 msg.rpc_resp = &calldata->res;
2289 task_setup_data.callback_data = calldata;
2290 task = rpc_run_task(&task_setup_data);
2291 if (IS_ERR(task))
2292 return PTR_ERR(task);
2293 status = 0;
2294 if (wait)
2295 status = rpc_wait_for_completion_task(task);
2296 rpc_put_task(task);
2297 return status;
2298 out_free_calldata:
2299 kfree(calldata);
2300 out:
2301 nfs4_put_open_state(state);
2302 nfs4_put_state_owner(sp);
2303 return status;
2304 }
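/*
 * Illustrative userspace sketch (not part of this file): it demonstrates the
 * behaviour the block comment above nfs4_do_close() relies on - a shared
 * mapping stays writable after close(fd), so dirty pages can still be written
 * back to the server after the last close().  The path and length are
 * arbitrary examples, and error handling is kept minimal.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/nfs/file", O_RDWR);
	char *map;

	if (fd < 0)
		return 1;
	map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED)
		return 1;
	close(fd);			/* fd is gone, the mapping is still live */
	memcpy(map, "late write", 10);	/* dirties the page after close() */
	msync(map, 4096, MS_SYNC);	/* forces writeback, i.e. NFS WRITEs */
	munmap(map, 4096);
	return 0;
}
#endif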
2305
2306 static struct inode *
2307 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr)
2308 {
2309 struct nfs4_state *state;
2310
2311 /* Protect against concurrent sillydeletes */
2312 state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr,
2313 ctx->cred, &ctx->mdsthreshold);
2314 if (IS_ERR(state))
2315 return ERR_CAST(state);
2316 ctx->state = state;
2317 return igrab(state->inode);
2318 }
2319
2320 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
2321 {
2322 if (ctx->state == NULL)
2323 return;
2324 if (is_sync)
2325 nfs4_close_sync(ctx->state, ctx->mode);
2326 else
2327 nfs4_close_state(ctx->state, ctx->mode);
2328 }
2329
2330 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2331 {
2332 struct nfs4_server_caps_arg args = {
2333 .fhandle = fhandle,
2334 };
2335 struct nfs4_server_caps_res res = {};
2336 struct rpc_message msg = {
2337 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
2338 .rpc_argp = &args,
2339 .rpc_resp = &res,
2340 };
2341 int status;
2342
2343 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2344 if (status == 0) {
2345 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
2346 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
2347 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
2348 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
2349 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
2350 NFS_CAP_CTIME|NFS_CAP_MTIME);
2351 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
2352 server->caps |= NFS_CAP_ACLS;
2353 if (res.has_links != 0)
2354 server->caps |= NFS_CAP_HARDLINKS;
2355 if (res.has_symlinks != 0)
2356 server->caps |= NFS_CAP_SYMLINKS;
2357 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
2358 server->caps |= NFS_CAP_FILEID;
2359 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
2360 server->caps |= NFS_CAP_MODE;
2361 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
2362 server->caps |= NFS_CAP_NLINK;
2363 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
2364 server->caps |= NFS_CAP_OWNER;
2365 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
2366 server->caps |= NFS_CAP_OWNER_GROUP;
2367 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
2368 server->caps |= NFS_CAP_ATIME;
2369 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
2370 server->caps |= NFS_CAP_CTIME;
2371 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
2372 server->caps |= NFS_CAP_MTIME;
2373
2374 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
2375 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
2376 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
2377 server->acl_bitmask = res.acl_bitmask;
2378 server->fh_expire_type = res.fh_expire_type;
2379 }
2380
2381 return status;
2382 }
2383
2384 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2385 {
2386 struct nfs4_exception exception = { };
2387 int err;
2388 do {
2389 err = nfs4_handle_exception(server,
2390 _nfs4_server_capabilities(server, fhandle),
2391 &exception);
2392 } while (exception.retry);
2393 return err;
2394 }
2395
2396 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2397 struct nfs_fsinfo *info)
2398 {
2399 struct nfs4_lookup_root_arg args = {
2400 .bitmask = nfs4_fattr_bitmap,
2401 };
2402 struct nfs4_lookup_res res = {
2403 .server = server,
2404 .fattr = info->fattr,
2405 .fh = fhandle,
2406 };
2407 struct rpc_message msg = {
2408 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
2409 .rpc_argp = &args,
2410 .rpc_resp = &res,
2411 };
2412
2413 nfs_fattr_init(info->fattr);
2414 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2415 }
2416
2417 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2418 struct nfs_fsinfo *info)
2419 {
2420 struct nfs4_exception exception = { };
2421 int err;
2422 do {
2423 err = _nfs4_lookup_root(server, fhandle, info);
2424 switch (err) {
2425 case 0:
2426 case -NFS4ERR_WRONGSEC:
2427 goto out;
2428 default:
2429 err = nfs4_handle_exception(server, err, &exception);
2430 }
2431 } while (exception.retry);
2432 out:
2433 return err;
2434 }
2435
2436 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2437 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
2438 {
2439 struct rpc_auth *auth;
2440 int ret;
2441
2442 auth = rpcauth_create(flavor, server->client);
2443 if (IS_ERR(auth)) {
2444 ret = -EIO;
2445 goto out;
2446 }
2447 ret = nfs4_lookup_root(server, fhandle, info);
2448 out:
2449 return ret;
2450 }
2451
2452 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2453 struct nfs_fsinfo *info)
2454 {
2455 int i, len, status = 0;
2456 rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS];
2457
2458 len = rpcauth_list_flavors(flav_array, ARRAY_SIZE(flav_array));
2459 if (len < 0)
2460 return len;
2461
2462 for (i = 0; i < len; i++) {
2463 /* AUTH_UNIX is the default flavor if none was specified,
2464 * thus has already been tried. */
2465 if (flav_array[i] == RPC_AUTH_UNIX)
2466 continue;
2467
2468 status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
2469 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
2470 continue;
2471 break;
2472 }
2473 /*
2474  * -EACCES could mean that the user doesn't have the correct permissions
2475 * to access the mount. It could also mean that we tried to mount
2476 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
2477 * existing mount programs don't handle -EACCES very well so it should
2478 * be mapped to -EPERM instead.
2479 */
2480 if (status == -EACCES)
2481 status = -EPERM;
2482 return status;
2483 }
2484
2485 /*
2486 * get the file handle for the "/" directory on the server
2487 */
2488 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
2489 struct nfs_fsinfo *info)
2490 {
2491 int minor_version = server->nfs_client->cl_minorversion;
2492 int status = nfs4_lookup_root(server, fhandle, info);
2493 if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR))
2494 /*
2495 * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM
2496 * by nfs4_map_errors() as this function exits.
2497 */
2498 status = nfs_v4_minor_ops[minor_version]->find_root_sec(server, fhandle, info);
2499 if (status == 0)
2500 status = nfs4_server_capabilities(server, fhandle);
2501 if (status == 0)
2502 status = nfs4_do_fsinfo(server, fhandle, info);
2503 return nfs4_map_errors(status);
2504 }
2505
2506 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
2507 struct nfs_fsinfo *info)
2508 {
2509 int error;
2510 struct nfs_fattr *fattr = info->fattr;
2511
2512 error = nfs4_server_capabilities(server, mntfh);
2513 if (error < 0) {
2514 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
2515 return error;
2516 }
2517
2518 error = nfs4_proc_getattr(server, mntfh, fattr);
2519 if (error < 0) {
2520 dprintk("nfs4_get_root: getattr error = %d\n", -error);
2521 return error;
2522 }
2523
2524 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
2525 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
2526 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
2527
2528 return error;
2529 }
2530
2531 /*
2532 * Get locations and (maybe) other attributes of a referral.
2533 * Note that we'll actually follow the referral later when
2534 * we detect fsid mismatch in inode revalidation
2535 */
2536 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
2537 const struct qstr *name, struct nfs_fattr *fattr,
2538 struct nfs_fh *fhandle)
2539 {
2540 int status = -ENOMEM;
2541 struct page *page = NULL;
2542 struct nfs4_fs_locations *locations = NULL;
2543
2544 page = alloc_page(GFP_KERNEL);
2545 if (page == NULL)
2546 goto out;
2547 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
2548 if (locations == NULL)
2549 goto out;
2550
2551 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
2552 if (status != 0)
2553 goto out;
2554 /* Make sure server returned a different fsid for the referral */
2555 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
2556 dprintk("%s: server did not return a different fsid for"
2557 " a referral at %s\n", __func__, name->name);
2558 status = -EIO;
2559 goto out;
2560 }
2561 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
2562 nfs_fixup_referral_attributes(&locations->fattr);
2563
2564 /* replace the lookup nfs_fattr with the locations nfs_fattr */
2565 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
2566 memset(fhandle, 0, sizeof(struct nfs_fh));
2567 out:
2568 if (page)
2569 __free_page(page);
2570 kfree(locations);
2571 return status;
2572 }
2573
2574 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2575 {
2576 struct nfs4_getattr_arg args = {
2577 .fh = fhandle,
2578 .bitmask = server->attr_bitmask,
2579 };
2580 struct nfs4_getattr_res res = {
2581 .fattr = fattr,
2582 .server = server,
2583 };
2584 struct rpc_message msg = {
2585 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
2586 .rpc_argp = &args,
2587 .rpc_resp = &res,
2588 };
2589
2590 nfs_fattr_init(fattr);
2591 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2592 }
2593
2594 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2595 {
2596 struct nfs4_exception exception = { };
2597 int err;
2598 do {
2599 err = nfs4_handle_exception(server,
2600 _nfs4_proc_getattr(server, fhandle, fattr),
2601 &exception);
2602 } while (exception.retry);
2603 return err;
2604 }
2605
2606 /*
2607  * The file is not closed if it is opened due to a request to change
2608 * the size of the file. The open call will not be needed once the
2609 * VFS layer lookup-intents are implemented.
2610 *
2611 * Close is called when the inode is destroyed.
2612 * If we haven't opened the file for O_WRONLY, we
2613  * need to do so in the size_change case in order to obtain a stateid.
2614 *
2615 * Got race?
2616 * Because OPEN is always done by name in nfsv4, it is
2617 * possible that we opened a different file by the same
2618 * name. We can recognize this race condition, but we
2619 * can't do anything about it besides returning an error.
2620 *
2621 * This will be fixed with VFS changes (lookup-intent).
2622 */
2623 static int
2624 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
2625 struct iattr *sattr)
2626 {
2627 struct inode *inode = dentry->d_inode;
2628 struct rpc_cred *cred = NULL;
2629 struct nfs4_state *state = NULL;
2630 int status;
2631
2632 if (pnfs_ld_layoutret_on_setattr(inode))
2633 pnfs_return_layout(inode);
2634
2635 nfs_fattr_init(fattr);
2636
2637 /* Deal with open(O_TRUNC) */
2638 if (sattr->ia_valid & ATTR_OPEN)
2639 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
2640
2641 /* Optimization: if the end result is no change, don't RPC */
2642 if ((sattr->ia_valid & ~(ATTR_FILE)) == 0)
2643 return 0;
2644
2645 	/* Search for an existing file that is open for writing */
2646 if (sattr->ia_valid & ATTR_FILE) {
2647 struct nfs_open_context *ctx;
2648
2649 ctx = nfs_file_open_context(sattr->ia_file);
2650 if (ctx) {
2651 cred = ctx->cred;
2652 state = ctx->state;
2653 }
2654 }
2655
2656 status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
2657 if (status == 0)
2658 nfs_setattr_update_inode(inode, sattr);
2659 return status;
2660 }
2661
2662 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
2663 const struct qstr *name, struct nfs_fh *fhandle,
2664 struct nfs_fattr *fattr)
2665 {
2666 struct nfs_server *server = NFS_SERVER(dir);
2667 int status;
2668 struct nfs4_lookup_arg args = {
2669 .bitmask = server->attr_bitmask,
2670 .dir_fh = NFS_FH(dir),
2671 .name = name,
2672 };
2673 struct nfs4_lookup_res res = {
2674 .server = server,
2675 .fattr = fattr,
2676 .fh = fhandle,
2677 };
2678 struct rpc_message msg = {
2679 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
2680 .rpc_argp = &args,
2681 .rpc_resp = &res,
2682 };
2683
2684 nfs_fattr_init(fattr);
2685
2686 dprintk("NFS call lookup %s\n", name->name);
2687 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
2688 dprintk("NFS reply lookup: %d\n", status);
2689 return status;
2690 }
2691
2692 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
2693 {
2694 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
2695 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
2696 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
2697 fattr->nlink = 2;
2698 }
2699
2700 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
2701 struct qstr *name, struct nfs_fh *fhandle,
2702 struct nfs_fattr *fattr)
2703 {
2704 struct nfs4_exception exception = { };
2705 struct rpc_clnt *client = *clnt;
2706 int err;
2707 do {
2708 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr);
2709 switch (err) {
2710 case -NFS4ERR_BADNAME:
2711 err = -ENOENT;
2712 goto out;
2713 case -NFS4ERR_MOVED:
2714 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
2715 goto out;
2716 case -NFS4ERR_WRONGSEC:
2717 err = -EPERM;
2718 if (client != *clnt)
2719 goto out;
2720
2721 client = nfs4_create_sec_client(client, dir, name);
2722 if (IS_ERR(client))
2723 return PTR_ERR(client);
2724
2725 exception.retry = 1;
2726 break;
2727 default:
2728 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
2729 }
2730 } while (exception.retry);
2731
2732 out:
2733 if (err == 0)
2734 *clnt = client;
2735 else if (client != *clnt)
2736 rpc_shutdown_client(client);
2737
2738 return err;
2739 }
2740
2741 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
2742 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2743 {
2744 int status;
2745 struct rpc_clnt *client = NFS_CLIENT(dir);
2746
2747 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
2748 if (client != NFS_CLIENT(dir)) {
2749 rpc_shutdown_client(client);
2750 nfs_fixup_secinfo_attributes(fattr);
2751 }
2752 return status;
2753 }
2754
2755 struct rpc_clnt *
2756 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
2757 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2758 {
2759 int status;
2760 struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir));
2761
2762 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
2763 if (status < 0) {
2764 rpc_shutdown_client(client);
2765 return ERR_PTR(status);
2766 }
2767 return client;
2768 }
2769
2770 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2771 {
2772 struct nfs_server *server = NFS_SERVER(inode);
2773 struct nfs4_accessargs args = {
2774 .fh = NFS_FH(inode),
2775 .bitmask = server->cache_consistency_bitmask,
2776 };
2777 struct nfs4_accessres res = {
2778 .server = server,
2779 };
2780 struct rpc_message msg = {
2781 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
2782 .rpc_argp = &args,
2783 .rpc_resp = &res,
2784 .rpc_cred = entry->cred,
2785 };
2786 int mode = entry->mask;
2787 int status;
2788
2789 /*
2790 * Determine which access bits we want to ask for...
2791 */
2792 if (mode & MAY_READ)
2793 args.access |= NFS4_ACCESS_READ;
2794 if (S_ISDIR(inode->i_mode)) {
2795 if (mode & MAY_WRITE)
2796 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
2797 if (mode & MAY_EXEC)
2798 args.access |= NFS4_ACCESS_LOOKUP;
2799 } else {
2800 if (mode & MAY_WRITE)
2801 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
2802 if (mode & MAY_EXEC)
2803 args.access |= NFS4_ACCESS_EXECUTE;
2804 }
2805
2806 res.fattr = nfs_alloc_fattr();
2807 if (res.fattr == NULL)
2808 return -ENOMEM;
2809
2810 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2811 if (!status) {
2812 nfs_access_set_mask(entry, res.access);
2813 nfs_refresh_inode(inode, res.fattr);
2814 }
2815 nfs_free_fattr(res.fattr);
2816 return status;
2817 }
2818
2819 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2820 {
2821 struct nfs4_exception exception = { };
2822 int err;
2823 do {
2824 err = nfs4_handle_exception(NFS_SERVER(inode),
2825 _nfs4_proc_access(inode, entry),
2826 &exception);
2827 } while (exception.retry);
2828 return err;
2829 }
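/*
 * Illustrative userspace sketch (not part of this file): a plain access(2)
 * call like the one below is what typically ends up in _nfs4_proc_access()
 * above on an access-cache miss, with R_OK/X_OK arriving as MAY_READ/MAY_EXEC
 * and being translated into NFS4_ACCESS_* bits.  The path is an arbitrary
 * example.
 */
#if 0
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Ask whether the caller may read and execute the file. */
	if (access("/mnt/nfs/bin/tool", R_OK | X_OK) == 0)
		printf("read+execute permitted\n");
	return 0;
}
#endif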
2830
2831 /*
2832 * TODO: For the time being, we don't try to get any attributes
2833 * along with any of the zero-copy operations READ, READDIR,
2834 * READLINK, WRITE.
2835 *
2836 * In the case of the first three, we want to put the GETATTR
2837 * after the read-type operation -- this is because it is hard
2838 * to predict the length of a GETATTR response in v4, and thus
2839 * align the READ data correctly. This means that the GETATTR
2840 * may end up partially falling into the page cache, and we should
2841 * shift it into the 'tail' of the xdr_buf before processing.
2842 * To do this efficiently, we need to know the total length
2843 * of data received, which doesn't seem to be available outside
2844 * of the RPC layer.
2845 *
2846 * In the case of WRITE, we also want to put the GETATTR after
2847 * the operation -- in this case because we want to make sure
2848 * we get the post-operation mtime and size.
2849 *
2850 * Both of these changes to the XDR layer would in fact be quite
2851 * minor, but I decided to leave them for a subsequent patch.
2852 */
2853 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
2854 unsigned int pgbase, unsigned int pglen)
2855 {
2856 struct nfs4_readlink args = {
2857 .fh = NFS_FH(inode),
2858 .pgbase = pgbase,
2859 .pglen = pglen,
2860 .pages = &page,
2861 };
2862 struct nfs4_readlink_res res;
2863 struct rpc_message msg = {
2864 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
2865 .rpc_argp = &args,
2866 .rpc_resp = &res,
2867 };
2868
2869 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
2870 }
2871
2872 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
2873 unsigned int pgbase, unsigned int pglen)
2874 {
2875 struct nfs4_exception exception = { };
2876 int err;
2877 do {
2878 err = nfs4_handle_exception(NFS_SERVER(inode),
2879 _nfs4_proc_readlink(inode, page, pgbase, pglen),
2880 &exception);
2881 } while (exception.retry);
2882 return err;
2883 }
2884
2885 /*
2886 * This is just for mknod. open(O_CREAT) will always do ->open_context().
2887 */
2888 static int
2889 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
2890 int flags)
2891 {
2892 struct nfs_open_context *ctx;
2893 struct nfs4_state *state;
2894 int status = 0;
2895
2896 ctx = alloc_nfs_open_context(dentry, FMODE_READ);
2897 if (IS_ERR(ctx))
2898 return PTR_ERR(ctx);
2899
2900 sattr->ia_mode &= ~current_umask();
2901 state = nfs4_do_open(dir, dentry, ctx->mode,
2902 flags, sattr, ctx->cred,
2903 &ctx->mdsthreshold);
2904 d_drop(dentry);
2905 if (IS_ERR(state)) {
2906 status = PTR_ERR(state);
2907 goto out;
2908 }
2909 d_add(dentry, igrab(state->inode));
2910 nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
2911 ctx->state = state;
2912 out:
2913 put_nfs_open_context(ctx);
2914 return status;
2915 }
2916
2917 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
2918 {
2919 struct nfs_server *server = NFS_SERVER(dir);
2920 struct nfs_removeargs args = {
2921 .fh = NFS_FH(dir),
2922 .name = *name,
2923 };
2924 struct nfs_removeres res = {
2925 .server = server,
2926 };
2927 struct rpc_message msg = {
2928 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
2929 .rpc_argp = &args,
2930 .rpc_resp = &res,
2931 };
2932 int status;
2933
2934 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
2935 if (status == 0)
2936 update_changeattr(dir, &res.cinfo);
2937 return status;
2938 }
2939
2940 static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
2941 {
2942 struct nfs4_exception exception = { };
2943 int err;
2944 do {
2945 err = nfs4_handle_exception(NFS_SERVER(dir),
2946 _nfs4_proc_remove(dir, name),
2947 &exception);
2948 } while (exception.retry);
2949 return err;
2950 }
2951
2952 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
2953 {
2954 struct nfs_server *server = NFS_SERVER(dir);
2955 struct nfs_removeargs *args = msg->rpc_argp;
2956 struct nfs_removeres *res = msg->rpc_resp;
2957
2958 res->server = server;
2959 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
2960 nfs41_init_sequence(&args->seq_args, &res->seq_res, 1);
2961 }
2962
2963 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
2964 {
2965 nfs4_setup_sequence(NFS_SERVER(data->dir),
2966 &data->args.seq_args,
2967 &data->res.seq_res,
2968 task);
2969 }
2970
2971 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
2972 {
2973 struct nfs_removeres *res = task->tk_msg.rpc_resp;
2974
2975 if (!nfs4_sequence_done(task, &res->seq_res))
2976 return 0;
2977 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
2978 return 0;
2979 update_changeattr(dir, &res->cinfo);
2980 return 1;
2981 }
2982
2983 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
2984 {
2985 struct nfs_server *server = NFS_SERVER(dir);
2986 struct nfs_renameargs *arg = msg->rpc_argp;
2987 struct nfs_renameres *res = msg->rpc_resp;
2988
2989 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
2990 res->server = server;
2991 nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1);
2992 }
2993
2994 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
2995 {
2996 nfs4_setup_sequence(NFS_SERVER(data->old_dir),
2997 &data->args.seq_args,
2998 &data->res.seq_res,
2999 task);
3000 }
3001
3002 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
3003 struct inode *new_dir)
3004 {
3005 struct nfs_renameres *res = task->tk_msg.rpc_resp;
3006
3007 if (!nfs4_sequence_done(task, &res->seq_res))
3008 return 0;
3009 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
3010 return 0;
3011
3012 update_changeattr(old_dir, &res->old_cinfo);
3013 update_changeattr(new_dir, &res->new_cinfo);
3014 return 1;
3015 }
3016
3017 static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
3018 struct inode *new_dir, struct qstr *new_name)
3019 {
3020 struct nfs_server *server = NFS_SERVER(old_dir);
3021 struct nfs_renameargs arg = {
3022 .old_dir = NFS_FH(old_dir),
3023 .new_dir = NFS_FH(new_dir),
3024 .old_name = old_name,
3025 .new_name = new_name,
3026 };
3027 struct nfs_renameres res = {
3028 .server = server,
3029 };
3030 struct rpc_message msg = {
3031 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
3032 .rpc_argp = &arg,
3033 .rpc_resp = &res,
3034 };
3035 int status = -ENOMEM;
3036
3037 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3038 if (!status) {
3039 update_changeattr(old_dir, &res.old_cinfo);
3040 update_changeattr(new_dir, &res.new_cinfo);
3041 }
3042 return status;
3043 }
3044
3045 static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
3046 struct inode *new_dir, struct qstr *new_name)
3047 {
3048 struct nfs4_exception exception = { };
3049 int err;
3050 do {
3051 err = nfs4_handle_exception(NFS_SERVER(old_dir),
3052 _nfs4_proc_rename(old_dir, old_name,
3053 new_dir, new_name),
3054 &exception);
3055 } while (exception.retry);
3056 return err;
3057 }
3058
3059 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3060 {
3061 struct nfs_server *server = NFS_SERVER(inode);
3062 struct nfs4_link_arg arg = {
3063 .fh = NFS_FH(inode),
3064 .dir_fh = NFS_FH(dir),
3065 .name = name,
3066 .bitmask = server->attr_bitmask,
3067 };
3068 struct nfs4_link_res res = {
3069 .server = server,
3070 };
3071 struct rpc_message msg = {
3072 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
3073 .rpc_argp = &arg,
3074 .rpc_resp = &res,
3075 };
3076 int status = -ENOMEM;
3077
3078 res.fattr = nfs_alloc_fattr();
3079 if (res.fattr == NULL)
3080 goto out;
3081
3082 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3083 if (!status) {
3084 update_changeattr(dir, &res.cinfo);
3085 nfs_post_op_update_inode(inode, res.fattr);
3086 }
3087 out:
3088 nfs_free_fattr(res.fattr);
3089 return status;
3090 }
3091
3092 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3093 {
3094 struct nfs4_exception exception = { };
3095 int err;
3096 do {
3097 err = nfs4_handle_exception(NFS_SERVER(inode),
3098 _nfs4_proc_link(inode, dir, name),
3099 &exception);
3100 } while (exception.retry);
3101 return err;
3102 }
3103
3104 struct nfs4_createdata {
3105 struct rpc_message msg;
3106 struct nfs4_create_arg arg;
3107 struct nfs4_create_res res;
3108 struct nfs_fh fh;
3109 struct nfs_fattr fattr;
3110 };
3111
3112 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
3113 struct qstr *name, struct iattr *sattr, u32 ftype)
3114 {
3115 struct nfs4_createdata *data;
3116
3117 data = kzalloc(sizeof(*data), GFP_KERNEL);
3118 if (data != NULL) {
3119 struct nfs_server *server = NFS_SERVER(dir);
3120
3121 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
3122 data->msg.rpc_argp = &data->arg;
3123 data->msg.rpc_resp = &data->res;
3124 data->arg.dir_fh = NFS_FH(dir);
3125 data->arg.server = server;
3126 data->arg.name = name;
3127 data->arg.attrs = sattr;
3128 data->arg.ftype = ftype;
3129 data->arg.bitmask = server->attr_bitmask;
3130 data->res.server = server;
3131 data->res.fh = &data->fh;
3132 data->res.fattr = &data->fattr;
3133 nfs_fattr_init(data->res.fattr);
3134 }
3135 return data;
3136 }
3137
3138 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
3139 {
3140 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
3141 &data->arg.seq_args, &data->res.seq_res, 1);
3142 if (status == 0) {
3143 update_changeattr(dir, &data->res.dir_cinfo);
3144 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
3145 }
3146 return status;
3147 }
3148
3149 static void nfs4_free_createdata(struct nfs4_createdata *data)
3150 {
3151 kfree(data);
3152 }
3153
3154 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3155 struct page *page, unsigned int len, struct iattr *sattr)
3156 {
3157 struct nfs4_createdata *data;
3158 int status = -ENAMETOOLONG;
3159
3160 if (len > NFS4_MAXPATHLEN)
3161 goto out;
3162
3163 status = -ENOMEM;
3164 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
3165 if (data == NULL)
3166 goto out;
3167
3168 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
3169 data->arg.u.symlink.pages = &page;
3170 data->arg.u.symlink.len = len;
3171
3172 status = nfs4_do_create(dir, dentry, data);
3173
3174 nfs4_free_createdata(data);
3175 out:
3176 return status;
3177 }
3178
3179 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3180 struct page *page, unsigned int len, struct iattr *sattr)
3181 {
3182 struct nfs4_exception exception = { };
3183 int err;
3184 do {
3185 err = nfs4_handle_exception(NFS_SERVER(dir),
3186 _nfs4_proc_symlink(dir, dentry, page,
3187 len, sattr),
3188 &exception);
3189 } while (exception.retry);
3190 return err;
3191 }
3192
3193 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3194 struct iattr *sattr)
3195 {
3196 struct nfs4_createdata *data;
3197 int status = -ENOMEM;
3198
3199 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
3200 if (data == NULL)
3201 goto out;
3202
3203 status = nfs4_do_create(dir, dentry, data);
3204
3205 nfs4_free_createdata(data);
3206 out:
3207 return status;
3208 }
3209
3210 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3211 struct iattr *sattr)
3212 {
3213 struct nfs4_exception exception = { };
3214 int err;
3215
3216 sattr->ia_mode &= ~current_umask();
3217 do {
3218 err = nfs4_handle_exception(NFS_SERVER(dir),
3219 _nfs4_proc_mkdir(dir, dentry, sattr),
3220 &exception);
3221 } while (exception.retry);
3222 return err;
3223 }
3224
3225 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3226 u64 cookie, struct page **pages, unsigned int count, int plus)
3227 {
3228 struct inode *dir = dentry->d_inode;
3229 struct nfs4_readdir_arg args = {
3230 .fh = NFS_FH(dir),
3231 .pages = pages,
3232 .pgbase = 0,
3233 .count = count,
3234 .bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
3235 .plus = plus,
3236 };
3237 struct nfs4_readdir_res res;
3238 struct rpc_message msg = {
3239 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
3240 .rpc_argp = &args,
3241 .rpc_resp = &res,
3242 .rpc_cred = cred,
3243 };
3244 int status;
3245
3246 dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
3247 dentry->d_parent->d_name.name,
3248 dentry->d_name.name,
3249 (unsigned long long)cookie);
3250 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
3251 res.pgbase = args.pgbase;
3252 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
3253 if (status >= 0) {
3254 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
3255 status += args.pgbase;
3256 }
3257
3258 nfs_invalidate_atime(dir);
3259
3260 dprintk("%s: returns %d\n", __func__, status);
3261 return status;
3262 }
3263
3264 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3265 u64 cookie, struct page **pages, unsigned int count, int plus)
3266 {
3267 struct nfs4_exception exception = { };
3268 int err;
3269 do {
3270 err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
3271 _nfs4_proc_readdir(dentry, cred, cookie,
3272 pages, count, plus),
3273 &exception);
3274 } while (exception.retry);
3275 return err;
3276 }
3277
3278 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3279 struct iattr *sattr, dev_t rdev)
3280 {
3281 struct nfs4_createdata *data;
3282 int mode = sattr->ia_mode;
3283 int status = -ENOMEM;
3284
3285 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
3286 if (data == NULL)
3287 goto out;
3288
3289 if (S_ISFIFO(mode))
3290 data->arg.ftype = NF4FIFO;
3291 else if (S_ISBLK(mode)) {
3292 data->arg.ftype = NF4BLK;
3293 data->arg.u.device.specdata1 = MAJOR(rdev);
3294 data->arg.u.device.specdata2 = MINOR(rdev);
3295 }
3296 else if (S_ISCHR(mode)) {
3297 data->arg.ftype = NF4CHR;
3298 data->arg.u.device.specdata1 = MAJOR(rdev);
3299 data->arg.u.device.specdata2 = MINOR(rdev);
3300 } else if (!S_ISSOCK(mode)) {
3301 status = -EINVAL;
3302 goto out_free;
3303 }
3304
3305 status = nfs4_do_create(dir, dentry, data);
3306 out_free:
3307 nfs4_free_createdata(data);
3308 out:
3309 return status;
3310 }
3311
3312 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3313 struct iattr *sattr, dev_t rdev)
3314 {
3315 struct nfs4_exception exception = { };
3316 int err;
3317
3318 sattr->ia_mode &= ~current_umask();
3319 do {
3320 err = nfs4_handle_exception(NFS_SERVER(dir),
3321 _nfs4_proc_mknod(dir, dentry, sattr, rdev),
3322 &exception);
3323 } while (exception.retry);
3324 return err;
3325 }
3326
3327 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
3328 struct nfs_fsstat *fsstat)
3329 {
3330 struct nfs4_statfs_arg args = {
3331 .fh = fhandle,
3332 .bitmask = server->attr_bitmask,
3333 };
3334 struct nfs4_statfs_res res = {
3335 .fsstat = fsstat,
3336 };
3337 struct rpc_message msg = {
3338 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
3339 .rpc_argp = &args,
3340 .rpc_resp = &res,
3341 };
3342
3343 nfs_fattr_init(fsstat->fattr);
3344 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3345 }
3346
3347 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
3348 {
3349 struct nfs4_exception exception = { };
3350 int err;
3351 do {
3352 err = nfs4_handle_exception(server,
3353 _nfs4_proc_statfs(server, fhandle, fsstat),
3354 &exception);
3355 } while (exception.retry);
3356 return err;
3357 }
3358
3359 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
3360 struct nfs_fsinfo *fsinfo)
3361 {
3362 struct nfs4_fsinfo_arg args = {
3363 .fh = fhandle,
3364 .bitmask = server->attr_bitmask,
3365 };
3366 struct nfs4_fsinfo_res res = {
3367 .fsinfo = fsinfo,
3368 };
3369 struct rpc_message msg = {
3370 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
3371 .rpc_argp = &args,
3372 .rpc_resp = &res,
3373 };
3374
3375 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3376 }
3377
3378 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
3379 {
3380 struct nfs4_exception exception = { };
3381 int err;
3382
3383 do {
3384 err = nfs4_handle_exception(server,
3385 _nfs4_do_fsinfo(server, fhandle, fsinfo),
3386 &exception);
3387 } while (exception.retry);
3388 return err;
3389 }
3390
3391 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
3392 {
3393 int error;
3394
3395 nfs_fattr_init(fsinfo->fattr);
3396 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
3397 if (error == 0) {
3398 /* block layout checks this! */
3399 server->pnfs_blksize = fsinfo->blksize;
3400 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype);
3401 }
3402
3403 return error;
3404 }
3405
3406 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
3407 struct nfs_pathconf *pathconf)
3408 {
3409 struct nfs4_pathconf_arg args = {
3410 .fh = fhandle,
3411 .bitmask = server->attr_bitmask,
3412 };
3413 struct nfs4_pathconf_res res = {
3414 .pathconf = pathconf,
3415 };
3416 struct rpc_message msg = {
3417 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
3418 .rpc_argp = &args,
3419 .rpc_resp = &res,
3420 };
3421
3422 /* None of the pathconf attributes are mandatory to implement */
3423 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
3424 memset(pathconf, 0, sizeof(*pathconf));
3425 return 0;
3426 }
3427
3428 nfs_fattr_init(pathconf->fattr);
3429 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3430 }
3431
3432 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
3433 struct nfs_pathconf *pathconf)
3434 {
3435 struct nfs4_exception exception = { };
3436 int err;
3437
3438 do {
3439 err = nfs4_handle_exception(server,
3440 _nfs4_proc_pathconf(server, fhandle, pathconf),
3441 &exception);
3442 } while (exception.retry);
3443 return err;
3444 }
3445
3446 void __nfs4_read_done_cb(struct nfs_read_data *data)
3447 {
3448 nfs_invalidate_atime(data->header->inode);
3449 }
3450
3451 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
3452 {
3453 struct nfs_server *server = NFS_SERVER(data->header->inode);
3454
3455 if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
3456 rpc_restart_call_prepare(task);
3457 return -EAGAIN;
3458 }
3459
3460 __nfs4_read_done_cb(data);
3461 if (task->tk_status > 0)
3462 renew_lease(server, data->timestamp);
3463 return 0;
3464 }
3465
3466 static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
3467 {
3468
3469 dprintk("--> %s\n", __func__);
3470
3471 if (!nfs4_sequence_done(task, &data->res.seq_res))
3472 return -EAGAIN;
3473
3474 return data->read_done_cb ? data->read_done_cb(task, data) :
3475 nfs4_read_done_cb(task, data);
3476 }
3477
3478 static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
3479 {
3480 data->timestamp = jiffies;
3481 data->read_done_cb = nfs4_read_done_cb;
3482 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
3483 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
3484 }
3485
3486 static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data)
3487 {
3488 nfs4_setup_sequence(NFS_SERVER(data->header->inode),
3489 &data->args.seq_args,
3490 &data->res.seq_res,
3491 task);
3492 }
3493
3494 static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
3495 {
3496 struct inode *inode = data->header->inode;
3497
3498 if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
3499 rpc_restart_call_prepare(task);
3500 return -EAGAIN;
3501 }
3502 if (task->tk_status >= 0) {
3503 renew_lease(NFS_SERVER(inode), data->timestamp);
3504 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
3505 }
3506 return 0;
3507 }
3508
3509 static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
3510 {
3511 if (!nfs4_sequence_done(task, &data->res.seq_res))
3512 return -EAGAIN;
3513 return data->write_done_cb ? data->write_done_cb(task, data) :
3514 nfs4_write_done_cb(task, data);
3515 }
3516
3517 static
3518 bool nfs4_write_need_cache_consistency_data(const struct nfs_write_data *data)
3519 {
3520 const struct nfs_pgio_header *hdr = data->header;
3521
3522 /* Don't request attributes for pNFS or O_DIRECT writes */
3523 if (data->ds_clp != NULL || hdr->dreq != NULL)
3524 return false;
3525 /* Otherwise, request attributes if and only if we don't hold
3526 * a delegation
3527 */
3528 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
3529 }
3530
3531 static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
3532 {
3533 struct nfs_server *server = NFS_SERVER(data->header->inode);
3534
3535 if (!nfs4_write_need_cache_consistency_data(data)) {
3536 data->args.bitmask = NULL;
3537 data->res.fattr = NULL;
3538 } else
3539 data->args.bitmask = server->cache_consistency_bitmask;
3540
3541 if (!data->write_done_cb)
3542 data->write_done_cb = nfs4_write_done_cb;
3543 data->res.server = server;
3544 data->timestamp = jiffies;
3545
3546 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
3547 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
3548 }
3549
3550 static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data)
3551 {
3552 nfs4_setup_sequence(NFS_SERVER(data->header->inode),
3553 &data->args.seq_args,
3554 &data->res.seq_res,
3555 task);
3556 }
3557
3558 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
3559 {
3560 nfs4_setup_sequence(NFS_SERVER(data->inode),
3561 &data->args.seq_args,
3562 &data->res.seq_res,
3563 task);
3564 }
3565
3566 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
3567 {
3568 struct inode *inode = data->inode;
3569
3570 if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
3571 rpc_restart_call_prepare(task);
3572 return -EAGAIN;
3573 }
3574 return 0;
3575 }
3576
3577 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
3578 {
3579 if (!nfs4_sequence_done(task, &data->res.seq_res))
3580 return -EAGAIN;
3581 return data->commit_done_cb(task, data);
3582 }
3583
3584 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
3585 {
3586 struct nfs_server *server = NFS_SERVER(data->inode);
3587
3588 if (data->commit_done_cb == NULL)
3589 data->commit_done_cb = nfs4_commit_done_cb;
3590 data->res.server = server;
3591 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
3592 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
3593 }
3594
3595 struct nfs4_renewdata {
3596 struct nfs_client *client;
3597 unsigned long timestamp;
3598 };
3599
3600 /*
3601 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
3602 * standalone procedure for queueing an asynchronous RENEW.
3603 */
3604 static void nfs4_renew_release(void *calldata)
3605 {
3606 struct nfs4_renewdata *data = calldata;
3607 struct nfs_client *clp = data->client;
3608
3609 if (atomic_read(&clp->cl_count) > 1)
3610 nfs4_schedule_state_renewal(clp);
3611 nfs_put_client(clp);
3612 kfree(data);
3613 }
3614
3615 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
3616 {
3617 struct nfs4_renewdata *data = calldata;
3618 struct nfs_client *clp = data->client;
3619 unsigned long timestamp = data->timestamp;
3620
3621 if (task->tk_status < 0) {
3622 /* Unless we're shutting down, schedule state recovery! */
3623 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
3624 return;
3625 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
3626 nfs4_schedule_lease_recovery(clp);
3627 return;
3628 }
3629 nfs4_schedule_path_down_recovery(clp);
3630 }
3631 do_renew_lease(clp, timestamp);
3632 }
3633
3634 static const struct rpc_call_ops nfs4_renew_ops = {
3635 .rpc_call_done = nfs4_renew_done,
3636 .rpc_release = nfs4_renew_release,
3637 };
3638
3639 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
3640 {
3641 struct rpc_message msg = {
3642 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3643 .rpc_argp = clp,
3644 .rpc_cred = cred,
3645 };
3646 struct nfs4_renewdata *data;
3647
3648 if (renew_flags == 0)
3649 return 0;
3650 if (!atomic_inc_not_zero(&clp->cl_count))
3651 return -EIO;
3652 data = kmalloc(sizeof(*data), GFP_NOFS);
3653 if (data == NULL)
3654 return -ENOMEM;
3655 data->client = clp;
3656 data->timestamp = jiffies;
3657 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
3658 &nfs4_renew_ops, data);
3659 }
3660
3661 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
3662 {
3663 struct rpc_message msg = {
3664 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3665 .rpc_argp = clp,
3666 .rpc_cred = cred,
3667 };
3668 unsigned long now = jiffies;
3669 int status;
3670
3671 status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
3672 if (status < 0)
3673 return status;
3674 do_renew_lease(clp, now);
3675 return 0;
3676 }
3677
3678 static inline int nfs4_server_supports_acls(struct nfs_server *server)
3679 {
3680 return (server->caps & NFS_CAP_ACLS)
3681 && (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3682 && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
3683 }
3684
3685 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
3686  * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
3687 * the stack.
3688 */
3689 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
3690
3691 static int buf_to_pages_noslab(const void *buf, size_t buflen,
3692 struct page **pages, unsigned int *pgbase)
3693 {
3694 struct page *newpage, **spages;
3695 int rc = 0;
3696 size_t len;
3697 spages = pages;
3698
3699 do {
3700 len = min_t(size_t, PAGE_SIZE, buflen);
3701 newpage = alloc_page(GFP_KERNEL);
3702
3703 if (newpage == NULL)
3704 goto unwind;
3705 memcpy(page_address(newpage), buf, len);
3706 buf += len;
3707 buflen -= len;
3708 *pages++ = newpage;
3709 rc++;
3710 } while (buflen != 0);
3711
3712 return rc;
3713
3714 unwind:
3715 for(; rc > 0; rc--)
3716 __free_page(spages[rc-1]);
3717 return -ENOMEM;
3718 }
3719
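/*
 * Per-inode ACL cache: when 'cached' is set, 'data' holds a copy of the
 * ACL; otherwise only 'len' is valid and the data itself still has to be
 * fetched from the server.
 */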
3720 struct nfs4_cached_acl {
3721 int cached;
3722 size_t len;
3723 char data[0];
3724 };
3725
3726 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
3727 {
3728 struct nfs_inode *nfsi = NFS_I(inode);
3729
3730 spin_lock(&inode->i_lock);
3731 kfree(nfsi->nfs4_acl);
3732 nfsi->nfs4_acl = acl;
3733 spin_unlock(&inode->i_lock);
3734 }
3735
3736 static void nfs4_zap_acl_attr(struct inode *inode)
3737 {
3738 nfs4_set_cached_acl(inode, NULL);
3739 }
3740
3741 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
3742 {
3743 struct nfs_inode *nfsi = NFS_I(inode);
3744 struct nfs4_cached_acl *acl;
3745 int ret = -ENOENT;
3746
3747 spin_lock(&inode->i_lock);
3748 acl = nfsi->nfs4_acl;
3749 if (acl == NULL)
3750 goto out;
3751 if (buf == NULL) /* user is just asking for length */
3752 goto out_len;
3753 if (acl->cached == 0)
3754 goto out;
3755 ret = -ERANGE; /* see getxattr(2) man page */
3756 if (acl->len > buflen)
3757 goto out;
3758 memcpy(buf, acl->data, acl->len);
3759 out_len:
3760 ret = acl->len;
3761 out:
3762 spin_unlock(&inode->i_lock);
3763 return ret;
3764 }
3765
3766 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
3767 {
3768 struct nfs4_cached_acl *acl;
3769 size_t buflen = sizeof(*acl) + acl_len;
3770
3771 if (buflen <= PAGE_SIZE) {
3772 acl = kmalloc(buflen, GFP_KERNEL);
3773 if (acl == NULL)
3774 goto out;
3775 acl->cached = 1;
3776 _copy_from_pages(acl->data, pages, pgbase, acl_len);
3777 } else {
3778 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
3779 if (acl == NULL)
3780 goto out;
3781 acl->cached = 0;
3782 }
3783 acl->len = acl_len;
3784 out:
3785 nfs4_set_cached_acl(inode, acl);
3786 }
3787
3788 /*
3789 * The getxattr API returns the required buffer length when called with a
3790 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
3791 * the required buf. On a NULL buf, we send a page of data to the server
3792 * guessing that the ACL request can be serviced by a page. If so, we cache
3793 * up to the page of ACL data, and the 2nd call to getxattr is serviced by
3794  * the cache. If not, we throw away the page and cache only the required
3795  * length. The next getxattr call will then produce another round trip to
3796 * the server, this time with the input buf of the required size.
3797 */
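/*
 * For reference, the userspace sequence this is tuned for looks roughly
 * like the following (sketch only; error handling omitted):
 *
 *	len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	buf = malloc(len);
 *	getxattr(path, "system.nfs4_acl", buf, len);
 */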
3798 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3799 {
3800 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
3801 struct nfs_getaclargs args = {
3802 .fh = NFS_FH(inode),
3803 .acl_pages = pages,
3804 .acl_len = buflen,
3805 };
3806 struct nfs_getaclres res = {
3807 .acl_len = buflen,
3808 };
3809 struct rpc_message msg = {
3810 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
3811 .rpc_argp = &args,
3812 .rpc_resp = &res,
3813 };
3814 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
3815 int ret = -ENOMEM, i;
3816
3817 /* As long as we're doing a round trip to the server anyway,
3818 * let's be prepared for a page of acl data. */
3819 if (npages == 0)
3820 npages = 1;
3821 if (npages > ARRAY_SIZE(pages))
3822 return -ERANGE;
3823
3824 for (i = 0; i < npages; i++) {
3825 pages[i] = alloc_page(GFP_KERNEL);
3826 if (!pages[i])
3827 goto out_free;
3828 }
3829
3830 /* for decoding across pages */
3831 res.acl_scratch = alloc_page(GFP_KERNEL);
3832 if (!res.acl_scratch)
3833 goto out_free;
3834
3835 args.acl_len = npages * PAGE_SIZE;
3836 args.acl_pgbase = 0;
3837
3838 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
3839 __func__, buf, buflen, npages, args.acl_len);
3840 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
3841 &msg, &args.seq_args, &res.seq_res, 0);
3842 if (ret)
3843 goto out_free;
3844
3845 /* Handle the case where the passed-in buffer is too short */
3846 if (res.acl_flags & NFS4_ACL_TRUNC) {
3847 /* Did the user only issue a request for the acl length? */
3848 if (buf == NULL)
3849 goto out_ok;
3850 ret = -ERANGE;
3851 goto out_free;
3852 }
3853 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
3854 if (buf) {
3855 if (res.acl_len > buflen) {
3856 ret = -ERANGE;
3857 goto out_free;
3858 }
3859 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
3860 }
3861 out_ok:
3862 ret = res.acl_len;
3863 out_free:
3864 for (i = 0; i < npages; i++)
3865 if (pages[i])
3866 __free_page(pages[i]);
3867 if (res.acl_scratch)
3868 __free_page(res.acl_scratch);
3869 return ret;
3870 }
3871
3872 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3873 {
3874 struct nfs4_exception exception = { };
3875 ssize_t ret;
3876 do {
3877 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
3878 if (ret >= 0)
3879 break;
3880 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
3881 } while (exception.retry);
3882 return ret;
3883 }
3884
3885 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
3886 {
3887 struct nfs_server *server = NFS_SERVER(inode);
3888 int ret;
3889
3890 if (!nfs4_server_supports_acls(server))
3891 return -EOPNOTSUPP;
3892 ret = nfs_revalidate_inode(server, inode);
3893 if (ret < 0)
3894 return ret;
3895 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
3896 nfs_zap_acl_cache(inode);
3897 ret = nfs4_read_cached_acl(inode, buf, buflen);
3898 if (ret != -ENOENT)
3899 /* -ENOENT means that either nothing is cached, or that only the acl
3900  * length is cached and the data must be fetched from the server */
3901 return ret;
3902 return nfs4_get_acl_uncached(inode, buf, buflen);
3903 }
3904
3905 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
3906 {
3907 struct nfs_server *server = NFS_SERVER(inode);
3908 struct page *pages[NFS4ACL_MAXPAGES];
3909 struct nfs_setaclargs arg = {
3910 .fh = NFS_FH(inode),
3911 .acl_pages = pages,
3912 .acl_len = buflen,
3913 };
3914 struct nfs_setaclres res;
3915 struct rpc_message msg = {
3916 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
3917 .rpc_argp = &arg,
3918 .rpc_resp = &res,
3919 };
3920 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
3921 int ret, i;
3922
3923 if (!nfs4_server_supports_acls(server))
3924 return -EOPNOTSUPP;
3925 if (npages > ARRAY_SIZE(pages))
3926 return -ERANGE;
3927 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
3928 if (i < 0)
3929 return i;
3930 nfs4_inode_return_delegation(inode);
3931 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3932
3933 /*
3934 * Free each page after tx, so the only ref left is
3935 * held by the network stack
3936 */
3937 for (; i > 0; i--)
3938 put_page(pages[i-1]);
3939
3940 /*
3941 * Acl update can result in inode attribute update.
3942 * so mark the attribute cache invalid.
3943 */
3944 spin_lock(&inode->i_lock);
3945 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
3946 spin_unlock(&inode->i_lock);
3947 nfs_access_zap_cache(inode);
3948 nfs_zap_acl_cache(inode);
3949 return ret;
3950 }
3951
3952 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
3953 {
3954 struct nfs4_exception exception = { };
3955 int err;
3956 do {
3957 err = nfs4_handle_exception(NFS_SERVER(inode),
3958 __nfs4_proc_set_acl(inode, buf, buflen),
3959 &exception);
3960 } while (exception.retry);
3961 return err;
3962 }
3963
3964 static int
3965 nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
3966 {
3967 struct nfs_client *clp = server->nfs_client;
3968
3969 if (task->tk_status >= 0)
3970 return 0;
3971 switch (task->tk_status) {
3972 case -NFS4ERR_DELEG_REVOKED:
3973 case -NFS4ERR_ADMIN_REVOKED:
3974 case -NFS4ERR_BAD_STATEID:
3975 if (state == NULL)
3976 break;
3977 nfs_remove_bad_delegation(state->inode);
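/* Fall through: the revoked/bad stateid also needs stateid recovery */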
3978 case -NFS4ERR_OPENMODE:
3979 if (state == NULL)
3980 break;
3981 nfs4_schedule_stateid_recovery(server, state);
3982 goto wait_on_recovery;
3983 case -NFS4ERR_EXPIRED:
3984 if (state != NULL)
3985 nfs4_schedule_stateid_recovery(server, state);
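/* Fall through: an expired stateid also requires lease recovery */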
3986 case -NFS4ERR_STALE_STATEID:
3987 case -NFS4ERR_STALE_CLIENTID:
3988 nfs4_schedule_lease_recovery(clp);
3989 goto wait_on_recovery;
3990 #if defined(CONFIG_NFS_V4_1)
3991 case -NFS4ERR_BADSESSION:
3992 case -NFS4ERR_BADSLOT:
3993 case -NFS4ERR_BAD_HIGH_SLOT:
3994 case -NFS4ERR_DEADSESSION:
3995 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
3996 case -NFS4ERR_SEQ_FALSE_RETRY:
3997 case -NFS4ERR_SEQ_MISORDERED:
3998 dprintk("%s ERROR %d, Reset session\n", __func__,
3999 task->tk_status);
4000 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
4001 task->tk_status = 0;
4002 return -EAGAIN;
4003 #endif /* CONFIG_NFS_V4_1 */
4004 case -NFS4ERR_DELAY:
4005 nfs_inc_server_stats(server, NFSIOS_DELAY);
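/* Fall through */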
4006 case -NFS4ERR_GRACE:
4007 rpc_delay(task, NFS4_POLL_RETRY_MAX);
4008 task->tk_status = 0;
4009 return -EAGAIN;
4010 case -NFS4ERR_RETRY_UNCACHED_REP:
4011 case -NFS4ERR_OLD_STATEID:
4012 task->tk_status = 0;
4013 return -EAGAIN;
4014 }
4015 task->tk_status = nfs4_map_errors(task->tk_status);
4016 return 0;
4017 wait_on_recovery:
4018 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
4019 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
4020 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
4021 task->tk_status = 0;
4022 return -EAGAIN;
4023 }
4024
4025 static void nfs4_init_boot_verifier(const struct nfs_client *clp,
4026 nfs4_verifier *bootverf)
4027 {
4028 __be32 verf[2];
4029
4030 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
4031 /* An impossible timestamp guarantees this value
4032 * will never match a generated boot time. */
4033 verf[0] = 0;
4034 verf[1] = (__be32)(NSEC_PER_SEC + 1);
4035 } else {
4036 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
4037 verf[0] = (__be32)nn->boot_time.tv_sec;
4038 verf[1] = (__be32)nn->boot_time.tv_nsec;
4039 }
4040 memcpy(bootverf->data, verf, sizeof(bootverf->data));
4041 }
4042
4043 static unsigned int
4044 nfs4_init_nonuniform_client_string(const struct nfs_client *clp,
4045 char *buf, size_t len)
4046 {
4047 unsigned int result;
4048
4049 rcu_read_lock();
4050 result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s",
4051 clp->cl_ipaddr,
4052 rpc_peeraddr2str(clp->cl_rpcclient,
4053 RPC_DISPLAY_ADDR),
4054 rpc_peeraddr2str(clp->cl_rpcclient,
4055 RPC_DISPLAY_PROTO));
4056 rcu_read_unlock();
4057 return result;
4058 }
4059
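/*
 * The non-uniform string above embeds the client and server addresses;
 * the uniform string below is derived from the node name (plus an
 * optional administrator-supplied uniquifier) so that it does not change
 * when the client's addresses change.
 */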
4060 static unsigned int
4061 nfs4_init_uniform_client_string(const struct nfs_client *clp,
4062 char *buf, size_t len)
4063 {
4064 char *nodename = clp->cl_rpcclient->cl_nodename;
4065
4066 if (nfs4_client_id_uniquifier[0] != '\0')
4067 nodename = nfs4_client_id_uniquifier;
4068 return scnprintf(buf, len, "Linux NFSv%u.%u %s",
4069 clp->rpc_ops->version, clp->cl_minorversion,
4070 nodename);
4071 }
4072
4073 /**
4074 * nfs4_proc_setclientid - Negotiate client ID
4075 * @clp: state data structure
4076 * @program: RPC program for NFSv4 callback service
4077 * @port: IP port number for NFS4 callback service
4078 * @cred: RPC credential to use for this call
4079 * @res: where to place the result
4080 *
4081 * Returns zero, a negative errno, or a negative NFS4ERR status code.
4082 */
4083 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
4084 unsigned short port, struct rpc_cred *cred,
4085 struct nfs4_setclientid_res *res)
4086 {
4087 nfs4_verifier sc_verifier;
4088 struct nfs4_setclientid setclientid = {
4089 .sc_verifier = &sc_verifier,
4090 .sc_prog = program,
4091 .sc_cb_ident = clp->cl_cb_ident,
4092 };
4093 struct rpc_message msg = {
4094 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
4095 .rpc_argp = &setclientid,
4096 .rpc_resp = res,
4097 .rpc_cred = cred,
4098 };
4099 int status;
4100
4101 /* nfs_client_id4 */
4102 nfs4_init_boot_verifier(clp, &sc_verifier);
4103 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
4104 setclientid.sc_name_len =
4105 nfs4_init_uniform_client_string(clp,
4106 setclientid.sc_name,
4107 sizeof(setclientid.sc_name));
4108 else
4109 setclientid.sc_name_len =
4110 nfs4_init_nonuniform_client_string(clp,
4111 setclientid.sc_name,
4112 sizeof(setclientid.sc_name));
4113 /* cb_client4 */
4114 rcu_read_lock();
4115 setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
4116 sizeof(setclientid.sc_netid),
4117 rpc_peeraddr2str(clp->cl_rpcclient,
4118 RPC_DISPLAY_NETID));
4119 rcu_read_unlock();
4120 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
4121 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
4122 clp->cl_ipaddr, port >> 8, port & 255);
4123
4124 dprintk("NFS call setclientid auth=%s, '%.*s'\n",
4125 clp->cl_rpcclient->cl_auth->au_ops->au_name,
4126 setclientid.sc_name_len, setclientid.sc_name);
4127 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4128 dprintk("NFS reply setclientid: %d\n", status);
4129 return status;
4130 }
4131
4132 /**
4133 * nfs4_proc_setclientid_confirm - Confirm client ID
4134 * @clp: state data structure
4135  * @arg: result of a previous SETCLIENTID call
4136 * @cred: RPC credential to use for this call
4137 *
4138 * Returns zero, a negative errno, or a negative NFS4ERR status code.
4139 */
4140 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
4141 struct nfs4_setclientid_res *arg,
4142 struct rpc_cred *cred)
4143 {
4144 struct nfs_fsinfo fsinfo;
4145 struct rpc_message msg = {
4146 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
4147 .rpc_argp = arg,
4148 .rpc_resp = &fsinfo,
4149 .rpc_cred = cred,
4150 };
4151 unsigned long now;
4152 int status;
4153
4154 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
4155 clp->cl_rpcclient->cl_auth->au_ops->au_name,
4156 clp->cl_clientid);
4157 now = jiffies;
4158 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4159 if (status == 0) {
4160 spin_lock(&clp->cl_lock);
4161 clp->cl_lease_time = fsinfo.lease_time * HZ;
4162 clp->cl_last_renewal = now;
4163 spin_unlock(&clp->cl_lock);
4164 }
4165 dprintk("NFS reply setclientid_confirm: %d\n", status);
4166 return status;
4167 }
4168
4169 struct nfs4_delegreturndata {
4170 struct nfs4_delegreturnargs args;
4171 struct nfs4_delegreturnres res;
4172 struct nfs_fh fh;
4173 nfs4_stateid stateid;
4174 unsigned long timestamp;
4175 struct nfs_fattr fattr;
4176 int rpc_status;
4177 };
4178
4179 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
4180 {
4181 struct nfs4_delegreturndata *data = calldata;
4182
4183 if (!nfs4_sequence_done(task, &data->res.seq_res))
4184 return;
4185
4186 switch (task->tk_status) {
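/* A stale or expired stateid means the delegation is already gone on
 * the server, so treat it like success and simply renew the lease. */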
4187 case -NFS4ERR_STALE_STATEID:
4188 case -NFS4ERR_EXPIRED:
4189 case 0:
4190 renew_lease(data->res.server, data->timestamp);
4191 break;
4192 default:
4193 if (nfs4_async_handle_error(task, data->res.server, NULL) ==
4194 -EAGAIN) {
4195 rpc_restart_call_prepare(task);
4196 return;
4197 }
4198 }
4199 data->rpc_status = task->tk_status;
4200 }
4201
4202 static void nfs4_delegreturn_release(void *calldata)
4203 {
4204 kfree(calldata);
4205 }
4206
4207 #if defined(CONFIG_NFS_V4_1)
4208 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
4209 {
4210 struct nfs4_delegreturndata *d_data;
4211
4212 d_data = (struct nfs4_delegreturndata *)data;
4213
4214 nfs4_setup_sequence(d_data->res.server,
4215 &d_data->args.seq_args,
4216 &d_data->res.seq_res,
4217 task);
4218 }
4219 #endif /* CONFIG_NFS_V4_1 */
4220
4221 static const struct rpc_call_ops nfs4_delegreturn_ops = {
4222 #if defined(CONFIG_NFS_V4_1)
4223 .rpc_call_prepare = nfs4_delegreturn_prepare,
4224 #endif /* CONFIG_NFS_V4_1 */
4225 .rpc_call_done = nfs4_delegreturn_done,
4226 .rpc_release = nfs4_delegreturn_release,
4227 };
4228
4229 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
4230 {
4231 struct nfs4_delegreturndata *data;
4232 struct nfs_server *server = NFS_SERVER(inode);
4233 struct rpc_task *task;
4234 struct rpc_message msg = {
4235 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
4236 .rpc_cred = cred,
4237 };
4238 struct rpc_task_setup task_setup_data = {
4239 .rpc_client = server->client,
4240 .rpc_message = &msg,
4241 .callback_ops = &nfs4_delegreturn_ops,
4242 .flags = RPC_TASK_ASYNC,
4243 };
4244 int status = 0;
4245
4246 data = kzalloc(sizeof(*data), GFP_NOFS);
4247 if (data == NULL)
4248 return -ENOMEM;
4249 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
4250 data->args.fhandle = &data->fh;
4251 data->args.stateid = &data->stateid;
4252 data->args.bitmask = server->cache_consistency_bitmask;
4253 nfs_copy_fh(&data->fh, NFS_FH(inode));
4254 nfs4_stateid_copy(&data->stateid, stateid);
4255 data->res.fattr = &data->fattr;
4256 data->res.server = server;
4257 nfs_fattr_init(data->res.fattr);
4258 data->timestamp = jiffies;
4259 data->rpc_status = 0;
4260
4261 task_setup_data.callback_data = data;
4262 msg.rpc_argp = &data->args;
4263 msg.rpc_resp = &data->res;
4264 task = rpc_run_task(&task_setup_data);
4265 if (IS_ERR(task))
4266 return PTR_ERR(task);
4267 if (!issync)
4268 goto out;
4269 status = nfs4_wait_for_completion_rpc_task(task);
4270 if (status != 0)
4271 goto out;
4272 status = data->rpc_status;
4273 if (status == 0)
4274 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
4275 else
4276 nfs_refresh_inode(inode, &data->fattr);
4277 out:
4278 rpc_put_task(task);
4279 return status;
4280 }
4281
4282 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
4283 {
4284 struct nfs_server *server = NFS_SERVER(inode);
4285 struct nfs4_exception exception = { };
4286 int err;
4287 do {
4288 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
4289 switch (err) {
4290 case -NFS4ERR_STALE_STATEID:
4291 case -NFS4ERR_EXPIRED:
4292 case 0:
4293 return 0;
4294 }
4295 err = nfs4_handle_exception(server, err, &exception);
4296 } while (exception.retry);
4297 return err;
4298 }
4299
4300 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
4301 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
4302
4303 /*
4304 * sleep, with exponential backoff, and retry the LOCK operation.
4305 */
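/*
 * For example, starting from NFS4_LOCK_MINTIMEOUT (1s) the successive
 * sleeps are 1s, 2s, 4s, 8s, 16s and then 30s (NFS4_LOCK_MAXTIMEOUT)
 * for every further retry.
 */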
4306 static unsigned long
4307 nfs4_set_lock_task_retry(unsigned long timeout)
4308 {
4309 freezable_schedule_timeout_killable(timeout);
4310 timeout <<= 1;
4311 if (timeout > NFS4_LOCK_MAXTIMEOUT)
4312 return NFS4_LOCK_MAXTIMEOUT;
4313 return timeout;
4314 }
4315
4316 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4317 {
4318 struct inode *inode = state->inode;
4319 struct nfs_server *server = NFS_SERVER(inode);
4320 struct nfs_client *clp = server->nfs_client;
4321 struct nfs_lockt_args arg = {
4322 .fh = NFS_FH(inode),
4323 .fl = request,
4324 };
4325 struct nfs_lockt_res res = {
4326 .denied = request,
4327 };
4328 struct rpc_message msg = {
4329 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
4330 .rpc_argp = &arg,
4331 .rpc_resp = &res,
4332 .rpc_cred = state->owner->so_cred,
4333 };
4334 struct nfs4_lock_state *lsp;
4335 int status;
4336
4337 arg.lock_owner.clientid = clp->cl_clientid;
4338 status = nfs4_set_lock_state(state, request);
4339 if (status != 0)
4340 goto out;
4341 lsp = request->fl_u.nfs4_fl.owner;
4342 arg.lock_owner.id = lsp->ls_seqid.owner_id;
4343 arg.lock_owner.s_dev = server->s_dev;
4344 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4345 switch (status) {
4346 case 0:
4347 request->fl_type = F_UNLCK;
4348 break;
4349 case -NFS4ERR_DENIED:
4350 status = 0;
4351 }
4352 request->fl_ops->fl_release_private(request);
4353 out:
4354 return status;
4355 }
4356
4357 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4358 {
4359 struct nfs4_exception exception = { };
4360 int err;
4361
4362 do {
4363 err = nfs4_handle_exception(NFS_SERVER(state->inode),
4364 _nfs4_proc_getlk(state, cmd, request),
4365 &exception);
4366 } while (exception.retry);
4367 return err;
4368 }
4369
4370 static int do_vfs_lock(struct file *file, struct file_lock *fl)
4371 {
4372 int res = 0;
4373 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
4374 case FL_POSIX:
4375 res = posix_lock_file_wait(file, fl);
4376 break;
4377 case FL_FLOCK:
4378 res = flock_lock_file_wait(file, fl);
4379 break;
4380 default:
4381 BUG();
4382 }
4383 return res;
4384 }
4385
4386 struct nfs4_unlockdata {
4387 struct nfs_locku_args arg;
4388 struct nfs_locku_res res;
4389 struct nfs4_lock_state *lsp;
4390 struct nfs_open_context *ctx;
4391 struct file_lock fl;
4392 const struct nfs_server *server;
4393 unsigned long timestamp;
4394 };
4395
4396 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
4397 struct nfs_open_context *ctx,
4398 struct nfs4_lock_state *lsp,
4399 struct nfs_seqid *seqid)
4400 {
4401 struct nfs4_unlockdata *p;
4402 struct inode *inode = lsp->ls_state->inode;
4403
4404 p = kzalloc(sizeof(*p), GFP_NOFS);
4405 if (p == NULL)
4406 return NULL;
4407 p->arg.fh = NFS_FH(inode);
4408 p->arg.fl = &p->fl;
4409 p->arg.seqid = seqid;
4410 p->res.seqid = seqid;
4411 p->arg.stateid = &lsp->ls_stateid;
4412 p->lsp = lsp;
4413 atomic_inc(&lsp->ls_count);
4414 /* Ensure we don't close file until we're done freeing locks! */
4415 p->ctx = get_nfs_open_context(ctx);
4416 memcpy(&p->fl, fl, sizeof(p->fl));
4417 p->server = NFS_SERVER(inode);
4418 return p;
4419 }
4420
4421 static void nfs4_locku_release_calldata(void *data)
4422 {
4423 struct nfs4_unlockdata *calldata = data;
4424 nfs_free_seqid(calldata->arg.seqid);
4425 nfs4_put_lock_state(calldata->lsp);
4426 put_nfs_open_context(calldata->ctx);
4427 kfree(calldata);
4428 }
4429
4430 static void nfs4_locku_done(struct rpc_task *task, void *data)
4431 {
4432 struct nfs4_unlockdata *calldata = data;
4433
4434 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
4435 return;
4436 switch (task->tk_status) {
4437 case 0:
4438 nfs4_stateid_copy(&calldata->lsp->ls_stateid,
4439 &calldata->res.stateid);
4440 renew_lease(calldata->server, calldata->timestamp);
4441 break;
4442 case -NFS4ERR_BAD_STATEID:
4443 case -NFS4ERR_OLD_STATEID:
4444 case -NFS4ERR_STALE_STATEID:
4445 case -NFS4ERR_EXPIRED:
4446 break;
4447 default:
4448 if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
4449 rpc_restart_call_prepare(task);
4450 }
4451 nfs_release_seqid(calldata->arg.seqid);
4452 }
4453
4454 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
4455 {
4456 struct nfs4_unlockdata *calldata = data;
4457
4458 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
4459 goto out_wait;
4460 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
4461 /* Note: exit _without_ running nfs4_locku_done */
4462 goto out_no_action;
4463 }
4464 calldata->timestamp = jiffies;
4465 if (nfs4_setup_sequence(calldata->server,
4466 &calldata->arg.seq_args,
4467 &calldata->res.seq_res,
4468 task) != 0)
4469 nfs_release_seqid(calldata->arg.seqid);
4470 return;
4471 out_no_action:
4472 task->tk_action = NULL;
4473 out_wait:
4474 nfs4_sequence_done(task, &calldata->res.seq_res);
4475 }
4476
4477 static const struct rpc_call_ops nfs4_locku_ops = {
4478 .rpc_call_prepare = nfs4_locku_prepare,
4479 .rpc_call_done = nfs4_locku_done,
4480 .rpc_release = nfs4_locku_release_calldata,
4481 };
4482
4483 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
4484 struct nfs_open_context *ctx,
4485 struct nfs4_lock_state *lsp,
4486 struct nfs_seqid *seqid)
4487 {
4488 struct nfs4_unlockdata *data;
4489 struct rpc_message msg = {
4490 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
4491 .rpc_cred = ctx->cred,
4492 };
4493 struct rpc_task_setup task_setup_data = {
4494 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
4495 .rpc_message = &msg,
4496 .callback_ops = &nfs4_locku_ops,
4497 .workqueue = nfsiod_workqueue,
4498 .flags = RPC_TASK_ASYNC,
4499 };
4500
4501 /* Ensure this is an unlock - when canceling a lock, the
4502 * canceled lock is passed in, and it won't be an unlock.
4503 */
4504 fl->fl_type = F_UNLCK;
4505
4506 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
4507 if (data == NULL) {
4508 nfs_free_seqid(seqid);
4509 return ERR_PTR(-ENOMEM);
4510 }
4511
4512 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
4513 msg.rpc_argp = &data->arg;
4514 msg.rpc_resp = &data->res;
4515 task_setup_data.callback_data = data;
4516 return rpc_run_task(&task_setup_data);
4517 }
4518
4519 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
4520 {
4521 struct inode *inode = state->inode;
4522 struct nfs4_state_owner *sp = state->owner;
4523 struct nfs_inode *nfsi = NFS_I(inode);
4524 struct nfs_seqid *seqid;
4525 struct nfs4_lock_state *lsp;
4526 struct rpc_task *task;
4527 int status = 0;
4528 unsigned char fl_flags = request->fl_flags;
4529
4530 status = nfs4_set_lock_state(state, request);
4531 /* Unlock _before_ we do the RPC call */
4532 request->fl_flags |= FL_EXISTS;
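/* FL_EXISTS makes the local unlock below return -ENOENT if we never
 * held a matching lock, in which case the LOCKU RPC is skipped. */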
4533 /* Exclude nfs_delegation_claim_locks() */
4534 mutex_lock(&sp->so_delegreturn_mutex);
4535 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
4536 down_read(&nfsi->rwsem);
4537 if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
4538 up_read(&nfsi->rwsem);
4539 mutex_unlock(&sp->so_delegreturn_mutex);
4540 goto out;
4541 }
4542 up_read(&nfsi->rwsem);
4543 mutex_unlock(&sp->so_delegreturn_mutex);
4544 if (status != 0)
4545 goto out;
4546 /* Is this a delegated lock? */
4547 if (test_bit(NFS_DELEGATED_STATE, &state->flags))
4548 goto out;
4549 lsp = request->fl_u.nfs4_fl.owner;
4550 seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
4551 status = -ENOMEM;
4552 if (seqid == NULL)
4553 goto out;
4554 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
4555 status = PTR_ERR(task);
4556 if (IS_ERR(task))
4557 goto out;
4558 status = nfs4_wait_for_completion_rpc_task(task);
4559 rpc_put_task(task);
4560 out:
4561 request->fl_flags = fl_flags;
4562 return status;
4563 }
4564
4565 struct nfs4_lockdata {
4566 struct nfs_lock_args arg;
4567 struct nfs_lock_res res;
4568 struct nfs4_lock_state *lsp;
4569 struct nfs_open_context *ctx;
4570 struct file_lock fl;
4571 unsigned long timestamp;
4572 int rpc_status;
4573 int cancelled;
4574 struct nfs_server *server;
4575 };
4576
4577 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
4578 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
4579 gfp_t gfp_mask)
4580 {
4581 struct nfs4_lockdata *p;
4582 struct inode *inode = lsp->ls_state->inode;
4583 struct nfs_server *server = NFS_SERVER(inode);
4584
4585 p = kzalloc(sizeof(*p), gfp_mask);
4586 if (p == NULL)
4587 return NULL;
4588
4589 p->arg.fh = NFS_FH(inode);
4590 p->arg.fl = &p->fl;
4591 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
4592 if (p->arg.open_seqid == NULL)
4593 goto out_free;
4594 p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
4595 if (p->arg.lock_seqid == NULL)
4596 goto out_free_seqid;
4597 p->arg.lock_stateid = &lsp->ls_stateid;
4598 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
4599 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
4600 p->arg.lock_owner.s_dev = server->s_dev;
4601 p->res.lock_seqid = p->arg.lock_seqid;
4602 p->lsp = lsp;
4603 p->server = server;
4604 atomic_inc(&lsp->ls_count);
4605 p->ctx = get_nfs_open_context(ctx);
4606 memcpy(&p->fl, fl, sizeof(p->fl));
4607 return p;
4608 out_free_seqid:
4609 nfs_free_seqid(p->arg.open_seqid);
4610 out_free:
4611 kfree(p);
4612 return NULL;
4613 }
4614
4615 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
4616 {
4617 struct nfs4_lockdata *data = calldata;
4618 struct nfs4_state *state = data->lsp->ls_state;
4619
4620 dprintk("%s: begin!\n", __func__);
4621 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
4622 goto out_wait;
4623 /* Do we need to do an open_to_lock_owner? */
4624 if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
4625 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
4626 goto out_release_lock_seqid;
4627 }
4628 data->arg.open_stateid = &state->stateid;
4629 data->arg.new_lock_owner = 1;
4630 data->res.open_seqid = data->arg.open_seqid;
4631 } else
4632 data->arg.new_lock_owner = 0;
4633 data->timestamp = jiffies;
4634 if (nfs4_setup_sequence(data->server,
4635 &data->arg.seq_args,
4636 &data->res.seq_res,
4637 task) == 0)
4638 return;
4639 nfs_release_seqid(data->arg.open_seqid);
4640 out_release_lock_seqid:
4641 nfs_release_seqid(data->arg.lock_seqid);
4642 out_wait:
4643 nfs4_sequence_done(task, &data->res.seq_res);
4644 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
4645 }
4646
4647 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
4648 {
4649 struct nfs4_lockdata *data = calldata;
4650
4651 dprintk("%s: begin!\n", __func__);
4652
4653 if (!nfs4_sequence_done(task, &data->res.seq_res))
4654 return;
4655
4656 data->rpc_status = task->tk_status;
4657 if (data->arg.new_lock_owner != 0) {
4658 if (data->rpc_status == 0)
4659 nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
4660 else
4661 goto out;
4662 }
4663 if (data->rpc_status == 0) {
4664 nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid);
4665 set_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags);
4666 renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp);
4667 }
4668 out:
4669 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
4670 }
4671
4672 static void nfs4_lock_release(void *calldata)
4673 {
4674 struct nfs4_lockdata *data = calldata;
4675
4676 dprintk("%s: begin!\n", __func__);
4677 nfs_free_seqid(data->arg.open_seqid);
4678 if (data->cancelled != 0) {
4679 struct rpc_task *task;
4680 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
4681 data->arg.lock_seqid);
4682 if (!IS_ERR(task))
4683 rpc_put_task_async(task);
4684 dprintk("%s: cancelling lock!\n", __func__);
4685 } else
4686 nfs_free_seqid(data->arg.lock_seqid);
4687 nfs4_put_lock_state(data->lsp);
4688 put_nfs_open_context(data->ctx);
4689 kfree(data);
4690 dprintk("%s: done!\n", __func__);
4691 }
4692
4693 static const struct rpc_call_ops nfs4_lock_ops = {
4694 .rpc_call_prepare = nfs4_lock_prepare,
4695 .rpc_call_done = nfs4_lock_done,
4696 .rpc_release = nfs4_lock_release,
4697 };
4698
4699 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
4700 {
4701 switch (error) {
4702 case -NFS4ERR_ADMIN_REVOKED:
4703 case -NFS4ERR_BAD_STATEID:
4704 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4705 if (new_lock_owner != 0 ||
4706 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
4707 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
4708 break;
4709 case -NFS4ERR_STALE_STATEID:
4710 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
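/* Fall through */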
4711 case -NFS4ERR_EXPIRED:
4712 nfs4_schedule_lease_recovery(server->nfs_client);
4713 }
4714 }
4715
4716 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
4717 {
4718 struct nfs4_lockdata *data;
4719 struct rpc_task *task;
4720 struct rpc_message msg = {
4721 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
4722 .rpc_cred = state->owner->so_cred,
4723 };
4724 struct rpc_task_setup task_setup_data = {
4725 .rpc_client = NFS_CLIENT(state->inode),
4726 .rpc_message = &msg,
4727 .callback_ops = &nfs4_lock_ops,
4728 .workqueue = nfsiod_workqueue,
4729 .flags = RPC_TASK_ASYNC,
4730 };
4731 int ret;
4732
4733 dprintk("%s: begin!\n", __func__);
4734 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
4735 fl->fl_u.nfs4_fl.owner,
4736 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
4737 if (data == NULL)
4738 return -ENOMEM;
4739 if (IS_SETLKW(cmd))
4740 data->arg.block = 1;
4741 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
4742 msg.rpc_argp = &data->arg;
4743 msg.rpc_resp = &data->res;
4744 task_setup_data.callback_data = data;
4745 if (recovery_type > NFS_LOCK_NEW) {
4746 if (recovery_type == NFS_LOCK_RECLAIM)
4747 data->arg.reclaim = NFS_LOCK_RECLAIM;
4748 nfs4_set_sequence_privileged(&data->arg.seq_args);
4749 }
4750 task = rpc_run_task(&task_setup_data);
4751 if (IS_ERR(task))
4752 return PTR_ERR(task);
4753 ret = nfs4_wait_for_completion_rpc_task(task);
4754 if (ret == 0) {
4755 ret = data->rpc_status;
4756 if (ret)
4757 nfs4_handle_setlk_error(data->server, data->lsp,
4758 data->arg.new_lock_owner, ret);
4759 } else
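/* The wait was interrupted: flag the request so that
 * nfs4_lock_release() issues a LOCKU in case the LOCK did
 * succeed on the server. */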
4760 data->cancelled = 1;
4761 rpc_put_task(task);
4762 dprintk("%s: done, ret = %d!\n", __func__, ret);
4763 return ret;
4764 }
4765
4766 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
4767 {
4768 struct nfs_server *server = NFS_SERVER(state->inode);
4769 struct nfs4_exception exception = {
4770 .inode = state->inode,
4771 };
4772 int err;
4773
4774 do {
4775 /* Cache the lock if possible... */
4776 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
4777 return 0;
4778 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
4779 if (err != -NFS4ERR_DELAY)
4780 break;
4781 nfs4_handle_exception(server, err, &exception);
4782 } while (exception.retry);
4783 return err;
4784 }
4785
4786 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
4787 {
4788 struct nfs_server *server = NFS_SERVER(state->inode);
4789 struct nfs4_exception exception = {
4790 .inode = state->inode,
4791 };
4792 int err;
4793
4794 err = nfs4_set_lock_state(state, request);
4795 if (err != 0)
4796 return err;
4797 do {
4798 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
4799 return 0;
4800 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
4801 switch (err) {
4802 default:
4803 goto out;
4804 case -NFS4ERR_GRACE:
4805 case -NFS4ERR_DELAY:
4806 nfs4_handle_exception(server, err, &exception);
4807 err = 0;
4808 }
4809 } while (exception.retry);
4810 out:
4811 return err;
4812 }
4813
4814 #if defined(CONFIG_NFS_V4_1)
4815 /**
4816 * nfs41_check_expired_locks - possibly free a lock stateid
4817 *
4818 * @state: NFSv4 state for an inode
4819 *
4820 * Returns NFS_OK if recovery for this stateid is now finished.
4821 * Otherwise a negative NFS4ERR value is returned.
4822 */
4823 static int nfs41_check_expired_locks(struct nfs4_state *state)
4824 {
4825 int status, ret = -NFS4ERR_BAD_STATEID;
4826 struct nfs4_lock_state *lsp;
4827 struct nfs_server *server = NFS_SERVER(state->inode);
4828
4829 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
4830 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
4831 status = nfs41_test_stateid(server, &lsp->ls_stateid);
4832 if (status != NFS_OK) {
4833 /* Free the stateid unless the server
4834 * informs us the stateid is unrecognized. */
4835 if (status != -NFS4ERR_BAD_STATEID)
4836 nfs41_free_stateid(server,
4837 &lsp->ls_stateid);
4838 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
4839 ret = status;
4840 }
4841 }
4842 }
4843
4844 return ret;
4845 }
4846
4847 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
4848 {
4849 int status = NFS_OK;
4850
4851 if (test_bit(LK_STATE_IN_USE, &state->flags))
4852 status = nfs41_check_expired_locks(state);
4853 if (status != NFS_OK)
4854 status = nfs4_lock_expired(state, request);
4855 return status;
4856 }
4857 #endif
4858
4859 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4860 {
4861 struct nfs4_state_owner *sp = state->owner;
4862 struct nfs_inode *nfsi = NFS_I(state->inode);
4863 unsigned char fl_flags = request->fl_flags;
4864 unsigned int seq;
4865 int status = -ENOLCK;
4866
4867 if ((fl_flags & FL_POSIX) &&
4868 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
4869 goto out;
4870 /* Is this a delegated open? */
4871 status = nfs4_set_lock_state(state, request);
4872 if (status != 0)
4873 goto out;
4874 request->fl_flags |= FL_ACCESS;
4875 status = do_vfs_lock(request->fl_file, request);
4876 if (status < 0)
4877 goto out;
4878 down_read(&nfsi->rwsem);
4879 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
4880 /* Yes: cache locks! */
4881 /* ...but avoid races with delegation recall... */
4882 request->fl_flags = fl_flags & ~FL_SLEEP;
4883 status = do_vfs_lock(request->fl_file, request);
4884 goto out_unlock;
4885 }
4886 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
4887 up_read(&nfsi->rwsem);
4888 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
4889 if (status != 0)
4890 goto out;
4891 down_read(&nfsi->rwsem);
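/* If state recovery ran while rwsem was dropped, don't trust the lock
 * we just obtained; return NFS4ERR_DELAY so the caller retries. */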
4892 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) {
4893 status = -NFS4ERR_DELAY;
4894 goto out_unlock;
4895 }
4896 /* Note: we always want to sleep here! */
4897 request->fl_flags = fl_flags | FL_SLEEP;
4898 if (do_vfs_lock(request->fl_file, request) < 0)
4899 printk(KERN_WARNING "NFS: %s: VFS is out of sync with lock "
4900 "manager!\n", __func__);
4901 out_unlock:
4902 up_read(&nfsi->rwsem);
4903 out:
4904 request->fl_flags = fl_flags;
4905 return status;
4906 }
4907
4908 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4909 {
4910 struct nfs4_exception exception = {
4911 .state = state,
4912 .inode = state->inode,
4913 };
4914 int err;
4915
4916 do {
4917 err = _nfs4_proc_setlk(state, cmd, request);
4918 if (err == -NFS4ERR_DENIED)
4919 err = -EAGAIN;
4920 err = nfs4_handle_exception(NFS_SERVER(state->inode),
4921 err, &exception);
4922 } while (exception.retry);
4923 return err;
4924 }
4925
4926 static int
4927 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
4928 {
4929 struct nfs_open_context *ctx;
4930 struct nfs4_state *state;
4931 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
4932 int status;
4933
4934 /* verify open state */
4935 ctx = nfs_file_open_context(filp);
4936 state = ctx->state;
4937
4938 if (request->fl_start < 0 || request->fl_end < 0)
4939 return -EINVAL;
4940
4941 if (IS_GETLK(cmd)) {
4942 if (state != NULL)
4943 return nfs4_proc_getlk(state, F_GETLK, request);
4944 return 0;
4945 }
4946
4947 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
4948 return -EINVAL;
4949
4950 if (request->fl_type == F_UNLCK) {
4951 if (state != NULL)
4952 return nfs4_proc_unlck(state, cmd, request);
4953 return 0;
4954 }
4955
4956 if (state == NULL)
4957 return -ENOLCK;
4958 /*
4959 * Don't rely on the VFS having checked the file open mode,
4960 * since it won't do this for flock() locks.
4961 */
4962 switch (request->fl_type) {
4963 case F_RDLCK:
4964 if (!(filp->f_mode & FMODE_READ))
4965 return -EBADF;
4966 break;
4967 case F_WRLCK:
4968 if (!(filp->f_mode & FMODE_WRITE))
4969 return -EBADF;
4970 }
4971
4972 do {
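/* Non-blocking SETLK requests give up on -EAGAIN immediately; only
 * blocking SETLKW requests poll the server with exponential backoff. */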
4973 status = nfs4_proc_setlk(state, cmd, request);
4974 if ((status != -EAGAIN) || IS_SETLK(cmd))
4975 break;
4976 timeout = nfs4_set_lock_task_retry(timeout);
4977 status = -ERESTARTSYS;
4978 if (signalled())
4979 break;
4980 } while (status < 0);
4981 return status;
4982 }
4983
4984 int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
4985 {
4986 struct nfs_server *server = NFS_SERVER(state->inode);
4987 struct nfs4_exception exception = { };
4988 int err;
4989
4990 err = nfs4_set_lock_state(state, fl);
4991 if (err != 0)
4992 goto out;
4993 do {
4994 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
4995 switch (err) {
4996 default:
4997 printk(KERN_ERR "NFS: %s: unhandled error "
4998 "%d.\n", __func__, err);
4999 case 0:
5000 case -ESTALE:
5001 goto out;
5002 case -NFS4ERR_STALE_CLIENTID:
5003 case -NFS4ERR_STALE_STATEID:
5004 set_bit(NFS_DELEGATED_STATE, &state->flags);
5005 case -NFS4ERR_EXPIRED:
5006 nfs4_schedule_lease_recovery(server->nfs_client);
5007 err = -EAGAIN;
5008 goto out;
5009 case -NFS4ERR_BADSESSION:
5010 case -NFS4ERR_BADSLOT:
5011 case -NFS4ERR_BAD_HIGH_SLOT:
5012 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
5013 case -NFS4ERR_DEADSESSION:
5014 set_bit(NFS_DELEGATED_STATE, &state->flags);
5015 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
5016 err = -EAGAIN;
5017 goto out;
5018 case -NFS4ERR_DELEG_REVOKED:
5019 case -NFS4ERR_ADMIN_REVOKED:
5020 case -NFS4ERR_BAD_STATEID:
5021 case -NFS4ERR_OPENMODE:
5022 nfs4_schedule_stateid_recovery(server, state);
5023 err = 0;
5024 goto out;
5025 case -ENOMEM:
5026 case -NFS4ERR_DENIED:
5027 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
5028 err = 0;
5029 goto out;
5030 }
5031 set_bit(NFS_DELEGATED_STATE, &state->flags);
5032 err = nfs4_handle_exception(server, err, &exception);
5033 } while (exception.retry);
5034 out:
5035 return err;
5036 }
5037
5038 struct nfs_release_lockowner_data {
5039 struct nfs4_lock_state *lsp;
5040 struct nfs_server *server;
5041 struct nfs_release_lockowner_args args;
5042 };
5043
5044 static void nfs4_release_lockowner_release(void *calldata)
5045 {
5046 struct nfs_release_lockowner_data *data = calldata;
5047 nfs4_free_lock_state(data->server, data->lsp);
5048 kfree(calldata);
5049 }
5050
5051 static const struct rpc_call_ops nfs4_release_lockowner_ops = {
5052 .rpc_release = nfs4_release_lockowner_release,
5053 };
5054
5055 int nfs4_release_lockowner(struct nfs4_lock_state *lsp)
5056 {
5057 struct nfs_server *server = lsp->ls_state->owner->so_server;
5058 struct nfs_release_lockowner_data *data;
5059 struct rpc_message msg = {
5060 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
5061 };
5062
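/* RELEASE_LOCKOWNER only exists in NFSv4.0; NFSv4.1 and later free
 * lock state with FREE_STATEID instead. */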
5063 if (server->nfs_client->cl_mvops->minor_version != 0)
5064 return -EINVAL;
5065 data = kmalloc(sizeof(*data), GFP_NOFS);
5066 if (!data)
5067 return -ENOMEM;
5068 data->lsp = lsp;
5069 data->server = server;
5070 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
5071 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
5072 data->args.lock_owner.s_dev = server->s_dev;
5073 msg.rpc_argp = &data->args;
5074 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
5075 return 0;
5076 }
5077
5078 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
5079
5080 static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
5081 const void *buf, size_t buflen,
5082 int flags, int type)
5083 {
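/* The xattr core strips the "system.nfs4_acl" prefix before calling
 * this handler, so any leftover suffix is an unknown attribute. */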
5084 if (strcmp(key, "") != 0)
5085 return -EINVAL;
5086
5087 return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
5088 }
5089
5090 static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
5091 void *buf, size_t buflen, int type)
5092 {
5093 if (strcmp(key, "") != 0)
5094 return -EINVAL;
5095
5096 return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
5097 }
5098
5099 static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
5100 size_t list_len, const char *name,
5101 size_t name_len, int type)
5102 {
5103 size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
5104
5105 if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
5106 return 0;
5107
5108 if (list && len <= list_len)
5109 memcpy(list, XATTR_NAME_NFSV4_ACL, len);
5110 return len;
5111 }
5112
5113 /*
5114 * nfs_fhget will use either the mounted_on_fileid or the fileid
5115 */
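/* A referral has no attributes of its own, so fabricate just enough
 * (a read-only directory with nlink 2) for nfs_fhget() to instantiate
 * an inode at the crossing point. */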
5116 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
5117 {
5118 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
5119 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
5120 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
5121 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
5122 return;
5123
5124 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
5125 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
5126 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
5127 fattr->nlink = 2;
5128 }
5129
5130 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
5131 const struct qstr *name,
5132 struct nfs4_fs_locations *fs_locations,
5133 struct page *page)
5134 {
5135 struct nfs_server *server = NFS_SERVER(dir);
5136 u32 bitmask[2] = {
5137 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
5138 };
5139 struct nfs4_fs_locations_arg args = {
5140 .dir_fh = NFS_FH(dir),
5141 .name = name,
5142 .page = page,
5143 .bitmask = bitmask,
5144 };
5145 struct nfs4_fs_locations_res res = {
5146 .fs_locations = fs_locations,
5147 };
5148 struct rpc_message msg = {
5149 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
5150 .rpc_argp = &args,
5151 .rpc_resp = &res,
5152 };
5153 int status;
5154
5155 dprintk("%s: start\n", __func__);
5156
5157 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
5158 * is not supported */
5159 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
5160 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
5161 else
5162 bitmask[0] |= FATTR4_WORD0_FILEID;
5163
5164 nfs_fattr_init(&fs_locations->fattr);
5165 fs_locations->server = server;
5166 fs_locations->nlocations = 0;
5167 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
5168 dprintk("%s: returned status = %d\n", __func__, status);
5169 return status;
5170 }
5171
5172 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
5173 const struct qstr *name,
5174 struct nfs4_fs_locations *fs_locations,
5175 struct page *page)
5176 {
5177 struct nfs4_exception exception = { };
5178 int err;
5179 do {
5180 err = nfs4_handle_exception(NFS_SERVER(dir),
5181 _nfs4_proc_fs_locations(client, dir, name, fs_locations, page),
5182 &exception);
5183 } while (exception.retry);
5184 return err;
5185 }
5186
5187 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
5188 {
5189 int status;
5190 struct nfs4_secinfo_arg args = {
5191 .dir_fh = NFS_FH(dir),
5192 .name = name,
5193 };
5194 struct nfs4_secinfo_res res = {
5195 .flavors = flavors,
5196 };
5197 struct rpc_message msg = {
5198 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
5199 .rpc_argp = &args,
5200 .rpc_resp = &res,
5201 };
5202
5203 dprintk("NFS call secinfo %s\n", name->name);
5204 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
5205 dprintk("NFS reply secinfo: %d\n", status);
5206 return status;
5207 }
5208
5209 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
5210 struct nfs4_secinfo_flavors *flavors)
5211 {
5212 struct nfs4_exception exception = { };
5213 int err;
5214 do {
5215 err = nfs4_handle_exception(NFS_SERVER(dir),
5216 _nfs4_proc_secinfo(dir, name, flavors),
5217 &exception);
5218 } while (exception.retry);
5219 return err;
5220 }
5221
5222 #ifdef CONFIG_NFS_V4_1
5223 /*
5224  * Check the exchange flags returned by the server for invalid combinations:
5225  * any flag outside the allowed mask, both the PNFS and NON_PNFS flags set,
5226  * or none of the NON_PNFS, PNFS, or DS flags set.
5227 */
5228 static int nfs4_check_cl_exchange_flags(u32 flags)
5229 {
5230 if (flags & ~EXCHGID4_FLAG_MASK_R)
5231 goto out_inval;
5232 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
5233 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
5234 goto out_inval;
5235 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
5236 goto out_inval;
5237 return NFS_OK;
5238 out_inval:
5239 return -NFS4ERR_INVAL;
5240 }
5241
5242 static bool
5243 nfs41_same_server_scope(struct nfs41_server_scope *a,
5244 struct nfs41_server_scope *b)
5245 {
5246 if (a->server_scope_sz == b->server_scope_sz &&
5247 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
5248 return true;
5249
5250 return false;
5251 }
5252
5253 /*
5254 * nfs4_proc_bind_conn_to_session()
5255 *
5256 * The 4.1 client currently uses the same TCP connection for the
5257 * fore and backchannel.
5258 */
5259 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
5260 {
5261 int status;
5262 struct nfs41_bind_conn_to_session_res res;
5263 struct rpc_message msg = {
5264 .rpc_proc =
5265 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
5266 .rpc_argp = clp,
5267 .rpc_resp = &res,
5268 .rpc_cred = cred,
5269 };
5270
5271 dprintk("--> %s\n", __func__);
5272
5273 res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
5274 if (unlikely(res.session == NULL)) {
5275 status = -ENOMEM;
5276 goto out;
5277 }
5278
5279 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5280 if (status == 0) {
5281 if (memcmp(res.session->sess_id.data,
5282 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
5283 dprintk("NFS: %s: Session ID mismatch\n", __func__);
5284 status = -EIO;
5285 goto out_session;
5286 }
5287 if (res.dir != NFS4_CDFS4_BOTH) {
5288 dprintk("NFS: %s: Unexpected direction from server\n",
5289 __func__);
5290 status = -EIO;
5291 goto out_session;
5292 }
5293 if (res.use_conn_in_rdma_mode) {
5294 dprintk("NFS: %s: Server returned RDMA mode = true\n",
5295 __func__);
5296 status = -EIO;
5297 goto out_session;
5298 }
5299 }
5300 out_session:
5301 kfree(res.session);
5302 out:
5303 dprintk("<-- %s status= %d\n", __func__, status);
5304 return status;
5305 }
5306
5307 /*
5308 * nfs4_proc_exchange_id()
5309 *
5310 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5311 *
5312 * Since the clientid has expired, all compounds using sessions
5313 * associated with the stale clientid will be returning
5314 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
5315 * be in some phase of session reset.
5316 */
5317 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
5318 {
5319 nfs4_verifier verifier;
5320 struct nfs41_exchange_id_args args = {
5321 .verifier = &verifier,
5322 .client = clp,
5323 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
5324 };
5325 struct nfs41_exchange_id_res res = {
5326 0
5327 };
5328 int status;
5329 struct rpc_message msg = {
5330 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
5331 .rpc_argp = &args,
5332 .rpc_resp = &res,
5333 .rpc_cred = cred,
5334 };
5335
5336 nfs4_init_boot_verifier(clp, &verifier);
5337 args.id_len = nfs4_init_uniform_client_string(clp, args.id,
5338 sizeof(args.id));
5339 dprintk("NFS call exchange_id auth=%s, '%.*s'\n",
5340 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5341 args.id_len, args.id);
5342
5343 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
5344 GFP_NOFS);
5345 if (unlikely(res.server_owner == NULL)) {
5346 status = -ENOMEM;
5347 goto out;
5348 }
5349
5350 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
5351 GFP_NOFS);
5352 if (unlikely(res.server_scope == NULL)) {
5353 status = -ENOMEM;
5354 goto out_server_owner;
5355 }
5356
5357 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
5358 if (unlikely(res.impl_id == NULL)) {
5359 status = -ENOMEM;
5360 goto out_server_scope;
5361 }
5362
5363 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5364 if (status == 0)
5365 status = nfs4_check_cl_exchange_flags(res.flags);
5366
5367 if (status == 0) {
5368 clp->cl_clientid = res.clientid;
5369 clp->cl_exchange_flags = (res.flags & ~EXCHGID4_FLAG_CONFIRMED_R);
5370 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R))
5371 clp->cl_seqid = res.seqid;
5372
5373 kfree(clp->cl_serverowner);
5374 clp->cl_serverowner = res.server_owner;
5375 res.server_owner = NULL;
5376
5377 /* use the most recent implementation id */
5378 kfree(clp->cl_implid);
5379 clp->cl_implid = res.impl_id;
5380
5381 if (clp->cl_serverscope != NULL &&
5382 !nfs41_same_server_scope(clp->cl_serverscope,
5383 res.server_scope)) {
5384 dprintk("%s: server_scope mismatch detected\n",
5385 __func__);
5386 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
5387 kfree(clp->cl_serverscope);
5388 clp->cl_serverscope = NULL;
5389 }
5390
5391 if (clp->cl_serverscope == NULL) {
5392 clp->cl_serverscope = res.server_scope;
5393 goto out;
5394 }
5395 } else
5396 kfree(res.impl_id);
5397
5398 out_server_owner:
5399 kfree(res.server_owner);
5400 out_server_scope:
5401 kfree(res.server_scope);
5402 out:
5403 if (clp->cl_implid != NULL)
5404 dprintk("NFS reply exchange_id: Server Implementation ID: "
5405 "domain: %s, name: %s, date: %llu,%u\n",
5406 clp->cl_implid->domain, clp->cl_implid->name,
5407 clp->cl_implid->date.seconds,
5408 clp->cl_implid->date.nseconds);
5409 dprintk("NFS reply exchange_id: %d\n", status);
5410 return status;
5411 }
5412
5413 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
5414 struct rpc_cred *cred)
5415 {
5416 struct rpc_message msg = {
5417 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
5418 .rpc_argp = clp,
5419 .rpc_cred = cred,
5420 };
5421 int status;
5422
5423 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5424 if (status)
5425 dprintk("NFS: Got error %d from the server %s on "
5426 "DESTROY_CLIENTID.", status, clp->cl_hostname);
5427 return status;
5428 }
5429
5430 static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
5431 struct rpc_cred *cred)
5432 {
5433 unsigned int loop;
5434 int ret;
5435
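/* NFS4ERR_DELAY and NFS4ERR_CLIENTID_BUSY simply mean the server wants
 * us to back off; retry up to NFS4_MAX_LOOP_ON_RECOVER times with a one
 * second pause between attempts. */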
5436 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
5437 ret = _nfs4_proc_destroy_clientid(clp, cred);
5438 switch (ret) {
5439 case -NFS4ERR_DELAY:
5440 case -NFS4ERR_CLIENTID_BUSY:
5441 ssleep(1);
5442 break;
5443 default:
5444 return ret;
5445 }
5446 }
5447 return 0;
5448 }
5449
5450 int nfs4_destroy_clientid(struct nfs_client *clp)
5451 {
5452 struct rpc_cred *cred;
5453 int ret = 0;
5454
5455 if (clp->cl_mvops->minor_version < 1)
5456 goto out;
5457 if (clp->cl_exchange_flags == 0)
5458 goto out;
5459 if (clp->cl_preserve_clid)
5460 goto out;
5461 cred = nfs4_get_exchange_id_cred(clp);
5462 ret = nfs4_proc_destroy_clientid(clp, cred);
5463 if (cred)
5464 put_rpccred(cred);
5465 switch (ret) {
5466 case 0:
5467 case -NFS4ERR_STALE_CLIENTID:
5468 clp->cl_exchange_flags = 0;
5469 }
5470 out:
5471 return ret;
5472 }
5473
5474 struct nfs4_get_lease_time_data {
5475 struct nfs4_get_lease_time_args *args;
5476 struct nfs4_get_lease_time_res *res;
5477 struct nfs_client *clp;
5478 };
5479
5480 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
5481 void *calldata)
5482 {
5483 struct nfs4_get_lease_time_data *data =
5484 (struct nfs4_get_lease_time_data *)calldata;
5485
5486 dprintk("--> %s\n", __func__);
5487 /* just set up the sequence; do not trigger session recovery,
5488    since we're invoked from within one */
5489 nfs41_setup_sequence(data->clp->cl_session,
5490 &data->args->la_seq_args,
5491 &data->res->lr_seq_res,
5492 task);
5493 dprintk("<-- %s\n", __func__);
5494 }
5495
5496 /*
5497 * Called from nfs4_state_manager thread for session setup, so don't recover
5498 * from sequence operation or clientid errors.
5499 */
5500 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
5501 {
5502 struct nfs4_get_lease_time_data *data =
5503 (struct nfs4_get_lease_time_data *)calldata;
5504
5505 dprintk("--> %s\n", __func__);
5506 if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
5507 return;
5508 switch (task->tk_status) {
5509 case -NFS4ERR_DELAY:
5510 case -NFS4ERR_GRACE:
5511 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
5512 rpc_delay(task, NFS4_POLL_RETRY_MIN);
5513 task->tk_status = 0;
5514 /* fall through */
5515 case -NFS4ERR_RETRY_UNCACHED_REP:
5516 rpc_restart_call_prepare(task);
5517 return;
5518 }
5519 dprintk("<-- %s\n", __func__);
5520 }
5521
5522 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
5523 .rpc_call_prepare = nfs4_get_lease_time_prepare,
5524 .rpc_call_done = nfs4_get_lease_time_done,
5525 };
5526
5527 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
5528 {
5529 struct rpc_task *task;
5530 struct nfs4_get_lease_time_args args;
5531 struct nfs4_get_lease_time_res res = {
5532 .lr_fsinfo = fsinfo,
5533 };
5534 struct nfs4_get_lease_time_data data = {
5535 .args = &args,
5536 .res = &res,
5537 .clp = clp,
5538 };
5539 struct rpc_message msg = {
5540 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
5541 .rpc_argp = &args,
5542 .rpc_resp = &res,
5543 };
5544 struct rpc_task_setup task_setup = {
5545 .rpc_client = clp->cl_rpcclient,
5546 .rpc_message = &msg,
5547 .callback_ops = &nfs4_get_lease_time_ops,
5548 .callback_data = &data,
5549 .flags = RPC_TASK_TIMEOUT,
5550 };
5551 int status;
5552
5553 nfs41_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
5554 nfs4_set_sequence_privileged(&args.la_seq_args);
5555 dprintk("--> %s\n", __func__);
5556 task = rpc_run_task(&task_setup);
5557
5558 if (IS_ERR(task))
5559 status = PTR_ERR(task);
5560 else {
5561 status = task->tk_status;
5562 rpc_put_task(task);
5563 }
5564 dprintk("<-- %s return %d\n", __func__, status);
5565
5566 return status;
5567 }
5568
5569 /*
5570  * Initialize the values to be used by the client in CREATE_SESSION.
5571 * If nfs4_init_session set the fore channel request and response sizes,
5572 * use them.
5573 *
5574 * Set the back channel max_resp_sz_cached to zero to force the client to
5575 * always set csa_cachethis to FALSE because the current implementation
5576 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
5577 */
5578 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
5579 {
5580 struct nfs4_session *session = args->client->cl_session;
5581 unsigned int mxrqst_sz = session->fc_target_max_rqst_sz,
5582 mxresp_sz = session->fc_target_max_resp_sz;
5583
5584 if (mxrqst_sz == 0)
5585 mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
5586 if (mxresp_sz == 0)
5587 mxresp_sz = NFS_MAX_FILE_IO_SIZE;
5588 /* Fore channel attributes */
5589 args->fc_attrs.max_rqst_sz = mxrqst_sz;
5590 args->fc_attrs.max_resp_sz = mxresp_sz;
5591 args->fc_attrs.max_ops = NFS4_MAX_OPS;
5592 args->fc_attrs.max_reqs = max_session_slots;
5593
5594 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
5595 "max_ops=%u max_reqs=%u\n",
5596 __func__,
5597 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
5598 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
5599
5600 /* Back channel attributes */
5601 args->bc_attrs.max_rqst_sz = PAGE_SIZE;
5602 args->bc_attrs.max_resp_sz = PAGE_SIZE;
5603 args->bc_attrs.max_resp_sz_cached = 0;
5604 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
5605 args->bc_attrs.max_reqs = 1;
5606
5607 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
5608 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
5609 __func__,
5610 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
5611 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
5612 args->bc_attrs.max_reqs);
5613 }
5614
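/*
 * Sanity-check the fore channel attributes returned by CREATE_SESSION:
 * reject a reply that grants a larger max_resp_sz than we offered (we
 * could not buffer such replies), fewer operations per compound than we
 * require, or a zero slot count, and clamp the granted slot count to
 * NFS4_MAX_SLOT_TABLE.
 */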
5615 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
5616 {
5617 struct nfs4_channel_attrs *sent = &args->fc_attrs;
5618 struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
5619
5620 if (rcvd->max_resp_sz > sent->max_resp_sz)
5621 return -EINVAL;
5622 /*
5623 * Our requested max_ops is the minimum we need; we're not
5624 * prepared to break up compounds into smaller pieces than that.
5625 * So, no point even trying to continue if the server won't
5626 * cooperate:
5627 */
5628 if (rcvd->max_ops < sent->max_ops)
5629 return -EINVAL;
5630 if (rcvd->max_reqs == 0)
5631 return -EINVAL;
5632 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
5633 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
5634 return 0;
5635 }
5636
5637 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
5638 {
5639 struct nfs4_channel_attrs *sent = &args->bc_attrs;
5640 struct nfs4_channel_attrs *rcvd = &session->bc_attrs;
5641
5642 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
5643 return -EINVAL;
5644 if (rcvd->max_resp_sz < sent->max_resp_sz)
5645 return -EINVAL;
5646 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
5647 return -EINVAL;
5648 /* These would render the backchannel useless: */
5649 if (rcvd->max_ops != sent->max_ops)
5650 return -EINVAL;
5651 if (rcvd->max_reqs != sent->max_reqs)
5652 return -EINVAL;
5653 return 0;
5654 }
5655
5656 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
5657 struct nfs4_session *session)
5658 {
5659 int ret;
5660
5661 ret = nfs4_verify_fore_channel_attrs(args, session);
5662 if (ret)
5663 return ret;
5664 return nfs4_verify_back_channel_attrs(args, session);
5665 }
5666
5667 static int _nfs4_proc_create_session(struct nfs_client *clp,
5668 struct rpc_cred *cred)
5669 {
5670 struct nfs4_session *session = clp->cl_session;
5671 struct nfs41_create_session_args args = {
5672 .client = clp,
5673 .cb_program = NFS4_CALLBACK,
5674 };
5675 struct nfs41_create_session_res res = {
5676 .client = clp,
5677 };
5678 struct rpc_message msg = {
5679 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
5680 .rpc_argp = &args,
5681 .rpc_resp = &res,
5682 .rpc_cred = cred,
5683 };
5684 int status;
5685
5686 nfs4_init_channel_attrs(&args);
5687 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
5688
5689 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5690
5691 if (!status) {
5692 /* Verify the session's negotiated channel_attrs values */
5693 status = nfs4_verify_channel_attrs(&args, session);
5694 /* Increment the clientid slot sequence id */
5695 clp->cl_seqid++;
5696 }
5697
5698 return status;
5699 }
5700
5701 /*
5702 * Issues a CREATE_SESSION operation to the server.
5703 * It is the responsibility of the caller to verify the session is
5704 * expired before calling this routine.
5705 */
5706 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
5707 {
5708 int status;
5709 unsigned *ptr;
5710 struct nfs4_session *session = clp->cl_session;
5711
5712 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
5713
5714 status = _nfs4_proc_create_session(clp, cred);
5715 if (status)
5716 goto out;
5717
5718 /* Init or reset the session slot tables */
5719 status = nfs4_setup_session_slot_tables(session);
5720 dprintk("slot table setup returned %d\n", status);
5721 if (status)
5722 goto out;
5723
5724 ptr = (unsigned *)&session->sess_id.data[0];
5725 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
5726 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
5727 out:
5728 dprintk("<-- %s\n", __func__);
5729 return status;
5730 }
5731
5732 /*
5733 * Issue the over-the-wire RPC DESTROY_SESSION.
5734 * The caller must serialize access to this routine.
5735 */
5736 int nfs4_proc_destroy_session(struct nfs4_session *session,
5737 struct rpc_cred *cred)
5738 {
5739 struct rpc_message msg = {
5740 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
5741 .rpc_argp = session,
5742 .rpc_cred = cred,
5743 };
5744 int status = 0;
5745
5746 dprintk("--> nfs4_proc_destroy_session\n");
5747
5748 	/* session is still being set up */
5749 if (session->clp->cl_cons_state != NFS_CS_READY)
5750 return status;
5751
5752 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5753
5754 if (status)
5755 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
5756 "Session has been destroyed regardless...\n", status);
5757
5758 dprintk("<-- nfs4_proc_destroy_session\n");
5759 return status;
5760 }
5761
5762 /*
5763 * Renew the cl_session lease.
5764 */
5765 struct nfs4_sequence_data {
5766 struct nfs_client *clp;
5767 struct nfs4_sequence_args args;
5768 struct nfs4_sequence_res res;
5769 };
5770
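/*
 * Release callback for the SEQUENCE lease-renewal task: schedule the next
 * renewal only while other references to the client remain, then drop the
 * reference taken in _nfs41_proc_sequence() and free the call data.
 */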
5771 static void nfs41_sequence_release(void *data)
5772 {
5773 struct nfs4_sequence_data *calldata = data;
5774 struct nfs_client *clp = calldata->clp;
5775
5776 if (atomic_read(&clp->cl_count) > 1)
5777 nfs4_schedule_state_renewal(clp);
5778 nfs_put_client(clp);
5779 kfree(calldata);
5780 }
5781
5782 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
5783 {
5784 switch(task->tk_status) {
5785 case -NFS4ERR_DELAY:
5786 rpc_delay(task, NFS4_POLL_RETRY_MAX);
5787 return -EAGAIN;
5788 default:
5789 nfs4_schedule_lease_recovery(clp);
5790 }
5791 return 0;
5792 }
5793
5794 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
5795 {
5796 struct nfs4_sequence_data *calldata = data;
5797 struct nfs_client *clp = calldata->clp;
5798
5799 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
5800 return;
5801
5802 if (task->tk_status < 0) {
5803 dprintk("%s ERROR %d\n", __func__, task->tk_status);
5804 if (atomic_read(&clp->cl_count) == 1)
5805 goto out;
5806
5807 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
5808 rpc_restart_call_prepare(task);
5809 return;
5810 }
5811 }
5812 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
5813 out:
5814 dprintk("<-- %s\n", __func__);
5815 }
5816
5817 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
5818 {
5819 struct nfs4_sequence_data *calldata = data;
5820 struct nfs_client *clp = calldata->clp;
5821 struct nfs4_sequence_args *args;
5822 struct nfs4_sequence_res *res;
5823
5824 args = task->tk_msg.rpc_argp;
5825 res = task->tk_msg.rpc_resp;
5826
5827 nfs41_setup_sequence(clp->cl_session, args, res, task);
5828 }
5829
5830 static const struct rpc_call_ops nfs41_sequence_ops = {
5831 .rpc_call_done = nfs41_sequence_call_done,
5832 .rpc_call_prepare = nfs41_sequence_prepare,
5833 .rpc_release = nfs41_sequence_release,
5834 };
5835
5836 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
5837 struct rpc_cred *cred,
5838 bool is_privileged)
5839 {
5840 struct nfs4_sequence_data *calldata;
5841 struct rpc_message msg = {
5842 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
5843 .rpc_cred = cred,
5844 };
5845 struct rpc_task_setup task_setup_data = {
5846 .rpc_client = clp->cl_rpcclient,
5847 .rpc_message = &msg,
5848 .callback_ops = &nfs41_sequence_ops,
5849 .flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
5850 };
5851
5852 if (!atomic_inc_not_zero(&clp->cl_count))
5853 return ERR_PTR(-EIO);
5854 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
5855 if (calldata == NULL) {
5856 nfs_put_client(clp);
5857 return ERR_PTR(-ENOMEM);
5858 }
5859 nfs41_init_sequence(&calldata->args, &calldata->res, 0);
5860 if (is_privileged)
5861 nfs4_set_sequence_privileged(&calldata->args);
5862 msg.rpc_argp = &calldata->args;
5863 msg.rpc_resp = &calldata->res;
5864 calldata->clp = clp;
5865 task_setup_data.callback_data = calldata;
5866
5867 return rpc_run_task(&task_setup_data);
5868 }
5869
5870 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
5871 {
5872 struct rpc_task *task;
5873 int ret = 0;
5874
5875 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
5876 return 0;
5877 task = _nfs41_proc_sequence(clp, cred, false);
5878 if (IS_ERR(task))
5879 ret = PTR_ERR(task);
5880 else
5881 rpc_put_task_async(task);
5882 dprintk("<-- %s status=%d\n", __func__, ret);
5883 return ret;
5884 }
5885
5886 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
5887 {
5888 struct rpc_task *task;
5889 int ret;
5890
5891 task = _nfs41_proc_sequence(clp, cred, true);
5892 if (IS_ERR(task)) {
5893 ret = PTR_ERR(task);
5894 goto out;
5895 }
5896 ret = rpc_wait_for_completion_task(task);
5897 if (!ret) {
5898 struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
5899
5900 if (task->tk_status == 0)
5901 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
5902 ret = task->tk_status;
5903 }
5904 rpc_put_task(task);
5905 out:
5906 dprintk("<-- %s status=%d\n", __func__, ret);
5907 return ret;
5908 }
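/*
 * The two entry points above serve different callers:
 * nfs41_proc_async_sequence() is the scheduled lease-renewal path -- it
 * only runs when NFS4_RENEW_TIMEOUT is set and does not wait for the reply
 * -- while nfs4_proc_sequence() issues a privileged SEQUENCE synchronously
 * and feeds any sr_status_flags back into
 * nfs41_handle_sequence_flag_errors().
 */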
5909
5910 struct nfs4_reclaim_complete_data {
5911 struct nfs_client *clp;
5912 struct nfs41_reclaim_complete_args arg;
5913 struct nfs41_reclaim_complete_res res;
5914 };
5915
5916 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
5917 {
5918 struct nfs4_reclaim_complete_data *calldata = data;
5919
5920 nfs41_setup_sequence(calldata->clp->cl_session,
5921 &calldata->arg.seq_args,
5922 &calldata->res.seq_res,
5923 task);
5924 }
5925
5926 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
5927 {
5928 switch(task->tk_status) {
5929 case 0:
5930 case -NFS4ERR_COMPLETE_ALREADY:
5931 case -NFS4ERR_WRONG_CRED: /* What to do here? */
5932 break;
5933 case -NFS4ERR_DELAY:
5934 rpc_delay(task, NFS4_POLL_RETRY_MAX);
5935 /* fall through */
5936 case -NFS4ERR_RETRY_UNCACHED_REP:
5937 return -EAGAIN;
5938 default:
5939 nfs4_schedule_lease_recovery(clp);
5940 }
5941 return 0;
5942 }
5943
5944 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
5945 {
5946 struct nfs4_reclaim_complete_data *calldata = data;
5947 struct nfs_client *clp = calldata->clp;
5948 struct nfs4_sequence_res *res = &calldata->res.seq_res;
5949
5950 dprintk("--> %s\n", __func__);
5951 if (!nfs41_sequence_done(task, res))
5952 return;
5953
5954 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
5955 rpc_restart_call_prepare(task);
5956 return;
5957 }
5958 dprintk("<-- %s\n", __func__);
5959 }
5960
5961 static void nfs4_free_reclaim_complete_data(void *data)
5962 {
5963 struct nfs4_reclaim_complete_data *calldata = data;
5964
5965 kfree(calldata);
5966 }
5967
5968 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
5969 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
5970 .rpc_call_done = nfs4_reclaim_complete_done,
5971 .rpc_release = nfs4_free_reclaim_complete_data,
5972 };
5973
5974 /*
5975 * Issue a global reclaim complete.
5976 */
5977 static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
5978 {
5979 struct nfs4_reclaim_complete_data *calldata;
5980 struct rpc_task *task;
5981 struct rpc_message msg = {
5982 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
5983 };
5984 struct rpc_task_setup task_setup_data = {
5985 .rpc_client = clp->cl_rpcclient,
5986 .rpc_message = &msg,
5987 .callback_ops = &nfs4_reclaim_complete_call_ops,
5988 .flags = RPC_TASK_ASYNC,
5989 };
5990 int status = -ENOMEM;
5991
5992 dprintk("--> %s\n", __func__);
5993 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
5994 if (calldata == NULL)
5995 goto out;
5996 calldata->clp = clp;
5997 calldata->arg.one_fs = 0;
5998
5999 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
6000 nfs4_set_sequence_privileged(&calldata->arg.seq_args);
6001 msg.rpc_argp = &calldata->arg;
6002 msg.rpc_resp = &calldata->res;
6003 task_setup_data.callback_data = calldata;
6004 task = rpc_run_task(&task_setup_data);
6005 if (IS_ERR(task)) {
6006 status = PTR_ERR(task);
6007 goto out;
6008 }
6009 status = nfs4_wait_for_completion_rpc_task(task);
6010 if (status == 0)
6011 status = task->tk_status;
6012 rpc_put_task(task);
6013 return 0;
6014 out:
6015 dprintk("<-- %s status=%d\n", __func__, status);
6016 return status;
6017 }
6018
6019 static void
6020 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
6021 {
6022 struct nfs4_layoutget *lgp = calldata;
6023 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
6024 struct nfs4_session *session = nfs4_get_session(server);
6025
6026 dprintk("--> %s\n", __func__);
6027 	/* Note there is a race here, where a CB_LAYOUTRECALL can come in
6028 * right now covering the LAYOUTGET we are about to send.
6029 * However, that is not so catastrophic, and there seems
6030 * to be no way to prevent it completely.
6031 */
6032 if (nfs41_setup_sequence(session, &lgp->args.seq_args,
6033 &lgp->res.seq_res, task))
6034 return;
6035 if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
6036 NFS_I(lgp->args.inode)->layout,
6037 lgp->args.ctx->state)) {
6038 rpc_exit(task, NFS4_OK);
6039 }
6040 }
6041
6042 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
6043 {
6044 struct nfs4_layoutget *lgp = calldata;
6045 struct inode *inode = lgp->args.inode;
6046 struct nfs_server *server = NFS_SERVER(inode);
6047 struct pnfs_layout_hdr *lo;
6048 struct nfs4_state *state = NULL;
6049
6050 dprintk("--> %s\n", __func__);
6051
6052 if (!nfs41_sequence_done(task, &lgp->res.seq_res))
6053 goto out;
6054
6055 switch (task->tk_status) {
6056 case 0:
6057 goto out;
6058 case -NFS4ERR_LAYOUTTRYLATER:
6059 case -NFS4ERR_RECALLCONFLICT:
6060 task->tk_status = -NFS4ERR_DELAY;
6061 break;
6062 case -NFS4ERR_EXPIRED:
6063 case -NFS4ERR_BAD_STATEID:
6064 spin_lock(&inode->i_lock);
6065 lo = NFS_I(inode)->layout;
6066 if (!lo || list_empty(&lo->plh_segs)) {
6067 spin_unlock(&inode->i_lock);
6068 /* If the open stateid was bad, then recover it. */
6069 state = lgp->args.ctx->state;
6070 } else {
6071 LIST_HEAD(head);
6072
6073 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
6074 spin_unlock(&inode->i_lock);
6075 /* Mark the bad layout state as invalid, then
6076 * retry using the open stateid. */
6077 pnfs_free_lseg_list(&head);
6078 }
6079 }
6080 if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
6081 rpc_restart_call_prepare(task);
6082 out:
6083 dprintk("<-- %s\n", __func__);
6084 }
6085
6086 static size_t max_response_pages(struct nfs_server *server)
6087 {
6088 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
6089 return nfs_page_array_len(0, max_resp_sz);
6090 }
6091
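/*
 * Free a page array built by nfs4_alloc_pages() below.  The array may be
 * only partially populated (allocation stops at the first failure), so
 * stop at the first NULL entry before freeing the array itself.
 */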
6092 static void nfs4_free_pages(struct page **pages, size_t size)
6093 {
6094 int i;
6095
6096 if (!pages)
6097 return;
6098
6099 for (i = 0; i < size; i++) {
6100 if (!pages[i])
6101 break;
6102 __free_page(pages[i]);
6103 }
6104 kfree(pages);
6105 }
6106
6107 static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
6108 {
6109 struct page **pages;
6110 int i;
6111
6112 pages = kcalloc(size, sizeof(struct page *), gfp_flags);
6113 if (!pages) {
6114 dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
6115 return NULL;
6116 }
6117
6118 for (i = 0; i < size; i++) {
6119 pages[i] = alloc_page(gfp_flags);
6120 if (!pages[i]) {
6121 dprintk("%s: failed to allocate page\n", __func__);
6122 nfs4_free_pages(pages, size);
6123 return NULL;
6124 }
6125 }
6126
6127 return pages;
6128 }
6129
6130 static void nfs4_layoutget_release(void *calldata)
6131 {
6132 struct nfs4_layoutget *lgp = calldata;
6133 struct inode *inode = lgp->args.inode;
6134 struct nfs_server *server = NFS_SERVER(inode);
6135 size_t max_pages = max_response_pages(server);
6136
6137 dprintk("--> %s\n", __func__);
6138 nfs4_free_pages(lgp->args.layout.pages, max_pages);
6139 pnfs_put_layout_hdr(NFS_I(inode)->layout);
6140 put_nfs_open_context(lgp->args.ctx);
6141 kfree(calldata);
6142 dprintk("<-- %s\n", __func__);
6143 }
6144
6145 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
6146 .rpc_call_prepare = nfs4_layoutget_prepare,
6147 .rpc_call_done = nfs4_layoutget_done,
6148 .rpc_release = nfs4_layoutget_release,
6149 };
6150
6151 struct pnfs_layout_segment *
6152 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
6153 {
6154 struct inode *inode = lgp->args.inode;
6155 struct nfs_server *server = NFS_SERVER(inode);
6156 size_t max_pages = max_response_pages(server);
6157 struct rpc_task *task;
6158 struct rpc_message msg = {
6159 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
6160 .rpc_argp = &lgp->args,
6161 .rpc_resp = &lgp->res,
6162 };
6163 struct rpc_task_setup task_setup_data = {
6164 .rpc_client = server->client,
6165 .rpc_message = &msg,
6166 .callback_ops = &nfs4_layoutget_call_ops,
6167 .callback_data = lgp,
6168 .flags = RPC_TASK_ASYNC,
6169 };
6170 struct pnfs_layout_segment *lseg = NULL;
6171 int status = 0;
6172
6173 dprintk("--> %s\n", __func__);
6174
6175 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
6176 if (!lgp->args.layout.pages) {
6177 nfs4_layoutget_release(lgp);
6178 return ERR_PTR(-ENOMEM);
6179 }
6180 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
6181
6182 lgp->res.layoutp = &lgp->args.layout;
6183 lgp->res.seq_res.sr_slot = NULL;
6184 nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
6185
6186 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
6187 pnfs_get_layout_hdr(NFS_I(inode)->layout);
6188
6189 task = rpc_run_task(&task_setup_data);
6190 if (IS_ERR(task))
6191 return ERR_CAST(task);
6192 status = nfs4_wait_for_completion_rpc_task(task);
6193 if (status == 0)
6194 status = task->tk_status;
6195 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
6196 if (status == 0 && lgp->res.layoutp->len)
6197 lseg = pnfs_layout_process(lgp);
6198 rpc_put_task(task);
6199 dprintk("<-- %s status=%d\n", __func__, status);
6200 if (status)
6201 return ERR_PTR(status);
6202 return lseg;
6203 }
6204
6205 static void
6206 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
6207 {
6208 struct nfs4_layoutreturn *lrp = calldata;
6209
6210 dprintk("--> %s\n", __func__);
6211 nfs41_setup_sequence(lrp->clp->cl_session,
6212 &lrp->args.seq_args,
6213 &lrp->res.seq_res,
6214 task);
6215 }
6216
6217 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
6218 {
6219 struct nfs4_layoutreturn *lrp = calldata;
6220 struct nfs_server *server;
6221
6222 dprintk("--> %s\n", __func__);
6223
6224 if (!nfs41_sequence_done(task, &lrp->res.seq_res))
6225 return;
6226
6227 server = NFS_SERVER(lrp->args.inode);
6228 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
6229 rpc_restart_call_prepare(task);
6230 return;
6231 }
6232 dprintk("<-- %s\n", __func__);
6233 }
6234
6235 static void nfs4_layoutreturn_release(void *calldata)
6236 {
6237 struct nfs4_layoutreturn *lrp = calldata;
6238 struct pnfs_layout_hdr *lo = lrp->args.layout;
6239
6240 dprintk("--> %s\n", __func__);
6241 spin_lock(&lo->plh_inode->i_lock);
6242 if (lrp->res.lrs_present)
6243 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
6244 lo->plh_block_lgets--;
6245 spin_unlock(&lo->plh_inode->i_lock);
6246 pnfs_put_layout_hdr(lrp->args.layout);
6247 kfree(calldata);
6248 dprintk("<-- %s\n", __func__);
6249 }
6250
6251 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
6252 .rpc_call_prepare = nfs4_layoutreturn_prepare,
6253 .rpc_call_done = nfs4_layoutreturn_done,
6254 .rpc_release = nfs4_layoutreturn_release,
6255 };
6256
6257 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
6258 {
6259 struct rpc_task *task;
6260 struct rpc_message msg = {
6261 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
6262 .rpc_argp = &lrp->args,
6263 .rpc_resp = &lrp->res,
6264 };
6265 struct rpc_task_setup task_setup_data = {
6266 .rpc_client = lrp->clp->cl_rpcclient,
6267 .rpc_message = &msg,
6268 .callback_ops = &nfs4_layoutreturn_call_ops,
6269 .callback_data = lrp,
6270 };
6271 int status;
6272
6273 dprintk("--> %s\n", __func__);
6274 nfs41_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
6275 task = rpc_run_task(&task_setup_data);
6276 if (IS_ERR(task))
6277 return PTR_ERR(task);
6278 status = task->tk_status;
6279 dprintk("<-- %s status=%d\n", __func__, status);
6280 rpc_put_task(task);
6281 return status;
6282 }
6283
6284 /*
6285 * Retrieve the list of Data Server devices from the MDS.
6286 */
6287 static int _nfs4_getdevicelist(struct nfs_server *server,
6288 const struct nfs_fh *fh,
6289 struct pnfs_devicelist *devlist)
6290 {
6291 struct nfs4_getdevicelist_args args = {
6292 .fh = fh,
6293 .layoutclass = server->pnfs_curr_ld->id,
6294 };
6295 struct nfs4_getdevicelist_res res = {
6296 .devlist = devlist,
6297 };
6298 struct rpc_message msg = {
6299 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST],
6300 .rpc_argp = &args,
6301 .rpc_resp = &res,
6302 };
6303 int status;
6304
6305 dprintk("--> %s\n", __func__);
6306 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
6307 &res.seq_res, 0);
6308 dprintk("<-- %s status=%d\n", __func__, status);
6309 return status;
6310 }
6311
6312 int nfs4_proc_getdevicelist(struct nfs_server *server,
6313 const struct nfs_fh *fh,
6314 struct pnfs_devicelist *devlist)
6315 {
6316 struct nfs4_exception exception = { };
6317 int err;
6318
6319 do {
6320 err = nfs4_handle_exception(server,
6321 _nfs4_getdevicelist(server, fh, devlist),
6322 &exception);
6323 } while (exception.retry);
6324
6325 dprintk("%s: err=%d, num_devs=%u\n", __func__,
6326 err, devlist->num_devs);
6327
6328 return err;
6329 }
6330 EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist);
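/*
 * Illustrative sketch only (hypothetical helper, not part of the NFS
 * client): one way a pNFS layout driver might use the exported
 * nfs4_proc_getdevicelist().  Only devlist->num_devs is assumed here,
 * since the wrapper above already dereferences it.
 */
static int example_dump_devicelist(struct nfs_server *server,
				   const struct nfs_fh *fh)
{
	struct pnfs_devicelist *devlist;
	int err;

	devlist = kzalloc(sizeof(*devlist), GFP_KERNEL);
	if (!devlist)
		return -ENOMEM;
	err = nfs4_proc_getdevicelist(server, fh, devlist);
	if (err == 0)
		dprintk("%s: server reported %u data-server device(s)\n",
			__func__, devlist->num_devs);
	kfree(devlist);
	return err;
}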
6331
6332 static int
6333 _nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
6334 {
6335 struct nfs4_getdeviceinfo_args args = {
6336 .pdev = pdev,
6337 };
6338 struct nfs4_getdeviceinfo_res res = {
6339 .pdev = pdev,
6340 };
6341 struct rpc_message msg = {
6342 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
6343 .rpc_argp = &args,
6344 .rpc_resp = &res,
6345 };
6346 int status;
6347
6348 dprintk("--> %s\n", __func__);
6349 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
6350 dprintk("<-- %s status=%d\n", __func__, status);
6351
6352 return status;
6353 }
6354
6355 int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
6356 {
6357 struct nfs4_exception exception = { };
6358 int err;
6359
6360 do {
6361 err = nfs4_handle_exception(server,
6362 _nfs4_proc_getdeviceinfo(server, pdev),
6363 &exception);
6364 } while (exception.retry);
6365 return err;
6366 }
6367 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
6368
6369 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
6370 {
6371 struct nfs4_layoutcommit_data *data = calldata;
6372 struct nfs_server *server = NFS_SERVER(data->args.inode);
6373 struct nfs4_session *session = nfs4_get_session(server);
6374
6375 nfs41_setup_sequence(session,
6376 &data->args.seq_args,
6377 &data->res.seq_res,
6378 task);
6379 }
6380
6381 static void
6382 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
6383 {
6384 struct nfs4_layoutcommit_data *data = calldata;
6385 struct nfs_server *server = NFS_SERVER(data->args.inode);
6386
6387 if (!nfs41_sequence_done(task, &data->res.seq_res))
6388 return;
6389
6390 switch (task->tk_status) { /* Just ignore these failures */
6391 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
6392 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
6393 case -NFS4ERR_BADLAYOUT: /* no layout */
6394 	case -NFS4ERR_GRACE:		/* loca_reclaim is always false */
6395 task->tk_status = 0;
6396 break;
6397 case 0:
6398 nfs_post_op_update_inode_force_wcc(data->args.inode,
6399 data->res.fattr);
6400 break;
6401 default:
6402 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
6403 rpc_restart_call_prepare(task);
6404 return;
6405 }
6406 }
6407 }
6408
6409 static void nfs4_layoutcommit_release(void *calldata)
6410 {
6411 struct nfs4_layoutcommit_data *data = calldata;
6412 struct pnfs_layout_segment *lseg, *tmp;
6413 unsigned long *bitlock = &NFS_I(data->args.inode)->flags;
6414
6415 pnfs_cleanup_layoutcommit(data);
6416 /* Matched by references in pnfs_set_layoutcommit */
6417 list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) {
6418 list_del_init(&lseg->pls_lc_list);
6419 if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
6420 &lseg->pls_flags))
6421 pnfs_put_lseg(lseg);
6422 }
6423
6424 clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
6425 smp_mb__after_clear_bit();
6426 wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
6427
6428 put_rpccred(data->cred);
6429 kfree(data);
6430 }
6431
6432 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
6433 .rpc_call_prepare = nfs4_layoutcommit_prepare,
6434 .rpc_call_done = nfs4_layoutcommit_done,
6435 .rpc_release = nfs4_layoutcommit_release,
6436 };
6437
6438 int
6439 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
6440 {
6441 struct rpc_message msg = {
6442 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
6443 .rpc_argp = &data->args,
6444 .rpc_resp = &data->res,
6445 .rpc_cred = data->cred,
6446 };
6447 struct rpc_task_setup task_setup_data = {
6448 .task = &data->task,
6449 .rpc_client = NFS_CLIENT(data->args.inode),
6450 .rpc_message = &msg,
6451 .callback_ops = &nfs4_layoutcommit_ops,
6452 .callback_data = data,
6453 .flags = RPC_TASK_ASYNC,
6454 };
6455 struct rpc_task *task;
6456 int status = 0;
6457
6458 dprintk("NFS: %4d initiating layoutcommit call. sync %d "
6459 "lbw: %llu inode %lu\n",
6460 data->task.tk_pid, sync,
6461 data->args.lastbytewritten,
6462 data->args.inode->i_ino);
6463
6464 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
6465 task = rpc_run_task(&task_setup_data);
6466 if (IS_ERR(task))
6467 return PTR_ERR(task);
6468 	if (!sync)
6469 goto out;
6470 status = nfs4_wait_for_completion_rpc_task(task);
6471 if (status != 0)
6472 goto out;
6473 status = task->tk_status;
6474 out:
6475 dprintk("%s: status %d\n", __func__, status);
6476 rpc_put_task(task);
6477 return status;
6478 }
6479
6480 static int
6481 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
6482 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
6483 {
6484 struct nfs41_secinfo_no_name_args args = {
6485 .style = SECINFO_STYLE_CURRENT_FH,
6486 };
6487 struct nfs4_secinfo_res res = {
6488 .flavors = flavors,
6489 };
6490 struct rpc_message msg = {
6491 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
6492 .rpc_argp = &args,
6493 .rpc_resp = &res,
6494 };
6495 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
6496 }
6497
6498 static int
6499 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
6500 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
6501 {
6502 struct nfs4_exception exception = { };
6503 int err;
6504 do {
6505 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
6506 switch (err) {
6507 case 0:
6508 case -NFS4ERR_WRONGSEC:
6509 case -NFS4ERR_NOTSUPP:
6510 goto out;
6511 default:
6512 err = nfs4_handle_exception(server, err, &exception);
6513 }
6514 } while (exception.retry);
6515 out:
6516 return err;
6517 }
6518
6519 static int
6520 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
6521 struct nfs_fsinfo *info)
6522 {
6523 int err;
6524 struct page *page;
6525 rpc_authflavor_t flavor;
6526 struct nfs4_secinfo_flavors *flavors;
6527
6528 page = alloc_page(GFP_KERNEL);
6529 if (!page) {
6530 err = -ENOMEM;
6531 goto out;
6532 }
6533
6534 flavors = page_address(page);
6535 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
6536
6537 /*
6538 * Fall back on "guess and check" method if
6539 * the server doesn't support SECINFO_NO_NAME
6540 */
6541 if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) {
6542 err = nfs4_find_root_sec(server, fhandle, info);
6543 goto out_freepage;
6544 }
6545 if (err)
6546 goto out_freepage;
6547
6548 flavor = nfs_find_best_sec(flavors);
6549 if (err == 0)
6550 err = nfs4_lookup_root_sec(server, fhandle, info, flavor);
6551
6552 out_freepage:
6553 put_page(page);
6554 if (err == -EACCES)
6555 return -EPERM;
6556 out:
6557 return err;
6558 }
6559
6560 static int _nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6561 {
6562 int status;
6563 struct nfs41_test_stateid_args args = {
6564 .stateid = stateid,
6565 };
6566 struct nfs41_test_stateid_res res;
6567 struct rpc_message msg = {
6568 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
6569 .rpc_argp = &args,
6570 .rpc_resp = &res,
6571 };
6572
6573 dprintk("NFS call test_stateid %p\n", stateid);
6574 nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
6575 nfs4_set_sequence_privileged(&args.seq_args);
6576 status = nfs4_call_sync_sequence(server->client, server, &msg,
6577 &args.seq_args, &res.seq_res);
6578 if (status != NFS_OK) {
6579 dprintk("NFS reply test_stateid: failed, %d\n", status);
6580 return status;
6581 }
6582 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
6583 return -res.status;
6584 }
6585
6586 /**
6587 * nfs41_test_stateid - perform a TEST_STATEID operation
6588 *
6589 * @server: server / transport on which to perform the operation
6590 * @stateid: state ID to test
6591 *
6592 * Returns NFS_OK if the server recognizes that "stateid" is valid.
6593 * Otherwise a negative NFS4ERR value is returned if the operation
6594 * failed or the state ID is not currently valid.
6595 */
6596 static int nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6597 {
6598 struct nfs4_exception exception = { };
6599 int err;
6600 do {
6601 err = _nfs41_test_stateid(server, stateid);
6602 if (err != -NFS4ERR_DELAY)
6603 break;
6604 nfs4_handle_exception(server, err, &exception);
6605 } while (exception.retry);
6606 return err;
6607 }
6608
6609 static int _nfs4_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6610 {
6611 struct nfs41_free_stateid_args args = {
6612 .stateid = stateid,
6613 };
6614 struct nfs41_free_stateid_res res;
6615 struct rpc_message msg = {
6616 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
6617 .rpc_argp = &args,
6618 .rpc_resp = &res,
6619 };
6620 int status;
6621
6622 dprintk("NFS call free_stateid %p\n", stateid);
6623 nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
6624 nfs4_set_sequence_privileged(&args.seq_args);
6625 status = nfs4_call_sync_sequence(server->client, server, &msg,
6626 &args.seq_args, &res.seq_res);
6627 dprintk("NFS reply free_stateid: %d\n", status);
6628 return status;
6629 }
6630
6631 /**
6632 * nfs41_free_stateid - perform a FREE_STATEID operation
6633 *
6634 * @server: server / transport on which to perform the operation
6635 * @stateid: state ID to release
6636 *
6637 * Returns NFS_OK if the server freed "stateid". Otherwise a
6638 * negative NFS4ERR value is returned.
6639 */
6640 static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6641 {
6642 struct nfs4_exception exception = { };
6643 int err;
6644 do {
6645 err = _nfs4_free_stateid(server, stateid);
6646 if (err != -NFS4ERR_DELAY)
6647 break;
6648 nfs4_handle_exception(server, err, &exception);
6649 } while (exception.retry);
6650 return err;
6651 }
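/*
 * Illustrative sketch only (hypothetical helper, and not the recovery
 * logic this file actually uses): the two helpers above are typically
 * paired -- probe a stateid with TEST_STATEID and, if the server no longer
 * considers it valid, tell the server to forget it with FREE_STATEID.
 */
static int example_check_and_free_stateid(struct nfs_server *server,
					  nfs4_stateid *stateid)
{
	int status = nfs41_test_stateid(server, stateid);

	if (status != NFS_OK) {
		/* server no longer recognizes the stateid: release it */
		nfs41_free_stateid(server, stateid);
		return status;
	}
	return NFS_OK;
}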
6652
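/*
 * Two NFSv4.1 stateids match when their "other" fields are identical and
 * either the seqids agree or one of them is zero: RFC 5661 treats a zero
 * seqid as a wildcard designating the current incarnation of the stateid,
 * so it compares equal to any seqid.
 */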
6653 static bool nfs41_match_stateid(const nfs4_stateid *s1,
6654 const nfs4_stateid *s2)
6655 {
6656 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
6657 return false;
6658
6659 if (s1->seqid == s2->seqid)
6660 return true;
6661 if (s1->seqid == 0 || s2->seqid == 0)
6662 return true;
6663
6664 return false;
6665 }
6666
6667 #endif /* CONFIG_NFS_V4_1 */
6668
6669 static bool nfs4_match_stateid(const nfs4_stateid *s1,
6670 const nfs4_stateid *s2)
6671 {
6672 return nfs4_stateid_match(s1, s2);
6673 }
6674
6675
6676 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
6677 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
6678 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
6679 .recover_open = nfs4_open_reclaim,
6680 .recover_lock = nfs4_lock_reclaim,
6681 .establish_clid = nfs4_init_clientid,
6682 .get_clid_cred = nfs4_get_setclientid_cred,
6683 .detect_trunking = nfs40_discover_server_trunking,
6684 };
6685
6686 #if defined(CONFIG_NFS_V4_1)
6687 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
6688 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
6689 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
6690 .recover_open = nfs4_open_reclaim,
6691 .recover_lock = nfs4_lock_reclaim,
6692 .establish_clid = nfs41_init_clientid,
6693 .get_clid_cred = nfs4_get_exchange_id_cred,
6694 .reclaim_complete = nfs41_proc_reclaim_complete,
6695 .detect_trunking = nfs41_discover_server_trunking,
6696 };
6697 #endif /* CONFIG_NFS_V4_1 */
6698
6699 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
6700 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
6701 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
6702 .recover_open = nfs4_open_expired,
6703 .recover_lock = nfs4_lock_expired,
6704 .establish_clid = nfs4_init_clientid,
6705 .get_clid_cred = nfs4_get_setclientid_cred,
6706 };
6707
6708 #if defined(CONFIG_NFS_V4_1)
6709 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
6710 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
6711 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
6712 .recover_open = nfs41_open_expired,
6713 .recover_lock = nfs41_lock_expired,
6714 .establish_clid = nfs41_init_clientid,
6715 .get_clid_cred = nfs4_get_exchange_id_cred,
6716 };
6717 #endif /* CONFIG_NFS_V4_1 */
6718
6719 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
6720 .sched_state_renewal = nfs4_proc_async_renew,
6721 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
6722 .renew_lease = nfs4_proc_renew,
6723 };
6724
6725 #if defined(CONFIG_NFS_V4_1)
6726 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
6727 .sched_state_renewal = nfs41_proc_async_sequence,
6728 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
6729 .renew_lease = nfs4_proc_sequence,
6730 };
6731 #endif
6732
6733 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
6734 .minor_version = 0,
6735 .call_sync = _nfs4_call_sync,
6736 .match_stateid = nfs4_match_stateid,
6737 .find_root_sec = nfs4_find_root_sec,
6738 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
6739 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
6740 .state_renewal_ops = &nfs40_state_renewal_ops,
6741 };
6742
6743 #if defined(CONFIG_NFS_V4_1)
6744 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
6745 .minor_version = 1,
6746 .call_sync = nfs4_call_sync_sequence,
6747 .match_stateid = nfs41_match_stateid,
6748 .find_root_sec = nfs41_find_root_sec,
6749 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
6750 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
6751 .state_renewal_ops = &nfs41_state_renewal_ops,
6752 };
6753 #endif
6754
6755 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
6756 [0] = &nfs_v4_0_minor_ops,
6757 #if defined(CONFIG_NFS_V4_1)
6758 [1] = &nfs_v4_1_minor_ops,
6759 #endif
6760 };
6761
6762 const struct inode_operations nfs4_dir_inode_operations = {
6763 .create = nfs_create,
6764 .lookup = nfs_lookup,
6765 .atomic_open = nfs_atomic_open,
6766 .link = nfs_link,
6767 .unlink = nfs_unlink,
6768 .symlink = nfs_symlink,
6769 .mkdir = nfs_mkdir,
6770 .rmdir = nfs_rmdir,
6771 .mknod = nfs_mknod,
6772 .rename = nfs_rename,
6773 .permission = nfs_permission,
6774 .getattr = nfs_getattr,
6775 .setattr = nfs_setattr,
6776 .getxattr = generic_getxattr,
6777 .setxattr = generic_setxattr,
6778 .listxattr = generic_listxattr,
6779 .removexattr = generic_removexattr,
6780 };
6781
6782 static const struct inode_operations nfs4_file_inode_operations = {
6783 .permission = nfs_permission,
6784 .getattr = nfs_getattr,
6785 .setattr = nfs_setattr,
6786 .getxattr = generic_getxattr,
6787 .setxattr = generic_setxattr,
6788 .listxattr = generic_listxattr,
6789 .removexattr = generic_removexattr,
6790 };
6791
6792 const struct nfs_rpc_ops nfs_v4_clientops = {
6793 .version = 4, /* protocol version */
6794 .dentry_ops = &nfs4_dentry_operations,
6795 .dir_inode_ops = &nfs4_dir_inode_operations,
6796 .file_inode_ops = &nfs4_file_inode_operations,
6797 .file_ops = &nfs4_file_operations,
6798 .getroot = nfs4_proc_get_root,
6799 .submount = nfs4_submount,
6800 .try_mount = nfs4_try_mount,
6801 .getattr = nfs4_proc_getattr,
6802 .setattr = nfs4_proc_setattr,
6803 .lookup = nfs4_proc_lookup,
6804 .access = nfs4_proc_access,
6805 .readlink = nfs4_proc_readlink,
6806 .create = nfs4_proc_create,
6807 .remove = nfs4_proc_remove,
6808 .unlink_setup = nfs4_proc_unlink_setup,
6809 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
6810 .unlink_done = nfs4_proc_unlink_done,
6811 .rename = nfs4_proc_rename,
6812 .rename_setup = nfs4_proc_rename_setup,
6813 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
6814 .rename_done = nfs4_proc_rename_done,
6815 .link = nfs4_proc_link,
6816 .symlink = nfs4_proc_symlink,
6817 .mkdir = nfs4_proc_mkdir,
6818 .rmdir = nfs4_proc_remove,
6819 .readdir = nfs4_proc_readdir,
6820 .mknod = nfs4_proc_mknod,
6821 .statfs = nfs4_proc_statfs,
6822 .fsinfo = nfs4_proc_fsinfo,
6823 .pathconf = nfs4_proc_pathconf,
6824 .set_capabilities = nfs4_server_capabilities,
6825 .decode_dirent = nfs4_decode_dirent,
6826 .read_setup = nfs4_proc_read_setup,
6827 .read_pageio_init = pnfs_pageio_init_read,
6828 .read_rpc_prepare = nfs4_proc_read_rpc_prepare,
6829 .read_done = nfs4_read_done,
6830 .write_setup = nfs4_proc_write_setup,
6831 .write_pageio_init = pnfs_pageio_init_write,
6832 .write_rpc_prepare = nfs4_proc_write_rpc_prepare,
6833 .write_done = nfs4_write_done,
6834 .commit_setup = nfs4_proc_commit_setup,
6835 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
6836 .commit_done = nfs4_commit_done,
6837 .lock = nfs4_proc_lock,
6838 .clear_acl_cache = nfs4_zap_acl_attr,
6839 .close_context = nfs4_close_context,
6840 .open_context = nfs4_atomic_open,
6841 .have_delegation = nfs4_have_delegation,
6842 .return_delegation = nfs4_inode_return_delegation,
6843 .alloc_client = nfs4_alloc_client,
6844 .init_client = nfs4_init_client,
6845 .free_client = nfs4_free_client,
6846 .create_server = nfs4_create_server,
6847 .clone_server = nfs_clone_server,
6848 };
6849
6850 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
6851 .prefix = XATTR_NAME_NFSV4_ACL,
6852 .list = nfs4_xattr_list_nfs4_acl,
6853 .get = nfs4_xattr_get_nfs4_acl,
6854 .set = nfs4_xattr_set_nfs4_acl,
6855 };
6856
6857 const struct xattr_handler *nfs4_xattr_handlers[] = {
6858 &nfs4_xattr_nfs4_acl_handler,
6859 NULL
6860 };
6861
6862 /*
6863 * Local variables:
6864 * c-basic-offset: 8
6865 * End:
6866 */