nfsd4: better stateid hashing
fs/nfsd/nfs4state.c
1 /*
2 * Copyright (c) 2001 The Regents of the University of Michigan.
3 * All rights reserved.
4 *
5 * Kendrick Smith <kmsmith@umich.edu>
6 * Andy Adamson <kandros@umich.edu>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the name of the University nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35 #include <linux/file.h>
36 #include <linux/fs.h>
37 #include <linux/slab.h>
38 #include <linux/namei.h>
39 #include <linux/swap.h>
40 #include <linux/pagemap.h>
41 #include <linux/sunrpc/svcauth_gss.h>
42 #include <linux/sunrpc/clnt.h>
43 #include "xdr4.h"
44 #include "vfs.h"
45
46 #define NFSDDBG_FACILITY NFSDDBG_PROC
47
48 /* Globals */
49 time_t nfsd4_lease = 90; /* default lease time */
50 time_t nfsd4_grace = 90;
51 static time_t boot_time;
52 static u32 current_ownerid = 1;
53 static u32 current_fileid = 1;
54 static u32 current_delegid = 1;
55 static stateid_t zerostateid; /* bits all 0 */
56 static stateid_t onestateid; /* bits all 1 */
57 static u64 current_sessionid = 1;
58
59 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zerostateid, sizeof(stateid_t)))
60 #define ONE_STATEID(stateid) (!memcmp((stateid), &onestateid, sizeof(stateid_t)))
61
62 /* forward declarations */
63 static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner);
64
65 /* Locking: */
66
67 /* Currently used for almost all code touching nfsv4 state: */
68 static DEFINE_MUTEX(client_mutex);
69
70 /*
71 * Currently used for the del_recall_lru and file hash table. In an
72 * effort to decrease the scope of the client_mutex, this spinlock may
73 * eventually cover more:
74 */
75 static DEFINE_SPINLOCK(recall_lock);
76
77 static struct kmem_cache *openowner_slab = NULL;
78 static struct kmem_cache *lockowner_slab = NULL;
79 static struct kmem_cache *file_slab = NULL;
80 static struct kmem_cache *stateid_slab = NULL;
81 static struct kmem_cache *deleg_slab = NULL;
82
83 void
84 nfs4_lock_state(void)
85 {
86 mutex_lock(&client_mutex);
87 }
88
89 void
90 nfs4_unlock_state(void)
91 {
92 mutex_unlock(&client_mutex);
93 }
94
95 static inline u32
96 opaque_hashval(const void *ptr, int nbytes)
97 {
98 unsigned char *cptr = (unsigned char *) ptr;
99
100 u32 x = 0;
101 while (nbytes--) {
102 x *= 37;
103 x += *cptr++;
104 }
105 return x;
106 }
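/*
 * Annotation (worked example, not in the original): this is a plain
 * multiply-by-37 accumulator, so for the 3-byte buffer "abc" it returns
 *
 *	((0*37 + 'a')*37 + 'b')*37 + 'c' = 136518
 *
 * and callers mask the result down to their table size, as in
 * open_ownerstr_hashval() and stateid_hashval() below.
 */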
107
108 static struct list_head del_recall_lru;
109
110 static inline void
111 put_nfs4_file(struct nfs4_file *fi)
112 {
113 if (atomic_dec_and_lock(&fi->fi_ref, &recall_lock)) {
114 list_del(&fi->fi_hash);
115 spin_unlock(&recall_lock);
116 iput(fi->fi_inode);
117 kmem_cache_free(file_slab, fi);
118 }
119 }
120
121 static inline void
122 get_nfs4_file(struct nfs4_file *fi)
123 {
124 atomic_inc(&fi->fi_ref);
125 }
126
127 static int num_delegations;
128 unsigned int max_delegations;
129
130 /*
131 * Open owner state (share locks)
132 */
133
134 /* hash tables for open owners */
135 #define OPEN_OWNER_HASH_BITS 8
136 #define OPEN_OWNER_HASH_SIZE (1 << OPEN_OWNER_HASH_BITS)
137 #define OPEN_OWNER_HASH_MASK (OPEN_OWNER_HASH_SIZE - 1)
138
139 static unsigned int open_ownerid_hashval(const u32 id)
140 {
141 return id & OPEN_OWNER_HASH_MASK;
142 }
143
144 static unsigned int open_ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername)
145 {
146 unsigned int ret;
147
148 ret = opaque_hashval(ownername->data, ownername->len);
149 ret += clientid;
150 return ret & OPEN_OWNER_HASH_MASK;
151 }
152
153 static struct list_head open_ownerid_hashtbl[OPEN_OWNER_HASH_SIZE];
154 static struct list_head open_ownerstr_hashtbl[OPEN_OWNER_HASH_SIZE];
155
156 /* hash table for nfs4_file */
157 #define FILE_HASH_BITS 8
158 #define FILE_HASH_SIZE (1 << FILE_HASH_BITS)
159
160 /* hash table for (open)nfs4_ol_stateid */
161 #define STATEID_HASH_BITS 10
162 #define STATEID_HASH_SIZE (1 << STATEID_HASH_BITS)
163 #define STATEID_HASH_MASK (STATEID_HASH_SIZE - 1)
164
165 static unsigned int file_hashval(struct inode *ino)
166 {
167 /* XXX: why are we hashing on inode pointer, anyway? */
168 return hash_ptr(ino, FILE_HASH_BITS);
169 }
170
171 static unsigned int stateid_hashval(stateid_t *s)
172 {
173 return opaque_hashval(&s->si_opaque, sizeof(stateid_opaque_t)) & STATEID_HASH_MASK;
174 }
175
176 static struct list_head file_hashtbl[FILE_HASH_SIZE];
177 static struct list_head stateid_hashtbl[STATEID_HASH_SIZE];
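/*
 * Annotation (sketch of the scheme): an nfs4_stid is chained into the
 * single stateid_hashtbl by hashing the full si_opaque blob (boot time,
 * owner id, file id):
 *
 *	hashval = opaque_hashval(&s->si_opaque, sizeof(stateid_opaque_t))
 *			& STATEID_HASH_MASK;
 *	list_add(&stid->sc_hash, &stateid_hashtbl[hashval]);
 *
 * si_generation is excluded, so every generation of a stateid lands in
 * the same chain; hash_stid() and find_stateid() below implement this.
 */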
178
179 static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag)
180 {
181 BUG_ON(!(fp->fi_fds[oflag] || fp->fi_fds[O_RDWR]));
182 atomic_inc(&fp->fi_access[oflag]);
183 }
184
185 static void nfs4_file_get_access(struct nfs4_file *fp, int oflag)
186 {
187 if (oflag == O_RDWR) {
188 __nfs4_file_get_access(fp, O_RDONLY);
189 __nfs4_file_get_access(fp, O_WRONLY);
190 } else
191 __nfs4_file_get_access(fp, oflag);
192 }
193
194 static void nfs4_file_put_fd(struct nfs4_file *fp, int oflag)
195 {
196 if (fp->fi_fds[oflag]) {
197 fput(fp->fi_fds[oflag]);
198 fp->fi_fds[oflag] = NULL;
199 }
200 }
201
202 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
203 {
204 if (atomic_dec_and_test(&fp->fi_access[oflag])) {
205 nfs4_file_put_fd(fp, O_RDWR);
206 nfs4_file_put_fd(fp, oflag);
207 }
208 }
209
210 static void nfs4_file_put_access(struct nfs4_file *fp, int oflag)
211 {
212 if (oflag == O_RDWR) {
213 __nfs4_file_put_access(fp, O_RDONLY);
214 __nfs4_file_put_access(fp, O_WRONLY);
215 } else
216 __nfs4_file_put_access(fp, oflag);
217 }
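/*
 * Worked example (annotation): the per-mode counters treat O_RDWR as
 * READ+WRITE, so after
 *
 *	nfs4_file_get_access(fp, O_RDWR);	read=1 write=1
 *	nfs4_file_get_access(fp, O_RDONLY);	read=2 write=1
 *	nfs4_file_put_access(fp, O_RDWR);	read=1 write=0
 *	nfs4_file_put_access(fp, O_RDONLY);	read=0
 *
 * the write-side drop already fput()s any cached O_WRONLY and O_RDWR
 * struct file, and the final read-side drop releases the rest.
 */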
218
219 static inline void hash_stid(struct nfs4_stid *stid)
220 {
221 stateid_t *s = &stid->sc_stateid;
222 unsigned int hashval;
223
224 hashval = stateid_hashval(s);
225 list_add(&stid->sc_hash, &stateid_hashtbl[hashval]);
226 }
227
228 static struct nfs4_delegation *
229 alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh, u32 type)
230 {
231 struct nfs4_delegation *dp;
232 struct nfs4_file *fp = stp->st_file;
233
234 dprintk("NFSD alloc_init_deleg\n");
235 /*
236 * Major work on the lease subsystem (for example, to support
237 * callbacks on stat) will be required before we can support
238 * write delegations properly.
239 */
240 if (type != NFS4_OPEN_DELEGATE_READ)
241 return NULL;
242 if (fp->fi_had_conflict)
243 return NULL;
244 if (num_delegations > max_delegations)
245 return NULL;
246 dp = kmem_cache_alloc(deleg_slab, GFP_KERNEL);
247 if (dp == NULL)
248 return dp;
249 num_delegations++;
250 INIT_LIST_HEAD(&dp->dl_perfile);
251 INIT_LIST_HEAD(&dp->dl_perclnt);
252 INIT_LIST_HEAD(&dp->dl_recall_lru);
253 dp->dl_client = clp;
254 get_nfs4_file(fp);
255 dp->dl_file = fp;
256 dp->dl_type = type;
257 dp->dl_stid.sc_type = NFS4_DELEG_STID;
258 dp->dl_stid.sc_stateid.si_boot = boot_time;
259 dp->dl_stid.sc_stateid.si_stateownerid = current_delegid++;
260 dp->dl_stid.sc_stateid.si_fileid = 0;
261 dp->dl_stid.sc_stateid.si_generation = 1;
262 hash_stid(&dp->dl_stid);
263 fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
264 dp->dl_time = 0;
265 atomic_set(&dp->dl_count, 1);
266 INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc);
267 return dp;
268 }
269
270 void
271 nfs4_put_delegation(struct nfs4_delegation *dp)
272 {
273 if (atomic_dec_and_test(&dp->dl_count)) {
274 dprintk("NFSD: freeing dp %p\n",dp);
275 put_nfs4_file(dp->dl_file);
276 kmem_cache_free(deleg_slab, dp);
277 num_delegations--;
278 }
279 }
280
281 static void nfs4_put_deleg_lease(struct nfs4_file *fp)
282 {
283 if (atomic_dec_and_test(&fp->fi_delegees)) {
284 vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
285 fp->fi_lease = NULL;
286 fput(fp->fi_deleg_file);
287 fp->fi_deleg_file = NULL;
288 }
289 }
290
291 /* Called under the state lock. */
292 static void
293 unhash_delegation(struct nfs4_delegation *dp)
294 {
295 list_del_init(&dp->dl_stid.sc_hash);
296 list_del_init(&dp->dl_perclnt);
297 spin_lock(&recall_lock);
298 list_del_init(&dp->dl_perfile);
299 list_del_init(&dp->dl_recall_lru);
300 spin_unlock(&recall_lock);
301 nfs4_put_deleg_lease(dp->dl_file);
302 nfs4_put_delegation(dp);
303 }
304
305 /*
306 * SETCLIENTID state
307 */
308
309 /* client_lock protects the client lru list and session hash table */
310 static DEFINE_SPINLOCK(client_lock);
311
312 /* Hash tables for nfs4_clientid state */
313 #define CLIENT_HASH_BITS 4
314 #define CLIENT_HASH_SIZE (1 << CLIENT_HASH_BITS)
315 #define CLIENT_HASH_MASK (CLIENT_HASH_SIZE - 1)
316
317 static unsigned int clientid_hashval(u32 id)
318 {
319 return id & CLIENT_HASH_MASK;
320 }
321
322 static unsigned int clientstr_hashval(const char *name)
323 {
324 return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
325 }
326
327 /*
328 * reclaim_str_hashtbl[] holds known client info from previous reset/reboot
329 * used in reboot/reset lease grace period processing
330 *
331 * conf_id_hashtbl[], and conf_str_hashtbl[] hold confirmed
332 * setclientid_confirmed info.
333 *
334 * unconf_str_hashtbl[] and unconf_id_hashtbl[] hold unconfirmed
335 * setclientid info.
336 *
337 * client_lru holds client queue ordered by nfs4_client.cl_time
338 * for lease renewal.
339 *
340 * close_lru holds (open) stateowner queue ordered by nfs4_stateowner.so_time
341 * for last close replay.
342 */
343 static struct list_head reclaim_str_hashtbl[CLIENT_HASH_SIZE];
344 static int reclaim_str_hashtbl_size = 0;
345 static struct list_head conf_id_hashtbl[CLIENT_HASH_SIZE];
346 static struct list_head conf_str_hashtbl[CLIENT_HASH_SIZE];
347 static struct list_head unconf_str_hashtbl[CLIENT_HASH_SIZE];
348 static struct list_head unconf_id_hashtbl[CLIENT_HASH_SIZE];
349 static struct list_head client_lru;
350 static struct list_head close_lru;
351
352 /*
353 * We store the NONE, READ, WRITE, and BOTH bits separately in the
354 * st_{access,deny}_bmap field of the stateid, in order to track not
355 * only what share bits are currently in force, but also what
356 * combinations of share bits previous opens have used. This allows us
357 * to enforce the recommendation of rfc 3530 14.2.19 that the server
358 * return an error if the client attempts to downgrade to a combination
359 * of share bits not explicable by closing some of its previous opens.
360 *
361 * XXX: This enforcement is actually incomplete, since we don't keep
362 * track of access/deny bit combinations; so, e.g., we allow:
363 *
364 * OPEN allow read, deny write
365 * OPEN allow both, deny none
366 * DOWNGRADE allow read, deny none
367 *
368 * which we should reject.
369 */
370 static void
371 set_access(unsigned int *access, unsigned long bmap) {
372 int i;
373
374 *access = 0;
375 for (i = 1; i < 4; i++) {
376 if (test_bit(i, &bmap))
377 *access |= i;
378 }
379 }
380
381 static void
382 set_deny(unsigned int *deny, unsigned long bmap) {
383 int i;
384
385 *deny = 0;
386 for (i = 0; i < 4; i++) {
387 if (test_bit(i, &bmap))
388 *deny |= i;
389 }
390 }
391
392 static int
393 test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open) {
394 unsigned int access, deny;
395
396 set_access(&access, stp->st_access_bmap);
397 set_deny(&deny, stp->st_deny_bmap);
398 if ((access & open->op_share_deny) || (deny & open->op_share_access))
399 return 0;
400 return 1;
401 }
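/*
 * Worked example for the bitmap scheme above (annotation): suppose one
 * OPEN asked for read (NFS4_SHARE_ACCESS_READ == 1) and a second for
 * both (NFS4_SHARE_ACCESS_BOTH == 3).  st_access_bmap then has bits 1
 * and 3 set, set_access() yields access == 1 | 3 == 3, and test_share()
 * fails any new OPEN whose deny bits overlap that value.
 */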
402
403 static int nfs4_access_to_omode(u32 access)
404 {
405 switch (access & NFS4_SHARE_ACCESS_BOTH) {
406 case NFS4_SHARE_ACCESS_READ:
407 return O_RDONLY;
408 case NFS4_SHARE_ACCESS_WRITE:
409 return O_WRONLY;
410 case NFS4_SHARE_ACCESS_BOTH:
411 return O_RDWR;
412 }
413 BUG();
414 }
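/*
 * Annotation: the share-access bits are NFS4_SHARE_ACCESS_READ == 1,
 * NFS4_SHARE_ACCESS_WRITE == 2 and NFS4_SHARE_ACCESS_BOTH == 3, so
 * close_generic_stateid() below can walk bit numbers 1..3 of
 * st_access_bmap and feed each one straight into this helper to get
 * O_RDONLY, O_WRONLY or O_RDWR.
 */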
415
416 static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
417 {
418 list_del(&stp->st_stid.sc_hash);
419 list_del(&stp->st_perfile);
420 list_del(&stp->st_perstateowner);
421 }
422
423 static void close_generic_stateid(struct nfs4_ol_stateid *stp)
424 {
425 int i;
426
427 if (stp->st_access_bmap) {
428 for (i = 1; i < 4; i++) {
429 if (test_bit(i, &stp->st_access_bmap))
430 nfs4_file_put_access(stp->st_file,
431 nfs4_access_to_omode(i));
432 __clear_bit(i, &stp->st_access_bmap);
433 }
434 }
435 put_nfs4_file(stp->st_file);
436 stp->st_file = NULL;
437 }
438
439 static void free_generic_stateid(struct nfs4_ol_stateid *stp)
440 {
441 close_generic_stateid(stp);
442 kmem_cache_free(stateid_slab, stp);
443 }
444
445 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
446 {
447 struct file *file;
448
449 unhash_generic_stateid(stp);
450 file = find_any_file(stp->st_file);
451 if (file)
452 locks_remove_posix(file, (fl_owner_t)lockowner(stp->st_stateowner));
453 free_generic_stateid(stp);
454 }
455
456 static void unhash_lockowner(struct nfs4_lockowner *lo)
457 {
458 struct nfs4_ol_stateid *stp;
459
460 list_del(&lo->lo_owner.so_idhash);
461 list_del(&lo->lo_owner.so_strhash);
462 list_del(&lo->lo_perstateid);
463 while (!list_empty(&lo->lo_owner.so_stateids)) {
464 stp = list_first_entry(&lo->lo_owner.so_stateids,
465 struct nfs4_ol_stateid, st_perstateowner);
466 release_lock_stateid(stp);
467 }
468 }
469
470 static void release_lockowner(struct nfs4_lockowner *lo)
471 {
472 unhash_lockowner(lo);
473 nfs4_free_lockowner(lo);
474 }
475
476 static void
477 release_stateid_lockowners(struct nfs4_ol_stateid *open_stp)
478 {
479 struct nfs4_lockowner *lo;
480
481 while (!list_empty(&open_stp->st_lockowners)) {
482 lo = list_entry(open_stp->st_lockowners.next,
483 struct nfs4_lockowner, lo_perstateid);
484 release_lockowner(lo);
485 }
486 }
487
488 static void release_open_stateid(struct nfs4_ol_stateid *stp)
489 {
490 unhash_generic_stateid(stp);
491 release_stateid_lockowners(stp);
492 free_generic_stateid(stp);
493 }
494
495 static void unhash_openowner(struct nfs4_openowner *oo)
496 {
497 struct nfs4_ol_stateid *stp;
498
499 list_del(&oo->oo_owner.so_idhash);
500 list_del(&oo->oo_owner.so_strhash);
501 list_del(&oo->oo_perclient);
502 while (!list_empty(&oo->oo_owner.so_stateids)) {
503 stp = list_first_entry(&oo->oo_owner.so_stateids,
504 struct nfs4_ol_stateid, st_perstateowner);
505 release_open_stateid(stp);
506 }
507 }
508
509 static void release_openowner(struct nfs4_openowner *oo)
510 {
511 unhash_openowner(oo);
512 list_del(&oo->oo_close_lru);
513 nfs4_free_openowner(oo);
514 }
515
516 #define SESSION_HASH_SIZE 512
517 static struct list_head sessionid_hashtbl[SESSION_HASH_SIZE];
518
519 static inline int
520 hash_sessionid(struct nfs4_sessionid *sessionid)
521 {
522 struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
523
524 return sid->sequence % SESSION_HASH_SIZE;
525 }
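/*
 * Annotation: sid->sequence comes from the monotonically increasing
 * current_sessionid (see gen_sessionid() below), so new sessions fill
 * the 512 buckets round-robin; e.g. sequence 513 maps to bucket 1.
 */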
526
527 static inline void
528 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
529 {
530 u32 *ptr = (u32 *)(&sessionid->data[0]);
531 dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
532 }
533
534 static void
535 gen_sessionid(struct nfsd4_session *ses)
536 {
537 struct nfs4_client *clp = ses->se_client;
538 struct nfsd4_sessionid *sid;
539
540 sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
541 sid->clientid = clp->cl_clientid;
542 sid->sequence = current_sessionid++;
543 sid->reserved = 0;
544 }
545
546 /*
547 * The protocol defines ca_maxresponsesize_cached to include the size of
548 * the rpc header, but all we need to cache is the data starting after
549 * the end of the initial SEQUENCE operation--the rest we regenerate
550 * each time. Therefore we can advertise a ca_maxresponsesize_cached
551 * value that is the number of bytes in our cache plus a few additional
552 * bytes. In order to stay on the safe side, and not promise more than
553 * we can cache, those additional bytes must be the minimum possible: 24
554 * bytes of rpc header (xid through accept state, with AUTH_NULL
555 * verifier), 12 for the compound header (with zero-length tag), and 44
556 * for the SEQUENCE op response:
557 */
558 #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
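/*
 * Annotation (worked total): 24 (rpc reply header: xid, msg type,
 * reply/accept status and a zero-length AUTH_NULL verifier) + 12
 * (compound status, zero-length tag, opcount) + 44 (SEQUENCE result:
 * opcode, status, 16-byte sessionid and five 4-byte counters) adds up
 * to 80 bytes of slack on top of the cached data.
 */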
559
560 static void
561 free_session_slots(struct nfsd4_session *ses)
562 {
563 int i;
564
565 for (i = 0; i < ses->se_fchannel.maxreqs; i++)
566 kfree(ses->se_slots[i]);
567 }
568
569 /*
570 * We don't actually need to cache the rpc and session headers, so we
571 * can allocate a little less for each slot:
572 */
573 static inline int slot_bytes(struct nfsd4_channel_attrs *ca)
574 {
575 return ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
576 }
577
578 static int nfsd4_sanitize_slot_size(u32 size)
579 {
580 size -= NFSD_MIN_HDR_SEQ_SZ; /* We don't cache the rpc header */
581 size = min_t(u32, size, NFSD_SLOT_CACHE_SIZE);
582
583 return size;
584 }
585
586 /*
587 * XXX: If we run out of reserved DRC memory we could (up to a point)
588 * re-negotiate active sessions and reduce their slot usage to make
589 * room for new connections. For now we just fail the create session.
590 */
591 static int nfsd4_get_drc_mem(int slotsize, u32 num)
592 {
593 int avail;
594
595 num = min_t(u32, num, NFSD_MAX_SLOTS_PER_SESSION);
596
597 spin_lock(&nfsd_drc_lock);
598 avail = min_t(int, NFSD_MAX_MEM_PER_SESSION,
599 nfsd_drc_max_mem - nfsd_drc_mem_used);
600 num = min_t(int, num, avail / slotsize);
601 nfsd_drc_mem_used += num * slotsize;
602 spin_unlock(&nfsd_drc_lock);
603
604 return num;
605 }
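/*
 * Annotation (worked example, numbers invented): for slotsize == 1024,
 * 4MB of DRC memory still free, a request for 32 slots, and ignoring
 * the NFSD_MAX_MEM_PER_SESSION cap, this grants min(32, 4MB/1024) == 32
 * slots and charges 32KB to nfsd_drc_mem_used; alloc_init_session()
 * returns the memory via nfsd4_put_drc_mem() if allocation later fails.
 */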
606
607 static void nfsd4_put_drc_mem(int slotsize, int num)
608 {
609 spin_lock(&nfsd_drc_lock);
610 nfsd_drc_mem_used -= slotsize * num;
611 spin_unlock(&nfsd_drc_lock);
612 }
613
614 static struct nfsd4_session *alloc_session(int slotsize, int numslots)
615 {
616 struct nfsd4_session *new;
617 int mem, i;
618
619 BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
620 + sizeof(struct nfsd4_session) > PAGE_SIZE);
621 mem = numslots * sizeof(struct nfsd4_slot *);
622
623 new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
624 if (!new)
625 return NULL;
626 /* allocate each struct nfsd4_slot and data cache in one piece */
627 for (i = 0; i < numslots; i++) {
628 mem = sizeof(struct nfsd4_slot) + slotsize;
629 new->se_slots[i] = kzalloc(mem, GFP_KERNEL);
630 if (!new->se_slots[i])
631 goto out_free;
632 }
633 return new;
634 out_free:
635 while (i--)
636 kfree(new->se_slots[i]);
637 kfree(new);
638 return NULL;
639 }
640
641 static void init_forechannel_attrs(struct nfsd4_channel_attrs *new, struct nfsd4_channel_attrs *req, int numslots, int slotsize)
642 {
643 u32 maxrpc = nfsd_serv->sv_max_mesg;
644
645 new->maxreqs = numslots;
646 new->maxresp_cached = min_t(u32, req->maxresp_cached,
647 slotsize + NFSD_MIN_HDR_SEQ_SZ);
648 new->maxreq_sz = min_t(u32, req->maxreq_sz, maxrpc);
649 new->maxresp_sz = min_t(u32, req->maxresp_sz, maxrpc);
650 new->maxops = min_t(u32, req->maxops, NFSD_MAX_OPS_PER_COMPOUND);
651 }
652
653 static void free_conn(struct nfsd4_conn *c)
654 {
655 svc_xprt_put(c->cn_xprt);
656 kfree(c);
657 }
658
659 static void nfsd4_conn_lost(struct svc_xpt_user *u)
660 {
661 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
662 struct nfs4_client *clp = c->cn_session->se_client;
663
664 spin_lock(&clp->cl_lock);
665 if (!list_empty(&c->cn_persession)) {
666 list_del(&c->cn_persession);
667 free_conn(c);
668 }
669 spin_unlock(&clp->cl_lock);
670 nfsd4_probe_callback(clp);
671 }
672
673 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
674 {
675 struct nfsd4_conn *conn;
676
677 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
678 if (!conn)
679 return NULL;
680 svc_xprt_get(rqstp->rq_xprt);
681 conn->cn_xprt = rqstp->rq_xprt;
682 conn->cn_flags = flags;
683 INIT_LIST_HEAD(&conn->cn_xpt_user.list);
684 return conn;
685 }
686
687 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
688 {
689 conn->cn_session = ses;
690 list_add(&conn->cn_persession, &ses->se_conns);
691 }
692
693 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
694 {
695 struct nfs4_client *clp = ses->se_client;
696
697 spin_lock(&clp->cl_lock);
698 __nfsd4_hash_conn(conn, ses);
699 spin_unlock(&clp->cl_lock);
700 }
701
702 static int nfsd4_register_conn(struct nfsd4_conn *conn)
703 {
704 conn->cn_xpt_user.callback = nfsd4_conn_lost;
705 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
706 }
707
708 static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses, u32 dir)
709 {
710 struct nfsd4_conn *conn;
711 int ret;
712
713 conn = alloc_conn(rqstp, dir);
714 if (!conn)
715 return nfserr_jukebox;
716 nfsd4_hash_conn(conn, ses);
717 ret = nfsd4_register_conn(conn);
718 if (ret)
719 /* oops; xprt is already down: */
720 nfsd4_conn_lost(&conn->cn_xpt_user);
721 return nfs_ok;
722 }
723
724 static __be32 nfsd4_new_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_session *ses)
725 {
726 u32 dir = NFS4_CDFC4_FORE;
727
728 if (ses->se_flags & SESSION4_BACK_CHAN)
729 dir |= NFS4_CDFC4_BACK;
730
731 return nfsd4_new_conn(rqstp, ses, dir);
732 }
733
734 /* must be called under client_lock */
735 static void nfsd4_del_conns(struct nfsd4_session *s)
736 {
737 struct nfs4_client *clp = s->se_client;
738 struct nfsd4_conn *c;
739
740 spin_lock(&clp->cl_lock);
741 while (!list_empty(&s->se_conns)) {
742 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
743 list_del_init(&c->cn_persession);
744 spin_unlock(&clp->cl_lock);
745
746 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
747 free_conn(c);
748
749 spin_lock(&clp->cl_lock);
750 }
751 spin_unlock(&clp->cl_lock);
752 }
753
754 void free_session(struct kref *kref)
755 {
756 struct nfsd4_session *ses;
757 int mem;
758
759 ses = container_of(kref, struct nfsd4_session, se_ref);
760 nfsd4_del_conns(ses);
761 spin_lock(&nfsd_drc_lock);
762 mem = ses->se_fchannel.maxreqs * slot_bytes(&ses->se_fchannel);
763 nfsd_drc_mem_used -= mem;
764 spin_unlock(&nfsd_drc_lock);
765 free_session_slots(ses);
766 kfree(ses);
767 }
768
769 static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, struct nfsd4_create_session *cses)
770 {
771 struct nfsd4_session *new;
772 struct nfsd4_channel_attrs *fchan = &cses->fore_channel;
773 int numslots, slotsize;
774 int status;
775 int idx;
776
777 /*
778 * Note that decreasing the slot size below the client's request may
779 * make it difficult for the client to function correctly, whereas
780 * decreasing the number of slots will (just?) affect
781 * performance. When short on memory we therefore prefer to
782 * decrease the number of slots rather than their size.
783 */
784 slotsize = nfsd4_sanitize_slot_size(fchan->maxresp_cached);
785 numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs);
786 if (numslots < 1)
787 return NULL;
788
789 new = alloc_session(slotsize, numslots);
790 if (!new) {
791 nfsd4_put_drc_mem(slotsize, fchan->maxreqs);
792 return NULL;
793 }
794 init_forechannel_attrs(&new->se_fchannel, fchan, numslots, slotsize);
795
796 new->se_client = clp;
797 gen_sessionid(new);
798
799 INIT_LIST_HEAD(&new->se_conns);
800
801 new->se_cb_seq_nr = 1;
802 new->se_flags = cses->flags;
803 new->se_cb_prog = cses->callback_prog;
804 kref_init(&new->se_ref);
805 idx = hash_sessionid(&new->se_sessionid);
806 spin_lock(&client_lock);
807 list_add(&new->se_hash, &sessionid_hashtbl[idx]);
808 spin_lock(&clp->cl_lock);
809 list_add(&new->se_perclnt, &clp->cl_sessions);
810 spin_unlock(&clp->cl_lock);
811 spin_unlock(&client_lock);
812
813 status = nfsd4_new_conn_from_crses(rqstp, new);
814 /* whoops: benny points out, status is ignored! (err, or bogus) */
815 if (status) {
816 free_session(&new->se_ref);
817 return NULL;
818 }
819 if (cses->flags & SESSION4_BACK_CHAN) {
820 struct sockaddr *sa = svc_addr(rqstp);
821 /*
822 * This is a little silly; with sessions there's no real
823 * use for the callback address. Use the peer address
824 * as a reasonable default for now, but consider fixing
825 * the rpc client not to require an address in the
826 * future:
827 */
828 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
829 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
830 }
831 nfsd4_probe_callback(clp);
832 return new;
833 }
834
835 /* caller must hold client_lock */
836 static struct nfsd4_session *
837 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid)
838 {
839 struct nfsd4_session *elem;
840 int idx;
841
842 dump_sessionid(__func__, sessionid);
843 idx = hash_sessionid(sessionid);
844 /* Search in the appropriate list */
845 list_for_each_entry(elem, &sessionid_hashtbl[idx], se_hash) {
846 if (!memcmp(elem->se_sessionid.data, sessionid->data,
847 NFS4_MAX_SESSIONID_LEN)) {
848 return elem;
849 }
850 }
851
852 dprintk("%s: session not found\n", __func__);
853 return NULL;
854 }
855
856 /* caller must hold client_lock */
857 static void
858 unhash_session(struct nfsd4_session *ses)
859 {
860 list_del(&ses->se_hash);
861 spin_lock(&ses->se_client->cl_lock);
862 list_del(&ses->se_perclnt);
863 spin_unlock(&ses->se_client->cl_lock);
864 }
865
866 /* must be called under the client_lock */
867 static inline void
868 renew_client_locked(struct nfs4_client *clp)
869 {
870 if (is_client_expired(clp)) {
871 dprintk("%s: client (clientid %08x/%08x) already expired\n",
872 __func__,
873 clp->cl_clientid.cl_boot,
874 clp->cl_clientid.cl_id);
875 return;
876 }
877
878 /*
879 * Move the client to the end of the LRU list.
880 */
881 dprintk("renewing client (clientid %08x/%08x)\n",
882 clp->cl_clientid.cl_boot,
883 clp->cl_clientid.cl_id);
884 list_move_tail(&clp->cl_lru, &client_lru);
885 clp->cl_time = get_seconds();
886 }
887
888 static inline void
889 renew_client(struct nfs4_client *clp)
890 {
891 spin_lock(&client_lock);
892 renew_client_locked(clp);
893 spin_unlock(&client_lock);
894 }
895
896 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
897 static int
898 STALE_CLIENTID(clientid_t *clid)
899 {
900 if (clid->cl_boot == boot_time)
901 return 0;
902 dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
903 clid->cl_boot, clid->cl_id, boot_time);
904 return 1;
905 }
906
907 /*
908 * XXX Should we use a slab cache?
909 * This type of memory management is somewhat inefficient, but we use it
910 * anyway since SETCLIENTID is not a common operation.
911 */
912 static struct nfs4_client *alloc_client(struct xdr_netobj name)
913 {
914 struct nfs4_client *clp;
915
916 clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
917 if (clp == NULL)
918 return NULL;
919 clp->cl_name.data = kmalloc(name.len, GFP_KERNEL);
920 if (clp->cl_name.data == NULL) {
921 kfree(clp);
922 return NULL;
923 }
924 memcpy(clp->cl_name.data, name.data, name.len);
925 clp->cl_name.len = name.len;
926 return clp;
927 }
928
929 static inline void
930 free_client(struct nfs4_client *clp)
931 {
932 while (!list_empty(&clp->cl_sessions)) {
933 struct nfsd4_session *ses;
934 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
935 se_perclnt);
936 list_del(&ses->se_perclnt);
937 nfsd4_put_session(ses);
938 }
939 if (clp->cl_cred.cr_group_info)
940 put_group_info(clp->cl_cred.cr_group_info);
941 kfree(clp->cl_principal);
942 kfree(clp->cl_name.data);
943 kfree(clp);
944 }
945
946 void
947 release_session_client(struct nfsd4_session *session)
948 {
949 struct nfs4_client *clp = session->se_client;
950
951 if (!atomic_dec_and_lock(&clp->cl_refcount, &client_lock))
952 return;
953 if (is_client_expired(clp)) {
954 free_client(clp);
955 session->se_client = NULL;
956 } else
957 renew_client_locked(clp);
958 spin_unlock(&client_lock);
959 }
960
961 /* must be called under the client_lock */
962 static inline void
963 unhash_client_locked(struct nfs4_client *clp)
964 {
965 struct nfsd4_session *ses;
966
967 mark_client_expired(clp);
968 list_del(&clp->cl_lru);
969 spin_lock(&clp->cl_lock);
970 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
971 list_del_init(&ses->se_hash);
972 spin_unlock(&clp->cl_lock);
973 }
974
975 static void
976 expire_client(struct nfs4_client *clp)
977 {
978 struct nfs4_openowner *oo;
979 struct nfs4_delegation *dp;
980 struct list_head reaplist;
981
982 INIT_LIST_HEAD(&reaplist);
983 spin_lock(&recall_lock);
984 while (!list_empty(&clp->cl_delegations)) {
985 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
986 list_del_init(&dp->dl_perclnt);
987 list_move(&dp->dl_recall_lru, &reaplist);
988 }
989 spin_unlock(&recall_lock);
990 while (!list_empty(&reaplist)) {
991 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
992 list_del_init(&dp->dl_recall_lru);
993 unhash_delegation(dp);
994 }
995 while (!list_empty(&clp->cl_openowners)) {
996 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
997 release_openowner(oo);
998 }
999 nfsd4_shutdown_callback(clp);
1000 if (clp->cl_cb_conn.cb_xprt)
1001 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
1002 list_del(&clp->cl_idhash);
1003 list_del(&clp->cl_strhash);
1004 spin_lock(&client_lock);
1005 unhash_client_locked(clp);
1006 if (atomic_read(&clp->cl_refcount) == 0)
1007 free_client(clp);
1008 spin_unlock(&client_lock);
1009 }
1010
1011 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
1012 {
1013 memcpy(target->cl_verifier.data, source->data,
1014 sizeof(target->cl_verifier.data));
1015 }
1016
1017 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
1018 {
1019 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
1020 target->cl_clientid.cl_id = source->cl_clientid.cl_id;
1021 }
1022
1023 static void copy_cred(struct svc_cred *target, struct svc_cred *source)
1024 {
1025 target->cr_uid = source->cr_uid;
1026 target->cr_gid = source->cr_gid;
1027 target->cr_group_info = source->cr_group_info;
1028 get_group_info(target->cr_group_info);
1029 }
1030
1031 static int same_name(const char *n1, const char *n2)
1032 {
1033 return 0 == memcmp(n1, n2, HEXDIR_LEN);
1034 }
1035
1036 static int
1037 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
1038 {
1039 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
1040 }
1041
1042 static int
1043 same_clid(clientid_t *cl1, clientid_t *cl2)
1044 {
1045 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
1046 }
1047
1048 /* XXX what about NGROUP */
1049 static int
1050 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
1051 {
1052 return cr1->cr_uid == cr2->cr_uid;
1053 }
1054
1055 static void gen_clid(struct nfs4_client *clp)
1056 {
1057 static u32 current_clientid = 1;
1058
1059 clp->cl_clientid.cl_boot = boot_time;
1060 clp->cl_clientid.cl_id = current_clientid++;
1061 }
1062
1063 static void gen_confirm(struct nfs4_client *clp)
1064 {
1065 static u32 i;
1066 u32 *p;
1067
1068 p = (u32 *)clp->cl_confirm.data;
1069 *p++ = get_seconds();
1070 *p++ = i++;
1071 }
1072
1073 static int
1074 same_stateid(stateid_t *id_one, stateid_t *id_two)
1075 {
1076 if (id_one->si_stateownerid != id_two->si_stateownerid)
1077 return 0;
1078 return id_one->si_fileid == id_two->si_fileid;
1079 }
1080
1081 static struct nfs4_stid *find_stateid(stateid_t *t)
1082 {
1083 struct nfs4_stid *s;
1084 unsigned int hashval;
1085
1086 hashval = stateid_hashval(t);
1087 list_for_each_entry(s, &stateid_hashtbl[hashval], sc_hash)
1088 if (same_stateid(&s->sc_stateid, t))
1089 return s;
1090 return NULL;
1091 }
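/*
 * Annotation: the table is hashed over the whole si_opaque, but
 * same_stateid() compares only the ownerid and fileid; si_boot is the
 * same for every stateid generated since boot, and si_generation is
 * deliberately left out, so that all generations of a stateid resolve
 * to the same nfs4_stid.
 */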
1092
1093 static struct nfs4_ol_stateid *find_ol_stateid(stateid_t *t)
1094 {
1095 struct nfs4_stid *s;
1096
1097 s = find_stateid(t);
1098 if (!s)
1099 return NULL;
1100 return openlockstateid(s);
1101 }
1102
1103 static struct nfs4_stid *find_stateid_by_type(stateid_t *t, char typemask)
1104 {
1105 struct nfs4_stid *s;
1106
1107 s = find_stateid(t);
1108 if (!s)
1109 return NULL;
1110 if (typemask & s->sc_type)
1111 return s;
1112 return NULL;
1113 }
1114
1115 static struct nfs4_ol_stateid *find_ol_stateid_by_type(stateid_t *t, char typemask)
1116 {
1117 struct nfs4_stid *s;
1118
1119 s = find_stateid_by_type(t, typemask);
1120 if (!s)
1121 return NULL;
1122 return openlockstateid(s);
1123 }
1124
1125 static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
1126 struct svc_rqst *rqstp, nfs4_verifier *verf)
1127 {
1128 struct nfs4_client *clp;
1129 struct sockaddr *sa = svc_addr(rqstp);
1130 char *princ;
1131
1132 clp = alloc_client(name);
1133 if (clp == NULL)
1134 return NULL;
1135
1136 INIT_LIST_HEAD(&clp->cl_sessions);
1137
1138 princ = svc_gss_principal(rqstp);
1139 if (princ) {
1140 clp->cl_principal = kstrdup(princ, GFP_KERNEL);
1141 if (clp->cl_principal == NULL) {
1142 free_client(clp);
1143 return NULL;
1144 }
1145 }
1146
1147 memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
1148 atomic_set(&clp->cl_refcount, 0);
1149 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1150 INIT_LIST_HEAD(&clp->cl_idhash);
1151 INIT_LIST_HEAD(&clp->cl_strhash);
1152 INIT_LIST_HEAD(&clp->cl_openowners);
1153 INIT_LIST_HEAD(&clp->cl_delegations);
1154 INIT_LIST_HEAD(&clp->cl_lru);
1155 INIT_LIST_HEAD(&clp->cl_callbacks);
1156 spin_lock_init(&clp->cl_lock);
1157 INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_do_callback_rpc);
1158 clp->cl_time = get_seconds();
1159 clear_bit(0, &clp->cl_cb_slot_busy);
1160 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1161 copy_verf(clp, verf);
1162 rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
1163 clp->cl_flavor = rqstp->rq_flavor;
1164 copy_cred(&clp->cl_cred, &rqstp->rq_cred);
1165 gen_confirm(clp);
1166 clp->cl_cb_session = NULL;
1167 return clp;
1168 }
1169
1170 static int check_name(struct xdr_netobj name)
1171 {
1172 if (name.len == 0)
1173 return 0;
1174 if (name.len > NFS4_OPAQUE_LIMIT) {
1175 dprintk("NFSD: check_name: name too long(%d)!\n", name.len);
1176 return 0;
1177 }
1178 return 1;
1179 }
1180
1181 static void
1182 add_to_unconfirmed(struct nfs4_client *clp, unsigned int strhashval)
1183 {
1184 unsigned int idhashval;
1185
1186 list_add(&clp->cl_strhash, &unconf_str_hashtbl[strhashval]);
1187 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
1188 list_add(&clp->cl_idhash, &unconf_id_hashtbl[idhashval]);
1189 renew_client(clp);
1190 }
1191
1192 static void
1193 move_to_confirmed(struct nfs4_client *clp)
1194 {
1195 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
1196 unsigned int strhashval;
1197
1198 dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
1199 list_move(&clp->cl_idhash, &conf_id_hashtbl[idhashval]);
1200 strhashval = clientstr_hashval(clp->cl_recdir);
1201 list_move(&clp->cl_strhash, &conf_str_hashtbl[strhashval]);
1202 renew_client(clp);
1203 }
1204
1205 static struct nfs4_client *
1206 find_confirmed_client(clientid_t *clid)
1207 {
1208 struct nfs4_client *clp;
1209 unsigned int idhashval = clientid_hashval(clid->cl_id);
1210
1211 list_for_each_entry(clp, &conf_id_hashtbl[idhashval], cl_idhash) {
1212 if (same_clid(&clp->cl_clientid, clid))
1213 return clp;
1214 }
1215 return NULL;
1216 }
1217
1218 static struct nfs4_client *
1219 find_unconfirmed_client(clientid_t *clid)
1220 {
1221 struct nfs4_client *clp;
1222 unsigned int idhashval = clientid_hashval(clid->cl_id);
1223
1224 list_for_each_entry(clp, &unconf_id_hashtbl[idhashval], cl_idhash) {
1225 if (same_clid(&clp->cl_clientid, clid))
1226 return clp;
1227 }
1228 return NULL;
1229 }
1230
1231 static bool clp_used_exchangeid(struct nfs4_client *clp)
1232 {
1233 return clp->cl_exchange_flags != 0;
1234 }
1235
1236 static struct nfs4_client *
1237 find_confirmed_client_by_str(const char *dname, unsigned int hashval)
1238 {
1239 struct nfs4_client *clp;
1240
1241 list_for_each_entry(clp, &conf_str_hashtbl[hashval], cl_strhash) {
1242 if (same_name(clp->cl_recdir, dname))
1243 return clp;
1244 }
1245 return NULL;
1246 }
1247
1248 static struct nfs4_client *
1249 find_unconfirmed_client_by_str(const char *dname, unsigned int hashval)
1250 {
1251 struct nfs4_client *clp;
1252
1253 list_for_each_entry(clp, &unconf_str_hashtbl[hashval], cl_strhash) {
1254 if (same_name(clp->cl_recdir, dname))
1255 return clp;
1256 }
1257 return NULL;
1258 }
1259
1260 static void rpc_svcaddr2sockaddr(struct sockaddr *sa, unsigned short family, union svc_addr_u *svcaddr)
1261 {
1262 switch (family) {
1263 case AF_INET:
1264 ((struct sockaddr_in *)sa)->sin_family = AF_INET;
1265 ((struct sockaddr_in *)sa)->sin_addr = svcaddr->addr;
1266 return;
1267 case AF_INET6:
1268 ((struct sockaddr_in6 *)sa)->sin6_family = AF_INET6;
1269 ((struct sockaddr_in6 *)sa)->sin6_addr = svcaddr->addr6;
1270 return;
1271 }
1272 }
1273
1274 static void
1275 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
1276 {
1277 struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
1278 struct sockaddr *sa = svc_addr(rqstp);
1279 u32 scopeid = rpc_get_scope_id(sa);
1280 unsigned short expected_family;
1281
1282 /* Currently, we only support tcp and tcp6 for the callback channel */
1283 if (se->se_callback_netid_len == 3 &&
1284 !memcmp(se->se_callback_netid_val, "tcp", 3))
1285 expected_family = AF_INET;
1286 else if (se->se_callback_netid_len == 4 &&
1287 !memcmp(se->se_callback_netid_val, "tcp6", 4))
1288 expected_family = AF_INET6;
1289 else
1290 goto out_err;
1291
1292 conn->cb_addrlen = rpc_uaddr2sockaddr(se->se_callback_addr_val,
1293 se->se_callback_addr_len,
1294 (struct sockaddr *)&conn->cb_addr,
1295 sizeof(conn->cb_addr));
1296
1297 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
1298 goto out_err;
1299
1300 if (conn->cb_addr.ss_family == AF_INET6)
1301 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
1302
1303 conn->cb_prog = se->se_callback_prog;
1304 conn->cb_ident = se->se_callback_ident;
1305 rpc_svcaddr2sockaddr((struct sockaddr *)&conn->cb_saddr, expected_family, &rqstp->rq_daddr);
1306 return;
1307 out_err:
1308 conn->cb_addr.ss_family = AF_UNSPEC;
1309 conn->cb_addrlen = 0;
1310 dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
1311 "will not receive delegations\n",
1312 clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
1313
1314 return;
1315 }
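/*
 * Annotation (example, address invented): for netid "tcp" the client
 * sends a universal address such as "192.0.2.1.8.1"; the trailing two
 * octets encode the port, 8 * 256 + 1 == 2049, so rpc_uaddr2sockaddr()
 * fills cb_addr with an AF_INET sockaddr for 192.0.2.1, port 2049.
 */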
1316
1317 /*
1318 * Cache a reply. nfsd4_check_drc_limit() has bounded the cache size.
1319 */
1320 void
1321 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
1322 {
1323 struct nfsd4_slot *slot = resp->cstate.slot;
1324 unsigned int base;
1325
1326 dprintk("--> %s slot %p\n", __func__, slot);
1327
1328 slot->sl_opcnt = resp->opcnt;
1329 slot->sl_status = resp->cstate.status;
1330
1331 if (nfsd4_not_cached(resp)) {
1332 slot->sl_datalen = 0;
1333 return;
1334 }
1335 slot->sl_datalen = (char *)resp->p - (char *)resp->cstate.datap;
1336 base = (char *)resp->cstate.datap -
1337 (char *)resp->xbuf->head[0].iov_base;
1338 if (read_bytes_from_xdr_buf(resp->xbuf, base, slot->sl_data,
1339 slot->sl_datalen))
1340 		WARN(1, "%s: sessions DRC could not cache compound\n", __func__);
1341 return;
1342 }
1343
1344 /*
1345 * Encode the replay sequence operation from the slot values.
1346 * If cachethis is FALSE encode the uncached rep error on the next
1347 * operation which sets resp->p and increments resp->opcnt for
1348 * nfs4svc_encode_compoundres.
1349 *
1350 */
1351 static __be32
1352 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
1353 struct nfsd4_compoundres *resp)
1354 {
1355 struct nfsd4_op *op;
1356 struct nfsd4_slot *slot = resp->cstate.slot;
1357
1358 dprintk("--> %s resp->opcnt %d cachethis %u \n", __func__,
1359 resp->opcnt, resp->cstate.slot->sl_cachethis);
1360
1361 /* Encode the replayed sequence operation */
1362 op = &args->ops[resp->opcnt - 1];
1363 nfsd4_encode_operation(resp, op);
1364
1365 /* Return nfserr_retry_uncached_rep in next operation. */
1366 if (args->opcnt > 1 && slot->sl_cachethis == 0) {
1367 op = &args->ops[resp->opcnt++];
1368 op->status = nfserr_retry_uncached_rep;
1369 nfsd4_encode_operation(resp, op);
1370 }
1371 return op->status;
1372 }
1373
1374 /*
1375 * The sequence operation is not cached because we can use the slot and
1376 * session values.
1377 */
1378 __be32
1379 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
1380 struct nfsd4_sequence *seq)
1381 {
1382 struct nfsd4_slot *slot = resp->cstate.slot;
1383 __be32 status;
1384
1385 dprintk("--> %s slot %p\n", __func__, slot);
1386
1387 /* Either returns 0 or nfserr_retry_uncached */
1388 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
1389 if (status == nfserr_retry_uncached_rep)
1390 return status;
1391
1392 /* The sequence operation has been encoded, cstate->datap set. */
1393 memcpy(resp->cstate.datap, slot->sl_data, slot->sl_datalen);
1394
1395 resp->opcnt = slot->sl_opcnt;
1396 resp->p = resp->cstate.datap + XDR_QUADLEN(slot->sl_datalen);
1397 status = slot->sl_status;
1398
1399 return status;
1400 }
1401
1402 /*
1403 * Set the exchange_id flags returned by the server.
1404 */
1405 static void
1406 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
1407 {
1408 /* pNFS is not supported */
1409 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
1410
1411 /* Referrals are supported, Migration is not. */
1412 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
1413
1414 /* set the wire flags to return to client. */
1415 clid->flags = new->cl_exchange_flags;
1416 }
1417
1418 __be32
1419 nfsd4_exchange_id(struct svc_rqst *rqstp,
1420 struct nfsd4_compound_state *cstate,
1421 struct nfsd4_exchange_id *exid)
1422 {
1423 struct nfs4_client *unconf, *conf, *new;
1424 int status;
1425 unsigned int strhashval;
1426 char dname[HEXDIR_LEN];
1427 char addr_str[INET6_ADDRSTRLEN];
1428 nfs4_verifier verf = exid->verifier;
1429 struct sockaddr *sa = svc_addr(rqstp);
1430
1431 rpc_ntop(sa, addr_str, sizeof(addr_str));
1432 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
1433 "ip_addr=%s flags %x, spa_how %d\n",
1434 __func__, rqstp, exid, exid->clname.len, exid->clname.data,
1435 addr_str, exid->flags, exid->spa_how);
1436
1437 if (!check_name(exid->clname) || (exid->flags & ~EXCHGID4_FLAG_MASK_A))
1438 return nfserr_inval;
1439
1440 /* Currently only support SP4_NONE */
1441 switch (exid->spa_how) {
1442 case SP4_NONE:
1443 break;
1444 case SP4_SSV:
1445 return nfserr_serverfault;
1446 default:
1447 BUG(); /* checked by xdr code */
1448 case SP4_MACH_CRED:
1449 return nfserr_serverfault; /* no excuse :-/ */
1450 }
1451
1452 status = nfs4_make_rec_clidname(dname, &exid->clname);
1453
1454 if (status)
1455 goto error;
1456
1457 strhashval = clientstr_hashval(dname);
1458
1459 nfs4_lock_state();
1460 status = nfs_ok;
1461
1462 conf = find_confirmed_client_by_str(dname, strhashval);
1463 if (conf) {
1464 if (!clp_used_exchangeid(conf)) {
1465 status = nfserr_clid_inuse; /* XXX: ? */
1466 goto out;
1467 }
1468 if (!same_verf(&verf, &conf->cl_verifier)) {
1469 /* 18.35.4 case 8 */
1470 if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
1471 status = nfserr_not_same;
1472 goto out;
1473 }
1474 /* Client reboot: destroy old state */
1475 expire_client(conf);
1476 goto out_new;
1477 }
1478 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
1479 /* 18.35.4 case 9 */
1480 if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
1481 status = nfserr_perm;
1482 goto out;
1483 }
1484 expire_client(conf);
1485 goto out_new;
1486 }
1487 /*
1488 * Set bit when the owner id and verifier map to an already
1489 * confirmed client id (18.35.3).
1490 */
1491 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
1492
1493 /*
1494 * Falling into 18.35.4 case 2, possible router replay.
1495 * Leave confirmed record intact and return same result.
1496 */
1497 copy_verf(conf, &verf);
1498 new = conf;
1499 goto out_copy;
1500 }
1501
1502 /* 18.35.4 case 7 */
1503 if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
1504 status = nfserr_noent;
1505 goto out;
1506 }
1507
1508 unconf = find_unconfirmed_client_by_str(dname, strhashval);
1509 if (unconf) {
1510 /*
1511 * Possible retry or client restart. Per 18.35.4 case 4,
1512 * a new unconfirmed record should be generated regardless
1513 * of whether any properties have changed.
1514 */
1515 expire_client(unconf);
1516 }
1517
1518 out_new:
1519 /* Normal case */
1520 new = create_client(exid->clname, dname, rqstp, &verf);
1521 if (new == NULL) {
1522 status = nfserr_jukebox;
1523 goto out;
1524 }
1525
1526 gen_clid(new);
1527 add_to_unconfirmed(new, strhashval);
1528 out_copy:
1529 exid->clientid.cl_boot = new->cl_clientid.cl_boot;
1530 exid->clientid.cl_id = new->cl_clientid.cl_id;
1531
1532 exid->seqid = 1;
1533 nfsd4_set_ex_flags(new, exid);
1534
1535 dprintk("nfsd4_exchange_id seqid %d flags %x\n",
1536 new->cl_cs_slot.sl_seqid, new->cl_exchange_flags);
1537 status = nfs_ok;
1538
1539 out:
1540 nfs4_unlock_state();
1541 error:
1542 dprintk("nfsd4_exchange_id returns %d\n", ntohl(status));
1543 return status;
1544 }
1545
1546 static int
1547 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
1548 {
1549 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
1550 slot_seqid);
1551
1552 /* The slot is in use, and no response has been sent. */
1553 if (slot_inuse) {
1554 if (seqid == slot_seqid)
1555 return nfserr_jukebox;
1556 else
1557 return nfserr_seq_misordered;
1558 }
1559 /* Normal */
1560 if (likely(seqid == slot_seqid + 1))
1561 return nfs_ok;
1562 /* Replay */
1563 if (seqid == slot_seqid)
1564 return nfserr_replay_cache;
1565 /* Wraparound */
1566 if (seqid == 1 && (slot_seqid + 1) == 0)
1567 return nfs_ok;
1568 /* Misordered replay or misordered new request */
1569 return nfserr_seq_misordered;
1570 }
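/*
 * Annotation (decision table for the above, with slot_seqid == 5):
 *
 *	seqid 6, slot idle	-> nfs_ok		(normal next request)
 *	seqid 5, slot idle	-> nfserr_replay_cache	(replay)
 *	seqid 5, slot in use	-> nfserr_jukebox	(retry later)
 *	seqid 6, slot in use	-> nfserr_seq_misordered
 *	seqid 1, slot_seqid 0xffffffff, idle -> nfs_ok	(wraparound)
 *	anything else		-> nfserr_seq_misordered
 */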
1571
1572 /*
1573 * Cache the create session result into the create session single DRC
1574 * slot cache by saving the xdr structure. sl_seqid has been set.
1575 * Do this for solo or embedded create session operations.
1576 */
1577 static void
1578 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
1579 struct nfsd4_clid_slot *slot, int nfserr)
1580 {
1581 slot->sl_status = nfserr;
1582 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
1583 }
1584
1585 static __be32
1586 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
1587 struct nfsd4_clid_slot *slot)
1588 {
1589 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
1590 return slot->sl_status;
1591 }
1592
1593 #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
1594 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
1595 1 + /* MIN tag is length with zero, only length */ \
1596 3 + /* version, opcount, opcode */ \
1597 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
1598 /* seqid, slotID, slotID, cache */ \
1599 4 ) * sizeof(__be32))
1600
1601 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
1602 2 + /* verifier: AUTH_NULL, length 0 */\
1603 1 + /* status */ \
1604 1 + /* MIN tag is length with zero, only length */ \
1605 3 + /* opcount, opcode, opstatus*/ \
1606 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
1607 /* seqid, slotID, slotID, slotID, status */ \
1608 5 ) * sizeof(__be32))
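/*
 * Annotation (worked totals): XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) is
 * 16/4 == 4 words, so
 *
 *	NFSD_MIN_REQ_HDR_SEQ_SZ  = (4 + 1 + 3 + 4 + 4) * 4 = 64 bytes
 *	NFSD_MIN_RESP_HDR_SEQ_SZ = (2 + 1 + 1 + 3 + 4 + 5) * 4 = 64 bytes
 *
 * check_forechannel_attrs() below rejects a fore channel too small to
 * carry even a bare SEQUENCE round trip.
 */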
1609
1610 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs fchannel)
1611 {
1612 return fchannel.maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ
1613 || fchannel.maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ;
1614 }
1615
1616 __be32
1617 nfsd4_create_session(struct svc_rqst *rqstp,
1618 struct nfsd4_compound_state *cstate,
1619 struct nfsd4_create_session *cr_ses)
1620 {
1621 struct sockaddr *sa = svc_addr(rqstp);
1622 struct nfs4_client *conf, *unconf;
1623 struct nfsd4_session *new;
1624 struct nfsd4_clid_slot *cs_slot = NULL;
1625 bool confirm_me = false;
1626 int status = 0;
1627
1628 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
1629 return nfserr_inval;
1630
1631 nfs4_lock_state();
1632 unconf = find_unconfirmed_client(&cr_ses->clientid);
1633 conf = find_confirmed_client(&cr_ses->clientid);
1634
1635 if (conf) {
1636 cs_slot = &conf->cl_cs_slot;
1637 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
1638 if (status == nfserr_replay_cache) {
1639 dprintk("Got a create_session replay! seqid= %d\n",
1640 cs_slot->sl_seqid);
1641 /* Return the cached reply status */
1642 status = nfsd4_replay_create_session(cr_ses, cs_slot);
1643 goto out;
1644 } else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
1645 status = nfserr_seq_misordered;
1646 dprintk("Sequence misordered!\n");
1647 dprintk("Expected seqid= %d but got seqid= %d\n",
1648 cs_slot->sl_seqid, cr_ses->seqid);
1649 goto out;
1650 }
1651 } else if (unconf) {
1652 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
1653 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
1654 status = nfserr_clid_inuse;
1655 goto out;
1656 }
1657
1658 cs_slot = &unconf->cl_cs_slot;
1659 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
1660 if (status) {
1661 /* an unconfirmed replay returns misordered */
1662 status = nfserr_seq_misordered;
1663 goto out;
1664 }
1665
1666 confirm_me = true;
1667 conf = unconf;
1668 } else {
1669 status = nfserr_stale_clientid;
1670 goto out;
1671 }
1672
1673 /*
1674 * XXX: we should probably set this at creation time, and check
1675 * for consistent minorversion use throughout:
1676 */
1677 conf->cl_minorversion = 1;
1678 /*
1679 * We do not support RDMA or persistent sessions
1680 */
1681 cr_ses->flags &= ~SESSION4_PERSIST;
1682 cr_ses->flags &= ~SESSION4_RDMA;
1683
1684 status = nfserr_toosmall;
1685 if (check_forechannel_attrs(cr_ses->fore_channel))
1686 goto out;
1687
1688 status = nfserr_jukebox;
1689 new = alloc_init_session(rqstp, conf, cr_ses);
1690 if (!new)
1691 goto out;
1692 status = nfs_ok;
1693 memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
1694 NFS4_MAX_SESSIONID_LEN);
1695 memcpy(&cr_ses->fore_channel, &new->se_fchannel,
1696 sizeof(struct nfsd4_channel_attrs));
1697 cs_slot->sl_seqid++;
1698 cr_ses->seqid = cs_slot->sl_seqid;
1699
1700 /* cache solo and embedded create sessions under the state lock */
1701 nfsd4_cache_create_session(cr_ses, cs_slot, status);
1702 if (confirm_me)
1703 move_to_confirmed(conf);
1704 out:
1705 nfs4_unlock_state();
1706 dprintk("%s returns %d\n", __func__, ntohl(status));
1707 return status;
1708 }
1709
1710 static bool nfsd4_last_compound_op(struct svc_rqst *rqstp)
1711 {
1712 struct nfsd4_compoundres *resp = rqstp->rq_resp;
1713 struct nfsd4_compoundargs *argp = rqstp->rq_argp;
1714
1715 return argp->opcnt == resp->opcnt;
1716 }
1717
1718 static __be32 nfsd4_map_bcts_dir(u32 *dir)
1719 {
1720 switch (*dir) {
1721 case NFS4_CDFC4_FORE:
1722 case NFS4_CDFC4_BACK:
1723 return nfs_ok;
1724 case NFS4_CDFC4_FORE_OR_BOTH:
1725 case NFS4_CDFC4_BACK_OR_BOTH:
1726 *dir = NFS4_CDFC4_BOTH;
1727 return nfs_ok;
1728 	}
1729 return nfserr_inval;
1730 }
1731
1732 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
1733 struct nfsd4_compound_state *cstate,
1734 struct nfsd4_bind_conn_to_session *bcts)
1735 {
1736 __be32 status;
1737
1738 if (!nfsd4_last_compound_op(rqstp))
1739 return nfserr_not_only_op;
1740 spin_lock(&client_lock);
1741 cstate->session = find_in_sessionid_hashtbl(&bcts->sessionid);
1742 	/* Sorta weird: we only need the refcnt'ing because new_conn acquires
1743 	 * client_lock itself: */
1744 if (cstate->session) {
1745 nfsd4_get_session(cstate->session);
1746 atomic_inc(&cstate->session->se_client->cl_refcount);
1747 }
1748 spin_unlock(&client_lock);
1749 if (!cstate->session)
1750 return nfserr_badsession;
1751
1752 status = nfsd4_map_bcts_dir(&bcts->dir);
1753 if (!status)
1754 nfsd4_new_conn(rqstp, cstate->session, bcts->dir);
1755 return status;
1756 }
1757
1758 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
1759 {
1760 if (!session)
1761 return 0;
1762 return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
1763 }
1764
1765 __be32
1766 nfsd4_destroy_session(struct svc_rqst *r,
1767 struct nfsd4_compound_state *cstate,
1768 struct nfsd4_destroy_session *sessionid)
1769 {
1770 struct nfsd4_session *ses;
1771 u32 status = nfserr_badsession;
1772
1773 /* Notes:
1774 	 * - The confirmed nfs4_client->cl_sessionid holds the destroyed sessionid
1775 * - Should we return nfserr_back_chan_busy if waiting for
1776 * callbacks on to-be-destroyed session?
1777 * - Do we need to clear any callback info from previous session?
1778 */
1779
1780 if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
1781 if (!nfsd4_last_compound_op(r))
1782 return nfserr_not_only_op;
1783 }
1784 dump_sessionid(__func__, &sessionid->sessionid);
1785 spin_lock(&client_lock);
1786 ses = find_in_sessionid_hashtbl(&sessionid->sessionid);
1787 if (!ses) {
1788 spin_unlock(&client_lock);
1789 goto out;
1790 }
1791
1792 unhash_session(ses);
1793 spin_unlock(&client_lock);
1794
1795 nfs4_lock_state();
1796 nfsd4_probe_callback_sync(ses->se_client);
1797 nfs4_unlock_state();
1798
1799 nfsd4_del_conns(ses);
1800
1801 nfsd4_put_session(ses);
1802 status = nfs_ok;
1803 out:
1804 dprintk("%s returns %d\n", __func__, ntohl(status));
1805 return status;
1806 }
1807
1808 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
1809 {
1810 struct nfsd4_conn *c;
1811
1812 list_for_each_entry(c, &s->se_conns, cn_persession) {
1813 if (c->cn_xprt == xpt) {
1814 return c;
1815 }
1816 }
1817 return NULL;
1818 }
1819
1820 static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
1821 {
1822 struct nfs4_client *clp = ses->se_client;
1823 struct nfsd4_conn *c;
1824 int ret;
1825
1826 spin_lock(&clp->cl_lock);
1827 c = __nfsd4_find_conn(new->cn_xprt, ses);
1828 if (c) {
1829 spin_unlock(&clp->cl_lock);
1830 free_conn(new);
1831 return;
1832 }
1833 __nfsd4_hash_conn(new, ses);
1834 spin_unlock(&clp->cl_lock);
1835 ret = nfsd4_register_conn(new);
1836 if (ret)
1837 /* oops; xprt is already down: */
1838 nfsd4_conn_lost(&new->cn_xpt_user);
1839 return;
1840 }
1841
1842 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
1843 {
1844 struct nfsd4_compoundargs *args = rqstp->rq_argp;
1845
1846 return args->opcnt > session->se_fchannel.maxops;
1847 }
1848
1849 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
1850 struct nfsd4_session *session)
1851 {
1852 struct xdr_buf *xb = &rqstp->rq_arg;
1853
1854 return xb->len > session->se_fchannel.maxreq_sz;
1855 }
1856
1857 __be32
1858 nfsd4_sequence(struct svc_rqst *rqstp,
1859 struct nfsd4_compound_state *cstate,
1860 struct nfsd4_sequence *seq)
1861 {
1862 struct nfsd4_compoundres *resp = rqstp->rq_resp;
1863 struct nfsd4_session *session;
1864 struct nfsd4_slot *slot;
1865 struct nfsd4_conn *conn;
1866 int status;
1867
1868 if (resp->opcnt != 1)
1869 return nfserr_sequence_pos;
1870
1871 /*
1872 * Will be either used or freed by nfsd4_sequence_check_conn
1873 * below.
1874 */
1875 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
1876 if (!conn)
1877 return nfserr_jukebox;
1878
1879 spin_lock(&client_lock);
1880 status = nfserr_badsession;
1881 session = find_in_sessionid_hashtbl(&seq->sessionid);
1882 if (!session)
1883 goto out;
1884
1885 status = nfserr_too_many_ops;
1886 if (nfsd4_session_too_many_ops(rqstp, session))
1887 goto out;
1888
1889 status = nfserr_req_too_big;
1890 if (nfsd4_request_too_big(rqstp, session))
1891 goto out;
1892
1893 status = nfserr_badslot;
1894 if (seq->slotid >= session->se_fchannel.maxreqs)
1895 goto out;
1896
1897 slot = session->se_slots[seq->slotid];
1898 dprintk("%s: slotid %d\n", __func__, seq->slotid);
1899
1900 	/* We do not negotiate the number of slots yet, so set the
1901 	 * maxslots to the session maxreqs, which is used to encode
1902 	 * sr_highest_slotid and the sr_target_highest_slotid to maxslots */
1903 seq->maxslots = session->se_fchannel.maxreqs;
1904
1905 status = check_slot_seqid(seq->seqid, slot->sl_seqid, slot->sl_inuse);
1906 if (status == nfserr_replay_cache) {
1907 cstate->slot = slot;
1908 cstate->session = session;
1909 /* Return the cached reply status and set cstate->status
1910 * for nfsd4_proc_compound processing */
1911 status = nfsd4_replay_cache_entry(resp, seq);
1912 cstate->status = nfserr_replay_cache;
1913 goto out;
1914 }
1915 if (status)
1916 goto out;
1917
1918 nfsd4_sequence_check_conn(conn, session);
1919 conn = NULL;
1920
1921 /* Success! bump slot seqid */
1922 slot->sl_inuse = true;
1923 slot->sl_seqid = seq->seqid;
1924 slot->sl_cachethis = seq->cachethis;
1925
1926 cstate->slot = slot;
1927 cstate->session = session;
1928
1929 out:
1930 /* Hold a session reference until done processing the compound. */
1931 if (cstate->session) {
1932 struct nfs4_client *clp = session->se_client;
1933
1934 nfsd4_get_session(cstate->session);
1935 atomic_inc(&clp->cl_refcount);
1936 if (clp->cl_cb_state == NFSD4_CB_DOWN)
1937 seq->status_flags |= SEQ4_STATUS_CB_PATH_DOWN;
1938 }
1939 kfree(conn);
1940 spin_unlock(&client_lock);
1941 dprintk("%s: return %d\n", __func__, ntohl(status));
1942 return status;
1943 }
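/*
 * Illustrative slot-reuse example for the seqid check above (values
 * hypothetical, assuming check_slot_seqid() follows the usual RFC 5661
 * rules): with slot->sl_seqid == 5 and sl_inuse == false,
 *
 *	seq->seqid == 6: new request; execute it, mark the slot in use,
 *	    and cache the reply under seqid 6.
 *	seq->seqid == 5: retransmission; check_slot_seqid() returns
 *	    nfserr_replay_cache and the cached reply is replayed.
 *	seq->seqid == 7 (or 4): hole in the sequence; the request fails
 *	    with nfserr_seq_misordered.
 */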
1944
1945 __be32
1946 nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
1947 {
1948 int status = 0;
1949
1950 if (rc->rca_one_fs) {
1951 if (!cstate->current_fh.fh_dentry)
1952 return nfserr_nofilehandle;
1953 /*
1954 * We don't take advantage of the rca_one_fs case.
1955 * That's OK, it's optional, we can safely ignore it.
1956 */
1957 return nfs_ok;
1958 }
1959
1960 nfs4_lock_state();
1961 status = nfserr_complete_already;
1962 if (cstate->session->se_client->cl_firststate)
1963 goto out;
1964
1965 status = nfserr_stale_clientid;
1966 if (is_client_expired(cstate->session->se_client))
1967 /*
1968 * The following error isn't really legal.
1969 		 * But we only get here if the client has just explicitly
1970 		 * destroyed itself.  Surely it no longer cares what
1971 * error it gets back on an operation for the dead
1972 * client.
1973 */
1974 goto out;
1975
1976 status = nfs_ok;
1977 nfsd4_create_clid_dir(cstate->session->se_client);
1978 out:
1979 nfs4_unlock_state();
1980 return status;
1981 }
1982
1983 __be32
1984 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
1985 struct nfsd4_setclientid *setclid)
1986 {
1987 struct xdr_netobj clname = {
1988 .len = setclid->se_namelen,
1989 .data = setclid->se_name,
1990 };
1991 nfs4_verifier clverifier = setclid->se_verf;
1992 unsigned int strhashval;
1993 struct nfs4_client *conf, *unconf, *new;
1994 __be32 status;
1995 char dname[HEXDIR_LEN];
1996
1997 if (!check_name(clname))
1998 return nfserr_inval;
1999
2000 status = nfs4_make_rec_clidname(dname, &clname);
2001 if (status)
2002 return status;
2003
2004 /*
2005 * XXX The Duplicate Request Cache (DRC) has been checked (??)
2006 * We get here on a DRC miss.
2007 */
2008
2009 strhashval = clientstr_hashval(dname);
2010
2011 nfs4_lock_state();
2012 conf = find_confirmed_client_by_str(dname, strhashval);
2013 if (conf) {
2014 /* RFC 3530 14.2.33 CASE 0: */
2015 status = nfserr_clid_inuse;
2016 if (clp_used_exchangeid(conf))
2017 goto out;
2018 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
2019 char addr_str[INET6_ADDRSTRLEN];
2020 rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
2021 sizeof(addr_str));
2022 dprintk("NFSD: setclientid: string in use by client "
2023 "at %s\n", addr_str);
2024 goto out;
2025 }
2026 }
2027 /*
2028 * section 14.2.33 of RFC 3530 (under the heading "IMPLEMENTATION")
2029 * has a description of SETCLIENTID request processing consisting
2030 * of 5 bullet points, labeled as CASE0 - CASE4 below.
2031 */
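	/*
	 * In summary (conf/unconf are the confirmed and unconfirmed
	 * records found for this client name):
	 *
	 *	no conf                    CASE 4: new or rebooted client;
	 *	                                   generate a fresh clientid
	 *	conf, verifiers match      CASE 1: probable callback update;
	 *	                                   copy the confirmed clientid
	 *	conf, no match, no unconf  CASE 2: probable client reboot;
	 *	                                   generate a fresh clientid
	 *	conf, no match, unconf     CASE 3: as CASE 2, after expiring
	 *	                                   the stale unconfirmed record
	 */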
2032 unconf = find_unconfirmed_client_by_str(dname, strhashval);
2033 status = nfserr_jukebox;
2034 if (!conf) {
2035 /*
2036 * RFC 3530 14.2.33 CASE 4:
2037 * placed first, because it is the normal case
2038 */
2039 if (unconf)
2040 expire_client(unconf);
2041 new = create_client(clname, dname, rqstp, &clverifier);
2042 if (new == NULL)
2043 goto out;
2044 gen_clid(new);
2045 } else if (same_verf(&conf->cl_verifier, &clverifier)) {
2046 /*
2047 * RFC 3530 14.2.33 CASE 1:
2048 * probable callback update
2049 */
2050 if (unconf) {
2051 /* Note this is removing unconfirmed {*x***},
2052 * which is stronger than RFC recommended {vxc**}.
2053 * This has the advantage that there is at most
2054 * one {*x***} in either list at any time.
2055 */
2056 expire_client(unconf);
2057 }
2058 new = create_client(clname, dname, rqstp, &clverifier);
2059 if (new == NULL)
2060 goto out;
2061 copy_clid(new, conf);
2062 } else if (!unconf) {
2063 /*
2064 * RFC 3530 14.2.33 CASE 2:
2065 * probable client reboot; state will be removed if
2066 * confirmed.
2067 */
2068 new = create_client(clname, dname, rqstp, &clverifier);
2069 if (new == NULL)
2070 goto out;
2071 gen_clid(new);
2072 } else {
2073 /*
2074 * RFC 3530 14.2.33 CASE 3:
2075 * probable client reboot; state will be removed if
2076 * confirmed.
2077 */
2078 expire_client(unconf);
2079 new = create_client(clname, dname, rqstp, &clverifier);
2080 if (new == NULL)
2081 goto out;
2082 gen_clid(new);
2083 }
2084 /*
2085 * XXX: we should probably set this at creation time, and check
2086 * for consistent minorversion use throughout:
2087 */
2088 new->cl_minorversion = 0;
2089 gen_callback(new, setclid, rqstp);
2090 add_to_unconfirmed(new, strhashval);
2091 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
2092 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
2093 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
2094 status = nfs_ok;
2095 out:
2096 nfs4_unlock_state();
2097 return status;
2098 }
2099
2100
2101 /*
2102 * Section 14.2.34 of RFC 3530 (under the heading "IMPLEMENTATION") has
2103 * a description of SETCLIENTID_CONFIRM request processing consisting of 4
2104 * bullets, labeled as CASE1 - CASE4 below.
2105 */
2106 __be32
2107 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
2108 struct nfsd4_compound_state *cstate,
2109 struct nfsd4_setclientid_confirm *setclientid_confirm)
2110 {
2111 struct sockaddr *sa = svc_addr(rqstp);
2112 struct nfs4_client *conf, *unconf;
2113 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
2114 clientid_t * clid = &setclientid_confirm->sc_clientid;
2115 __be32 status;
2116
2117 if (STALE_CLIENTID(clid))
2118 return nfserr_stale_clientid;
2119 /*
2120 * XXX The Duplicate Request Cache (DRC) has been checked (??)
2121 * We get here on a DRC miss.
2122 */
2123
2124 nfs4_lock_state();
2125
2126 conf = find_confirmed_client(clid);
2127 unconf = find_unconfirmed_client(clid);
2128
2129 status = nfserr_clid_inuse;
2130 if (conf && !rpc_cmp_addr((struct sockaddr *) &conf->cl_addr, sa))
2131 goto out;
2132 if (unconf && !rpc_cmp_addr((struct sockaddr *) &unconf->cl_addr, sa))
2133 goto out;
2134
2135 /*
2136 * section 14.2.34 of RFC 3530 has a description of
2137 * SETCLIENTID_CONFIRM request processing consisting
2138 * of 4 bullet points, labeled as CASE1 - CASE4 below.
2139 */
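	/*
	 * In tabular form ("matches" means the record's cl_confirm equals
	 * the verifier sent in this SETCLIENTID_CONFIRM):
	 *
	 *	conf + unconf, unconf matches  CASE 1: callback update
	 *	conf only                      CASE 2: probable retransmit
	 *	unconf only, unconf matches    CASE 3: normal confirmation
	 *	neither record matches         CASE 4: server rebooted;
	 *	                                       nfserr_stale_clientid
	 */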
2140 if (conf && unconf && same_verf(&confirm, &unconf->cl_confirm)) {
2141 /*
2142 * RFC 3530 14.2.34 CASE 1:
2143 * callback update
2144 */
2145 if (!same_creds(&conf->cl_cred, &unconf->cl_cred))
2146 status = nfserr_clid_inuse;
2147 else {
2148 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
2149 nfsd4_probe_callback(conf);
2150 expire_client(unconf);
2151 status = nfs_ok;
2152
2153 }
2154 } else if (conf && !unconf) {
2155 /*
2156 * RFC 3530 14.2.34 CASE 2:
2157 * probable retransmitted request; play it safe and
2158 * do nothing.
2159 */
2160 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred))
2161 status = nfserr_clid_inuse;
2162 else
2163 status = nfs_ok;
2164 } else if (!conf && unconf
2165 && same_verf(&unconf->cl_confirm, &confirm)) {
2166 /*
2167 * RFC 3530 14.2.34 CASE 3:
2168 * Normal case; new or rebooted client:
2169 */
2170 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
2171 status = nfserr_clid_inuse;
2172 } else {
2173 unsigned int hash =
2174 clientstr_hashval(unconf->cl_recdir);
2175 conf = find_confirmed_client_by_str(unconf->cl_recdir,
2176 hash);
2177 if (conf) {
2178 nfsd4_remove_clid_dir(conf);
2179 expire_client(conf);
2180 }
2181 move_to_confirmed(unconf);
2182 conf = unconf;
2183 nfsd4_probe_callback(conf);
2184 status = nfs_ok;
2185 }
2186 	} else if ((!conf || !same_verf(&conf->cl_confirm, &confirm))
2187 			&& (!unconf || !same_verf(&unconf->cl_confirm,
2188 							&confirm))) {
2189 /*
2190 * RFC 3530 14.2.34 CASE 4:
2191 * Client probably hasn't noticed that we rebooted yet.
2192 */
2193 status = nfserr_stale_clientid;
2194 } else {
2195 /* check that we have hit one of the cases...*/
2196 status = nfserr_clid_inuse;
2197 }
2198 out:
2199 nfs4_unlock_state();
2200 return status;
2201 }
2202
2203 /* OPEN Share state helper functions */
2204 static inline struct nfs4_file *
2205 alloc_init_file(struct inode *ino)
2206 {
2207 struct nfs4_file *fp;
2208 unsigned int hashval = file_hashval(ino);
2209
2210 fp = kmem_cache_alloc(file_slab, GFP_KERNEL);
2211 if (fp) {
2212 atomic_set(&fp->fi_ref, 1);
2213 INIT_LIST_HEAD(&fp->fi_hash);
2214 INIT_LIST_HEAD(&fp->fi_stateids);
2215 INIT_LIST_HEAD(&fp->fi_delegations);
2216 fp->fi_inode = igrab(ino);
2217 fp->fi_id = current_fileid++;
2218 fp->fi_had_conflict = false;
2219 fp->fi_lease = NULL;
2220 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
2221 memset(fp->fi_access, 0, sizeof(fp->fi_access));
2222 spin_lock(&recall_lock);
2223 list_add(&fp->fi_hash, &file_hashtbl[hashval]);
2224 spin_unlock(&recall_lock);
2225 return fp;
2226 }
2227 return NULL;
2228 }
2229
2230 static void
2231 nfsd4_free_slab(struct kmem_cache **slab)
2232 {
2233 if (*slab == NULL)
2234 return;
2235 kmem_cache_destroy(*slab);
2236 *slab = NULL;
2237 }
2238
2239 void
2240 nfsd4_free_slabs(void)
2241 {
2242 nfsd4_free_slab(&openowner_slab);
2243 nfsd4_free_slab(&lockowner_slab);
2244 nfsd4_free_slab(&file_slab);
2245 nfsd4_free_slab(&stateid_slab);
2246 nfsd4_free_slab(&deleg_slab);
2247 }
2248
2249 static int
2250 nfsd4_init_slabs(void)
2251 {
2252 openowner_slab = kmem_cache_create("nfsd4_openowners",
2253 sizeof(struct nfs4_openowner), 0, 0, NULL);
2254 if (openowner_slab == NULL)
2255 goto out_nomem;
2256 lockowner_slab = kmem_cache_create("nfsd4_lockowners",
2257 			sizeof(struct nfs4_lockowner), 0, 0, NULL);
2258 if (lockowner_slab == NULL)
2259 goto out_nomem;
2260 file_slab = kmem_cache_create("nfsd4_files",
2261 sizeof(struct nfs4_file), 0, 0, NULL);
2262 if (file_slab == NULL)
2263 goto out_nomem;
2264 stateid_slab = kmem_cache_create("nfsd4_stateids",
2265 sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
2266 if (stateid_slab == NULL)
2267 goto out_nomem;
2268 deleg_slab = kmem_cache_create("nfsd4_delegations",
2269 sizeof(struct nfs4_delegation), 0, 0, NULL);
2270 if (deleg_slab == NULL)
2271 goto out_nomem;
2272 return 0;
2273 out_nomem:
2274 nfsd4_free_slabs();
2275 dprintk("nfsd4: out of memory while initializing nfsv4\n");
2276 return -ENOMEM;
2277 }
2278
2279 void nfs4_free_openowner(struct nfs4_openowner *oo)
2280 {
2281 kfree(oo->oo_owner.so_owner.data);
2282 kmem_cache_free(openowner_slab, oo);
2283 }
2284
2285 void nfs4_free_lockowner(struct nfs4_lockowner *lo)
2286 {
2287 kfree(lo->lo_owner.so_owner.data);
2288 kmem_cache_free(lockowner_slab, lo);
2289 }
2290
2291 static void init_nfs4_replay(struct nfs4_replay *rp)
2292 {
2293 rp->rp_status = nfserr_serverfault;
2294 rp->rp_buflen = 0;
2295 rp->rp_buf = rp->rp_ibuf;
2296 }
2297
2298 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
2299 {
2300 struct nfs4_stateowner *sop;
2301
2302 sop = kmem_cache_alloc(slab, GFP_KERNEL);
2303 if (!sop)
2304 return NULL;
2305
2306 sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
2307 if (!sop->so_owner.data) {
2308 kmem_cache_free(slab, sop);
2309 return NULL;
2310 }
2311 sop->so_owner.len = owner->len;
2312
2313 INIT_LIST_HEAD(&sop->so_stateids);
2314 sop->so_id = current_ownerid++;
2315 sop->so_client = clp;
2316 init_nfs4_replay(&sop->so_replay);
2317 return sop;
2318 }
2319
2320 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
2321 {
2322 unsigned int idhashval;
2323
2324 idhashval = open_ownerid_hashval(oo->oo_owner.so_id);
2325 list_add(&oo->oo_owner.so_idhash, &open_ownerid_hashtbl[idhashval]);
2326 list_add(&oo->oo_owner.so_strhash, &open_ownerstr_hashtbl[strhashval]);
2327 list_add(&oo->oo_perclient, &clp->cl_openowners);
2328 }
2329
2330 static struct nfs4_openowner *
2331 alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfsd4_open *open) {
2332 struct nfs4_openowner *oo;
2333
2334 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
2335 if (!oo)
2336 return NULL;
2337 oo->oo_owner.so_is_open_owner = 1;
2338 oo->oo_owner.so_seqid = open->op_seqid;
2339 oo->oo_confirmed = 0;
2340 oo->oo_time = 0;
2341 INIT_LIST_HEAD(&oo->oo_close_lru);
2342 hash_openowner(oo, clp, strhashval);
2343 return oo;
2344 }
2345
2346 static inline void
2347 init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
2348 struct nfs4_openowner *oo = open->op_openowner;
2349
2350 INIT_LIST_HEAD(&stp->st_lockowners);
2351 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
2352 list_add(&stp->st_perfile, &fp->fi_stateids);
2353 stp->st_stid.sc_type = NFS4_OPEN_STID;
2354 stp->st_stateowner = &oo->oo_owner;
2355 get_nfs4_file(fp);
2356 stp->st_file = fp;
2357 stp->st_stid.sc_stateid.si_boot = boot_time;
2358 stp->st_stid.sc_stateid.si_stateownerid = oo->oo_owner.so_id;
2359 stp->st_stid.sc_stateid.si_fileid = fp->fi_id;
2360 /* note will be incremented before first return to client: */
2361 stp->st_stid.sc_stateid.si_generation = 0;
2362 hash_stid(&stp->st_stid);
2363 stp->st_access_bmap = 0;
2364 stp->st_deny_bmap = 0;
2365 __set_bit(open->op_share_access & ~NFS4_SHARE_WANT_MASK,
2366 &stp->st_access_bmap);
2367 __set_bit(open->op_share_deny, &stp->st_deny_bmap);
2368 stp->st_openstp = NULL;
2369 }
2370
2371 static void
2372 move_to_close_lru(struct nfs4_openowner *oo)
2373 {
2374 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
2375
2376 list_move_tail(&oo->oo_close_lru, &close_lru);
2377 oo->oo_time = get_seconds();
2378 }
2379
2380 static int
2381 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner,
2382 clientid_t *clid)
2383 {
2384 return (sop->so_owner.len == owner->len) &&
2385 0 == memcmp(sop->so_owner.data, owner->data, owner->len) &&
2386 (sop->so_client->cl_clientid.cl_id == clid->cl_id);
2387 }
2388
2389 static struct nfs4_openowner *
2390 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open)
2391 {
2392 struct nfs4_stateowner *so = NULL;
2393
2394 list_for_each_entry(so, &open_ownerstr_hashtbl[hashval], so_strhash) {
2395 if (same_owner_str(so, &open->op_owner, &open->op_clientid))
2396 return container_of(so, struct nfs4_openowner, oo_owner);
2397 }
2398 return NULL;
2399 }
2400
2401 /* search file_hashtbl[] for file */
2402 static struct nfs4_file *
2403 find_file(struct inode *ino)
2404 {
2405 unsigned int hashval = file_hashval(ino);
2406 struct nfs4_file *fp;
2407
2408 spin_lock(&recall_lock);
2409 list_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
2410 if (fp->fi_inode == ino) {
2411 get_nfs4_file(fp);
2412 spin_unlock(&recall_lock);
2413 return fp;
2414 }
2415 }
2416 spin_unlock(&recall_lock);
2417 return NULL;
2418 }
2419
2420 static inline int access_valid(u32 x, u32 minorversion)
2421 {
2422 if ((x & NFS4_SHARE_ACCESS_MASK) < NFS4_SHARE_ACCESS_READ)
2423 return 0;
2424 if ((x & NFS4_SHARE_ACCESS_MASK) > NFS4_SHARE_ACCESS_BOTH)
2425 return 0;
2426 x &= ~NFS4_SHARE_ACCESS_MASK;
2427 if (minorversion && x) {
2428 if ((x & NFS4_SHARE_WANT_MASK) > NFS4_SHARE_WANT_CANCEL)
2429 return 0;
2430 if ((x & NFS4_SHARE_WHEN_MASK) > NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED)
2431 return 0;
2432 x &= ~(NFS4_SHARE_WANT_MASK | NFS4_SHARE_WHEN_MASK);
2433 }
2434 if (x)
2435 return 0;
2436 return 1;
2437 }
2438
2439 static inline int deny_valid(u32 x)
2440 {
2441 /* Note: unlike access bits, deny bits may be zero. */
2442 return x <= NFS4_SHARE_DENY_BOTH;
2443 }
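/*
 * Examples: NFS4_SHARE_ACCESS_READ (1), _WRITE (2) and _BOTH (3) all
 * pass access_valid() in either minor version, while 0 fails because an
 * OPEN must request some access.  A 4.1 client may also OR in want bits
 * (e.g. NFS4_SHARE_WANT_NO_DELEG); under minorversion 0 those bits
 * survive the masking and trip the final "if (x)", so the same request
 * is rejected.  deny_valid() only rejects values above
 * NFS4_SHARE_DENY_BOTH (3), since a zero deny is legal.
 */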
2444
2445 /*
2446 * Called to check deny when READ with all zero stateid or
2447 * WRITE with all zero or all one stateid
2448 */
2449 static __be32
2450 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
2451 {
2452 struct inode *ino = current_fh->fh_dentry->d_inode;
2453 struct nfs4_file *fp;
2454 struct nfs4_ol_stateid *stp;
2455 __be32 ret;
2456
2457 dprintk("NFSD: nfs4_share_conflict\n");
2458
2459 fp = find_file(ino);
2460 if (!fp)
2461 return nfs_ok;
2462 ret = nfserr_locked;
2463 /* Search for conflicting share reservations */
2464 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) {
2465 if (test_bit(deny_type, &stp->st_deny_bmap) ||
2466 test_bit(NFS4_SHARE_DENY_BOTH, &stp->st_deny_bmap))
2467 goto out;
2468 }
2469 ret = nfs_ok;
2470 out:
2471 put_nfs4_file(fp);
2472 return ret;
2473 }
2474
2475 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
2476 {
2477 /* We're assuming the state code never drops its reference
2478 * without first removing the lease. Since we're in this lease
2479 * callback (and since the lease code is serialized by the kernel
2480 	 * lock) we know the server hasn't removed the lease yet, so we
2481 	 * know it's safe to take a reference: */
2482 atomic_inc(&dp->dl_count);
2483
2484 list_add_tail(&dp->dl_recall_lru, &del_recall_lru);
2485
2486 	/* Only place dl_time is set; protected by lock_flocks */
2487 dp->dl_time = get_seconds();
2488
2489 nfsd4_cb_recall(dp);
2490 }
2491
2492 /* Called from break_lease() with lock_flocks() held. */
2493 static void nfsd_break_deleg_cb(struct file_lock *fl)
2494 {
2495 struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
2496 struct nfs4_delegation *dp;
2497
2498 BUG_ON(!fp);
2499 /* We assume break_lease is only called once per lease: */
2500 BUG_ON(fp->fi_had_conflict);
2501 /*
2502 	 * We don't want the locks code to time out the lease for us;
2503 	 * we'll remove it ourselves if a delegation isn't returned
2504 * in time:
2505 */
2506 fl->fl_break_time = 0;
2507
2508 spin_lock(&recall_lock);
2509 fp->fi_had_conflict = true;
2510 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
2511 nfsd_break_one_deleg(dp);
2512 spin_unlock(&recall_lock);
2513 }
2514
2515 static
2516 int nfsd_change_deleg_cb(struct file_lock **onlist, int arg)
2517 {
2518 if (arg & F_UNLCK)
2519 return lease_modify(onlist, arg);
2520 else
2521 return -EAGAIN;
2522 }
2523
2524 static const struct lock_manager_operations nfsd_lease_mng_ops = {
2525 .lm_break = nfsd_break_deleg_cb,
2526 .lm_change = nfsd_change_deleg_cb,
2527 };
2528
2529 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
2530 {
2531 if (nfsd4_has_session(cstate))
2532 return nfs_ok;
2533 if (seqid == so->so_seqid - 1)
2534 return nfserr_replay_me;
2535 if (seqid == so->so_seqid)
2536 return nfs_ok;
2537 return nfserr_bad_seqid;
2538 }
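/*
 * Worked example for nfsd4_check_seqid() (hypothetical values): if the
 * last seqid-mutating operation left so->so_seqid == 4, then a request
 * carrying seqid 4 is the expected next operation (nfs_ok), seqid 3 is
 * a retransmission of the previous one (nfserr_replay_me, answered from
 * the owner's replay cache), and any other value is nfserr_bad_seqid.
 * Sessions make the per-owner seqid redundant, hence the early nfs_ok.
 */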
2539
2540 __be32
2541 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
2542 struct nfsd4_open *open)
2543 {
2544 clientid_t *clientid = &open->op_clientid;
2545 struct nfs4_client *clp = NULL;
2546 unsigned int strhashval;
2547 struct nfs4_openowner *oo = NULL;
2548 __be32 status;
2549
2550 if (!check_name(open->op_owner))
2551 return nfserr_inval;
2552
2553 if (STALE_CLIENTID(&open->op_clientid))
2554 return nfserr_stale_clientid;
2555
2556 strhashval = open_ownerstr_hashval(clientid->cl_id, &open->op_owner);
2557 oo = find_openstateowner_str(strhashval, open);
2558 open->op_openowner = oo;
2559 if (!oo) {
2560 /* Make sure the client's lease hasn't expired. */
2561 clp = find_confirmed_client(clientid);
2562 if (clp == NULL)
2563 return nfserr_expired;
2564 goto renew;
2565 }
2566 if (!oo->oo_confirmed) {
2567 /* Replace unconfirmed owners without checking for replay. */
2568 clp = oo->oo_owner.so_client;
2569 release_openowner(oo);
2570 open->op_openowner = NULL;
2571 goto renew;
2572 }
2573 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
2574 if (status)
2575 return status;
2576 renew:
2577 if (open->op_openowner == NULL) {
2578 oo = alloc_init_open_stateowner(strhashval, clp, open);
2579 if (oo == NULL)
2580 return nfserr_jukebox;
2581 open->op_openowner = oo;
2582 }
2583 list_del_init(&oo->oo_close_lru);
2584 renew_client(oo->oo_owner.so_client);
2585 return nfs_ok;
2586 }
2587
2588 static inline __be32
2589 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
2590 {
2591 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
2592 return nfserr_openmode;
2593 else
2594 return nfs_ok;
2595 }
2596
2597 static int share_access_to_flags(u32 share_access)
2598 {
2599 share_access &= ~NFS4_SHARE_WANT_MASK;
2600
2601 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
2602 }
2603
2604 static struct nfs4_delegation *find_deleg_stateid(stateid_t *s)
2605 {
2606 struct nfs4_stid *ret;
2607
2608 ret = find_stateid_by_type(s, NFS4_DELEG_STID);
2609 if (!ret)
2610 return NULL;
2611 return delegstateid(ret);
2612 }
2613
2614 static __be32
2615 nfs4_check_deleg(struct nfs4_file *fp, struct nfsd4_open *open,
2616 struct nfs4_delegation **dp)
2617 {
2618 int flags;
2619 __be32 status = nfserr_bad_stateid;
2620
2621 *dp = find_deleg_stateid(&open->op_delegate_stateid);
2622 if (*dp == NULL)
2623 goto out;
2624 flags = share_access_to_flags(open->op_share_access);
2625 status = nfs4_check_delegmode(*dp, flags);
2626 if (status)
2627 *dp = NULL;
2628 out:
2629 if (open->op_claim_type != NFS4_OPEN_CLAIM_DELEGATE_CUR)
2630 return nfs_ok;
2631 if (status)
2632 return status;
2633 open->op_openowner->oo_confirmed = 1;
2634 return nfs_ok;
2635 }
2636
2637 static __be32
2638 nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_ol_stateid **stpp)
2639 {
2640 struct nfs4_ol_stateid *local;
2641 struct nfs4_openowner *oo = open->op_openowner;
2642
2643 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
2644 /* ignore lock owners */
2645 if (local->st_stateowner->so_is_open_owner == 0)
2646 continue;
2647 /* remember if we have seen this open owner */
2648 if (local->st_stateowner == &oo->oo_owner)
2649 *stpp = local;
2650 /* check for conflicting share reservations */
2651 if (!test_share(local, open))
2652 return nfserr_share_denied;
2653 }
2654 return nfs_ok;
2655 }
2656
2657 static inline struct nfs4_ol_stateid *
2658 nfs4_alloc_stateid(void)
2659 {
2660 return kmem_cache_alloc(stateid_slab, GFP_KERNEL);
2661 }
2662
2663 static inline int nfs4_access_to_access(u32 nfs4_access)
2664 {
2665 int flags = 0;
2666
2667 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
2668 flags |= NFSD_MAY_READ;
2669 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
2670 flags |= NFSD_MAY_WRITE;
2671 return flags;
2672 }
2673
2674 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
2675 struct svc_fh *cur_fh, struct nfsd4_open *open)
2676 {
2677 __be32 status;
2678 int oflag = nfs4_access_to_omode(open->op_share_access);
2679 int access = nfs4_access_to_access(open->op_share_access);
2680
2681 /* CLAIM_DELEGATE_CUR is used in response to a broken lease;
2682 * allowing it to break the lease and return EAGAIN leaves the
2683 * client unable to make progress in returning the delegation */
2684 if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR)
2685 access |= NFSD_MAY_NOT_BREAK_LEASE;
2686
2687 if (!fp->fi_fds[oflag]) {
2688 status = nfsd_open(rqstp, cur_fh, S_IFREG, access,
2689 &fp->fi_fds[oflag]);
2690 if (status)
2691 return status;
2692 }
2693 nfs4_file_get_access(fp, oflag);
2694
2695 return nfs_ok;
2696 }
2697
2698 static __be32
2699 nfs4_new_open(struct svc_rqst *rqstp, struct nfs4_ol_stateid **stpp,
2700 struct nfs4_file *fp, struct svc_fh *cur_fh,
2701 struct nfsd4_open *open)
2702 {
2703 struct nfs4_ol_stateid *stp;
2704 __be32 status;
2705
2706 stp = nfs4_alloc_stateid();
2707 if (stp == NULL)
2708 return nfserr_jukebox;
2709
2710 status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
2711 if (status) {
2712 kmem_cache_free(stateid_slab, stp);
2713 return status;
2714 }
2715 *stpp = stp;
2716 return 0;
2717 }
2718
2719 static inline __be32
2720 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
2721 struct nfsd4_open *open)
2722 {
2723 struct iattr iattr = {
2724 .ia_valid = ATTR_SIZE,
2725 .ia_size = 0,
2726 };
2727 if (!open->op_truncate)
2728 return 0;
2729 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
2730 return nfserr_inval;
2731 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
2732 }
2733
2734 static __be32
2735 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
2736 {
2737 u32 op_share_access = open->op_share_access & ~NFS4_SHARE_WANT_MASK;
2738 bool new_access;
2739 __be32 status;
2740
2741 new_access = !test_bit(op_share_access, &stp->st_access_bmap);
2742 if (new_access) {
2743 status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
2744 if (status)
2745 return status;
2746 }
2747 status = nfsd4_truncate(rqstp, cur_fh, open);
2748 if (status) {
2749 if (new_access) {
2750 int oflag = nfs4_access_to_omode(op_share_access);
2751 nfs4_file_put_access(fp, oflag);
2752 }
2753 return status;
2754 }
2755 /* remember the open */
2756 __set_bit(op_share_access, &stp->st_access_bmap);
2757 __set_bit(open->op_share_deny, &stp->st_deny_bmap);
2758
2759 return nfs_ok;
2760 }
2761
2762
2763 static void
2764 nfs4_set_claim_prev(struct nfsd4_open *open)
2765 {
2766 open->op_openowner->oo_confirmed = 1;
2767 open->op_openowner->oo_owner.so_client->cl_firststate = 1;
2768 }
2769
2770 /* Should we give out recallable state?: */
2771 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
2772 {
2773 if (clp->cl_cb_state == NFSD4_CB_UP)
2774 return true;
2775 /*
2776 * In the sessions case, since we don't have to establish a
2777 * separate connection for callbacks, we assume it's OK
2778 * until we hear otherwise:
2779 */
2780 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
2781 }
2782
2783 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag)
2784 {
2785 struct file_lock *fl;
2786
2787 fl = locks_alloc_lock();
2788 if (!fl)
2789 return NULL;
2790 locks_init_lock(fl);
2791 fl->fl_lmops = &nfsd_lease_mng_ops;
2792 fl->fl_flags = FL_LEASE;
2793 	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
2794 fl->fl_end = OFFSET_MAX;
2795 fl->fl_owner = (fl_owner_t)(dp->dl_file);
2796 fl->fl_pid = current->tgid;
2797 return fl;
2798 }
2799
2800 static int nfs4_setlease(struct nfs4_delegation *dp, int flag)
2801 {
2802 struct nfs4_file *fp = dp->dl_file;
2803 struct file_lock *fl;
2804 int status;
2805
2806 fl = nfs4_alloc_init_lease(dp, flag);
2807 if (!fl)
2808 return -ENOMEM;
2809 fl->fl_file = find_readable_file(fp);
2810 list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations);
2811 status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
2812 if (status) {
2813 list_del_init(&dp->dl_perclnt);
2814 locks_free_lock(fl);
2815 		return status;
2816 }
2817 fp->fi_lease = fl;
2818 fp->fi_deleg_file = fl->fl_file;
2819 get_file(fp->fi_deleg_file);
2820 atomic_set(&fp->fi_delegees, 1);
2821 list_add(&dp->dl_perfile, &fp->fi_delegations);
2822 return 0;
2823 }
2824
2825 static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag)
2826 {
2827 struct nfs4_file *fp = dp->dl_file;
2828
2829 if (!fp->fi_lease)
2830 return nfs4_setlease(dp, flag);
2831 spin_lock(&recall_lock);
2832 if (fp->fi_had_conflict) {
2833 spin_unlock(&recall_lock);
2834 return -EAGAIN;
2835 }
2836 atomic_inc(&fp->fi_delegees);
2837 list_add(&dp->dl_perfile, &fp->fi_delegations);
2838 spin_unlock(&recall_lock);
2839 list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations);
2840 return 0;
2841 }
2842
2843 /*
2844 * Attempt to hand out a delegation.
2845 */
2846 static void
2847 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_ol_stateid *stp)
2848 {
2849 struct nfs4_delegation *dp;
2850 struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner);
2851 int cb_up;
2852 int status, flag = 0;
2853
2854 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
2855 flag = NFS4_OPEN_DELEGATE_NONE;
2856 open->op_recall = 0;
2857 switch (open->op_claim_type) {
2858 case NFS4_OPEN_CLAIM_PREVIOUS:
2859 if (!cb_up)
2860 open->op_recall = 1;
2861 flag = open->op_delegate_type;
2862 if (flag == NFS4_OPEN_DELEGATE_NONE)
2863 goto out;
2864 break;
2865 case NFS4_OPEN_CLAIM_NULL:
2866 /* Let's not give out any delegations till everyone's
2867 * had the chance to reclaim theirs.... */
2868 if (locks_in_grace())
2869 goto out;
2870 if (!cb_up || !oo->oo_confirmed)
2871 goto out;
2872 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
2873 flag = NFS4_OPEN_DELEGATE_WRITE;
2874 else
2875 flag = NFS4_OPEN_DELEGATE_READ;
2876 break;
2877 default:
2878 goto out;
2879 }
2880
2881 dp = alloc_init_deleg(oo->oo_owner.so_client, stp, fh, flag);
2882 if (dp == NULL)
2883 goto out_no_deleg;
2884 status = nfs4_set_delegation(dp, flag);
2885 if (status)
2886 goto out_free;
2887
2888 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
2889
2890 dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
2891 STATEID_VAL(&dp->dl_stid.sc_stateid));
2892 out:
2893 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS
2894 && flag == NFS4_OPEN_DELEGATE_NONE
2895 && open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE)
2896 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
2897 open->op_delegate_type = flag;
2898 return;
2899 out_free:
2900 nfs4_put_delegation(dp);
2901 out_no_deleg:
2902 flag = NFS4_OPEN_DELEGATE_NONE;
2903 goto out;
2904 }
2905
2906 /*
2907 * called with nfs4_lock_state() held.
2908 */
2909 __be32
2910 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
2911 {
2912 struct nfsd4_compoundres *resp = rqstp->rq_resp;
2913 struct nfs4_file *fp = NULL;
2914 struct inode *ino = current_fh->fh_dentry->d_inode;
2915 struct nfs4_ol_stateid *stp = NULL;
2916 struct nfs4_delegation *dp = NULL;
2917 __be32 status;
2918
2919 status = nfserr_inval;
2920 if (!access_valid(open->op_share_access, resp->cstate.minorversion)
2921 || !deny_valid(open->op_share_deny))
2922 goto out;
2923 /*
2924 * Lookup file; if found, lookup stateid and check open request,
2925 * and check for delegations in the process of being recalled.
2926 * If not found, create the nfs4_file struct
2927 */
2928 fp = find_file(ino);
2929 if (fp) {
2930 if ((status = nfs4_check_open(fp, open, &stp)))
2931 goto out;
2932 status = nfs4_check_deleg(fp, open, &dp);
2933 if (status)
2934 goto out;
2935 } else {
2936 status = nfserr_bad_stateid;
2937 if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR)
2938 goto out;
2939 status = nfserr_jukebox;
2940 fp = alloc_init_file(ino);
2941 if (fp == NULL)
2942 goto out;
2943 }
2944
2945 /*
2946 * OPEN the file, or upgrade an existing OPEN.
2947 * If truncate fails, the OPEN fails.
2948 */
2949 if (stp) {
2950 /* Stateid was found, this is an OPEN upgrade */
2951 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
2952 if (status)
2953 goto out;
2954 } else {
2955 status = nfs4_new_open(rqstp, &stp, fp, current_fh, open);
2956 if (status)
2957 goto out;
2958 init_open_stateid(stp, fp, open);
2959 status = nfsd4_truncate(rqstp, current_fh, open);
2960 if (status) {
2961 release_open_stateid(stp);
2962 goto out;
2963 }
2964 }
2965 update_stateid(&stp->st_stid.sc_stateid);
2966 memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
2967
2968 if (nfsd4_has_session(&resp->cstate))
2969 open->op_openowner->oo_confirmed = 1;
2970
2971 /*
2972 * Attempt to hand out a delegation. No error return, because the
2973 * OPEN succeeds even if we fail.
2974 */
2975 nfs4_open_delegation(current_fh, open, stp);
2976
2977 status = nfs_ok;
2978
2979 dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
2980 STATEID_VAL(&stp->st_stid.sc_stateid));
2981 out:
2982 if (fp)
2983 put_nfs4_file(fp);
2984 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
2985 nfs4_set_claim_prev(open);
2986 /*
2987 * To finish the open response, we just need to set the rflags.
2988 */
2989 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
2990 if (!open->op_openowner->oo_confirmed &&
2991 !nfsd4_has_session(&resp->cstate))
2992 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
2993
2994 return status;
2995 }
2996
2997 __be32
2998 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2999 clientid_t *clid)
3000 {
3001 struct nfs4_client *clp;
3002 __be32 status;
3003
3004 nfs4_lock_state();
3005 dprintk("process_renew(%08x/%08x): starting\n",
3006 clid->cl_boot, clid->cl_id);
3007 status = nfserr_stale_clientid;
3008 if (STALE_CLIENTID(clid))
3009 goto out;
3010 clp = find_confirmed_client(clid);
3011 status = nfserr_expired;
3012 if (clp == NULL) {
3013 /* We assume the client took too long to RENEW. */
3014 dprintk("nfsd4_renew: clientid not found!\n");
3015 goto out;
3016 }
3017 renew_client(clp);
3018 status = nfserr_cb_path_down;
3019 if (!list_empty(&clp->cl_delegations)
3020 && clp->cl_cb_state != NFSD4_CB_UP)
3021 goto out;
3022 status = nfs_ok;
3023 out:
3024 nfs4_unlock_state();
3025 return status;
3026 }
3027
3028 static struct lock_manager nfsd4_manager = {
3029 };
3030
3031 static void
3032 nfsd4_end_grace(void)
3033 {
3034 dprintk("NFSD: end of grace period\n");
3035 nfsd4_recdir_purge_old();
3036 locks_end_grace(&nfsd4_manager);
3037 /*
3038 * Now that every NFSv4 client has had the chance to recover and
3039 * to see the (possibly new, possibly shorter) lease time, we
3040 * can safely set the next grace time to the current lease time:
3041 */
3042 nfsd4_grace = nfsd4_lease;
3043 }
3044
3045 static time_t
3046 nfs4_laundromat(void)
3047 {
3048 struct nfs4_client *clp;
3049 struct nfs4_openowner *oo;
3050 struct nfs4_delegation *dp;
3051 struct list_head *pos, *next, reaplist;
3052 time_t cutoff = get_seconds() - nfsd4_lease;
3053 time_t t, clientid_val = nfsd4_lease;
3054 time_t u, test_val = nfsd4_lease;
3055
3056 nfs4_lock_state();
3057
3058 dprintk("NFSD: laundromat service - starting\n");
3059 if (locks_in_grace())
3060 nfsd4_end_grace();
3061 INIT_LIST_HEAD(&reaplist);
3062 spin_lock(&client_lock);
3063 list_for_each_safe(pos, next, &client_lru) {
3064 clp = list_entry(pos, struct nfs4_client, cl_lru);
3065 if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
3066 t = clp->cl_time - cutoff;
3067 if (clientid_val > t)
3068 clientid_val = t;
3069 break;
3070 }
3071 if (atomic_read(&clp->cl_refcount)) {
3072 dprintk("NFSD: client in use (clientid %08x)\n",
3073 clp->cl_clientid.cl_id);
3074 continue;
3075 }
3076 unhash_client_locked(clp);
3077 list_add(&clp->cl_lru, &reaplist);
3078 }
3079 spin_unlock(&client_lock);
3080 list_for_each_safe(pos, next, &reaplist) {
3081 clp = list_entry(pos, struct nfs4_client, cl_lru);
3082 dprintk("NFSD: purging unused client (clientid %08x)\n",
3083 clp->cl_clientid.cl_id);
3084 nfsd4_remove_clid_dir(clp);
3085 expire_client(clp);
3086 }
3087 spin_lock(&recall_lock);
3088 list_for_each_safe(pos, next, &del_recall_lru) {
3089 		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
3090 if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
3091 u = dp->dl_time - cutoff;
3092 if (test_val > u)
3093 test_val = u;
3094 break;
3095 }
3096 list_move(&dp->dl_recall_lru, &reaplist);
3097 }
3098 spin_unlock(&recall_lock);
3099 list_for_each_safe(pos, next, &reaplist) {
3100 		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
3101 list_del_init(&dp->dl_recall_lru);
3102 unhash_delegation(dp);
3103 }
3104 test_val = nfsd4_lease;
3105 list_for_each_safe(pos, next, &close_lru) {
3106 oo = container_of(pos, struct nfs4_openowner, oo_close_lru);
3107 if (time_after((unsigned long)oo->oo_time, (unsigned long)cutoff)) {
3108 u = oo->oo_time - cutoff;
3109 if (test_val > u)
3110 test_val = u;
3111 break;
3112 }
3113 dprintk("NFSD: purging unused open stateowner (so_id %d)\n",
3114 oo->oo_owner.so_id);
3115 release_openowner(oo);
3116 }
3117 if (clientid_val < NFSD_LAUNDROMAT_MINTIMEOUT)
3118 clientid_val = NFSD_LAUNDROMAT_MINTIMEOUT;
3119 nfs4_unlock_state();
3120 return clientid_val;
3121 }
3122
3123 static struct workqueue_struct *laundry_wq;
3124 static void laundromat_main(struct work_struct *);
3125 static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main);
3126
3127 static void
3128 laundromat_main(struct work_struct *not_used)
3129 {
3130 time_t t;
3131
3132 t = nfs4_laundromat();
3133 dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
3134 queue_delayed_work(laundry_wq, &laundromat_work, t*HZ);
3135 }
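/*
 * nfs4_laundromat() returns the number of seconds until the next lease
 * or delegation is due to expire (at least NFSD_LAUNDROMAT_MINTIMEOUT),
 * so the work item re-arms itself t*HZ jiffies, i.e. t seconds, later.
 */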
3136
3137 static struct nfs4_openowner *search_close_lru(u32 st_id)
3138 {
3139 struct nfs4_openowner *local;
3140
3141 list_for_each_entry(local, &close_lru, oo_close_lru) {
3142 if (local->oo_owner.so_id == st_id)
3143 return local;
3144 }
3145 return NULL;
3146 }
3147
3148 static inline int
3149 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
3150 {
3151 return fhp->fh_dentry->d_inode != stp->st_file->fi_inode;
3152 }
3153
3154 static int
3155 STALE_STATEID(stateid_t *stateid)
3156 {
3157 if (stateid->si_boot == boot_time)
3158 return 0;
3159 dprintk("NFSD: stale stateid " STATEID_FMT "!\n",
3160 STATEID_VAL(stateid));
3161 return 1;
3162 }
3163
3164 static inline int
3165 access_permit_read(unsigned long access_bmap)
3166 {
3167 return test_bit(NFS4_SHARE_ACCESS_READ, &access_bmap) ||
3168 test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap) ||
3169 test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap);
3170 }
3171
3172 static inline int
3173 access_permit_write(unsigned long access_bmap)
3174 {
3175 return test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap) ||
3176 test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap);
3177 }
3178
3179 static
3180 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
3181 {
3182 __be32 status = nfserr_openmode;
3183
3184 	/* For lock stateids, we test the parent open, not the lock: */
3185 if (stp->st_openstp)
3186 stp = stp->st_openstp;
3187 if ((flags & WR_STATE) && (!access_permit_write(stp->st_access_bmap)))
3188 goto out;
3189 if ((flags & RD_STATE) && (!access_permit_read(stp->st_access_bmap)))
3190 goto out;
3191 status = nfs_ok;
3192 out:
3193 return status;
3194 }
3195
3196 static inline __be32
3197 check_special_stateids(svc_fh *current_fh, stateid_t *stateid, int flags)
3198 {
3199 if (ONE_STATEID(stateid) && (flags & RD_STATE))
3200 return nfs_ok;
3201 else if (locks_in_grace()) {
3202 /* Answer in remaining cases depends on existence of
3203 * conflicting state; so we must wait out the grace period. */
3204 return nfserr_grace;
3205 } else if (flags & WR_STATE)
3206 return nfs4_share_conflict(current_fh,
3207 NFS4_SHARE_DENY_WRITE);
3208 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
3209 return nfs4_share_conflict(current_fh,
3210 NFS4_SHARE_DENY_READ);
3211 }
3212
3213 /*
3214 * Allow READ/WRITE during grace period on recovered state only for files
3215 * that are not able to provide mandatory locking.
3216 */
3217 static inline int
3218 grace_disallows_io(struct inode *inode)
3219 {
3220 return locks_in_grace() && mandatory_lock(inode);
3221 }
3222
3223 /* Returns true iff a is later than b: */
3224 static bool stateid_generation_after(stateid_t *a, stateid_t *b)
3225 {
3226 return (s32)a->si_generation - (s32)b->si_generation > 0;
3227 }
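/*
 * The signed subtraction above is standard serial-number arithmetic, so
 * the comparison survives u32 wraparound.  Worked example (values
 * hypothetical): a->si_generation == 1, b->si_generation == 0xffffffff
 * gives (s32)1 - (s32)0xffffffff == 1 - (-1) == 2 > 0, so a is
 * correctly treated as later than b even though a < b as a u32.
 */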
3228
3229 static int check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
3230 {
3231 /*
3232 * When sessions are used the stateid generation number is ignored
3233 * when it is zero.
3234 */
3235 if (has_session && in->si_generation == 0)
3236 return nfs_ok;
3237
3238 if (in->si_generation == ref->si_generation)
3239 return nfs_ok;
3240
3241 /* If the client sends us a stateid from the future, it's buggy: */
3242 if (stateid_generation_after(in, ref))
3243 return nfserr_bad_stateid;
3244 /*
3245 * However, we could see a stateid from the past, even from a
3246 * non-buggy client. For example, if the client sends a lock
3247 * while some IO is outstanding, the lock may bump si_generation
3248 * while the IO is still in flight. The client could avoid that
3249 * situation by waiting for responses on all the IO requests,
3250 * but better performance may result in retrying IO that
3251 * receives an old_stateid error if requests are rarely
3252 * reordered in flight:
3253 */
3254 return nfserr_old_stateid;
3255 }
3256
3257 static int is_delegation_stateid(stateid_t *stateid)
3258 {
3259 return stateid->si_fileid == 0;
3260 }
3261
3262 __be32 nfs4_validate_stateid(stateid_t *stateid, bool has_session)
3263 {
3264 struct nfs4_stid *s;
3265 struct nfs4_ol_stateid *ols;
3266 __be32 status;
3267
3268 if (STALE_STATEID(stateid))
3269 return nfserr_stale_stateid;
3270
3271 s = find_stateid(stateid);
3272 if (!s)
3273 return nfserr_stale_stateid;
3274 status = check_stateid_generation(stateid, &s->sc_stateid, has_session);
3275 if (status)
3276 return status;
3277 if (!(s->sc_type & (NFS4_OPEN_STID | NFS4_LOCK_STID)))
3278 return nfs_ok;
3279 ols = openlockstateid(s);
3280 if (ols->st_stateowner->so_is_open_owner
3281 && !openowner(ols->st_stateowner)->oo_confirmed)
3282 return nfserr_bad_stateid;
3283 return nfs_ok;
3284 }
3285
3286 /*
3287 * Checks for stateid operations
3288 */
3289 __be32
3290 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
3291 stateid_t *stateid, int flags, struct file **filpp)
3292 {
3293 struct nfs4_stid *s;
3294 struct nfs4_ol_stateid *stp = NULL;
3295 struct nfs4_delegation *dp = NULL;
3296 struct svc_fh *current_fh = &cstate->current_fh;
3297 struct inode *ino = current_fh->fh_dentry->d_inode;
3298 __be32 status;
3299
3300 if (filpp)
3301 *filpp = NULL;
3302
3303 if (grace_disallows_io(ino))
3304 return nfserr_grace;
3305
3306 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3307 return check_special_stateids(current_fh, stateid, flags);
3308
3309 status = nfserr_stale_stateid;
3310 if (STALE_STATEID(stateid))
3311 goto out;
3312
3313 /*
3314 * We assume that any stateid that has the current boot time,
3315 * but that we can't find, is expired:
3316 */
3317 status = nfserr_expired;
3318 s = find_stateid(stateid);
3319 if (!s)
3320 goto out;
3321 status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
3322 if (status)
3323 goto out;
3324 if (s->sc_type == NFS4_DELEG_STID) {
3325 dp = delegstateid(s);
3326 status = nfs4_check_delegmode(dp, flags);
3327 if (status)
3328 goto out;
3329 renew_client(dp->dl_client);
3330 if (filpp) {
3331 *filpp = dp->dl_file->fi_deleg_file;
3332 BUG_ON(!*filpp);
3333 }
3334 } else { /* open or lock stateid */
3335 stp = openlockstateid(s);
3336 status = nfserr_bad_stateid;
3337 if (nfs4_check_fh(current_fh, stp))
3338 goto out;
3339 if (stp->st_stateowner->so_is_open_owner
3340 && !openowner(stp->st_stateowner)->oo_confirmed)
3341 goto out;
3342 status = nfs4_check_openmode(stp, flags);
3343 if (status)
3344 goto out;
3345 renew_client(stp->st_stateowner->so_client);
3346 if (filpp) {
3347 if (flags & RD_STATE)
3348 *filpp = find_readable_file(stp->st_file);
3349 else
3350 *filpp = find_writeable_file(stp->st_file);
3351 }
3352 }
3353 status = nfs_ok;
3354 out:
3355 return status;
3356 }
3357
3358 static __be32
3359 nfsd4_free_delegation_stateid(stateid_t *stateid)
3360 {
3361 struct nfs4_delegation *dp = find_deleg_stateid(stateid);
3362 if (dp)
3363 return nfserr_locks_held;
3364
3365 return nfserr_bad_stateid;
3366 }
3367
3368 static __be32
3369 nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
3370 {
3371 if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner)))
3372 return nfserr_locks_held;
3373 release_lock_stateid(stp);
3374 return nfs_ok;
3375 }
3376
3377 /*
3378 * Test if the stateid is valid
3379 */
3380 __be32
3381 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3382 struct nfsd4_test_stateid *test_stateid)
3383 {
3384 test_stateid->ts_has_session = nfsd4_has_session(cstate);
3385 return nfs_ok;
3386 }
3387
3388 /*
3389 * Free a state id
3390 */
3391 __be32
3392 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3393 struct nfsd4_free_stateid *free_stateid)
3394 {
3395 stateid_t *stateid = &free_stateid->fr_stateid;
3396 struct nfs4_ol_stateid *stp;
3397 __be32 ret;
3398
3399 nfs4_lock_state();
3400 if (is_delegation_stateid(stateid)) {
3401 ret = nfsd4_free_delegation_stateid(stateid);
3402 goto out;
3403 }
3404
3405 stp = find_ol_stateid(stateid);
3406 if (!stp) {
3407 ret = nfserr_bad_stateid;
3408 goto out;
3409 }
3410 ret = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, 1);
3411 if (ret)
3412 goto out;
3413
3414 if (stp->st_stid.sc_type == NFS4_OPEN_STID) {
3415 ret = nfserr_locks_held;
3416 goto out;
3417 } else {
3418 ret = nfsd4_free_lock_stateid(stp);
3419 goto out;
3420 }
3421
3422 out:
3423 nfs4_unlock_state();
3424 return ret;
3425 }
3426
3427 static inline int
3428 setlkflg(int type)
3429 {
3430 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
3431 RD_STATE : WR_STATE;
3432 }
3433
3434 static __be32 nfs4_nospecial_stateid_checks(stateid_t *stateid)
3435 {
3436 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3437 return nfserr_bad_stateid;
3438 if (STALE_STATEID(stateid))
3439 return nfserr_stale_stateid;
3440 return nfs_ok;
3441 }
3442
3443 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
3444 {
3445 struct svc_fh *current_fh = &cstate->current_fh;
3446 struct nfs4_stateowner *sop = stp->st_stateowner;
3447 __be32 status;
3448
3449 if (nfs4_check_fh(current_fh, stp))
3450 return nfserr_bad_stateid;
3451 status = nfsd4_check_seqid(cstate, sop, seqid);
3452 if (status)
3453 return status;
3454 return check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
3455 }
3456
3457 /*
3458 * Checks for sequence id mutating operations.
3459 */
3460 static __be32
3461 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
3462 stateid_t *stateid, char typemask,
3463 struct nfs4_ol_stateid **stpp)
3464 {
3465 __be32 status;
3466
3467 dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
3468 seqid, STATEID_VAL(stateid));
3469
3470 *stpp = NULL;
3471 status = nfs4_nospecial_stateid_checks(stateid);
3472 if (status)
3473 return status;
3474 *stpp = find_ol_stateid_by_type(stateid, typemask);
3475 if (*stpp == NULL)
3476 return nfserr_expired;
3477 cstate->replay_owner = (*stpp)->st_stateowner;
3478 renew_client((*stpp)->st_stateowner->so_client);
3479
3480 return nfs4_seqid_op_checks(cstate, stateid, seqid, *stpp);
3481 }
3482
3483 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, stateid_t *stateid, struct nfs4_ol_stateid **stpp)
3484 {
3485 __be32 status;
3486 struct nfs4_openowner *oo;
3487
3488 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
3489 NFS4_OPEN_STID, stpp);
3490 if (status)
3491 return status;
3492 oo = openowner((*stpp)->st_stateowner);
3493 if (!oo->oo_confirmed)
3494 return nfserr_bad_stateid;
3495 return nfs_ok;
3496 }
3497
3498 __be32
3499 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3500 struct nfsd4_open_confirm *oc)
3501 {
3502 __be32 status;
3503 struct nfs4_openowner *oo;
3504 struct nfs4_ol_stateid *stp;
3505
3506 dprintk("NFSD: nfsd4_open_confirm on file %.*s\n",
3507 (int)cstate->current_fh.fh_dentry->d_name.len,
3508 cstate->current_fh.fh_dentry->d_name.name);
3509
3510 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
3511 if (status)
3512 return status;
3513
3514 nfs4_lock_state();
3515
3516 status = nfs4_preprocess_seqid_op(cstate,
3517 oc->oc_seqid, &oc->oc_req_stateid,
3518 NFS4_OPEN_STID, &stp);
3519 if (status)
3520 goto out;
3521 oo = openowner(stp->st_stateowner);
3522 status = nfserr_bad_stateid;
3523 if (oo->oo_confirmed)
3524 goto out;
3525 oo->oo_confirmed = 1;
3526 update_stateid(&stp->st_stid.sc_stateid);
3527 memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3528 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
3529 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
3530
3531 nfsd4_create_clid_dir(oo->oo_owner.so_client);
3532 status = nfs_ok;
3533 out:
3534 if (!cstate->replay_owner)
3535 nfs4_unlock_state();
3536 return status;
3537 }
3538
3539 static inline void nfs4_file_downgrade(struct nfs4_ol_stateid *stp, unsigned int to_access)
3540 {
3541 int i;
3542
3543 for (i = 1; i < 4; i++) {
3544 if (test_bit(i, &stp->st_access_bmap) && !(i & to_access)) {
3545 nfs4_file_put_access(stp->st_file, i);
3546 __clear_bit(i, &stp->st_access_bmap);
3547 }
3548 }
3549 }
3550
3551 static void
3552 reset_union_bmap_deny(unsigned long deny, unsigned long *bmap)
3553 {
3554 int i;
3555 for (i = 0; i < 4; i++) {
3556 if ((i & deny) != i)
3557 __clear_bit(i, bmap);
3558 }
3559 }
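/*
 * Worked example: downgrading to od_share_deny == NFS4_SHARE_DENY_READ
 * (1) keeps only the deny modes that are subsets of the new one.  Bits
 * 0 (DENY_NONE) and 1 (DENY_READ) satisfy (i & deny) == i and survive;
 * bits 2 (DENY_WRITE) and 3 (DENY_BOTH) are cleared.
 */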
3560
3561 __be32
3562 nfsd4_open_downgrade(struct svc_rqst *rqstp,
3563 struct nfsd4_compound_state *cstate,
3564 struct nfsd4_open_downgrade *od)
3565 {
3566 __be32 status;
3567 struct nfs4_ol_stateid *stp;
3568
3569 dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n",
3570 (int)cstate->current_fh.fh_dentry->d_name.len,
3571 cstate->current_fh.fh_dentry->d_name.name);
3572
3573 if (!access_valid(od->od_share_access, cstate->minorversion)
3574 || !deny_valid(od->od_share_deny))
3575 return nfserr_inval;
3576
3577 nfs4_lock_state();
3578 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
3579 &od->od_stateid, &stp);
3580 if (status)
3581 goto out;
3582 status = nfserr_inval;
3583 if (!test_bit(od->od_share_access, &stp->st_access_bmap)) {
3584 		dprintk("NFSD: access not a subset of current bitmap: 0x%lx, input access=%08x\n",
3585 stp->st_access_bmap, od->od_share_access);
3586 goto out;
3587 }
3588 if (!test_bit(od->od_share_deny, &stp->st_deny_bmap)) {
3589 		dprintk("NFSD: deny not a subset of current bitmap: 0x%lx, input deny=%08x\n",
3590 stp->st_deny_bmap, od->od_share_deny);
3591 goto out;
3592 }
3593 nfs4_file_downgrade(stp, od->od_share_access);
3594
3595 reset_union_bmap_deny(od->od_share_deny, &stp->st_deny_bmap);
3596
3597 update_stateid(&stp->st_stid.sc_stateid);
3598 memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3599 status = nfs_ok;
3600 out:
3601 if (!cstate->replay_owner)
3602 nfs4_unlock_state();
3603 return status;
3604 }
3605
3606 /*
3607 * nfs4_unlock_state() called after encode
3608 */
3609 __be32
3610 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3611 struct nfsd4_close *close)
3612 {
3613 __be32 status;
3614 struct nfs4_openowner *oo;
3615 struct nfs4_ol_stateid *stp;
3616
3617 dprintk("NFSD: nfsd4_close on file %.*s\n",
3618 (int)cstate->current_fh.fh_dentry->d_name.len,
3619 cstate->current_fh.fh_dentry->d_name.name);
3620
3621 nfs4_lock_state();
3622 /* check close_lru for replay */
3623 status = nfs4_preprocess_confirmed_seqid_op(cstate, close->cl_seqid,
3624 &close->cl_stateid, &stp);
3625 if (stp == NULL && status == nfserr_expired) {
3626 /*
3627 		 * We must also make sure this isn't just the result of
3628 * a replayed close:
3629 */
3630 oo = search_close_lru(close->cl_stateid.si_stateownerid);
3631 /* It's not stale; let's assume it's expired: */
3632 if (oo == NULL)
3633 goto out;
3634 cstate->replay_owner = &oo->oo_owner;
3635 status = nfsd4_check_seqid(cstate, &oo->oo_owner, close->cl_seqid);
3636 if (status)
3637 goto out;
3638 status = nfserr_bad_seqid;
3639 }
3640 if (status)
3641 goto out;
3642 oo = openowner(stp->st_stateowner);
3643 status = nfs_ok;
3644 update_stateid(&stp->st_stid.sc_stateid);
3645 memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3646
3647 /* release_stateid() calls nfsd_close() if needed */
3648 	/* release_open_stateid() calls nfsd_close() if needed */
3649
3650 	/* place unused nfs4_openowners on the close_lru list to be
3651 * released by the laundromat service after the lease period
3652 * to enable us to handle CLOSE replay
3653 */
3654 if (list_empty(&oo->oo_owner.so_stateids))
3655 move_to_close_lru(oo);
3656 out:
3657 if (!cstate->replay_owner)
3658 nfs4_unlock_state();
3659 return status;
3660 }
3661
3662 __be32
3663 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3664 struct nfsd4_delegreturn *dr)
3665 {
3666 struct nfs4_delegation *dp;
3667 stateid_t *stateid = &dr->dr_stateid;
3668 struct inode *inode;
3669 __be32 status;
3670
3671 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
3672 return status;
3673 inode = cstate->current_fh.fh_dentry->d_inode;
3674
3675 nfs4_lock_state();
3676 status = nfserr_bad_stateid;
3677 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3678 goto out;
3679 status = nfserr_stale_stateid;
3680 if (STALE_STATEID(stateid))
3681 goto out;
3682 status = nfserr_bad_stateid;
3683 if (!is_delegation_stateid(stateid))
3684 goto out;
3685 status = nfserr_expired;
3686 dp = find_deleg_stateid(stateid);
3687 if (!dp)
3688 goto out;
3689 status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
3690 if (status)
3691 goto out;
3692 renew_client(dp->dl_client);
3693
3694 unhash_delegation(dp);
3695 out:
3696 nfs4_unlock_state();
3697
3698 return status;
3699 }
3700
3701
3702 /*
3703 * Lock owner state (byte-range locks)
3704 */
3705 #define LOFF_OVERFLOW(start, len) ((u64)(len) > ~(u64)(start))
3706 #define LOCK_HASH_BITS 8
3707 #define LOCK_HASH_SIZE (1 << LOCK_HASH_BITS)
3708 #define LOCK_HASH_MASK (LOCK_HASH_SIZE - 1)
3709
3710 static inline u64
3711 end_offset(u64 start, u64 len)
3712 {
3713 u64 end;
3714
3715 end = start + len;
3716 	return end >= start ? end : NFS4_MAX_UINT64;
3717 }
3718
3719 /* last octet in a range */
3720 static inline u64
3721 last_byte_offset(u64 start, u64 len)
3722 {
3723 u64 end;
3724
3725 BUG_ON(!len);
3726 end = start + len;
3727 	return end > start ? end - 1 : NFS4_MAX_UINT64;
3728 }
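/*
 * Worked example for the three range helpers (values hypothetical):
 * with start == 0xfffffffffffffff0 and len == 0x10, start + len wraps
 * to 0, so end_offset() and last_byte_offset() both clamp the result to
 * NFS4_MAX_UINT64.  LOFF_OVERFLOW() catches the same case up front:
 * ~start == 0xf and len == 0x10 > 0xf, i.e. the end of the range is not
 * representable in a u64.
 */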
3729
3730 static unsigned int lockownerid_hashval(u32 id)
3731 {
3732 return id & LOCK_HASH_MASK;
3733 }
3734
3735 static inline unsigned int
3736 lock_ownerstr_hashval(struct inode *inode, u32 cl_id,
3737 struct xdr_netobj *ownername)
3738 {
3739 return (file_hashval(inode) + cl_id
3740 + opaque_hashval(ownername->data, ownername->len))
3741 & LOCK_HASH_MASK;
3742 }
3743
3744 static struct list_head lock_ownerid_hashtbl[LOCK_HASH_SIZE];
3745 static struct list_head lock_ownerstr_hashtbl[LOCK_HASH_SIZE];
3746
3747 /*
3748 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
3749 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
3750 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
3751 * locking, this prevents us from being completely protocol-compliant. The
3752 * real solution to this problem is to start using unsigned file offsets in
3753 * the VFS, but this is a very deep change!
3754 */
3755 static inline void
3756 nfs4_transform_lock_offset(struct file_lock *lock)
3757 {
3758 if (lock->fl_start < 0)
3759 lock->fl_start = OFFSET_MAX;
3760 if (lock->fl_end < 0)
3761 lock->fl_end = OFFSET_MAX;
3762 }
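/*
 * Example of the clamping above: a lock on the single byte at offset
 * 2^63 arrives with fl_start == fl_end == (loff_t)1 << 63, which is
 * negative as a signed offset; both fields are clamped to OFFSET_MAX,
 * so the VFS sees a lock to end-of-file instead of a negative range.
 */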
3763
3764 /* Hack!: For now, we're defining this just so we can use a pointer to it
3765 * as a unique cookie to identify our (NFSv4's) posix locks. */
3766 static const struct lock_manager_operations nfsd_posix_mng_ops = {
3767 };
3768
3769 static inline void
3770 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
3771 {
3772 struct nfs4_lockowner *lo;
3773
3774 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
3775 lo = (struct nfs4_lockowner *) fl->fl_owner;
3776 deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
3777 lo->lo_owner.so_owner.len, GFP_KERNEL);
3778 if (!deny->ld_owner.data)
3779 /* We just don't care that much */
3780 goto nevermind;
3781 deny->ld_owner.len = lo->lo_owner.so_owner.len;
3782 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
3783 } else {
3784 nevermind:
3785 deny->ld_owner.len = 0;
3786 deny->ld_owner.data = NULL;
3787 deny->ld_clientid.cl_boot = 0;
3788 deny->ld_clientid.cl_id = 0;
3789 }
3790 deny->ld_start = fl->fl_start;
3791 deny->ld_length = NFS4_MAX_UINT64;
3792 if (fl->fl_end != NFS4_MAX_UINT64)
3793 deny->ld_length = fl->fl_end - fl->fl_start + 1;
3794 deny->ld_type = NFS4_READ_LT;
3795 if (fl->fl_type != F_RDLCK)
3796 deny->ld_type = NFS4_WRITE_LT;
3797 }
3798
3799 static struct nfs4_lockowner *
3800 find_lockowner_str(struct inode *inode, clientid_t *clid,
3801 struct xdr_netobj *owner)
3802 {
3803 unsigned int hashval = lock_ownerstr_hashval(inode, clid->cl_id, owner);
3804 struct nfs4_stateowner *op;
3805
3806 list_for_each_entry(op, &lock_ownerstr_hashtbl[hashval], so_strhash) {
3807 if (same_owner_str(op, owner, clid))
3808 return lockowner(op);
3809 }
3810 return NULL;
3811 }
3812
3813 static void hash_lockowner(struct nfs4_lockowner *lo, unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp)
3814 {
3815 unsigned int idhashval;
3816
3817 idhashval = lockownerid_hashval(lo->lo_owner.so_id);
3818 list_add(&lo->lo_owner.so_idhash, &lock_ownerid_hashtbl[idhashval]);
3819 list_add(&lo->lo_owner.so_strhash, &lock_ownerstr_hashtbl[strhashval]);
3820 list_add(&lo->lo_perstateid, &open_stp->st_lockowners);
3821 }
3822
3823 /*
3824 * Alloc a lock owner structure.
3825 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
3826 * occurred.
3827 *
3828 * strhashval = lock_ownerstr_hashval
3829 */
3830
3831 static struct nfs4_lockowner *
3832 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp, struct nfsd4_lock *lock)
{
3833 struct nfs4_lockowner *lo;
3834
3835 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
3836 if (!lo)
3837 return NULL;
3838 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
3839 lo->lo_owner.so_is_open_owner = 0;
3840 /* For a new lockowner it is the openowner's seqid that gets incremented
3841 * at encode time, so bump the lock seqid by hand here: */
3842 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid + 1;
3843 hash_lockowner(lo, strhashval, clp, open_stp);
3844 return lo;
3845 }
3846
3847 static struct nfs4_ol_stateid *
3848 alloc_init_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp, struct nfs4_ol_stateid *open_stp)
3849 {
3850 struct nfs4_ol_stateid *stp;
3851
3852 stp = nfs4_alloc_stateid();
3853 if (stp == NULL)
3854 goto out;
3855 list_add(&stp->st_perfile, &fp->fi_stateids);
3856 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
3857 stp->st_stateowner = &lo->lo_owner;
3858 stp->st_stid.sc_type = NFS4_LOCK_STID;
3859 get_nfs4_file(fp);
3860 stp->st_file = fp;
3861 stp->st_stid.sc_stateid.si_boot = boot_time;
3862 stp->st_stid.sc_stateid.si_stateownerid = lo->lo_owner.so_id;
3863 stp->st_stid.sc_stateid.si_fileid = fp->fi_id;
3864 /* note will be incremented before first return to client: */
3865 stp->st_stid.sc_stateid.si_generation = 0;
3866 hash_stid(&stp->st_stid);
3867 stp->st_access_bmap = 0;
3868 stp->st_deny_bmap = open_stp->st_deny_bmap;
3869 stp->st_openstp = open_stp;
3870
3871 out:
3872 return stp;
3873 }
3874
3875 static int
3876 check_lock_length(u64 offset, u64 length)
3877 {
3878 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
3879 LOFF_OVERFLOW(offset, length)));
3880 }
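/*
 * Illustrative cases: a zero-length lock is rejected, as is one whose
 * end would wrap past 2^64 - 1; the special length NFS4_MAX_UINT64
 * ("lock to EOF") is accepted from any offset:
 *
 *	check_lock_length(0, 0)			-> rejected
 *	check_lock_length(~0ULL - 4, 10)	-> rejected (overflow)
 *	check_lock_length(100, NFS4_MAX_UINT64)	-> accepted
 */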
3881
3882 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
3883 {
3884 struct nfs4_file *fp = lock_stp->st_file;
3885 int oflag = nfs4_access_to_omode(access);
3886
3887 if (test_bit(access, &lock_stp->st_access_bmap))
3888 return;
3889 nfs4_file_get_access(fp, oflag);
3890 __set_bit(access, &lock_stp->st_access_bmap);
3891 }
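/*
 * (Illustrative note: each access mode is reference-counted at most
 * once per lock stateid -- a second read lock under the same stateid
 * finds NFS4_SHARE_ACCESS_READ already set and takes no extra file
 * reference, while a later write lock adds only the write mode.)
 */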
3892
3893 /*
3894 * LOCK operation
3895 */
3896 __be32
3897 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3898 struct nfsd4_lock *lock)
3899 {
3900 struct nfs4_openowner *open_sop = NULL;
3901 struct nfs4_lockowner *lock_sop = NULL;
3902 struct nfs4_ol_stateid *lock_stp;
3903 struct nfs4_file *fp;
3904 struct file *filp = NULL;
3905 struct file_lock file_lock;
3906 struct file_lock conflock;
3907 __be32 status = 0;
3908 unsigned int strhashval;
3909 int lkflg;
3910 int err;
3911
3912 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
3913 (long long) lock->lk_offset,
3914 (long long) lock->lk_length);
3915
3916 if (check_lock_length(lock->lk_offset, lock->lk_length))
3917 return nfserr_inval;
3918
3919 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, NFSD_MAY_LOCK);
3920 if (status) {
3921 dprintk("NFSD: nfsd4_lock: permission denied!\n");
3922 return status;
3923 }
3924
3925 nfs4_lock_state();
3926
3927 if (lock->lk_is_new) {
3928 /*
3929 * Client indicates that this is a new lockowner.
3930 * Use open owner and open stateid to create lock owner and
3931 * lock stateid.
3932 */
3933 struct nfs4_ol_stateid *open_stp = NULL;
3934
3935 status = nfserr_stale_clientid;
3936 if (!nfsd4_has_session(cstate) &&
3937 STALE_CLIENTID(&lock->lk_new_clientid))
3938 goto out;
3939
3940 /* validate and update open stateid and open seqid */
3941 status = nfs4_preprocess_confirmed_seqid_op(cstate,
3942 lock->lk_new_open_seqid,
3943 &lock->lk_new_open_stateid,
3944 &open_stp);
3945 if (status)
3946 goto out;
3947 open_sop = openowner(open_stp->st_stateowner);
3948 status = nfserr_bad_stateid;
3949 if (!nfsd4_has_session(cstate) &&
3950 !same_clid(&open_sop->oo_owner.so_client->cl_clientid,
3951 &lock->v.new.clientid))
3952 goto out;
3953 /* create lockowner and lock stateid */
3954 fp = open_stp->st_file;
3955 strhashval = lock_ownerstr_hashval(fp->fi_inode,
3956 open_sop->oo_owner.so_client->cl_clientid.cl_id,
3957 &lock->v.new.owner);
3958 /* XXX: Do we need to check for duplicate stateowners on
3959 * the same file, or should they just be allowed (and
3960 * create new stateids)? */
3961 status = nfserr_jukebox;
3962 lock_sop = alloc_init_lock_stateowner(strhashval,
3963 open_sop->oo_owner.so_client, open_stp, lock);
3964 if (lock_sop == NULL)
3965 goto out;
3966 lock_stp = alloc_init_lock_stateid(lock_sop, fp, open_stp);
3967 if (lock_stp == NULL)
3968 goto out;
3969 } else {
3970 /* lock (lock owner + lock stateid) already exists */
3971 status = nfs4_preprocess_seqid_op(cstate,
3972 lock->lk_old_lock_seqid,
3973 &lock->lk_old_lock_stateid,
3974 NFS4_LOCK_STID, &lock_stp);
3975 if (status)
3976 goto out;
3977 lock_sop = lockowner(lock_stp->st_stateowner);
3978 fp = lock_stp->st_file;
3979 }
3980 /* lock_sop and lock_stp have been created or found */
3981
3982 lkflg = setlkflg(lock->lk_type);
3983 status = nfs4_check_openmode(lock_stp, lkflg);
3984 if (status)
3985 goto out;
3986
3987 status = nfserr_grace;
3988 if (locks_in_grace() && !lock->lk_reclaim)
3989 goto out;
3990 status = nfserr_no_grace;
3991 if (!locks_in_grace() && lock->lk_reclaim)
3992 goto out;
3993
3994 locks_init_lock(&file_lock);
3995 switch (lock->lk_type) {
3996 case NFS4_READ_LT:
3997 case NFS4_READW_LT:
3998 filp = find_readable_file(lock_stp->st_file);
3999 if (filp)
4000 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
4001 file_lock.fl_type = F_RDLCK;
4002 break;
4003 case NFS4_WRITE_LT:
4004 case NFS4_WRITEW_LT:
4005 filp = find_writeable_file(lock_stp->st_file);
4006 if (filp)
4007 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
4008 file_lock.fl_type = F_WRLCK;
4009 break;
4010 default:
4011 status = nfserr_inval;
4012 goto out;
4013 }
4014 if (!filp) {
4015 status = nfserr_openmode;
4016 goto out;
4017 }
4018 file_lock.fl_owner = (fl_owner_t)lock_sop;
4019 file_lock.fl_pid = current->tgid;
4020 file_lock.fl_file = filp;
4021 file_lock.fl_flags = FL_POSIX;
4022 file_lock.fl_lmops = &nfsd_posix_mng_ops;
4023
4024 file_lock.fl_start = lock->lk_offset;
4025 file_lock.fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
4026 nfs4_transform_lock_offset(&file_lock);
4027
4028 /*
4029 * Try to lock the file in the VFS.
4030 * Note: locks.c uses lock_flocks() to protect the inode's lock list.
4031 */
4032
4033 err = vfs_lock_file(filp, F_SETLK, &file_lock, &conflock);
4034 switch (-err) {
4035 case 0: /* success! */
4036 update_stateid(&lock_stp->st_stid.sc_stateid);
4037 memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid,
4038 sizeof(stateid_t));
4039 status = 0;
4040 break;
4041 case EAGAIN: /* conflock holds conflicting lock */
4042 status = nfserr_denied;
4043 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
4044 nfs4_set_lock_denied(&conflock, &lock->lk_denied);
4045 break;
4046 case EDEADLK:
4047 status = nfserr_deadlock;
4048 break;
4049 default:
4050 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n", err);
4051 status = nfserrno(err);
4052 break;
4053 }
4054 out:
4055 if (status && lock->lk_is_new && lock_sop)
4056 release_lockowner(lock_sop);
4057 if (!cstate->replay_owner)
4058 nfs4_unlock_state();
4059 return status;
4060 }
4061
4062 /*
4063 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
4064 * so we do a temporary open here just to get an open file to pass to
4065 * vfs_test_lock. (Arguably perhaps test_lock should be done with an
4066 * inode operation.)
4067 */
4068 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
4069 {
4070 struct file *file;
4071 __be32 err;
4072
4073 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
4074 if (err)
4075 return err;
4076 err = nfserrno(vfs_test_lock(file, lock));
4077 nfsd_close(file);
4078 return err;
4079 }
4080
4081 /*
4082 * LOCKT operation
4083 */
4084 __be32
4085 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4086 struct nfsd4_lockt *lockt)
4087 {
4088 struct inode *inode;
4089 struct file_lock file_lock;
4090 struct nfs4_lockowner *lo;
4092 __be32 status;
4093
4094 if (locks_in_grace())
4095 return nfserr_grace;
4096
4097 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
4098 return nfserr_inval;
4099
4100 nfs4_lock_state();
4101
4102 status = nfserr_stale_clientid;
4103 if (!nfsd4_has_session(cstate) && STALE_CLIENTID(&lockt->lt_clientid))
4104 goto out;
4105
4106 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
4107 goto out;
4108
4109 inode = cstate->current_fh.fh_dentry->d_inode;
4110 locks_init_lock(&file_lock);
4111 switch (lockt->lt_type) {
4112 case NFS4_READ_LT:
4113 case NFS4_READW_LT:
4114 file_lock.fl_type = F_RDLCK;
4115 break;
4116 case NFS4_WRITE_LT:
4117 case NFS4_WRITEW_LT:
4118 file_lock.fl_type = F_WRLCK;
4119 break;
4120 default:
4121 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
4122 status = nfserr_inval;
4123 goto out;
4124 }
4125
4126 lo = find_lockowner_str(inode, &lockt->lt_clientid, &lockt->lt_owner);
4127 if (lo)
4128 file_lock.fl_owner = (fl_owner_t)lo;
4129 file_lock.fl_pid = current->tgid;
4130 file_lock.fl_flags = FL_POSIX;
4131
4132 file_lock.fl_start = lockt->lt_offset;
4133 file_lock.fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
4134
4135 nfs4_transform_lock_offset(&file_lock);
4136
4137 status = nfsd_test_lock(rqstp, &cstate->current_fh, &file_lock);
4138 if (status)
4139 goto out;
4143 if (file_lock.fl_type != F_UNLCK) {
4144 status = nfserr_denied;
4145 nfs4_set_lock_denied(&file_lock, &lockt->lt_denied);
4146 }
4147 out:
4148 nfs4_unlock_state();
4149 return status;
4150 }
4151
4152 __be32
4153 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4154 struct nfsd4_locku *locku)
4155 {
4156 struct nfs4_ol_stateid *stp;
4157 struct file *filp = NULL;
4158 struct file_lock file_lock;
4159 __be32 status;
4160 int err;
4161
4162 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
4163 (long long) locku->lu_offset,
4164 (long long) locku->lu_length);
4165
4166 if (check_lock_length(locku->lu_offset, locku->lu_length))
4167 return nfserr_inval;
4168
4169 nfs4_lock_state();
4170
4171 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
4172 &locku->lu_stateid, NFS4_LOCK_STID, &stp);
4173 if (status)
4174 goto out;
4175 filp = find_any_file(stp->st_file);
4176 if (!filp) {
4177 status = nfserr_lock_range;
4178 goto out;
4179 }
4181 locks_init_lock(&file_lock);
4182 file_lock.fl_type = F_UNLCK;
4183 file_lock.fl_owner = (fl_owner_t)lockowner(stp->st_stateowner);
4184 file_lock.fl_pid = current->tgid;
4185 file_lock.fl_file = filp;
4186 file_lock.fl_flags = FL_POSIX;
4187 file_lock.fl_lmops = &nfsd_posix_mng_ops;
4188 file_lock.fl_start = locku->lu_offset;
4189
4190 file_lock.fl_end = last_byte_offset(locku->lu_offset, locku->lu_length);
4191 nfs4_transform_lock_offset(&file_lock);
4192
4193 /*
4194 * Try to unlock the file in the VFS.
4195 */
4196 err = vfs_lock_file(filp, F_SETLK, &file_lock, NULL);
4197 if (err) {
4198 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
4199 goto out_nfserr;
4200 }
4201 /*
4202 * OK, unlock succeeded; the only thing left to do is update the stateid.
4203 */
4204 update_stateid(&stp->st_stid.sc_stateid);
4205 memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4206
4207 out:
4208 nfs4_unlock_state();
4209 return status;
4210
4211 out_nfserr:
4212 status = nfserrno(err);
4213 goto out;
4214 }
4215
4216 /*
4217 * returns
4218 * 1: locks held by lockowner
4219 * 0: no locks held by lockowner
4220 */
4221 static int
4222 check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner)
4223 {
4224 struct file_lock **flpp;
4225 struct inode *inode = filp->fi_inode;
4226 int status = 0;
4227
4228 lock_flocks();
4229 for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
4230 if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
4231 status = 1;
4232 goto out;
4233 }
4234 }
4235 out:
4236 unlock_flocks();
4237 return status;
4238 }
4239
4240 __be32
4241 nfsd4_release_lockowner(struct svc_rqst *rqstp,
4242 struct nfsd4_compound_state *cstate,
4243 struct nfsd4_release_lockowner *rlockowner)
4244 {
4245 clientid_t *clid = &rlockowner->rl_clientid;
4246 struct nfs4_stateowner *sop;
4247 struct nfs4_lockowner *lo;
4248 struct nfs4_ol_stateid *stp;
4249 struct xdr_netobj *owner = &rlockowner->rl_owner;
4250 struct list_head matches;
4251 int i;
4252 __be32 status;
4253
4254 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
4255 clid->cl_boot, clid->cl_id);
4256
4257 /* XXX check for lease expiration */
4258
4259 status = nfserr_stale_clientid;
4260 if (STALE_CLIENTID(clid))
4261 return status;
4262
4263 nfs4_lock_state();
4264
4265 status = nfserr_locks_held;
4266 /* XXX: we're doing a linear search through all the lockowners.
4267 * Yipes! For now we'll just hope clients aren't really using
4268 * release_lockowner much, but eventually we have to fix these
4269 * data structures. */
4270 INIT_LIST_HEAD(&matches);
4271 for (i = 0; i < LOCK_HASH_SIZE; i++) {
4272 list_for_each_entry(sop, &lock_ownerid_hashtbl[i], so_idhash) {
4273 if (!same_owner_str(sop, owner, clid))
4274 continue;
4275 list_for_each_entry(stp, &sop->so_stateids,
4276 st_perstateowner) {
4277 lo = lockowner(sop);
4278 if (check_for_locks(stp->st_file, lo))
4279 goto out;
4280 list_add(&lo->lo_list, &matches);
4281 }
4282 }
4283 }
4284 /* Clients probably won't expect us to return with some (but not all)
4285 * of the lockowner state released; so don't release any until all
4286 * have been checked. */
4287 status = nfs_ok;
4288 while (!list_empty(&matches)) {
4289 lo = list_entry(matches.next, struct nfs4_lockowner,
4290 lo_list);
4291 /* unhash_stateowner deletes so_perclient only
4292 * for openowners. */
4293 list_del(&lo->lo_list);
4294 release_lockowner(lo);
4295 }
4296 out:
4297 nfs4_unlock_state();
4298 return status;
4299 }
4300
4301 static inline struct nfs4_client_reclaim *
4302 alloc_reclaim(void)
4303 {
4304 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
4305 }
4306
4307 int
4308 nfs4_has_reclaimed_state(const char *name, bool use_exchange_id)
4309 {
4310 unsigned int strhashval = clientstr_hashval(name);
4311 struct nfs4_client *clp;
4312
4313 clp = find_confirmed_client_by_str(name, strhashval);
4314 return clp ? 1 : 0;
4315 }
4316
4317 /*
4318 * On failure all reclaim bets are off: the client ends up with nfserr_no_grace.
4319 */
4320 int
4321 nfs4_client_to_reclaim(const char *name)
4322 {
4323 unsigned int strhashval;
4324 struct nfs4_client_reclaim *crp = NULL;
4325
4326 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
4327 crp = alloc_reclaim();
4328 if (!crp)
4329 return 0;
4330 strhashval = clientstr_hashval(name);
4331 INIT_LIST_HEAD(&crp->cr_strhash);
4332 list_add(&crp->cr_strhash, &reclaim_str_hashtbl[strhashval]);
4333 memcpy(crp->cr_recdir, name, HEXDIR_LEN);
4334 reclaim_str_hashtbl_size++;
4335 return 1;
4336 }
4337
4338 static void
4339 nfs4_release_reclaim(void)
4340 {
4341 struct nfs4_client_reclaim *crp = NULL;
4342 int i;
4343
4344 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4345 while (!list_empty(&reclaim_str_hashtbl[i])) {
4346 crp = list_entry(reclaim_str_hashtbl[i].next,
4347 struct nfs4_client_reclaim, cr_strhash);
4348 list_del(&crp->cr_strhash);
4349 kfree(crp);
4350 reclaim_str_hashtbl_size--;
4351 }
4352 }
4353 BUG_ON(reclaim_str_hashtbl_size);
4354 }
4355
4356 /* Called from OPEN, CLAIM_PREVIOUS with a new clientid. */
4358 static struct nfs4_client_reclaim *
4359 nfs4_find_reclaim_client(clientid_t *clid)
4360 {
4361 unsigned int strhashval;
4362 struct nfs4_client *clp;
4363 struct nfs4_client_reclaim *crp = NULL;
4364
4365
4366 /* find clientid in conf_id_hashtbl */
4367 clp = find_confirmed_client(clid);
4368 if (clp == NULL)
4369 return NULL;
4370
4371 dprintk("NFSD: nfs4_find_reclaim_client for %.*s with recdir %s\n",
4372 clp->cl_name.len, clp->cl_name.data,
4373 clp->cl_recdir);
4374
4375 /* find clp->cl_name in reclaim_str_hashtbl */
4376 strhashval = clientstr_hashval(clp->cl_recdir);
4377 list_for_each_entry(crp, &reclaim_str_hashtbl[strhashval], cr_strhash) {
4378 if (same_name(crp->cr_recdir, clp->cl_recdir)) {
4379 return crp;
4380 }
4381 }
4382 return NULL;
4383 }
4384
4385 /*
4386 * Called from OPEN. Look for clientid in reclaim list.
4387 */
4388 __be32
4389 nfs4_check_open_reclaim(clientid_t *clid)
4390 {
4391 return nfs4_find_reclaim_client(clid) ? nfs_ok : nfserr_reclaim_bad;
4392 }
4393
4394 /* initialization to perform at module load time: */
4395
4396 int
4397 nfs4_state_init(void)
4398 {
4399 int i, status;
4400
4401 status = nfsd4_init_slabs();
4402 if (status)
4403 return status;
4404 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4405 INIT_LIST_HEAD(&conf_id_hashtbl[i]);
4406 INIT_LIST_HEAD(&conf_str_hashtbl[i]);
4407 INIT_LIST_HEAD(&unconf_str_hashtbl[i]);
4408 INIT_LIST_HEAD(&unconf_id_hashtbl[i]);
4409 INIT_LIST_HEAD(&reclaim_str_hashtbl[i]);
4410 }
4411 for (i = 0; i < SESSION_HASH_SIZE; i++)
4412 INIT_LIST_HEAD(&sessionid_hashtbl[i]);
4413 for (i = 0; i < FILE_HASH_SIZE; i++) {
4414 INIT_LIST_HEAD(&file_hashtbl[i]);
4415 }
4416 for (i = 0; i < OPEN_OWNER_HASH_SIZE; i++) {
4417 INIT_LIST_HEAD(&open_ownerstr_hashtbl[i]);
4418 INIT_LIST_HEAD(&open_ownerid_hashtbl[i]);
4419 }
4420 for (i = 0; i < STATEID_HASH_SIZE; i++)
4421 INIT_LIST_HEAD(&stateid_hashtbl[i]);
4422 for (i = 0; i < LOCK_HASH_SIZE; i++) {
4423 INIT_LIST_HEAD(&lock_ownerid_hashtbl[i]);
4424 INIT_LIST_HEAD(&lock_ownerstr_hashtbl[i]);
4425 }
4426 memset(&onestateid, ~0, sizeof(stateid_t));
4427 INIT_LIST_HEAD(&close_lru);
4428 INIT_LIST_HEAD(&client_lru);
4429 INIT_LIST_HEAD(&del_recall_lru);
4430 reclaim_str_hashtbl_size = 0;
4431 return 0;
4432 }
4433
4434 static void
4435 nfsd4_load_reboot_recovery_data(void)
4436 {
4437 int status;
4438
4439 nfs4_lock_state();
4440 nfsd4_init_recdir();
4441 status = nfsd4_recdir_load();
4442 nfs4_unlock_state();
4443 if (status)
4444 printk("NFSD: Failure reading reboot recovery data\n");
4445 }
4446
4447 /*
4448 * Since the lifetime of a delegation isn't limited to that of an open, a
4449 * client may quite reasonably hang on to a delegation as long as it has
4450 * the inode cached. This becomes an obvious problem the first time a
4451 * client's inode cache approaches the size of the server's total memory.
4452 *
4453 * For now we avoid this problem by imposing a hard limit on the number
4454 * of delegations, which varies according to the server's memory size.
4455 */
4456 static void
4457 set_max_delegations(void)
4458 {
4459 /*
4460 * Allow at most 4 delegations per megabyte of RAM. Quick
4461 * estimates suggest that in the worst case (where every delegation
4462 * is for a different inode), a delegation could take about 1.5K,
4463 * giving a worst case usage of about 6% of memory.
4464 */
4465 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
4466 }
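/*
 * Worked example (illustrative): with 4K pages PAGE_SHIFT is 12, so
 * the shift above is 20 - 2 - 12 = 6, i.e. one delegation per 64 pages
 * (256KB). A machine with 1GB of free buffer pages (2^18 pages) would
 * then allow 2^18 >> 6 = 4096 delegations -- exactly 4 per megabyte.
 */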
4467
4468 /* initialization to perform when the nfsd service is started: */
4469
4470 static int
4471 __nfs4_state_start(void)
4472 {
4473 int ret;
4474
4475 boot_time = get_seconds();
4476 locks_start_grace(&nfsd4_manager);
4477 printk(KERN_INFO "NFSD: starting %ld-second grace period\n",
4478 nfsd4_grace);
4479 ret = set_callback_cred();
4480 if (ret)
4481 return -ENOMEM;
4482 laundry_wq = create_singlethread_workqueue("nfsd4");
4483 if (laundry_wq == NULL)
4484 return -ENOMEM;
4485 ret = nfsd4_create_callback_queue();
4486 if (ret)
4487 goto out_free_laundry;
4488 queue_delayed_work(laundry_wq, &laundromat_work, nfsd4_grace * HZ);
4489 set_max_delegations();
4490 return 0;
4491 out_free_laundry:
4492 destroy_workqueue(laundry_wq);
4493 return ret;
4494 }
4495
4496 int
4497 nfs4_state_start(void)
4498 {
4499 nfsd4_load_reboot_recovery_data();
4500 return __nfs4_state_start();
4501 }
4502
4503 static void
4504 __nfs4_state_shutdown(void)
4505 {
4506 int i;
4507 struct nfs4_client *clp = NULL;
4508 struct nfs4_delegation *dp = NULL;
4509 struct list_head *pos, *next, reaplist;
4510
4511 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4512 while (!list_empty(&conf_id_hashtbl[i])) {
4513 clp = list_entry(conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
4514 expire_client(clp);
4515 }
4516 while (!list_empty(&unconf_str_hashtbl[i])) {
4517 clp = list_entry(unconf_str_hashtbl[i].next, struct nfs4_client, cl_strhash);
4518 expire_client(clp);
4519 }
4520 }
4521 INIT_LIST_HEAD(&reaplist);
4522 spin_lock(&recall_lock);
4523 list_for_each_safe(pos, next, &del_recall_lru) {
4524 dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
4525 list_move(&dp->dl_recall_lru, &reaplist);
4526 }
4527 spin_unlock(&recall_lock);
4528 list_for_each_safe(pos, next, &reaplist) {
4529 dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
4530 list_del_init(&dp->dl_recall_lru);
4531 unhash_delegation(dp);
4532 }
4533
4534 nfsd4_shutdown_recdir();
4535 }
4536
4537 void
4538 nfs4_state_shutdown(void)
4539 {
4540 cancel_delayed_work_sync(&laundromat_work);
4541 destroy_workqueue(laundry_wq);
4542 locks_end_grace(&nfsd4_manager);
4543 nfs4_lock_state();
4544 nfs4_release_reclaim();
4545 __nfs4_state_shutdown();
4546 nfs4_unlock_state();
4547 nfsd4_destroy_callback_queue();
4548 }