/*
 * linux/fs/nfsd/nfssvc.c
 *
 * Central processing for nfsd.
 *
 * Authors: Olaf Kirch (okir@monad.swb.de)
 *
 * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/errno.h>
#include <linux/nfs.h>
#include <linux/in.h>
#include <linux/uio.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/freezer.h>
#include <linux/fs_struct.h>
#include <linux/kthread.h>
#include <linux/swap.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/cache.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/stats.h>
#include <linux/nfsd/cache.h>
#include <linux/nfsd/syscall.h>
#include <linux/lockd/bind.h>
#include <linux/nfsacl.h>

#define NFSDDBG_FACILITY NFSDDBG_SVC

extern struct svc_program nfsd_program;
static int nfsd(void *vrqstp);
struct timeval nfssvc_boot;

/*
 * nfsd_mutex protects nfsd_serv -- both the pointer itself and the members
 * of the svc_serv struct. In particular it protects ->sv_nrthreads, and to
 * some extent ->sv_temp_socks and ->sv_permsocks. It also protects
 * nfsdstats.th_cnt.
 *
 * If (outside the lock) nfsd_serv is non-NULL, then it must point to a
 * properly initialised 'struct svc_serv' with ->sv_nrthreads > 0. That number
 * of nfsd threads must exist and each must be listed in ->sp_all_threads of
 * some entry of ->sv_pools[].
 *
 * Transitions of the thread count between zero and non-zero are of particular
 * interest since the svc_serv needs to be created and initialized at that
 * point, or freed.
 *
 * Finally, the nfsd_mutex also protects some of the global variables that are
 * accessed when nfsd starts and that are settable via the write_* routines in
 * nfsctl.c. In particular:
 *
 *      user_recovery_dirname
 *      user_lease_time
 *      nfsd_versions
 */
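/*
 * Editorial sketch (not from the original file) of the expected usage
 * pattern for starting threads under nfsd_mutex:
 *
 *      mutex_lock(&nfsd_mutex);
 *      error = nfsd_create_serv();
 *      if (!error)
 *              error = svc_set_num_threads(nfsd_serv, NULL, nrservs);
 *      mutex_unlock(&nfsd_mutex);
 *
 * nfsd_svc() below follows this shape, with socket setup in between.
 */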
DEFINE_MUTEX(nfsd_mutex);
struct svc_serv *nfsd_serv;

/*
 * nfsd_drc_lock protects nfsd_drc_max_mem and nfsd_drc_mem_used.
 * nfsd_drc_max_mem limits the total amount of memory available for
 * version 4.1 DRC caches.
 * nfsd_drc_mem_used tracks the current version 4.1 DRC memory usage.
 */
spinlock_t nfsd_drc_lock;
unsigned int nfsd_drc_max_mem;
unsigned int nfsd_drc_mem_used;

#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static struct svc_stat nfsd_acl_svcstats;
static struct svc_version *nfsd_acl_version[] = {
        [2] = &nfsd_acl_version2,
        [3] = &nfsd_acl_version3,
};

#define NFSD_ACL_MINVERS 2
#define NFSD_ACL_NRVERS ARRAY_SIZE(nfsd_acl_version)
static struct svc_version *nfsd_acl_versions[NFSD_ACL_NRVERS];

static struct svc_program nfsd_acl_program = {
        .pg_prog = NFS_ACL_PROGRAM,
        .pg_nvers = NFSD_ACL_NRVERS,
        .pg_vers = nfsd_acl_versions,
        .pg_name = "nfsacl",
        .pg_class = "nfsd",
        .pg_stats = &nfsd_acl_svcstats,
        .pg_authenticate = &svc_set_client,
};

static struct svc_stat nfsd_acl_svcstats = {
        .program = &nfsd_acl_program,
};
#endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */

static struct svc_version *nfsd_version[] = {
        [2] = &nfsd_version2,
#if defined(CONFIG_NFSD_V3)
        [3] = &nfsd_version3,
#endif
#if defined(CONFIG_NFSD_V4)
        [4] = &nfsd_version4,
#endif
};

#define NFSD_MINVERS 2
#define NFSD_NRVERS ARRAY_SIZE(nfsd_version)
static struct svc_version *nfsd_versions[NFSD_NRVERS];

struct svc_program nfsd_program = {
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
        .pg_next = &nfsd_acl_program,
#endif
        .pg_prog = NFS_PROGRAM,                 /* program number */
        .pg_nvers = NFSD_NRVERS,                /* nr of entries in nfsd_version */
        .pg_vers = nfsd_versions,               /* version table */
        .pg_name = "nfsd",                      /* program name */
        .pg_class = "nfsd",                     /* authentication class */
        .pg_stats = &nfsd_svcstats,             /* rpc statistics */
        .pg_authenticate = &svc_set_client,     /* export authentication */

};

u32 nfsd_supported_minorversion;

int nfsd_vers(int vers, enum vers_op change)
{
        if (vers < NFSD_MINVERS || vers >= NFSD_NRVERS)
                return -1;
        switch(change) {
        case NFSD_SET:
                nfsd_versions[vers] = nfsd_version[vers];
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
                if (vers < NFSD_ACL_NRVERS)
                        nfsd_acl_versions[vers] = nfsd_acl_version[vers];
#endif
                break;
        case NFSD_CLEAR:
                nfsd_versions[vers] = NULL;
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
                if (vers < NFSD_ACL_NRVERS)
                        nfsd_acl_versions[vers] = NULL;
#endif
                break;
        case NFSD_TEST:
                return nfsd_versions[vers] != NULL;
        case NFSD_AVAIL:
                return nfsd_version[vers] != NULL;
        }
        return 0;
}

int nfsd_minorversion(u32 minorversion, enum vers_op change)
{
        if (minorversion > NFSD_SUPPORTED_MINOR_VERSION)
                return -1;
        switch(change) {
        case NFSD_SET:
                nfsd_supported_minorversion = minorversion;
                break;
        case NFSD_CLEAR:
                if (minorversion == 0)
                        return -1;
                nfsd_supported_minorversion = minorversion - 1;
                break;
        case NFSD_TEST:
                return minorversion <= nfsd_supported_minorversion;
        case NFSD_AVAIL:
                return minorversion <= NFSD_SUPPORTED_MINOR_VERSION;
        }
        return 0;
}

/*
 * Maximum number of nfsd processes
 */
#define NFSD_MAXSERVS 8192

int nfsd_nrthreads(void)
{
        int rv = 0;
        mutex_lock(&nfsd_mutex);
        if (nfsd_serv)
                rv = nfsd_serv->sv_nrthreads;
        mutex_unlock(&nfsd_mutex);
        return rv;
}

static void nfsd_last_thread(struct svc_serv *serv)
{
        /* When last nfsd thread exits we need to do some clean-up */
        struct svc_xprt *xprt;
        list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list)
                lockd_down();
        nfsd_serv = NULL;
        nfsd_racache_shutdown();
        nfs4_state_shutdown();

        printk(KERN_WARNING "nfsd: last server has exited, flushing export "
                            "cache\n");
        nfsd_export_flush();
}

void nfsd_reset_versions(void)
{
        int found_one = 0;
        int i;

        for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) {
                if (nfsd_program.pg_vers[i])
                        found_one = 1;
        }

        if (!found_one) {
                for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++)
                        nfsd_program.pg_vers[i] = nfsd_version[i];
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
                for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++)
                        nfsd_acl_program.pg_vers[i] =
                                nfsd_acl_version[i];
#endif
        }
}

/*
 * Each session guarantees a negotiated per-slot memory cache for replies,
 * which in turn consumes memory beyond what a v2/v3/v4.0 server needs. A
 * dedicated NFSv4.1 server might want to use more memory for a DRC than a
 * machine with multiple services.
 *
 * Impose a hard limit on the amount of memory for the DRC which varies
 * according to the machine's free pages. This is of course only a default.
 *
 * For now this is a #defined shift which could be under admin control
 * in the future.
 */
static void set_max_drc(void)
{
        #define NFSD_DRC_SIZE_SHIFT 10
        nfsd_drc_max_mem = (nr_free_buffer_pages()
                                >> NFSD_DRC_SIZE_SHIFT) * PAGE_SIZE;
        nfsd_drc_mem_used = 0;
        spin_lock_init(&nfsd_drc_lock);
        dprintk("%s nfsd_drc_max_mem %u \n", __func__, nfsd_drc_max_mem);
}
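/*
 * Editorial note (not in the original source): with NFSD_DRC_SIZE_SHIFT
 * equal to 10, the cap above works out to roughly 1/1024 of the currently
 * free buffer pages, expressed in bytes. For example, with about 1 GB of
 * free low memory and 4 KB pages, nfsd_drc_max_mem comes out near 1 MB.
 */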

int nfsd_create_serv(void)
{
        int err = 0;

        WARN_ON(!mutex_is_locked(&nfsd_mutex));
        if (nfsd_serv) {
                svc_get(nfsd_serv);
                return 0;
        }
        if (nfsd_max_blksize == 0) {
                /* choose a suitable default */
                struct sysinfo i;
                si_meminfo(&i);
                /*
                 * Aim for 1/4096 of memory per thread. This gives 1MB on
                 * 4Gig machines but only uses 32K on 128M machines.
                 * Bottom out at 8K on 32M and smaller.
                 * Of course, this is only a default.
                 */
                nfsd_max_blksize = NFSSVC_MAXBLKSIZE;
                i.totalram <<= PAGE_SHIFT - 12;
                while (nfsd_max_blksize > i.totalram &&
                       nfsd_max_blksize >= 8*1024*2)
                        nfsd_max_blksize /= 2;
        }
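        /*
         * Editorial note: si_meminfo() reports i.totalram in pages, and
         * shifting it left by (PAGE_SHIFT - 12) re-expresses it in 4K
         * units, so the loop condition above is effectively
         * "block size in bytes > total RAM / 4096".
         */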

        nfsd_serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize,
                                      nfsd_last_thread, nfsd, THIS_MODULE);
        if (nfsd_serv == NULL)
                err = -ENOMEM;
        else
                set_max_drc();

        do_gettimeofday(&nfssvc_boot);          /* record boot time */
        return err;
}

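/*
 * Editorial note: nfsd_init_socks() below calls lockd_up() once for each
 * permanent listener it creates, and nfsd_last_thread() above balances
 * that with one lockd_down() per entry on ->sv_permsocks when the last
 * nfsd thread exits.
 */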
static int nfsd_init_socks(int port)
{
        int error;
        if (!list_empty(&nfsd_serv->sv_permsocks))
                return 0;

        error = svc_create_xprt(nfsd_serv, "udp", PF_INET, port,
                                SVC_SOCK_DEFAULTS);
        if (error < 0)
                return error;

        error = lockd_up();
        if (error < 0)
                return error;

        error = svc_create_xprt(nfsd_serv, "tcp", PF_INET, port,
                                SVC_SOCK_DEFAULTS);
        if (error < 0)
                return error;

        error = lockd_up();
        if (error < 0)
                return error;

        return 0;
}

int nfsd_nrpools(void)
{
        if (nfsd_serv == NULL)
                return 0;
        else
                return nfsd_serv->sv_nrpools;
}

int nfsd_get_nrthreads(int n, int *nthreads)
{
        int i = 0;

        if (nfsd_serv != NULL) {
                for (i = 0; i < nfsd_serv->sv_nrpools && i < n; i++)
                        nthreads[i] = nfsd_serv->sv_pools[i].sp_nrthreads;
        }

        return 0;
}

int nfsd_set_nrthreads(int n, int *nthreads)
{
        int i = 0;
        int tot = 0;
        int err = 0;

        WARN_ON(!mutex_is_locked(&nfsd_mutex));

        if (nfsd_serv == NULL || n <= 0)
                return 0;

        if (n > nfsd_serv->sv_nrpools)
                n = nfsd_serv->sv_nrpools;

        /* enforce a global maximum number of threads */
        tot = 0;
        for (i = 0; i < n; i++) {
                if (nthreads[i] > NFSD_MAXSERVS)
                        nthreads[i] = NFSD_MAXSERVS;
                tot += nthreads[i];
        }
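        /*
         * Editorial note: if the combined request exceeds NFSD_MAXSERVS,
         * the first loop below scales each pool's count down roughly in
         * proportion, and the second loop appears intended to trim a
         * little more to make up for integer rounding.
         */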
        if (tot > NFSD_MAXSERVS) {
                /* total too large: scale down requested numbers */
                for (i = 0; i < n && tot > 0; i++) {
                        int new = nthreads[i] * NFSD_MAXSERVS / tot;
                        tot -= (nthreads[i] - new);
                        nthreads[i] = new;
                }
                for (i = 0; i < n && tot > 0; i++) {
                        nthreads[i]--;
                        tot--;
                }
        }

        /*
         * There must always be a thread in pool 0; the admin
         * can't shut down NFS completely using pool_threads.
         */
        if (nthreads[0] == 0)
                nthreads[0] = 1;

        /* apply the new numbers */
        svc_get(nfsd_serv);
        for (i = 0; i < n; i++) {
                err = svc_set_num_threads(nfsd_serv, &nfsd_serv->sv_pools[i],
                                          nthreads[i]);
                if (err)
                        break;
        }
        svc_destroy(nfsd_serv);

        return err;
}

int
nfsd_svc(unsigned short port, int nrservs)
{
        int error;

        mutex_lock(&nfsd_mutex);
        dprintk("nfsd: creating service\n");
        if (nrservs <= 0)
                nrservs = 0;
        if (nrservs > NFSD_MAXSERVS)
                nrservs = NFSD_MAXSERVS;
        error = 0;
        if (nrservs == 0 && nfsd_serv == NULL)
                goto out;

        /* Readahead param cache - will no-op if it already exists */
        error = nfsd_racache_init(2*nrservs);
        if (error < 0)
                goto out;
        nfs4_state_start();

        nfsd_reset_versions();

        error = nfsd_create_serv();

        if (error)
                goto out;
        error = nfsd_init_socks(port);
        if (error)
                goto failure;

        error = svc_set_num_threads(nfsd_serv, NULL, nrservs);
        if (error == 0)
                /*
                 * We are holding a reference to nfsd_serv which
                 * we don't want to count in the return value,
                 * so subtract 1
                 */
                error = nfsd_serv->sv_nrthreads - 1;
failure:
        svc_destroy(nfsd_serv);         /* Release server */
out:
        mutex_unlock(&nfsd_mutex);
        return error;
}

/*
 * This is the NFS server kernel thread
 */
static int
nfsd(void *vrqstp)
{
        struct svc_rqst *rqstp = (struct svc_rqst *) vrqstp;
        int err, preverr = 0;

        /* Lock module and set up kernel thread */
        mutex_lock(&nfsd_mutex);

        /*
         * At this point, the thread shares current->fs
         * with the init process. We need to create files with a
         * umask of 0 instead of init's umask.
         */
        if (unshare_fs_struct() < 0) {
                printk("Unable to start nfsd thread: out of memory\n");
                goto out;
        }

        current->fs->umask = 0;

        /*
         * thread is spawned with all signals set to SIG_IGN, re-enable
         * the ones that will bring down the thread
         */
        allow_signal(SIGKILL);
        allow_signal(SIGHUP);
        allow_signal(SIGINT);
        allow_signal(SIGQUIT);

        nfsdstats.th_cnt++;
        mutex_unlock(&nfsd_mutex);

        /*
         * We want less throttling in balance_dirty_pages() so that nfs to
         * localhost doesn't cause nfsd to lock up due to all the client's
         * dirty pages.
         */
        current->flags |= PF_LESS_THROTTLE;
        set_freezable();

        /*
         * The main request loop
         */
        for (;;) {
                /*
                 * Find a socket with data available and call its
                 * recvfrom routine.
                 */
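                /*
                 * Editorial note: svc_recv() blocks for up to an hour
                 * waiting for work; -EAGAIN here just means that timeout
                 * expired with nothing to do, while -EINTR means one of
                 * the signals enabled above (or a shutdown request) is
                 * asking this thread to exit.
                 */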
                while ((err = svc_recv(rqstp, 60*60*HZ)) == -EAGAIN)
                        ;
                if (err == -EINTR)
                        break;
                else if (err < 0) {
                        if (err != preverr) {
                                printk(KERN_WARNING "%s: unexpected error "
                                       "from svc_recv (%d)\n", __func__, -err);
                                preverr = err;
                        }
                        schedule_timeout_uninterruptible(HZ);
                        continue;
                }

                /* Lock the export hash tables for reading. */
                exp_readlock();

                svc_process(rqstp);

                /* Unlock export hash tables */
                exp_readunlock();
        }

        /* Clear signals before calling svc_exit_thread() */
        flush_signals(current);

        mutex_lock(&nfsd_mutex);
        nfsdstats.th_cnt--;

out:
        /* Release the thread */
        svc_exit_thread(rqstp);

        /* Release module */
        mutex_unlock(&nfsd_mutex);
        module_put_and_exit(0);
        return 0;
}

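/*
 * Editorial note on the mapping below: nfserr_jukebox postdates NFSv2 and
 * nfserr_wrongsec is NFSv4-only, so older protocol versions get the closest
 * equivalent a client of that vintage can understand (drop the request, or
 * report EACCES).
 */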
static __be32 map_new_errors(u32 vers, __be32 nfserr)
{
        if (nfserr == nfserr_jukebox && vers == 2)
                return nfserr_dropit;
        if (nfserr == nfserr_wrongsec && vers < 4)
                return nfserr_acces;
        return nfserr;
}

int
nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
{
        struct svc_procedure *proc;
        kxdrproc_t xdr;
        __be32 nfserr;
        __be32 *nfserrp;

        dprintk("nfsd_dispatch: vers %d proc %d\n",
                rqstp->rq_vers, rqstp->rq_proc);
        proc = rqstp->rq_procinfo;

        /* Check whether we have this call in the cache. */
        switch (nfsd_cache_lookup(rqstp, proc->pc_cachetype)) {
        case RC_INTR:
        case RC_DROPIT:
                return 0;
        case RC_REPLY:
                return 1;
        case RC_DOIT:;
                /* do it */
        }

        /* Decode arguments */
        xdr = proc->pc_decode;
        if (xdr && !xdr(rqstp, (__be32*)rqstp->rq_arg.head[0].iov_base,
                        rqstp->rq_argp)) {
                dprintk("nfsd: failed to decode arguments!\n");
                nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
                *statp = rpc_garbage_args;
                return 1;
        }

        /* need to grab the location to store the status, as
         * nfsv4 does some encoding while processing
         */
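        /*
         * Editorial note: nfserrp points at the next free __be32 in the
         * reply head, and bumping iov_len below reserves that slot now so
         * that anything the procedure encodes lands after the status word,
         * which is filled in later via *nfserrp++ = nfserr.
         */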
        nfserrp = rqstp->rq_res.head[0].iov_base
                + rqstp->rq_res.head[0].iov_len;
        rqstp->rq_res.head[0].iov_len += sizeof(__be32);

        /* NFSv4.1 DRC requires statp */
        if (rqstp->rq_vers == 4)
                nfsd4_set_statp(rqstp, statp);

        /* Now call the procedure handler, and encode NFS status. */
        nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
        nfserr = map_new_errors(rqstp->rq_vers, nfserr);
        if (nfserr == nfserr_dropit) {
                dprintk("nfsd: Dropping request; may be revisited later\n");
                nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
                return 0;
        }

        if (rqstp->rq_proc != 0)
                *nfserrp++ = nfserr;

        /* Encode result.
         * For NFSv2, additional info is never returned in case of an error.
         */
        if (!(nfserr && rqstp->rq_vers == 2)) {
                xdr = proc->pc_encode;
                if (xdr && !xdr(rqstp, nfserrp,
                                rqstp->rq_resp)) {
                        /* Failed to encode result. Release cache entry */
                        dprintk("nfsd: failed to encode result!\n");
                        nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
                        *statp = rpc_system_err;
                        return 1;
                }
        }

        /* Store reply in cache. */
        nfsd_cache_update(rqstp, proc->pc_cachetype, statp + 1);
        return 1;
}

int nfsd_pool_stats_open(struct inode *inode, struct file *file)
{
        if (nfsd_serv == NULL)
                return -ENODEV;
        return svc_pool_stats_open(nfsd_serv, file);
}