[PATCH] Add include/linux/freezer.h and move definitions from sched.h
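This patch moves the process-freezer helpers out of <linux/sched.h> into a new include/linux/freezer.h; the CIFS client below now includes <linux/freezer.h> so its kernel threads can keep calling try_to_freeze(). As a rough sketch only (not the actual header contents), the interface this file relies on looks approximately like this:

	/* sketch, not the real include/linux/freezer.h: approximate shape of the
	   helpers used by the cifs kthreads below */
	extern void refrigerator(void);	/* parks the current task while the system is freezing */

	static inline int try_to_freeze(void)
	{
		if (freezing(current)) {	/* freezing() reports a pending freeze request */
			refrigerator();
			return 1;
		}
		return 0;
	}

cifs_oplock_thread() and cifs_dnotify_thread() below call try_to_freeze() at the top of each loop iteration so that suspend/hibernate is not blocked while they sleep between work items.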
/*
 *   fs/cifs/cifsfs.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2004
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   Common Internet FileSystem (CIFS) client
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Note that BB means BUGBUG (ie something to fix eventually) */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/vfs.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include "cifsfs.h"
#include "cifspdu.h"
#define DECLARE_GLOBALS_HERE
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include <linux/mm.h>
#define CIFS_MAGIC_NUMBER 0xFF534D42	/* the first four bytes of SMB PDUs */

#ifdef CONFIG_CIFS_QUOTA
static struct quotactl_ops cifs_quotactl_ops;
#endif

int cifsFYI = 0;
int cifsERROR = 1;
int traceSMB = 0;
unsigned int oplockEnabled = 1;
unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
unsigned int extended_security = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
extern struct task_struct *oplockThread; /* remove sparse warning */
struct task_struct *oplockThread = NULL;
extern struct task_struct *dnotifyThread; /* remove sparse warning */
struct task_struct *dnotifyThread = NULL;
static struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, int, 0);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, int, 0);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: 1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, int, 0);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. Default: 50 Range: 2 to 256");

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

extern struct kmem_cache *cifs_oplock_cachep;

static int
cifs_read_super(struct super_block *sb, void *data,
		const char *devname, int silent)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	int rc = 0;

	sb->s_flags |= MS_NODIRATIME; /* and probably even noatime */
	sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
	cifs_sb = CIFS_SB(sb);
	if (cifs_sb == NULL)
		return -ENOMEM;

	rc = cifs_mount(sb, cifs_sb, data, devname);

	if (rc) {
		if (!silent)
			cERROR(1,
			       ("cifs_mount failed w/return code = %d", rc));
		goto out_mount_failed;
	}

	sb->s_magic = CIFS_MAGIC_NUMBER;
	sb->s_op = &cifs_super_ops;
/*	if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
	sb->s_blocksize = cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
#ifdef CONFIG_CIFS_QUOTA
	sb->s_qcop = &cifs_quotactl_ops;
#endif
	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = iget(sb, ROOT_I);

	if (!inode) {
		rc = -ENOMEM;
		goto out_no_root;
	}

	sb->s_root = d_alloc_root(inode);

	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

	return 0;

out_no_root:
	cERROR(1, ("cifs_read_super: get root inode failed"));
	if (inode)
		iput(inode);

out_mount_failed:
	if (cifs_sb) {
		if (cifs_sb->local_nls)
			unload_nls(cifs_sb->local_nls);
		kfree(cifs_sb);
	}
	return rc;
}

static void
cifs_put_super(struct super_block *sb)
{
	int rc = 0;
	struct cifs_sb_info *cifs_sb;

	cFYI(1, ("In cifs_put_super"));
	cifs_sb = CIFS_SB(sb);
	if (cifs_sb == NULL) {
		cFYI(1, ("Empty cifs superblock info passed to unmount"));
		return;
	}
	rc = cifs_umount(sb, cifs_sb);
	if (rc) {
		cERROR(1, ("cifs_umount failed with return code %d", rc));
	}
	unload_nls(cifs_sb->local_nls);
	kfree(cifs_sb);
	return;
}

static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	int xid;
	int rc = -EOPNOTSUPP;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;

	xid = GetXid();

	cifs_sb = CIFS_SB(sb);
	pTcon = cifs_sb->tcon;

	buf->f_type = CIFS_MAGIC_NUMBER;

	/* instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO */
	buf->f_namelen = PATH_MAX; /* PATH_MAX may be too long - it would
				      presumably be total path, but note
				      that some servers (including Samba 3)
				      have a shorter maximum path */
	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

	/* BB we could add a second check for a QFS Unix capability bit */
	/* BB FIXME check CIFS_POSIX_EXTENSIONS Unix cap first FIXME BB */
	if ((pTcon->ses->capabilities & CAP_UNIX) && (CIFS_POSIX_EXTENSIONS &
	    le64_to_cpu(pTcon->fsUnixInfo.Capability)))
		rc = CIFSSMBQFSPosixInfo(xid, pTcon, buf);

	/* Only need to call the old QFSInfo if failed
	   on newer one */
	if (rc)
		if (pTcon->ses->capabilities & CAP_NT_SMBS)
			rc = CIFSSMBQFSInfo(xid, pTcon, buf); /* not supported by OS2 */

	/* Some old Windows servers also do not support level 103, retry with
	   older level one if old server failed the previous call or we
	   bypassed it because we detected that this was an older LANMAN sess */
	if (rc)
		rc = SMBOldQFSInfo(xid, pTcon, buf);
	/*
	   int f_type;
	   __fsid_t f_fsid;
	   int f_namelen; */
	/* BB get from info in tcon struct at mount time call to QFSAttrInfo */
	FreeXid(xid);
	return 0;	/* always return success? what if volume is no
			   longer available? */
}

static int cifs_permission(struct inode *inode, int mask, struct nameidata *nd)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(inode->i_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
		return 0;
	} else /* file mode might have been restricted at mount time
		  on the client (above and beyond ACL on servers) for
		  servers which do not support setting and viewing mode bits,
		  so allowing client to check permissions is useful */
		return generic_permission(inode, mask, NULL);
}

static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
struct kmem_cache *cifs_oplock_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;

static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	atomic_set(&cifs_inode->inUse, 0);
	cifs_inode->time = 0;
	/* Until the file is open and we have gotten oplock
	   info back from the server, can not assume caching of
	   file data or metadata */
	cifs_inode->clientCanCacheRead = FALSE;
	cifs_inode->clientCanCacheAll = FALSE;
	cifs_inode->vfs_inode.i_blkbits = 14;	/* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	return &cifs_inode->vfs_inode;
}

static void
cifs_destroy_inode(struct inode *inode)
{
	kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
}

/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
static int
cifs_show_options(struct seq_file *s, struct vfsmount *m)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(m->mnt_sb);

	if (cifs_sb) {
		if (cifs_sb->tcon) {
			seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
			if (cifs_sb->tcon->ses) {
				if (cifs_sb->tcon->ses->userName)
					seq_printf(s, ",username=%s",
						   cifs_sb->tcon->ses->userName);
				if (cifs_sb->tcon->ses->domainName)
					seq_printf(s, ",domain=%s",
						   cifs_sb->tcon->ses->domainName);
			}
		}
		seq_printf(s, ",rsize=%d", cifs_sb->rsize);
		seq_printf(s, ",wsize=%d", cifs_sb->wsize);
	}
	return 0;
}

#ifdef CONFIG_CIFS_QUOTA
int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
		    struct fs_disk_quota *pdquota)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
	} else {
		return -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
		    struct fs_disk_quota *pdquota)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("get type: 0x%x id: %d", quota_type, qid));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb) {
		pTcon = cifs_sb->tcon;
	} else {
		return -EIO;
	}
	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("pqstats %p", qstats));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

static struct quotactl_ops cifs_quotactl_ops = {
	.set_xquota = cifs_xquota_set,
	.get_xquota = cifs_xquota_get,
	.set_xstate = cifs_xstate_set,
	.get_xstate = cifs_xstate_get,
};
#endif

static void cifs_umount_begin(struct vfsmount *vfsmnt, int flags)
{
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;

	if (!(flags & MNT_FORCE))
		return;
	cifs_sb = CIFS_SB(vfsmnt->mnt_sb);
	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb->tcon;
	if (tcon == NULL)
		return;
	down(&tcon->tconSem);
	if (atomic_read(&tcon->useCount) == 1)
		tcon->tidStatus = CifsExiting;
	up(&tcon->tconSem);

	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cFYI(1, ("wake up tasks now - umount begin not complete"));
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}
	/* BB FIXME - finish add checks for tidStatus BB */

	return;
}

#ifdef CONFIG_CIFS_STATS2
static int cifs_show_stats(struct seq_file *s, struct vfsmount *mnt)
{
	/* BB FIXME */
	return 0;
}
#endif

static int cifs_remount(struct super_block *sb, int *flags, char *data)
{
	*flags |= MS_NODIRATIME;
	return 0;
}

static struct super_operations cifs_super_ops = {
	.read_inode = cifs_read_inode,
	.put_super = cifs_put_super,
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.destroy_inode = cifs_destroy_inode,
/*	.drop_inode = generic_delete_inode,
	.delete_inode = cifs_delete_inode, */
	/* Do not need the above two functions unless later we add lazy close
	   of inodes or unless the kernel forgets to call us with the same
	   number of releases (closes) as opens */
	.show_options = cifs_show_options,
	.umount_begin = cifs_umount_begin,
	.remount_fs = cifs_remount,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};

static int
cifs_get_sb(struct file_system_type *fs_type,
	    int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	int rc;
	struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);

	cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));

	if (IS_ERR(sb))
		return PTR_ERR(sb);

	sb->s_flags = flags;

	rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
	if (rc) {
		up_write(&sb->s_umount);
		deactivate_super(sb);
		return rc;
	}
	sb->s_flags |= MS_ACTIVE;
	return simple_set_mnt(mnt, sb);
}

static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_dentry->d_inode;
	ssize_t written;

	written = generic_file_aio_write(iocb, iov, nr_segs, pos);
	if (!CIFS_I(inode)->clientCanCacheAll)
		filemap_fdatawrite(inode->i_mapping);
	return written;
}

static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
{
	/* origin == SEEK_END => we must revalidate the cached file length */
	if (origin == SEEK_END) {
		int retval = cifs_revalidate(file->f_dentry);
		if (retval < 0)
			return (loff_t)retval;
	}
	return remote_llseek(file, offset, origin);
}

static struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.get_sb = cifs_get_sb,
	.kill_sb = kill_anon_super,
	/*  .fs_flags */
};
struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename,
	.permission = cifs_permission,
/*	revalidate: cifs_revalidate, */
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod = cifs_mknod,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};

struct inode_operations cifs_file_inode_ops = {
/*	revalidate: cifs_revalidate, */
	.setattr = cifs_setattr,
	.getattr = cifs_getattr, /* do we need this anymore? */
	.rename = cifs_rename,
	.permission = cifs_permission,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};

struct inode_operations cifs_symlink_inode_ops = {
	.readlink = generic_readlink,
	.follow_link = cifs_follow_link,
	.put_link = cifs_put_link,
	.permission = cifs_permission,
	/* BB add the following two eventually */
	/* revalidate: cifs_revalidate,
	   setattr:    cifs_notify_change, */ /* BB do we need notify change */
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};

const struct file_operations cifs_file_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.sendfile = generic_file_sendfile,
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
	.ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_file_direct_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.sendfile = generic_file_sendfile, /* BB removeme BB */
#ifdef CONFIG_CIFS_POSIX
	.ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};
const struct file_operations cifs_file_nobrl_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.sendfile = generic_file_sendfile,
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
	.ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_file_direct_nobrl_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.sendfile = generic_file_sendfile, /* BB removeme BB */
#ifdef CONFIG_CIFS_POSIX
	.ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_dir_ops = {
	.readdir = cifs_readdir,
	.release = cifs_closedir,
	.read = generic_read_dir,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
	.ioctl = cifs_ioctl,
};

static void
cifs_init_once(void *inode, struct kmem_cache *cachep, unsigned long flags)
{
	struct cifsInodeInfo *cifsi = inode;

	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		inode_init_once(&cifsi->vfs_inode);
		INIT_LIST_HEAD(&cifsi->lockList);
	}
}

static int
cifs_init_inodecache(void)
{
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof(struct cifsInodeInfo),
					      0, (SLAB_RECLAIM_ACCOUNT |
						  SLAB_MEM_SPREAD),
					      cifs_init_once, NULL);
	if (cifs_inode_cachep == NULL)
		return -ENOMEM;

	return 0;
}

static void
cifs_destroy_inodecache(void)
{
	kmem_cache_destroy(cifs_inode_cachep);
}

static int
cifs_init_request_bufs(void)
{
	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	   Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*	cERROR(1, ("CIFSMaxBufSize %d 0x%x", CIFSMaxBufSize, CIFSMaxBufSize)); */
	cifs_req_cachep = kmem_cache_create("cifs_request",
					    CIFSMaxBufSize +
					    MAX_CIFS_HDR_SIZE, 0,
					    SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cERROR(1, ("cifs_min_rcv set to maximum (64)"));
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	   almost all handle based requests (but not write response, nor is it
	   sufficient for path based requests).  A smaller size would have
	   been more efficient (compacting multiple slab items on one 4k page)
	   for the case in which debug was on, but this larger size allows
	   more SMBs to use small buffer alloc and is still much more
	   efficient to alloc 1 per page off the slab compared to 17K (5page)
	   alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			NULL, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cFYI(1, ("cifs_min_small set to maximum (256)"));
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}

static int
cifs_init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof(struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
					       sizeof(struct oplock_q_entry), 0,
					       SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_oplock_cachep == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		mempool_destroy(cifs_mid_poolp);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
	kmem_cache_destroy(cifs_oplock_cachep);
}

static int cifs_oplock_thread(void *dummyarg)
{
	struct oplock_q_entry *oplock_item;
	struct cifsTconInfo *pTcon;
	struct inode *inode;
	__u16 netfid;
	int rc;

	do {
		if (try_to_freeze())
			continue;

		spin_lock(&GlobalMid_Lock);
		if (list_empty(&GlobalOplock_Q)) {
			spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(39*HZ);
		} else {
			oplock_item = list_entry(GlobalOplock_Q.next,
						 struct oplock_q_entry, qhead);
			if (oplock_item) {
				cFYI(1, ("found oplock item to write out"));
				pTcon = oplock_item->tcon;
				inode = oplock_item->pinode;
				netfid = oplock_item->netfid;
				spin_unlock(&GlobalMid_Lock);
				DeleteOplockQEntry(oplock_item);
				/* can not grab inode sem here since it would
				   deadlock when oplock received on delete
				   since vfs_unlink holds the i_mutex across
				   the call */
				/* mutex_lock(&inode->i_mutex); */
				if (S_ISREG(inode->i_mode)) {
					rc = filemap_fdatawrite(inode->i_mapping);
					if (CIFS_I(inode)->clientCanCacheRead == 0) {
						filemap_fdatawait(inode->i_mapping);
						invalidate_remote_inode(inode);
					}
				} else
					rc = 0;
				/* mutex_unlock(&inode->i_mutex); */
				if (rc)
					CIFS_I(inode)->write_behind_rc = rc;
				cFYI(1, ("Oplock flush inode %p rc %d", inode, rc));

				/* releasing a stale oplock after recent reconnection
				   of smb session using a now incorrect file
				   handle is not a data integrity issue but do
				   not bother sending an oplock release if session
				   to server still is disconnected since oplock
				   already released by the server in that case */
				if (pTcon->tidStatus != CifsNeedReconnect) {
					rc = CIFSSMBLock(0, pTcon, netfid,
						0 /* len */ , 0 /* offset */, 0,
						0, LOCKING_ANDX_OPLOCK_RELEASE,
						0 /* wait flag */);
					cFYI(1, ("Oplock release rc = %d ", rc));
				}
			} else
				spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(1);  /* yield in case q were corrupt */
		}
	} while (!kthread_should_stop());

	return 0;
}

static int cifs_dnotify_thread(void *dummyarg)
{
	struct list_head *tmp;
	struct cifsSesInfo *ses;

	do {
		if (try_to_freeze())
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(15*HZ);
		read_lock(&GlobalSMBSeslock);
		/* check if any stuck requests that need
		   to be woken up and wakeq so the
		   thread can wake up and error out */
		list_for_each(tmp, &GlobalSMBSessionList) {
			ses = list_entry(tmp, struct cifsSesInfo,
					 cifsSessionList);
			if (ses && ses->server &&
			    atomic_read(&ses->server->inFlight))
				wake_up_all(&ses->server->response_q);
		}
		read_unlock(&GlobalSMBSeslock);
	} while (!kthread_should_stop());

	return 0;
}

static int __init
init_cifs(void)
{
	int rc = 0;
#ifdef CONFIG_PROC_FS
	cifs_proc_init();
#endif
/*	INIT_LIST_HEAD(&GlobalServerList); */	/* BB not implemented yet */
	INIT_LIST_HEAD(&GlobalSMBSessionList);
	INIT_LIST_HEAD(&GlobalTreeConnectionList);
	INIT_LIST_HEAD(&GlobalOplock_Q);
#ifdef CONFIG_CIFS_EXPERIMENTAL
	INIT_LIST_HEAD(&GlobalDnotifyReqList);
	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
#endif
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&bufAllocCount, 0);
	atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&totBufAllocCount, 0);
	atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	memset(Local_System_Name, 0, 15);
	rwlock_init(&GlobalSMBSeslock);
	spin_lock_init(&GlobalMid_Lock);

	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cFYI(1, ("cifs_max_pending set to min of 2"));
	} else if (cifs_max_pending > 256) {
		cifs_max_pending = 256;
		cFYI(1, ("cifs_max_pending set to max of 256"));
	}

	rc = cifs_init_inodecache();
	if (rc)
		goto out_clean_proc;

	rc = cifs_init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_destroy_request_bufs;

	oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
	if (IS_ERR(oplockThread)) {
		rc = PTR_ERR(oplockThread);
		cERROR(1, ("error %d create oplock thread", rc));
		goto out_unregister_filesystem;
	}

	dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
	if (IS_ERR(dnotifyThread)) {
		rc = PTR_ERR(dnotifyThread);
		cERROR(1, ("error %d create dnotify thread", rc));
		goto out_stop_oplock_thread;
	}

	return 0;

out_stop_oplock_thread:
	kthread_stop(oplockThread);
out_unregister_filesystem:
	unregister_filesystem(&cifs_fs_type);
out_destroy_request_bufs:
	cifs_destroy_request_bufs();
out_destroy_mids:
	cifs_destroy_mids();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_clean_proc:
#ifdef CONFIG_PROC_FS
	cifs_proc_clean();
#endif
	return rc;
}

static void __exit
exit_cifs(void)
{
	cFYI(0, ("In unregister ie exit_cifs"));
#ifdef CONFIG_PROC_FS
	cifs_proc_clean();
#endif
	unregister_filesystem(&cifs_fs_type);
	cifs_destroy_inodecache();
	cifs_destroy_mids();
	cifs_destroy_request_bufs();
	kthread_stop(oplockThread);
	kthread_stop(dnotifyThread);
}

MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
    ("VFS to access servers complying with the SNIA CIFS Specification e.g. Samba and Windows");
MODULE_VERSION(CIFS_VERSION);
module_init(init_cifs)
module_exit(exit_cifs)