/*
 * fs/cifs/cifsfs.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2004
 * Author(s): Steve French (sfrench@us.ibm.com)
 *
 * Common Internet FileSystem (CIFS) client
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Note that BB means BUGBUG (ie something to fix eventually) */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/vfs.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include "cifsfs.h"
#include "cifspdu.h"
#define DECLARE_GLOBALS_HERE
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include <linux/mm.h>
#define CIFS_MAGIC_NUMBER 0xFF534D42	/* the first four bytes of SMB PDUs */

#ifdef CONFIG_CIFS_QUOTA
static struct quotactl_ops cifs_quotactl_ops;
#endif

int cifsFYI = 0;
int cifsERROR = 1;
int traceSMB = 0;
unsigned int oplockEnabled = 1;
unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
unsigned int extended_security = 0;
unsigned int ntlmv2_support = 0;
unsigned int sign_CIFS_PDUs = 1;
extern struct task_struct *oplockThread; /* remove sparse warning */
struct task_struct *oplockThread = NULL;
extern struct task_struct *dnotifyThread; /* remove sparse warning */
struct task_struct *dnotifyThread = NULL;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, int, 0);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, int, 0);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: 1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, int, 0);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. Default: 50 Range: 2 to 256");
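
/*
 * The module parameters above use permission 0, so they are not exposed
 * under /sys/module and can only be set at load time, for example
 * (illustrative values only):
 *
 *	modprobe cifs CIFSMaxBufSize=16384 cifs_max_pending=50
 */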

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

extern kmem_cache_t *cifs_oplock_cachep;

static int
cifs_read_super(struct super_block *sb, void *data,
		const char *devname, int silent)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	int rc = 0;

	sb->s_flags |= MS_NODIRATIME; /* and probably even noatime */
	sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info),GFP_KERNEL);
	cifs_sb = CIFS_SB(sb);
	if(cifs_sb == NULL)
		return -ENOMEM;

	rc = cifs_mount(sb, cifs_sb, data, devname);

	if (rc) {
		if (!silent)
			cERROR(1,
			       ("cifs_mount failed w/return code = %d", rc));
		goto out_mount_failed;
	}

	sb->s_magic = CIFS_MAGIC_NUMBER;
	sb->s_op = &cifs_super_ops;
/*	if(cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
		sb->s_blocksize = cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
#ifdef CONFIG_CIFS_QUOTA
	sb->s_qcop = &cifs_quotactl_ops;
#endif
	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = iget(sb, ROOT_I);

	if (!inode) {
		rc = -ENOMEM;
		goto out_no_root;
	}

	sb->s_root = d_alloc_root(inode);

	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

	return 0;

out_no_root:
	cERROR(1, ("cifs_read_super: get root inode failed"));
	if (inode)
		iput(inode);

out_mount_failed:
	if(cifs_sb) {
		if(cifs_sb->local_nls)
			unload_nls(cifs_sb->local_nls);
		kfree(cifs_sb);
	}
	return rc;
}

static void
cifs_put_super(struct super_block *sb)
{
	int rc = 0;
	struct cifs_sb_info *cifs_sb;

	cFYI(1, ("In cifs_put_super"));
	cifs_sb = CIFS_SB(sb);
	if(cifs_sb == NULL) {
		cFYI(1,("Empty cifs superblock info passed to unmount"));
		return;
	}
	rc = cifs_umount(sb, cifs_sb);
	if (rc) {
		cERROR(1, ("cifs_umount failed with return code %d", rc));
	}
	unload_nls(cifs_sb->local_nls);
	kfree(cifs_sb);
	return;
}

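/*
 * cifs_statfs - fill in filesystem statistics for statfs(2)
 *
 * Tries the POSIX QFS info level first (when built with the experimental
 * option and the server advertises the Unix extensions), then the standard
 * SMB QFSInfo level, and finally the legacy info level one for very old
 * Windows servers.  Note that errors are currently swallowed and success
 * is always returned to the VFS.
 */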
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	int xid;
	int rc = -EOPNOTSUPP;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;

	xid = GetXid();

	cifs_sb = CIFS_SB(sb);
	pTcon = cifs_sb->tcon;

	buf->f_type = CIFS_MAGIC_NUMBER;

	/* instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO */
	buf->f_namelen = PATH_MAX; /* PATH_MAX may be too long - it would
				      presumably be the total path length,
				      but note that some servers (including
				      Samba 3) have a shorter maximum path */
	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

#ifdef CONFIG_CIFS_EXPERIMENTAL
/* BB we could add a second check for a QFS Unix capability bit */
/* BB FIXME check CIFS_POSIX_EXTENSIONS Unix cap first FIXME BB */
	if ((pTcon->ses->capabilities & CAP_UNIX) && (CIFS_POSIX_EXTENSIONS &
			le64_to_cpu(pTcon->fsUnixInfo.Capability)))
		rc = CIFSSMBQFSPosixInfo(xid, pTcon, buf);

	/* Only need to call the old QFSInfo if failed
	   on newer one */
	if(rc)
#endif /* CIFS_EXPERIMENTAL */
		rc = CIFSSMBQFSInfo(xid, pTcon, buf);

	/* Old Windows servers do not support level 103, retry with level
	   one if old server failed the previous call */
	if(rc)
		rc = SMBOldQFSInfo(xid, pTcon, buf);
	/*
	   int f_type;
	   __fsid_t f_fsid;
	   int f_namelen; */
	/* BB get from info in tcon struct at mount time call to QFSAttrInfo */
	FreeXid(xid);
	return 0;		/* always return success? what if volume is no
				   longer available? */
}

static int cifs_permission(struct inode * inode, int mask, struct nameidata *nd)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(inode->i_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
		return 0;
	} else /* file mode might have been restricted at mount time
		  on the client (above and beyond ACL on servers) for
		  servers which do not support setting and viewing mode bits,
		  so allowing client to check permissions is useful */
		return generic_permission(inode, mask, NULL);
}

static kmem_cache_t *cifs_inode_cachep;
static kmem_cache_t *cifs_req_cachep;
static kmem_cache_t *cifs_mid_cachep;
kmem_cache_t *cifs_oplock_cachep;
static kmem_cache_t *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;

static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, SLAB_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	atomic_set(&cifs_inode->inUse, 0);
	cifs_inode->time = 0;
	/* Until the file is open and we have gotten oplock
	   info back from the server, can not assume caching of
	   file data or metadata */
	cifs_inode->clientCanCacheRead = FALSE;
	cifs_inode->clientCanCacheAll = FALSE;
	cifs_inode->vfs_inode.i_blksize = CIFS_MAX_MSGSIZE;
	cifs_inode->vfs_inode.i_blkbits = 14;	/* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	return &cifs_inode->vfs_inode;
}

static void
cifs_destroy_inode(struct inode *inode)
{
	kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
}

/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
static int
cifs_show_options(struct seq_file *s, struct vfsmount *m)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(m->mnt_sb);

	if (cifs_sb) {
		if (cifs_sb->tcon) {
			seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
			if (cifs_sb->tcon->ses) {
				if (cifs_sb->tcon->ses->userName)
					seq_printf(s, ",username=%s",
						   cifs_sb->tcon->ses->userName);
				if(cifs_sb->tcon->ses->domainName)
					seq_printf(s, ",domain=%s",
						   cifs_sb->tcon->ses->domainName);
			}
		}
		seq_printf(s, ",rsize=%d",cifs_sb->rsize);
		seq_printf(s, ",wsize=%d",cifs_sb->wsize);
	}
	return 0;
}

#ifdef CONFIG_CIFS_QUOTA
int cifs_xquota_set(struct super_block * sb, int quota_type, qid_t qid,
		struct fs_disk_quota * pdquota)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if(cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;


	xid = GetXid();
	if(pTcon) {
		cFYI(1,("set type: 0x%x id: %d",quota_type,qid));
	} else {
		return -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xquota_get(struct super_block * sb, int quota_type, qid_t qid,
		struct fs_disk_quota * pdquota)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if(cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if(pTcon) {
		cFYI(1,("get type: 0x%x id: %d",quota_type,qid));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xstate_set(struct super_block * sb, unsigned int flags, int operation)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if(cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if(pTcon) {
		cFYI(1,("flags: 0x%x operation: 0x%x",flags,operation));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xstate_get(struct super_block * sb, struct fs_quota_stat *qstats)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if(cifs_sb) {
		pTcon = cifs_sb->tcon;
	} else {
		return -EIO;
	}
	xid = GetXid();
	if(pTcon) {
		cFYI(1,("pqstats %p",qstats));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

static struct quotactl_ops cifs_quotactl_ops = {
	.set_xquota = cifs_xquota_set,
	.get_xquota = cifs_xquota_get,
	.set_xstate = cifs_xstate_set,
	.get_xstate = cifs_xstate_get,
};
#endif

#ifdef CONFIG_CIFS_EXPERIMENTAL
static void cifs_umount_begin(struct super_block * sblock)
{
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo * tcon;

	cifs_sb = CIFS_SB(sblock);
	if(cifs_sb == NULL)
		return;

	tcon = cifs_sb->tcon;
	if(tcon == NULL)
		return;
	down(&tcon->tconSem);
	if (atomic_read(&tcon->useCount) == 1)
		tcon->tidStatus = CifsExiting;
	up(&tcon->tconSem);

	/* cancel_brl_requests(tcon); */
	/* cancel_notify_requests(tcon); */
	if(tcon->ses && tcon->ses->server)
	{
		cFYI(1,("wake up tasks now - umount begin not complete"));
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}
/* BB FIXME - finish add checks for tidStatus BB */

	return;
}
#endif

static int cifs_remount(struct super_block *sb, int *flags, char *data)
{
	*flags |= MS_NODIRATIME;
	return 0;
}

struct super_operations cifs_super_ops = {
	.read_inode = cifs_read_inode,
	.put_super = cifs_put_super,
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.destroy_inode = cifs_destroy_inode,
/*	.drop_inode = generic_delete_inode,
	.delete_inode = cifs_delete_inode, *//* Do not need the above two functions
	unless later we add lazy close of inodes or unless the kernel forgets to call
	us with the same number of releases (closes) as opens */
	.show_options = cifs_show_options,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.umount_begin = cifs_umount_begin,
#endif
	.remount_fs = cifs_remount,
};

static int
cifs_get_sb(struct file_system_type *fs_type,
	    int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	int rc;
	struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);

	cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));

	if (IS_ERR(sb))
		return PTR_ERR(sb);

	sb->s_flags = flags;

	rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
	if (rc) {
		up_write(&sb->s_umount);
		deactivate_super(sb);
		return rc;
	}
	sb->s_flags |= MS_ACTIVE;
	return simple_set_mnt(mnt, sb);
}

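/*
 * For the vectored and aio write paths below: if we do not hold an oplock
 * that allows us to cache writes (clientCanCacheAll), kick off writeback
 * immediately after the generic write so the server sees the data without
 * waiting for a later flush or close.
 */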
static ssize_t cifs_file_writev(struct file *file, const struct iovec *iov,
				unsigned long nr_segs, loff_t *ppos)
{
	struct inode *inode = file->f_dentry->d_inode;
	ssize_t written;

	written = generic_file_writev(file, iov, nr_segs, ppos);
	if (!CIFS_I(inode)->clientCanCacheAll)
		filemap_fdatawrite(inode->i_mapping);
	return written;
}

static ssize_t cifs_file_aio_write(struct kiocb *iocb, const char __user *buf,
				   size_t count, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_dentry->d_inode;
	ssize_t written;

	written = generic_file_aio_write(iocb, buf, count, pos);
	if (!CIFS_I(inode)->clientCanCacheAll)
		filemap_fdatawrite(inode->i_mapping);
	return written;
}

static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
{
	/* origin == SEEK_END => we must revalidate the cached file length */
	if (origin == 2) {
		int retval = cifs_revalidate(file->f_dentry);
		if (retval < 0)
			return (loff_t)retval;
	}
	return remote_llseek(file, offset, origin);
}

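/*
 * The "cifs" filesystem type registered with the VFS in init_cifs().  Once
 * the module is loaded, a share is typically mounted with something like
 * (illustrative options only):
 *
 *	mount -t cifs //server/share /mnt/point -o user=guest
 *
 * See the mount.cifs(8) documentation for the full set of mount options.
 */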
static struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.get_sb = cifs_get_sb,
	.kill_sb = kill_anon_super,
	/* .fs_flags */
};
struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename,
	.permission = cifs_permission,
	/* revalidate:cifs_revalidate, */
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod = cifs_mknod,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};

struct inode_operations cifs_file_inode_ops = {
	/* revalidate:cifs_revalidate, */
	.setattr = cifs_setattr,
	.getattr = cifs_getattr, /* do we need this anymore? */
	.rename = cifs_rename,
	.permission = cifs_permission,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};

struct inode_operations cifs_symlink_inode_ops = {
	.readlink = generic_readlink,
	.follow_link = cifs_follow_link,
	.put_link = cifs_put_link,
	.permission = cifs_permission,
	/* BB add the following two eventually */
	/* revalidate: cifs_revalidate,
	   setattr: cifs_notify_change, *//* BB do we need notify change */
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};

const struct file_operations cifs_file_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
	.readv = generic_file_readv,
	.writev = cifs_file_writev,
	.aio_read = generic_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.sendfile = generic_file_sendfile,
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
	.ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_file_direct_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.sendfile = generic_file_sendfile, /* BB removeme BB */
#ifdef CONFIG_CIFS_POSIX
	.ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};
const struct file_operations cifs_file_nobrl_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
	.readv = generic_file_readv,
	.writev = cifs_file_writev,
	.aio_read = generic_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.sendfile = generic_file_sendfile,
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
	.ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_file_direct_nobrl_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.sendfile = generic_file_sendfile, /* BB removeme BB */
#ifdef CONFIG_CIFS_POSIX
	.ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_dir_ops = {
	.readdir = cifs_readdir,
	.release = cifs_closedir,
	.read = generic_read_dir,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
	.ioctl = cifs_ioctl,
};

static void
cifs_init_once(void *inode, kmem_cache_t * cachep, unsigned long flags)
{
	struct cifsInodeInfo *cifsi = inode;

	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		inode_init_once(&cifsi->vfs_inode);
		INIT_LIST_HEAD(&cifsi->lockList);
	}
}

static int
cifs_init_inodecache(void)
{
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof (struct cifsInodeInfo),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD),
					      cifs_init_once, NULL);
	if (cifs_inode_cachep == NULL)
		return -ENOMEM;

	return 0;
}

static void
cifs_destroy_inodecache(void)
{
	if (kmem_cache_destroy(cifs_inode_cachep))
		printk(KERN_WARNING "cifs_inode_cache: error freeing\n");
}

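/*
 * cifs_init_request_bufs - set up the SMB request buffer caches and pools
 *
 * Clamps CIFSMaxBufSize to the supported range (8192 to 127K, rounded down
 * to a 512 byte multiple), then creates the large and small request slab
 * caches and backs each with a mempool sized by the cifs_min_rcv and
 * cifs_min_small module parameters.
 */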
static int
cifs_init_request_bufs(void)
{
	if(CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	   Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*	cERROR(1,("CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize)); */
	cifs_req_cachep = kmem_cache_create("cifs_request",
					    CIFSMaxBufSize +
					    MAX_CIFS_HDR_SIZE, 0,
					    SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if(cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cERROR(1,("cifs_min_rcv set to maximum (64)"));
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if(cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	   almost all handle based requests (but not write response, nor is it
	   sufficient for path based requests).  A smaller size would have
	   been more efficient (compacting multiple slab items on one 4k page)
	   for the case in which debug was on, but this larger size allows
	   more SMBs to use small buffer alloc and is still much more
	   efficient to alloc 1 per page off the slab compared to 17K (5page)
	   alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			NULL, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if(cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cFYI(1,("cifs_min_small set to maximum (256)"));
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if(cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	if (kmem_cache_destroy(cifs_req_cachep))
		printk(KERN_WARNING
		       "cifs_destroy_request_cache: error not all structures were freed\n");
	mempool_destroy(cifs_sm_req_poolp);
	if (kmem_cache_destroy(cifs_sm_req_cachep))
		printk(KERN_WARNING
		       "cifs_destroy_request_cache: cifs_small_rq free error\n");
}

static int
cifs_init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof (struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if(cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
					       sizeof (struct oplock_q_entry), 0,
					       SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_oplock_cachep == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		mempool_destroy(cifs_mid_poolp);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	if (kmem_cache_destroy(cifs_mid_cachep))
		printk(KERN_WARNING
		       "cifs_destroy_mids: error not all structures were freed\n");

	if (kmem_cache_destroy(cifs_oplock_cachep))
		printk(KERN_WARNING
		       "error not all oplock structures were freed\n");
}

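/*
 * cifs_oplock_thread - background worker servicing the oplock break queue
 *
 * Sleeps until entries appear on GlobalOplock_Q, then for each entry flushes
 * dirty pages for the affected inode (and invalidates cached data when read
 * caching was lost) before acknowledging the break to the server with a
 * LOCKING_ANDX oplock release, unless the session needs to be reconnected.
 */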
static int cifs_oplock_thread(void * dummyarg)
{
	struct oplock_q_entry * oplock_item;
	struct cifsTconInfo *pTcon;
	struct inode * inode;
	__u16 netfid;
	int rc;

	do {
		if (try_to_freeze())
			continue;

		spin_lock(&GlobalMid_Lock);
		if(list_empty(&GlobalOplock_Q)) {
			spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(39*HZ);
		} else {
			oplock_item = list_entry(GlobalOplock_Q.next,
				struct oplock_q_entry, qhead);
			if(oplock_item) {
				cFYI(1,("found oplock item to write out"));
				pTcon = oplock_item->tcon;
				inode = oplock_item->pinode;
				netfid = oplock_item->netfid;
				spin_unlock(&GlobalMid_Lock);
				DeleteOplockQEntry(oplock_item);
				/* can not grab inode sem here since it would
				   deadlock when oplock received on delete
				   since vfs_unlink holds the i_mutex across
				   the call */
				/* mutex_lock(&inode->i_mutex);*/
				if (S_ISREG(inode->i_mode)) {
					rc = filemap_fdatawrite(inode->i_mapping);
					if(CIFS_I(inode)->clientCanCacheRead == 0) {
						filemap_fdatawait(inode->i_mapping);
						invalidate_remote_inode(inode);
					}
				} else
					rc = 0;
				/* mutex_unlock(&inode->i_mutex);*/
				if (rc)
					CIFS_I(inode)->write_behind_rc = rc;
				cFYI(1,("Oplock flush inode %p rc %d",inode,rc));

				/* releasing a stale oplock after recent reconnection
				   of smb session using a now incorrect file
				   handle is not a data integrity issue but do
				   not bother sending an oplock release if session
				   to server still is disconnected since oplock
				   already released by the server in that case */
				if(pTcon->tidStatus != CifsNeedReconnect) {
					rc = CIFSSMBLock(0, pTcon, netfid,
						0 /* len */ , 0 /* offset */, 0,
						0, LOCKING_ANDX_OPLOCK_RELEASE,
						0 /* wait flag */);
					cFYI(1,("Oplock release rc = %d ",rc));
				}
			} else
				spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(1); /* yield in case q were corrupt */
		}
	} while (!kthread_should_stop());

	return 0;
}

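/*
 * cifs_dnotify_thread - periodically wake requests blocked on a response
 *
 * Every 15 seconds, walks the global session list and wakes the response
 * queue of each server that has requests in flight, so that stuck callers
 * can notice errors (for example a dropped connection) and bail out.
 */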
static int cifs_dnotify_thread(void * dummyarg)
{
	struct list_head *tmp;
	struct cifsSesInfo *ses;

	do {
		if(try_to_freeze())
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(15*HZ);
		read_lock(&GlobalSMBSeslock);
		/* check if any stuck requests that need
		   to be woken up and wakeq so the
		   thread can wake up and error out */
		list_for_each(tmp, &GlobalSMBSessionList) {
			ses = list_entry(tmp, struct cifsSesInfo,
				cifsSessionList);
			if(ses && ses->server &&
			   atomic_read(&ses->server->inFlight))
				wake_up_all(&ses->server->response_q);
		}
		read_unlock(&GlobalSMBSeslock);
	} while (!kthread_should_stop());

	return 0;
}

static int __init
init_cifs(void)
{
	int rc = 0;
#ifdef CONFIG_PROC_FS
	cifs_proc_init();
#endif
	INIT_LIST_HEAD(&GlobalServerList);	/* BB not implemented yet */
	INIT_LIST_HEAD(&GlobalSMBSessionList);
	INIT_LIST_HEAD(&GlobalTreeConnectionList);
	INIT_LIST_HEAD(&GlobalOplock_Q);
#ifdef CONFIG_CIFS_EXPERIMENTAL
	INIT_LIST_HEAD(&GlobalDnotifyReqList);
	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
#endif
/*
 * Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount,0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&bufAllocCount, 0);
	atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&totBufAllocCount, 0);
	atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	rwlock_init(&GlobalSMBSeslock);
	spin_lock_init(&GlobalMid_Lock);

	if(cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cFYI(1,("cifs_max_pending set to min of 2"));
	} else if(cifs_max_pending > 256) {
		cifs_max_pending = 256;
		cFYI(1,("cifs_max_pending set to max of 256"));
	}

	rc = cifs_init_inodecache();
	if (rc)
		goto out_clean_proc;

	rc = cifs_init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_destroy_request_bufs;

	oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
	if (IS_ERR(oplockThread)) {
		rc = PTR_ERR(oplockThread);
		cERROR(1,("error %d create oplock thread", rc));
		goto out_unregister_filesystem;
	}

	dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
	if (IS_ERR(dnotifyThread)) {
		rc = PTR_ERR(dnotifyThread);
		cERROR(1,("error %d create dnotify thread", rc));
		goto out_stop_oplock_thread;
	}

	return 0;

 out_stop_oplock_thread:
	kthread_stop(oplockThread);
 out_unregister_filesystem:
	unregister_filesystem(&cifs_fs_type);
 out_destroy_request_bufs:
	cifs_destroy_request_bufs();
 out_destroy_mids:
	cifs_destroy_mids();
 out_destroy_inodecache:
	cifs_destroy_inodecache();
 out_clean_proc:
#ifdef CONFIG_PROC_FS
	cifs_proc_clean();
#endif
	return rc;
}

static void __exit
exit_cifs(void)
{
	cFYI(0, ("In unregister ie exit_cifs"));
#ifdef CONFIG_PROC_FS
	cifs_proc_clean();
#endif
	unregister_filesystem(&cifs_fs_type);
	cifs_destroy_inodecache();
	cifs_destroy_mids();
	cifs_destroy_request_bufs();
	kthread_stop(oplockThread);
	kthread_stop(dnotifyThread);
}

MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
    ("VFS to access servers complying with the SNIA CIFS Specification e.g. Samba and Windows");
MODULE_VERSION(CIFS_VERSION);
module_init(init_cifs)
module_exit(exit_cifs)