/*
 * fs/cifs/cifsfs.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *
 * Common Internet FileSystem (CIFS) client
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Note that BB means BUGBUG (ie something to fix eventually) */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/vfs.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/smp_lock.h>
#include "cifsfs.h"
#include "cifspdu.h"
#define DECLARE_GLOBALS_HERE
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include <linux/mm.h>
#include <linux/key-type.h>
#include "dns_resolve.h"
#include "cifs_spnego.h"
#define CIFS_MAGIC_NUMBER 0xFF534D42	/* the first four bytes of SMB PDUs */
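/* 0xFF534D42 decodes as 0xFF followed by the ASCII bytes 'S' 'M' 'B'
   (0x53 0x4D 0x42), the protocol signature that begins every SMB header. */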

#ifdef CONFIG_CIFS_QUOTA
static struct quotactl_ops cifs_quotactl_ops;
#endif /* QUOTA */

int cifsFYI = 0;
int cifsERROR = 1;
int traceSMB = 0;
unsigned int oplockEnabled = 1;
unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
unsigned int extended_security = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
extern struct task_struct *oplockThread; /* remove sparse warning */
struct task_struct *oplockThread = NULL;
/* extern struct task_struct * dnotifyThread; remove sparse warning */
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, int, 0);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
		 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, int, 0);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
		 "1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, int, 0);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
		 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
		 "Default: 50 Range: 2 to 256");

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

extern struct kmem_cache *cifs_oplock_cachep;

static int
cifs_read_super(struct super_block *sb, void *data,
		const char *devname, int silent)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	int rc = 0;

	/* BB should we make this contingent on mount parm? */
	sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
	sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
	cifs_sb = CIFS_SB(sb);
	if (cifs_sb == NULL)
		return -ENOMEM;

#ifdef CONFIG_CIFS_DFS_UPCALL
	/* copy mount params to sb for use in submounts */
	/* BB: should we move this after the mount so we
	 * do not have to do the copy on failed mounts?
	 * BB: May be it is better to do simple copy before
	 * complex operation (mount), and in case of fail
	 * just exit instead of doing mount and attempting
	 * undo it if this copy fails?*/
	if (data) {
		int len = strlen(data);
		cifs_sb->mountdata = kzalloc(len + 1, GFP_KERNEL);
		if (cifs_sb->mountdata == NULL) {
			kfree(sb->s_fs_info);
			sb->s_fs_info = NULL;
			return -ENOMEM;
		}
		strncpy(cifs_sb->mountdata, data, len + 1);
		cifs_sb->mountdata[len] = '\0';
	}
#endif

	rc = cifs_mount(sb, cifs_sb, data, devname);

	if (rc) {
		if (!silent)
			cERROR(1,
			       ("cifs_mount failed w/return code = %d", rc));
		goto out_mount_failed;
	}

	sb->s_magic = CIFS_MAGIC_NUMBER;
	sb->s_op = &cifs_super_ops;
/*	if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
		sb->s_blocksize =
		    cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
#ifdef CONFIG_CIFS_QUOTA
	sb->s_qcop = &cifs_quotactl_ops;
#endif
	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb, ROOT_I);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		inode = NULL;
		goto out_no_root;
	}

	sb->s_root = d_alloc_root(inode);

	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_EXPERIMENTAL
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cFYI(1, ("export ops supported"));
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* EXPERIMENTAL */

	return 0;

out_no_root:
	cERROR(1, ("cifs_read_super: get root inode failed"));
	if (inode)
		iput(inode);

	cifs_umount(sb, cifs_sb);

out_mount_failed:
	if (cifs_sb) {
#ifdef CONFIG_CIFS_DFS_UPCALL
		if (cifs_sb->mountdata) {
			kfree(cifs_sb->mountdata);
			cifs_sb->mountdata = NULL;
		}
#endif
		if (cifs_sb->local_nls)
			unload_nls(cifs_sb->local_nls);
		kfree(cifs_sb);
	}
	return rc;
}

static void
cifs_put_super(struct super_block *sb)
{
	int rc = 0;
	struct cifs_sb_info *cifs_sb;

	cFYI(1, ("In cifs_put_super"));
	cifs_sb = CIFS_SB(sb);
	if (cifs_sb == NULL) {
		cFYI(1, ("Empty cifs superblock info passed to unmount"));
		return;
	}

	lock_kernel();

	rc = cifs_umount(sb, cifs_sb);
	if (rc)
		cERROR(1, ("cifs_umount failed with return code %d", rc));
#ifdef CONFIG_CIFS_DFS_UPCALL
	if (cifs_sb->mountdata) {
		kfree(cifs_sb->mountdata);
		cifs_sb->mountdata = NULL;
	}
#endif

	unload_nls(cifs_sb->local_nls);
	kfree(cifs_sb);

	unlock_kernel();
}

static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *tcon = cifs_sb->tcon;
	int rc = -EOPNOTSUPP;
	int xid;

	xid = GetXid();

	buf->f_type = CIFS_MAGIC_NUMBER;

	/*
	 * PATH_MAX may be too long - it would presumably be total path,
	 * but note that some servers (including Samba 3) have a shorter
	 * maximum path.
	 *
	 * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO.
	 */
	buf->f_namelen = PATH_MAX;
	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

	/*
	 * We could add a second check for a QFS Unix capability bit
	 */
	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_POSIX_EXTENSIONS & le64_to_cpu(tcon->fsUnixInfo.Capability)))
		rc = CIFSSMBQFSPosixInfo(xid, tcon, buf);

	/*
	 * Only need to call the old QFSInfo if failed on newer one,
	 * e.g. by OS/2.
	 */
	if (rc && (tcon->ses->capabilities & CAP_NT_SMBS))
		rc = CIFSSMBQFSInfo(xid, tcon, buf);

	/*
	 * Some old Windows servers also do not support level 103, retry with
	 * older level one if old server failed the previous call or we
	 * bypassed it because we detected that this was an older LANMAN sess
	 */
	if (rc)
		rc = SMBOldQFSInfo(xid, tcon, buf);

	FreeXid(xid);
	return 0;
}
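
/*
 * Note on the function above: it walks down three query levels (POSIX QFS
 * info, then the NT-level QFSInfo, then the legacy LANMAN/OS2-era level)
 * and deliberately returns 0 even if all of them fail, so that statfs/df
 * does not error out against older servers.
 */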

static int cifs_permission(struct inode *inode, int mask)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(inode->i_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
		if ((mask & MAY_EXEC) && !execute_ok(inode))
			return -EACCES;
		else
			return 0;
	} else /* file mode might have been restricted at mount time
		  on the client (above and beyond ACL on servers) for
		  servers which do not support setting and viewing mode bits,
		  so allowing client to check permissions is useful */
		return generic_permission(inode, mask, NULL);
}

static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
struct kmem_cache *cifs_oplock_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;

static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
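	/* 0x20 corresponds to the DOS/SMB ATTR_ARCHIVE bit; it is only a
	   placeholder until real attributes are fetched from the server */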
	atomic_set(&cifs_inode->inUse, 0);
	cifs_inode->time = 0;
	cifs_inode->write_behind_rc = 0;
	/* Until the file is open and we have gotten oplock
	   info back from the server, can not assume caching of
	   file data or metadata */
	cifs_inode->clientCanCacheRead = false;
	cifs_inode->clientCanCacheAll = false;
	cifs_inode->delete_pending = false;
	cifs_inode->vfs_inode.i_blkbits = 14;	/* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->server_eof = 0;

	/* Can not set i_flags here - they get immediately overwritten
	   to zero by the VFS */
/*	cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;*/
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	return &cifs_inode->vfs_inode;
}

static void
cifs_destroy_inode(struct inode *inode)
{
	kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
}

/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
static int
cifs_show_options(struct seq_file *s, struct vfsmount *m)
{
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	struct TCP_Server_Info *server;

	cifs_sb = CIFS_SB(m->mnt_sb);
	tcon = cifs_sb->tcon;

	seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
	if (tcon->ses->userName)
		seq_printf(s, ",username=%s", tcon->ses->userName);
	if (tcon->ses->domainName)
		seq_printf(s, ",domain=%s", tcon->ses->domainName);

	cifs_show_address(s, tcon->ses->server);

	seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
	seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);

	server = tcon->ses->server;
	seq_printf(s, ",addr=");
	switch (server->addr.sockAddr6.sin6_family) {
	case AF_INET6:
		seq_printf(s, "%pI6", &server->addr.sockAddr6.sin6_addr);
		break;
	case AF_INET:
		seq_printf(s, "%pI4", &server->addr.sockAddr.sin_addr.s_addr);
		break;
	}

	if (!tcon->unix_ext)
		seq_printf(s, ",file_mode=0%o,dir_mode=0%o",
			   cifs_sb->mnt_file_mode,
			   cifs_sb->mnt_dir_mode);
	if (tcon->seal)
		seq_printf(s, ",seal");
	if (tcon->nocase)
		seq_printf(s, ",nocase");
	if (tcon->retry)
		seq_printf(s, ",hard");
	if (cifs_sb->prepath)
		seq_printf(s, ",prepath=%s", cifs_sb->prepath);
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
		seq_printf(s, ",posixpaths");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
		seq_printf(s, ",setuids");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
		seq_printf(s, ",serverino");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
		seq_printf(s, ",directio");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
		seq_printf(s, ",nouser_xattr");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_printf(s, ",mapchars");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
		seq_printf(s, ",sfu");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
		seq_printf(s, ",nobrl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
		seq_printf(s, ",cifsacl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
		seq_printf(s, ",dynperm");
	if (m->mnt_sb->s_flags & MS_POSIXACL)
		seq_printf(s, ",acl");

	seq_printf(s, ",rsize=%d", cifs_sb->rsize);
	seq_printf(s, ",wsize=%d", cifs_sb->wsize);

	return 0;
}
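
/*
 * For illustration only (not emitted verbatim by this file), the option
 * string appended to a /proc/mounts line ends up looking something like:
 *
 *	,unc=\\server\share,username=user1,uid=0,gid=0,addr=192.168.1.10,
 *	rsize=16384,wsize=57344
 */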

#ifdef CONFIG_CIFS_QUOTA
int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
		    struct fs_disk_quota *pdquota)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
	} else
		rc = -EIO;

	FreeXid(xid);
	return rc;
}

int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
		    struct fs_disk_quota *pdquota)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
	} else
		rc = -EIO;

	FreeXid(xid);
	return rc;
}

int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));
	} else
		rc = -EIO;

	FreeXid(xid);
	return rc;
}

int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("pqstats %p", qstats));
	} else
		rc = -EIO;

	FreeXid(xid);
	return rc;
}

static struct quotactl_ops cifs_quotactl_ops = {
	.set_xquota	= cifs_xquota_set,
	.get_xquota	= cifs_xquota_get,
	.set_xstate	= cifs_xstate_set,
	.get_xstate	= cifs_xstate_get,
};
#endif

static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb->tcon;
	if (tcon == NULL)
		return;

	lock_kernel();
	read_lock(&cifs_tcp_ses_lock);
	if (tcon->tc_count == 1)
		tcon->tidStatus = CifsExiting;
	read_unlock(&cifs_tcp_ses_lock);

	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cFYI(1, ("wake up tasks now - umount begin not complete"));
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}
	/* BB FIXME - finish add checks for tidStatus BB */

	unlock_kernel();
	return;
}

#ifdef CONFIG_CIFS_STATS2
static int cifs_show_stats(struct seq_file *s, struct vfsmount *mnt)
{
	/* BB FIXME */
	return 0;
}
#endif

static int cifs_remount(struct super_block *sb, int *flags, char *data)
{
	*flags |= MS_NODIRATIME;
	return 0;
}

static const struct super_operations cifs_super_ops = {
	.put_super	= cifs_put_super,
	.statfs		= cifs_statfs,
	.alloc_inode	= cifs_alloc_inode,
	.destroy_inode	= cifs_destroy_inode,
/*	.drop_inode	= generic_delete_inode,
	.delete_inode	= cifs_delete_inode, */  /* Do not need above two
	functions unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options	= cifs_show_options,
	.umount_begin	= cifs_umount_begin,
	.remount_fs	= cifs_remount,
#ifdef CONFIG_CIFS_STATS2
	.show_stats	= cifs_show_stats,
#endif
};

static int
cifs_get_sb(struct file_system_type *fs_type,
	    int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	int rc;
	struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);

	cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));

	if (IS_ERR(sb))
		return PTR_ERR(sb);

	sb->s_flags = flags;

	rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
	if (rc) {
		deactivate_locked_super(sb);
		return rc;
	}
	sb->s_flags |= MS_ACTIVE;
	simple_set_mnt(mnt, sb);
	return 0;
}
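
/*
 * Note: because sget() is passed a NULL test function above, it never
 * matches an existing superblock; every mount request therefore gets its
 * own fresh anonymous superblock rather than sharing one.
 */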

static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	ssize_t written;

	written = generic_file_aio_write(iocb, iov, nr_segs, pos);
	if (!CIFS_I(inode)->clientCanCacheAll)
		filemap_fdatawrite(inode->i_mapping);
	return written;
}

static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
{
	/* origin == SEEK_END => we must revalidate the cached file length */
	if (origin == SEEK_END) {
		int retval;

		/* some applications poll for the file length in this strange
		   way so we must seek to end on non-oplocked files by
		   setting the revalidate time to zero */
		CIFS_I(file->f_path.dentry->d_inode)->time = 0;

		retval = cifs_revalidate(file->f_path.dentry);
		if (retval < 0)
			return (loff_t)retval;
	}
	return generic_file_llseek_unlocked(file, offset, origin);
}

#ifdef CONFIG_CIFS_EXPERIMENTAL
static int cifs_setlease(struct file *file, long arg, struct file_lock **lease)
{
	/* note that this is called by vfs setlease with the BKL held
	   although I doubt that BKL is needed here in cifs */
	struct inode *inode = file->f_path.dentry->d_inode;

	if (!(S_ISREG(inode->i_mode)))
		return -EINVAL;

	/* check if file is oplocked */
	if (((arg == F_RDLCK) &&
	     (CIFS_I(inode)->clientCanCacheRead)) ||
	    ((arg == F_WRLCK) &&
	     (CIFS_I(inode)->clientCanCacheAll)))
		return generic_setlease(file, arg, lease);
	else if (CIFS_SB(inode->i_sb)->tcon->local_lease &&
		 !CIFS_I(inode)->clientCanCacheRead)
		/* If the server claims to support oplock on this
		   file, then we still need to check oplock even
		   if the local_lease mount option is set, but there
		   are servers which do not support oplock for which
		   this mount option may be useful if the user
		   knows that the file won't be changed on the server
		   by anyone else */
		return generic_setlease(file, arg, lease);
	else
		return -EAGAIN;
}
#endif

struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.get_sb = cifs_get_sb,
	.kill_sb = kill_anon_super,
	/*  .fs_flags */
};
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename,
	.permission = cifs_permission,
/*	revalidate:cifs_revalidate,   */
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod   = cifs_mknod,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};

const struct inode_operations cifs_file_inode_ops = {
/*	revalidate:cifs_revalidate, */
	.setattr = cifs_setattr,
	.getattr = cifs_getattr, /* do we need this anymore? */
	.rename = cifs_rename,
	.permission = cifs_permission,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};

const struct inode_operations cifs_symlink_inode_ops = {
	.readlink = generic_readlink,
	.follow_link = cifs_follow_link,
	.put_link = cifs_put_link,
	.permission = cifs_permission,
	/* BB add the following two eventually */
	/* revalidate: cifs_revalidate,
	   setattr:    cifs_notify_change, *//* BB do we need notify change */
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};

const struct file_operations cifs_file_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
	.unlocked_ioctl	= cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
	.setlease = cifs_setlease,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_file_direct_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.splice_read = generic_file_splice_read,
#ifdef CONFIG_CIFS_POSIX
	.unlocked_ioctl  = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.setlease = cifs_setlease,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};
const struct file_operations cifs_file_nobrl_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
	.unlocked_ioctl	= cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
	.setlease = cifs_setlease,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_file_direct_nobrl_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.splice_read = generic_file_splice_read,
#ifdef CONFIG_CIFS_POSIX
	.unlocked_ioctl  = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.setlease = cifs_setlease,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_dir_ops = {
	.readdir = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
	.unlocked_ioctl  = cifs_ioctl,
	.llseek = generic_file_llseek,
};

static void
cifs_init_once(void *inode)
{
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->vfs_inode);
	INIT_LIST_HEAD(&cifsi->lockList);
}

static int
cifs_init_inodecache(void)
{
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof(struct cifsInodeInfo),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD),
					      cifs_init_once);
	if (cifs_inode_cachep == NULL)
		return -ENOMEM;

	return 0;
}

static void
cifs_destroy_inodecache(void)
{
	kmem_cache_destroy(cifs_inode_cachep);
}

static int
cifs_init_request_bufs(void)
{
	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	   Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
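	/*
	 * The mask above rounds the requested size down to a 512-byte
	 * multiple; e.g. a requested CIFSMaxBufSize of 17000 (0x4268)
	 * becomes 17000 & 0x1FE00 = 16896.
	 */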
/*	cERROR(1,("CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize)); */
	cifs_req_cachep = kmem_cache_create("cifs_request",
					    CIFSMaxBufSize +
					    MAX_CIFS_HDR_SIZE, 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cERROR(1, ("cifs_min_rcv set to maximum (64)"));
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	   almost all handle based requests (but not write response, nor is it
	   sufficient for path based requests).  A smaller size would have
	   been more efficient (compacting multiple slab items on one 4k page)
	   for the case in which debug was on, but this larger size allows
	   more SMBs to use small buffer alloc and is still much more
	   efficient to alloc 1 per page off the slab compared to 17K (5page)
	   alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cFYI(1, ("cifs_min_small set to maximum (256)"));
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}

static int
cifs_init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof(struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
					       sizeof(struct oplock_q_entry), 0,
					       SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_oplock_cachep == NULL) {
		mempool_destroy(cifs_mid_poolp);
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	return 0;
}
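
/*
 * The mempools created above (large request, small request, and mid pools)
 * keep a minimum number of entries preallocated - e.g. at least 3 mid
 * entries - so that request bookkeeping can still make forward progress
 * when the slab allocator is under memory pressure.
 */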

static void
cifs_destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
	kmem_cache_destroy(cifs_oplock_cachep);
}

static int cifs_oplock_thread(void *dummyarg)
{
	struct oplock_q_entry *oplock_item;
	struct cifsTconInfo *pTcon;
	struct inode *inode;
	__u16 netfid;
	int rc, waitrc = 0;

	set_freezable();
	do {
		if (try_to_freeze())
			continue;

		spin_lock(&GlobalMid_Lock);
		if (list_empty(&GlobalOplock_Q)) {
			spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(39*HZ);
		} else {
			oplock_item = list_entry(GlobalOplock_Q.next,
						 struct oplock_q_entry, qhead);
			cFYI(1, ("found oplock item to write out"));
			pTcon = oplock_item->tcon;
			inode = oplock_item->pinode;
			netfid = oplock_item->netfid;
			spin_unlock(&GlobalMid_Lock);
			DeleteOplockQEntry(oplock_item);
			/* can not grab inode sem here since it would
			   deadlock when oplock received on delete
			   since vfs_unlink holds the i_mutex across
			   the call */
			/* mutex_lock(&inode->i_mutex);*/
			if (S_ISREG(inode->i_mode)) {
#ifdef CONFIG_CIFS_EXPERIMENTAL
				if (CIFS_I(inode)->clientCanCacheAll == 0)
					break_lease(inode, FMODE_READ);
				else if (CIFS_I(inode)->clientCanCacheRead == 0)
					break_lease(inode, FMODE_WRITE);
#endif
				rc = filemap_fdatawrite(inode->i_mapping);
				if (CIFS_I(inode)->clientCanCacheRead == 0) {
					waitrc = filemap_fdatawait(
							inode->i_mapping);
					invalidate_remote_inode(inode);
				}
				if (rc == 0)
					rc = waitrc;
			} else
				rc = 0;
			/* mutex_unlock(&inode->i_mutex);*/
			if (rc)
				CIFS_I(inode)->write_behind_rc = rc;
			cFYI(1, ("Oplock flush inode %p rc %d",
				 inode, rc));

			/* releasing stale oplock after recent reconnect
			   of smb session using a now incorrect file
			   handle is not a data integrity issue but do
			   not bother sending an oplock release if session
			   to server still is disconnected since oplock
			   already released by the server in that case */
			if (!pTcon->need_reconnect) {
				rc = CIFSSMBLock(0, pTcon, netfid,
						 0 /* len */ , 0 /* offset */, 0,
						 0, LOCKING_ANDX_OPLOCK_RELEASE,
						 false /* wait flag */);
				cFYI(1, ("Oplock release rc = %d", rc));
			}
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(1);  /* yield in case q were corrupt */
		}
	} while (!kthread_should_stop());

	return 0;
}
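
/*
 * Summary of the loop above: when GlobalOplock_Q is empty the thread sleeps
 * for up to 39 seconds before scanning again; when entries are queued it
 * dequeues them one at a time, writes back (and, if read caching was lost,
 * waits for and invalidates) the cached pages, then releases the oplock to
 * the server via CIFSSMBLock with LOCKING_ANDX_OPLOCK_RELEASE unless the
 * session needs to be reconnected anyway.
 */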

static int __init
init_cifs(void)
{
	int rc = 0;
	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
	INIT_LIST_HEAD(&GlobalOplock_Q);
#ifdef CONFIG_CIFS_EXPERIMENTAL
	INIT_LIST_HEAD(&GlobalDnotifyReqList);
	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
#endif
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&bufAllocCount, 0);
	atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&totBufAllocCount, 0);
	atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	memset(Local_System_Name, 0, 15);
	rwlock_init(&GlobalSMBSeslock);
	rwlock_init(&cifs_tcp_ses_lock);
	spin_lock_init(&GlobalMid_Lock);

	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cFYI(1, ("cifs_max_pending set to min of 2"));
	} else if (cifs_max_pending > 256) {
		cifs_max_pending = 256;
		cFYI(1, ("cifs_max_pending set to max of 256"));
	}

	rc = cifs_init_inodecache();
	if (rc)
		goto out_clean_proc;

	rc = cifs_init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_destroy_request_bufs;
#ifdef CONFIG_CIFS_UPCALL
	rc = register_key_type(&cifs_spnego_key_type);
	if (rc)
		goto out_unregister_filesystem;
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	rc = register_key_type(&key_type_dns_resolver);
	if (rc)
		goto out_unregister_key_type;
#endif
	oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
	if (IS_ERR(oplockThread)) {
		rc = PTR_ERR(oplockThread);
		cERROR(1, ("error %d create oplock thread", rc));
		goto out_unregister_dfs_key_type;
	}

	return 0;

out_unregister_dfs_key_type:
#ifdef CONFIG_CIFS_DFS_UPCALL
	unregister_key_type(&key_type_dns_resolver);
out_unregister_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	unregister_key_type(&cifs_spnego_key_type);
out_unregister_filesystem:
#endif
	unregister_filesystem(&cifs_fs_type);
out_destroy_request_bufs:
	cifs_destroy_request_bufs();
out_destroy_mids:
	cifs_destroy_mids();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_clean_proc:
	cifs_proc_clean();
	return rc;
}

static void __exit
exit_cifs(void)
{
	cFYI(DBG2, ("exit_cifs"));
	cifs_proc_clean();
#ifdef CONFIG_CIFS_DFS_UPCALL
	cifs_dfs_release_automount_timer();
	unregister_key_type(&key_type_dns_resolver);
#endif
#ifdef CONFIG_CIFS_UPCALL
	unregister_key_type(&cifs_spnego_key_type);
#endif
	unregister_filesystem(&cifs_fs_type);
	cifs_destroy_inodecache();
	cifs_destroy_mids();
	cifs_destroy_request_bufs();
	kthread_stop(oplockThread);
}

MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
    ("VFS to access servers complying with the SNIA CIFS Specification "
     "e.g. Samba and Windows");
MODULE_VERSION(CIFS_VERSION);
module_init(init_cifs)
module_exit(exit_cifs)