1 /*
2 * fs/cifs/cifsfs.c
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * Common Internet FileSystem (CIFS) client
8 *
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24 /* Note that BB means BUGBUG (ie something to fix eventually) */
25
26 #include <linux/module.h>
27 #include <linux/fs.h>
28 #include <linux/mount.h>
29 #include <linux/slab.h>
30 #include <linux/init.h>
31 #include <linux/list.h>
32 #include <linux/seq_file.h>
33 #include <linux/vfs.h>
34 #include <linux/mempool.h>
35 #include <linux/delay.h>
36 #include <linux/kthread.h>
37 #include <linux/freezer.h>
38 #include <linux/namei.h>
39 #include <linux/random.h>
40 #include <linux/xattr.h>
41 #include <net/ipv6.h>
42 #include "cifsfs.h"
43 #include "cifspdu.h"
44 #define DECLARE_GLOBALS_HERE
45 #include "cifsglob.h"
46 #include "cifsproto.h"
47 #include "cifs_debug.h"
48 #include "cifs_fs_sb.h"
49 #include <linux/mm.h>
50 #include <linux/key-type.h>
51 #include "cifs_spnego.h"
52 #include "fscache.h"
53 #ifdef CONFIG_CIFS_SMB2
54 #include "smb2pdu.h"
55 #endif
56
57 int cifsFYI = 0;
58 bool traceSMB;
59 bool enable_oplocks = true;
60 bool linuxExtEnabled = true;
61 bool lookupCacheEnabled = true;
62 unsigned int global_secflags = CIFSSEC_DEF;
63 /* unsigned int ntlmv2_support = 0; */
64 unsigned int sign_CIFS_PDUs = 1;
65 static const struct super_operations cifs_super_ops;
66 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
67 module_param(CIFSMaxBufSize, uint, 0);
68 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
69 "Default: 16384 Range: 8192 to 130048");
70 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
71 module_param(cifs_min_rcv, uint, 0);
72 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
73 "1 to 64");
74 unsigned int cifs_min_small = 30;
75 module_param(cifs_min_small, uint, 0);
76 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
77 "Range: 2 to 256");
78 unsigned int cifs_max_pending = CIFS_MAX_REQ;
79 module_param(cifs_max_pending, uint, 0444);
80 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
81 "Default: 32767 Range: 2 to 32767.");
82 module_param(enable_oplocks, bool, 0644);
83 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
84
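/*
 * Usage sketch (illustrative, not part of the original file): the module
 * parameters above are given at load time, and the ones registered with a
 * non-zero permission also appear under /sys/module/cifs/parameters/, e.g.
 *
 *	modprobe cifs CIFSMaxBufSize=130048 cifs_max_pending=256
 *	echo 0 > /sys/module/cifs/parameters/enable_oplocks
 *
 * CIFSMaxBufSize and the pool minimums are clamped to the documented ranges
 * in cifs_init_request_bufs() below; cifs_max_pending is clamped in
 * init_cifs().
 */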
85 extern mempool_t *cifs_sm_req_poolp;
86 extern mempool_t *cifs_req_poolp;
87 extern mempool_t *cifs_mid_poolp;
88
89 struct workqueue_struct *cifsiod_wq;
90 __u32 cifs_lock_secret;
91
92 /*
93 * Bumps refcount for cifs super block.
94 * Note that it should only be called if a reference to the VFS super block is
95 * already held, e.g. in open-type syscalls context. Otherwise it can race with
96 * atomic_dec_and_test in deactivate_locked_super.
97 */
98 void
99 cifs_sb_active(struct super_block *sb)
100 {
101 struct cifs_sb_info *server = CIFS_SB(sb);
102
103 if (atomic_inc_return(&server->active) == 1)
104 atomic_inc(&sb->s_active);
105 }
106
107 void
108 cifs_sb_deactive(struct super_block *sb)
109 {
110 struct cifs_sb_info *server = CIFS_SB(sb);
111
112 if (atomic_dec_and_test(&server->active))
113 deactivate_super(sb);
114 }
115
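/*
 * Pairing sketch (illustrative): callers hold a superblock reference for as
 * long as deferred work may touch the mount, roughly
 *
 *	cifs_sb_active(inode->i_sb);
 *	...do work that may outlive the file descriptor...
 *	cifs_sb_deactive(inode->i_sb);
 *
 * The first active reference also pins the VFS super block via sb->s_active,
 * and the last cifs_sb_deactive() drops it through deactivate_super().
 */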
116 static int
117 cifs_read_super(struct super_block *sb)
118 {
119 struct inode *inode;
120 struct cifs_sb_info *cifs_sb;
121 struct cifs_tcon *tcon;
122 int rc = 0;
123
124 cifs_sb = CIFS_SB(sb);
125 tcon = cifs_sb_master_tcon(cifs_sb);
126
127 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
128 sb->s_flags |= MS_POSIXACL;
129
130 if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
131 sb->s_maxbytes = MAX_LFS_FILESIZE;
132 else
133 sb->s_maxbytes = MAX_NON_LFS;
134
135 /* BB FIXME fix time_gran to be larger for LANMAN sessions */
136 sb->s_time_gran = 100;
137
138 sb->s_magic = CIFS_MAGIC_NUMBER;
139 sb->s_op = &cifs_super_ops;
140 sb->s_xattr = cifs_xattr_handlers;
141 sb->s_bdi = &cifs_sb->bdi;
142 sb->s_blocksize = CIFS_MAX_MSGSIZE;
143 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
144 inode = cifs_root_iget(sb);
145
146 if (IS_ERR(inode)) {
147 rc = PTR_ERR(inode);
148 goto out_no_root;
149 }
150
151 if (tcon->nocase)
152 sb->s_d_op = &cifs_ci_dentry_ops;
153 else
154 sb->s_d_op = &cifs_dentry_ops;
155
156 sb->s_root = d_make_root(inode);
157 if (!sb->s_root) {
158 rc = -ENOMEM;
159 goto out_no_root;
160 }
161
162 #ifdef CONFIG_CIFS_NFSD_EXPORT
163 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
164 cifs_dbg(FYI, "export ops supported\n");
165 sb->s_export_op = &cifs_export_ops;
166 }
167 #endif /* CONFIG_CIFS_NFSD_EXPORT */
168
169 return 0;
170
171 out_no_root:
172 cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
173 return rc;
174 }
175
176 static void cifs_kill_sb(struct super_block *sb)
177 {
178 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
179 kill_anon_super(sb);
180 cifs_umount(cifs_sb);
181 }
182
183 static int
184 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
185 {
186 struct super_block *sb = dentry->d_sb;
187 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
188 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
189 struct TCP_Server_Info *server = tcon->ses->server;
190 unsigned int xid;
191 int rc = 0;
192
193 xid = get_xid();
194
195 /*
196 * PATH_MAX may be too long - it would presumably be total path,
197 * but note that some servers (including Samba 3) have a shorter
198 * maximum path.
199 *
200 * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO.
201 */
202 buf->f_namelen = PATH_MAX;
203 buf->f_files = 0; /* undefined */
204 buf->f_ffree = 0; /* unlimited */
205
206 if (server->ops->queryfs)
207 rc = server->ops->queryfs(xid, tcon, buf);
208
209 free_xid(xid);
210 return 0;
211 }
212
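/*
 * Note (descriptive): the fixed fields above mean a statfs()/statvfs() call
 * on a cifs mount always reports f_namelen == PATH_MAX and zero
 * f_files/f_ffree; the size and free-space fields are whatever the
 * server-specific queryfs callback filled in, and the function reports
 * success even if that callback failed, so callers may see stale or zeroed
 * sizes.
 */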
213 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
214 {
215 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
216 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
217 struct TCP_Server_Info *server = tcon->ses->server;
218
219 if (server->ops->fallocate)
220 return server->ops->fallocate(file, tcon, mode, off, len);
221
222 return -EOPNOTSUPP;
223 }
224
225 static int cifs_permission(struct inode *inode, int mask)
226 {
227 struct cifs_sb_info *cifs_sb;
228
229 cifs_sb = CIFS_SB(inode->i_sb);
230
231 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
232 if ((mask & MAY_EXEC) && !execute_ok(inode))
233 return -EACCES;
234 else
235 return 0;
236 } else /* file mode might have been restricted at mount time
237 on the client (above and beyond the ACL on the server) for
238 servers which do not support setting and viewing mode bits,
239 so allowing the client to check permissions is useful */
240 return generic_permission(inode, mask);
241 }
242
243 static struct kmem_cache *cifs_inode_cachep;
244 static struct kmem_cache *cifs_req_cachep;
245 static struct kmem_cache *cifs_mid_cachep;
246 static struct kmem_cache *cifs_sm_req_cachep;
247 mempool_t *cifs_sm_req_poolp;
248 mempool_t *cifs_req_poolp;
249 mempool_t *cifs_mid_poolp;
250
251 static struct inode *
252 cifs_alloc_inode(struct super_block *sb)
253 {
254 struct cifsInodeInfo *cifs_inode;
255 cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
256 if (!cifs_inode)
257 return NULL;
258 cifs_inode->cifsAttrs = 0x20; /* default */
259 cifs_inode->time = 0;
260 /*
261 * Until the file is open and we have gotten oplock info back from the
262 * server, we cannot assume caching of file data or metadata.
263 */
264 cifs_set_oplock_level(cifs_inode, 0);
265 cifs_inode->flags = 0;
266 spin_lock_init(&cifs_inode->writers_lock);
267 cifs_inode->writers = 0;
268 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
269 cifs_inode->server_eof = 0;
270 cifs_inode->uniqueid = 0;
271 cifs_inode->createtime = 0;
272 cifs_inode->epoch = 0;
273 #ifdef CONFIG_CIFS_SMB2
274 get_random_bytes(cifs_inode->lease_key, SMB2_LEASE_KEY_SIZE);
275 #endif
276 /*
277 * Cannot set i_flags here - they get immediately overwritten to zero
278 * by the VFS.
279 */
280 /* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
281 INIT_LIST_HEAD(&cifs_inode->openFileList);
282 INIT_LIST_HEAD(&cifs_inode->llist);
283 return &cifs_inode->vfs_inode;
284 }
285
286 static void cifs_i_callback(struct rcu_head *head)
287 {
288 struct inode *inode = container_of(head, struct inode, i_rcu);
289 kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
290 }
291
292 static void
293 cifs_destroy_inode(struct inode *inode)
294 {
295 call_rcu(&inode->i_rcu, cifs_i_callback);
296 }
297
298 static void
299 cifs_evict_inode(struct inode *inode)
300 {
301 truncate_inode_pages_final(&inode->i_data);
302 clear_inode(inode);
303 cifs_fscache_release_inode_cookie(inode);
304 }
305
306 static void
307 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
308 {
309 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
310 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
311
312 seq_puts(s, ",addr=");
313
314 switch (server->dstaddr.ss_family) {
315 case AF_INET:
316 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
317 break;
318 case AF_INET6:
319 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
320 if (sa6->sin6_scope_id)
321 seq_printf(s, "%%%u", sa6->sin6_scope_id);
322 break;
323 default:
324 seq_puts(s, "(unknown)");
325 }
326 }
327
328 static void
329 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
330 {
331 if (ses->sectype == Unspecified) {
332 if (ses->user_name == NULL)
333 seq_puts(s, ",sec=none");
334 return;
335 }
336
337 seq_puts(s, ",sec=");
338
339 switch (ses->sectype) {
340 case LANMAN:
341 seq_puts(s, "lanman");
342 break;
343 case NTLMv2:
344 seq_puts(s, "ntlmv2");
345 break;
346 case NTLM:
347 seq_puts(s, "ntlm");
348 break;
349 case Kerberos:
350 seq_puts(s, "krb5");
351 break;
352 case RawNTLMSSP:
353 seq_puts(s, "ntlmssp");
354 break;
355 default:
356 /* shouldn't ever happen */
357 seq_puts(s, "unknown");
358 break;
359 }
360
361 if (ses->sign)
362 seq_puts(s, "i");
363 }
364
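/*
 * Example (illustrative): a signed NTLMSSP session therefore shows up in the
 * mount options as "sec=ntlmsspi", the trailing "i" denoting signing.
 */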
365 static void
366 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
367 {
368 seq_puts(s, ",cache=");
369
370 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
371 seq_puts(s, "strict");
372 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
373 seq_puts(s, "none");
374 else
375 seq_puts(s, "loose");
376 }
377
378 static void
379 cifs_show_nls(struct seq_file *s, struct nls_table *cur)
380 {
381 struct nls_table *def;
382
383 /* Display iocharset= option if it's not default charset */
384 def = load_nls_default();
385 if (def != cur)
386 seq_printf(s, ",iocharset=%s", cur->charset);
387 unload_nls(def);
388 }
389
390 /*
391 * cifs_show_options() is for displaying mount options in /proc/mounts.
392 * Not all settable options are displayed but most of the important
393 * ones are.
394 */
395 static int
396 cifs_show_options(struct seq_file *s, struct dentry *root)
397 {
398 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
399 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
400 struct sockaddr *srcaddr;
401 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
402
403 seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
404 cifs_show_security(s, tcon->ses);
405 cifs_show_cache_flavor(s, cifs_sb);
406
407 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
408 seq_puts(s, ",multiuser");
409 else if (tcon->ses->user_name)
410 seq_show_option(s, "username", tcon->ses->user_name);
411
412 if (tcon->ses->domainName)
413 seq_show_option(s, "domain", tcon->ses->domainName);
414
415 if (srcaddr->sa_family != AF_UNSPEC) {
416 struct sockaddr_in *saddr4;
417 struct sockaddr_in6 *saddr6;
418 saddr4 = (struct sockaddr_in *)srcaddr;
419 saddr6 = (struct sockaddr_in6 *)srcaddr;
420 if (srcaddr->sa_family == AF_INET6)
421 seq_printf(s, ",srcaddr=%pI6c",
422 &saddr6->sin6_addr);
423 else if (srcaddr->sa_family == AF_INET)
424 seq_printf(s, ",srcaddr=%pI4",
425 &saddr4->sin_addr.s_addr);
426 else
427 seq_printf(s, ",srcaddr=BAD-AF:%i",
428 (int)(srcaddr->sa_family));
429 }
430
431 seq_printf(s, ",uid=%u",
432 from_kuid_munged(&init_user_ns, cifs_sb->mnt_uid));
433 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
434 seq_puts(s, ",forceuid");
435 else
436 seq_puts(s, ",noforceuid");
437
438 seq_printf(s, ",gid=%u",
439 from_kgid_munged(&init_user_ns, cifs_sb->mnt_gid));
440 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
441 seq_puts(s, ",forcegid");
442 else
443 seq_puts(s, ",noforcegid");
444
445 cifs_show_address(s, tcon->ses->server);
446
447 if (!tcon->unix_ext)
448 seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
449 cifs_sb->mnt_file_mode,
450 cifs_sb->mnt_dir_mode);
451
452 cifs_show_nls(s, cifs_sb->local_nls);
453
454 if (tcon->seal)
455 seq_puts(s, ",seal");
456 if (tcon->nocase)
457 seq_puts(s, ",nocase");
458 if (tcon->retry)
459 seq_puts(s, ",hard");
460 if (tcon->use_persistent)
461 seq_puts(s, ",persistenthandles");
462 else if (tcon->use_resilient)
463 seq_puts(s, ",resilienthandles");
464 if (tcon->unix_ext)
465 seq_puts(s, ",unix");
466 else
467 seq_puts(s, ",nounix");
468 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
469 seq_puts(s, ",posixpaths");
470 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
471 seq_puts(s, ",setuids");
472 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
473 seq_puts(s, ",serverino");
474 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
475 seq_puts(s, ",rwpidforward");
476 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
477 seq_puts(s, ",forcemand");
478 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
479 seq_puts(s, ",nouser_xattr");
480 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
481 seq_puts(s, ",mapchars");
482 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
483 seq_puts(s, ",mapposix");
484 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
485 seq_puts(s, ",sfu");
486 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
487 seq_puts(s, ",nobrl");
488 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
489 seq_puts(s, ",cifsacl");
490 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
491 seq_puts(s, ",dynperm");
492 if (root->d_sb->s_flags & MS_POSIXACL)
493 seq_puts(s, ",acl");
494 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
495 seq_puts(s, ",mfsymlinks");
496 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
497 seq_puts(s, ",fsc");
498 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
499 seq_puts(s, ",nostrictsync");
500 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
501 seq_puts(s, ",noperm");
502 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
503 seq_printf(s, ",backupuid=%u",
504 from_kuid_munged(&init_user_ns,
505 cifs_sb->mnt_backupuid));
506 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
507 seq_printf(s, ",backupgid=%u",
508 from_kgid_munged(&init_user_ns,
509 cifs_sb->mnt_backupgid));
510
511 seq_printf(s, ",rsize=%u", cifs_sb->rsize);
512 seq_printf(s, ",wsize=%u", cifs_sb->wsize);
513 seq_printf(s, ",echo_interval=%lu",
514 tcon->ses->server->echo_interval / HZ);
515 /* convert actimeo and display it in seconds */
516 seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
517
518 return 0;
519 }
520
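/*
 * Sample output (illustrative values only): together with the fields added
 * by the VFS, a /proc/mounts entry produced by this function looks roughly
 * like
 *
 *	//server/share /mnt cifs rw,vers=3.0,sec=ntlmssp,cache=strict,
 *	username=guest,uid=0,noforceuid,gid=0,noforcegid,addr=192.0.2.1,
 *	file_mode=0755,dir_mode=0755,serverino,rsize=1048576,wsize=1048576,
 *	echo_interval=60,actimeo=1 0 0
 *
 * (shown wrapped here; the real entry is a single line).
 */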
521 static void cifs_umount_begin(struct super_block *sb)
522 {
523 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
524 struct cifs_tcon *tcon;
525
526 if (cifs_sb == NULL)
527 return;
528
529 tcon = cifs_sb_master_tcon(cifs_sb);
530
531 spin_lock(&cifs_tcp_ses_lock);
532 if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
533 /* we have other mounts to the same share, or we have
534 already tried to force-umount this one and woken up
535 all waiting network requests; nothing to do */
536 spin_unlock(&cifs_tcp_ses_lock);
537 return;
538 } else if (tcon->tc_count == 1)
539 tcon->tidStatus = CifsExiting;
540 spin_unlock(&cifs_tcp_ses_lock);
541
542 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
543 /* cancel_notify_requests(tcon); */
544 if (tcon->ses && tcon->ses->server) {
545 cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
546 wake_up_all(&tcon->ses->server->request_q);
547 wake_up_all(&tcon->ses->server->response_q);
548 msleep(1); /* yield */
549 /* we have to kick the requests once more */
550 wake_up_all(&tcon->ses->server->response_q);
551 msleep(1);
552 }
553
554 return;
555 }
556
557 #ifdef CONFIG_CIFS_STATS2
558 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
559 {
560 /* BB FIXME */
561 return 0;
562 }
563 #endif
564
565 static int cifs_remount(struct super_block *sb, int *flags, char *data)
566 {
567 sync_filesystem(sb);
568 *flags |= MS_NODIRATIME;
569 return 0;
570 }
571
572 static int cifs_drop_inode(struct inode *inode)
573 {
574 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
575
576 /* no serverino => unconditional eviction */
577 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
578 generic_drop_inode(inode);
579 }
580
581 static const struct super_operations cifs_super_ops = {
582 .statfs = cifs_statfs,
583 .alloc_inode = cifs_alloc_inode,
584 .destroy_inode = cifs_destroy_inode,
585 .drop_inode = cifs_drop_inode,
586 .evict_inode = cifs_evict_inode,
587 /* .delete_inode = cifs_delete_inode, */ /* Do not need above
588 function unless later we add lazy close of inodes or unless the
589 kernel forgets to call us with the same number of releases (closes)
590 as opens */
591 .show_options = cifs_show_options,
592 .umount_begin = cifs_umount_begin,
593 .remount_fs = cifs_remount,
594 #ifdef CONFIG_CIFS_STATS2
595 .show_stats = cifs_show_stats,
596 #endif
597 };
598
599 /*
600 * Get root dentry from superblock according to prefix path mount option.
601 * Return dentry with refcount + 1 on success and NULL otherwise.
602 */
603 static struct dentry *
604 cifs_get_root(struct smb_vol *vol, struct super_block *sb)
605 {
606 struct dentry *dentry;
607 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
608 char *full_path = NULL;
609 char *s, *p;
610 char sep;
611
612 full_path = cifs_build_path_to_root(vol, cifs_sb,
613 cifs_sb_master_tcon(cifs_sb));
614 if (full_path == NULL)
615 return ERR_PTR(-ENOMEM);
616
617 cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
618
619 sep = CIFS_DIR_SEP(cifs_sb);
620 dentry = dget(sb->s_root);
621 p = s = full_path;
622
623 do {
624 struct inode *dir = d_inode(dentry);
625 struct dentry *child;
626
627 if (!dir) {
628 dput(dentry);
629 dentry = ERR_PTR(-ENOENT);
630 break;
631 }
632 if (!S_ISDIR(dir->i_mode)) {
633 dput(dentry);
634 dentry = ERR_PTR(-ENOTDIR);
635 break;
636 }
637
638 /* skip separators */
639 while (*s == sep)
640 s++;
641 if (!*s)
642 break;
643 p = s++;
644 /* next separator */
645 while (*s && *s != sep)
646 s++;
647
648 child = lookup_one_len_unlocked(p, dentry, s - p);
649 dput(dentry);
650 dentry = child;
651 } while (!IS_ERR(dentry));
652 kfree(full_path);
653 return dentry;
654 }
655
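/*
 * Walk-through (illustrative): for a mount of //server/share/a/b the prefix
 * path is split on CIFS_DIR_SEP(cifs_sb) and resolved one component at a
 * time starting from sb->s_root, roughly
 *
 *	dentry = dget(sb->s_root);
 *	dentry = lookup_one_len_unlocked("a", dentry, 1);   (parent dput'd)
 *	dentry = lookup_one_len_unlocked("b", dentry, 1);   (parent dput'd)
 *
 * with each parent dentry released via dput() as in the loop above, so the
 * caller ends up holding exactly one reference on the returned dentry.
 */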
656 static int cifs_set_super(struct super_block *sb, void *data)
657 {
658 struct cifs_mnt_data *mnt_data = data;
659 sb->s_fs_info = mnt_data->cifs_sb;
660 return set_anon_super(sb, NULL);
661 }
662
663 static struct dentry *
664 cifs_do_mount(struct file_system_type *fs_type,
665 int flags, const char *dev_name, void *data)
666 {
667 int rc;
668 struct super_block *sb;
669 struct cifs_sb_info *cifs_sb;
670 struct smb_vol *volume_info;
671 struct cifs_mnt_data mnt_data;
672 struct dentry *root;
673
674 cifs_dbg(FYI, "Devname: %s flags: %d\n", dev_name, flags);
675
676 volume_info = cifs_get_volume_info((char *)data, dev_name);
677 if (IS_ERR(volume_info))
678 return ERR_CAST(volume_info);
679
680 cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
681 if (cifs_sb == NULL) {
682 root = ERR_PTR(-ENOMEM);
683 goto out_nls;
684 }
685
686 cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
687 if (cifs_sb->mountdata == NULL) {
688 root = ERR_PTR(-ENOMEM);
689 goto out_cifs_sb;
690 }
691
692 cifs_setup_cifs_sb(volume_info, cifs_sb);
693
694 rc = cifs_mount(cifs_sb, volume_info);
695 if (rc) {
696 if (!(flags & MS_SILENT))
697 cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
698 rc);
699 root = ERR_PTR(rc);
700 goto out_mountdata;
701 }
702
703 mnt_data.vol = volume_info;
704 mnt_data.cifs_sb = cifs_sb;
705 mnt_data.flags = flags;
706
707 /* BB should we make this contingent on mount parm? */
708 flags |= MS_NODIRATIME | MS_NOATIME;
709
710 sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
711 if (IS_ERR(sb)) {
712 root = ERR_CAST(sb);
713 cifs_umount(cifs_sb);
714 goto out;
715 }
716
717 if (sb->s_root) {
718 cifs_dbg(FYI, "Use existing superblock\n");
719 cifs_umount(cifs_sb);
720 } else {
721 rc = cifs_read_super(sb);
722 if (rc) {
723 root = ERR_PTR(rc);
724 goto out_super;
725 }
726
727 sb->s_flags |= MS_ACTIVE;
728 }
729
730 root = cifs_get_root(volume_info, sb);
731 if (IS_ERR(root))
732 goto out_super;
733
734 cifs_dbg(FYI, "dentry root is: %p\n", root);
735 goto out;
736
737 out_super:
738 deactivate_locked_super(sb);
739 out:
740 cifs_cleanup_volume_info(volume_info);
741 return root;
742
743 out_mountdata:
744 kfree(cifs_sb->mountdata);
745 out_cifs_sb:
746 kfree(cifs_sb);
747 out_nls:
748 unload_nls(volume_info->local_nls);
749 goto out;
750 }
751
752 static ssize_t
753 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
754 {
755 ssize_t rc;
756 struct inode *inode = file_inode(iocb->ki_filp);
757
758 if (iocb->ki_filp->f_flags & O_DIRECT)
759 return cifs_user_readv(iocb, iter);
760
761 rc = cifs_revalidate_mapping(inode);
762 if (rc)
763 return rc;
764
765 return generic_file_read_iter(iocb, iter);
766 }
767
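/*
 * Note (descriptive): this "loose" read path serves cache=loose mounts and
 * goes through the page cache after a best-effort mapping revalidation
 * (or falls back to cifs_user_readv() for O_DIRECT files); cache=strict
 * and cache=none mounts are wired to cifs_strict_readv() and
 * cifs_user_readv() via the file_operations tables further down.
 */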
768 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
769 {
770 struct inode *inode = file_inode(iocb->ki_filp);
771 struct cifsInodeInfo *cinode = CIFS_I(inode);
772 ssize_t written;
773 int rc;
774
775 if (iocb->ki_filp->f_flags & O_DIRECT) {
776 written = cifs_user_writev(iocb, from);
777 if (written > 0 && CIFS_CACHE_READ(cinode)) {
778 cifs_zap_mapping(inode);
779 cifs_dbg(FYI,
780 "Set no oplock for inode=%p after a write operation\n",
781 inode);
782 cinode->oplock = 0;
783 }
784 return written;
785 }
786
787 written = cifs_get_writer(cinode);
788 if (written)
789 return written;
790
791 written = generic_file_write_iter(iocb, from);
792
793 if (CIFS_CACHE_WRITE(CIFS_I(inode)))
794 goto out;
795
796 rc = filemap_fdatawrite(inode->i_mapping);
797 if (rc)
798 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
799 rc, inode);
800
801 out:
802 cifs_put_writer(cinode);
803 return written;
804 }
805
806 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
807 {
808 /*
809 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
810 * the cached file length
811 */
812 if (whence != SEEK_SET && whence != SEEK_CUR) {
813 int rc;
814 struct inode *inode = file_inode(file);
815
816 /*
817 * We need to be sure that all dirty pages are written and the
818 * server has the newest file length.
819 */
820 if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
821 inode->i_mapping->nrpages != 0) {
822 rc = filemap_fdatawait(inode->i_mapping);
823 if (rc) {
824 mapping_set_error(inode->i_mapping, rc);
825 return rc;
826 }
827 }
828 /*
829 * Some applications poll for the file length in this strange
830 * way so we must seek to end on non-oplocked files by
831 * setting the revalidate time to zero.
832 */
833 CIFS_I(inode)->time = 0;
834
835 rc = cifs_revalidate_file_attr(file);
836 if (rc < 0)
837 return (loff_t)rc;
838 }
839 return generic_file_llseek(file, offset, whence);
840 }
841
842 static int
843 cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
844 {
845 /*
846 * Note that this is called by vfs setlease with i_lock held to
847 * protect *lease from going away.
848 */
849 struct inode *inode = file_inode(file);
850 struct cifsFileInfo *cfile = file->private_data;
851
852 if (!(S_ISREG(inode->i_mode)))
853 return -EINVAL;
854
855 /* Check if file is oplocked if this is request for new lease */
856 if (arg == F_UNLCK ||
857 ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
858 ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
859 return generic_setlease(file, arg, lease, priv);
860 else if (tlink_tcon(cfile->tlink)->local_lease &&
861 !CIFS_CACHE_READ(CIFS_I(inode)))
862 /*
863 * If the server claims to support oplock on this file, then we
864 * still need to check oplock even if the local_lease mount
865 * option is set, but there are servers which do not support
866 * oplock for which this mount option may be useful if the user
867 * knows that the file won't be changed on the server by anyone
868 * else.
869 */
870 return generic_setlease(file, arg, lease, priv);
871 else
872 return -EAGAIN;
873 }
874
875 struct file_system_type cifs_fs_type = {
876 .owner = THIS_MODULE,
877 .name = "cifs",
878 .mount = cifs_do_mount,
879 .kill_sb = cifs_kill_sb,
880 /* .fs_flags */
881 };
882 MODULE_ALIAS_FS("cifs");
883 const struct inode_operations cifs_dir_inode_ops = {
884 .create = cifs_create,
885 .atomic_open = cifs_atomic_open,
886 .lookup = cifs_lookup,
887 .getattr = cifs_getattr,
888 .unlink = cifs_unlink,
889 .link = cifs_hardlink,
890 .mkdir = cifs_mkdir,
891 .rmdir = cifs_rmdir,
892 .rename2 = cifs_rename2,
893 .permission = cifs_permission,
894 .setattr = cifs_setattr,
895 .symlink = cifs_symlink,
896 .mknod = cifs_mknod,
897 .setxattr = generic_setxattr,
898 .getxattr = generic_getxattr,
899 .listxattr = cifs_listxattr,
900 .removexattr = generic_removexattr,
901 };
902
903 const struct inode_operations cifs_file_inode_ops = {
904 .setattr = cifs_setattr,
905 .getattr = cifs_getattr,
906 .permission = cifs_permission,
907 .setxattr = generic_setxattr,
908 .getxattr = generic_getxattr,
909 .listxattr = cifs_listxattr,
910 .removexattr = generic_removexattr,
911 };
912
913 const struct inode_operations cifs_symlink_inode_ops = {
914 .readlink = generic_readlink,
915 .get_link = cifs_get_link,
916 .permission = cifs_permission,
917 .setxattr = generic_setxattr,
918 .getxattr = generic_getxattr,
919 .listxattr = cifs_listxattr,
920 .removexattr = generic_removexattr,
921 };
922
923 static int cifs_clone_file_range(struct file *src_file, loff_t off,
924 struct file *dst_file, loff_t destoff, u64 len)
925 {
926 struct inode *src_inode = file_inode(src_file);
927 struct inode *target_inode = file_inode(dst_file);
928 struct cifsFileInfo *smb_file_src = src_file->private_data;
929 struct cifsFileInfo *smb_file_target = dst_file->private_data;
930 struct cifs_tcon *target_tcon = tlink_tcon(smb_file_target->tlink);
931 unsigned int xid;
932 int rc;
933
934 cifs_dbg(FYI, "clone range\n");
935
936 xid = get_xid();
937
938 if (!src_file->private_data || !dst_file->private_data) {
939 rc = -EBADF;
940 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
941 goto out;
942 }
943
944 /*
945 * Note: the cifs case is easier than btrfs since the server is responsible
946 * for checking proper open modes and file types, and if it wants the
947 * server could even support copying a range where source == target
948 */
949 lock_two_nondirectories(target_inode, src_inode);
950
951 if (len == 0)
952 len = src_inode->i_size - off;
953
954 cifs_dbg(FYI, "about to flush pages\n");
955 /* should we flush the first and last pages first? */
956 truncate_inode_pages_range(&target_inode->i_data, destoff,
957 PAGE_ALIGN(destoff + len)-1);
958
959 if (target_tcon->ses->server->ops->duplicate_extents)
960 rc = target_tcon->ses->server->ops->duplicate_extents(xid,
961 smb_file_src, smb_file_target, off, len, destoff);
962 else
963 rc = -EOPNOTSUPP;
964
965 /* force revalidate of size and timestamps of target file now
966 that target is updated on the server */
967 CIFS_I(target_inode)->time = 0;
968 /* although unlocking in the reverse order from locking is not
969 strictly necessary here it is a little cleaner to be consistent */
970 unlock_two_nondirectories(src_inode, target_inode);
971 out:
972 free_xid(xid);
973 return rc;
974 }
975
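/*
 * Note (descriptive): this helper backs the .clone_file_range entries in the
 * file_operations tables below (reached e.g. via the FICLONE/FICLONERANGE
 * ioctls) and only succeeds when the server operations table provides
 * duplicate_extents, i.e. against SMB3 servers supporting duplicate-extent
 * requests; otherwise it returns -EOPNOTSUPP.
 */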
976 const struct file_operations cifs_file_ops = {
977 .read_iter = cifs_loose_read_iter,
978 .write_iter = cifs_file_write_iter,
979 .open = cifs_open,
980 .release = cifs_close,
981 .lock = cifs_lock,
982 .fsync = cifs_fsync,
983 .flush = cifs_flush,
984 .mmap = cifs_file_mmap,
985 .splice_read = generic_file_splice_read,
986 .llseek = cifs_llseek,
987 .unlocked_ioctl = cifs_ioctl,
988 .clone_file_range = cifs_clone_file_range,
989 .setlease = cifs_setlease,
990 .fallocate = cifs_fallocate,
991 };
992
993 const struct file_operations cifs_file_strict_ops = {
994 .read_iter = cifs_strict_readv,
995 .write_iter = cifs_strict_writev,
996 .open = cifs_open,
997 .release = cifs_close,
998 .lock = cifs_lock,
999 .fsync = cifs_strict_fsync,
1000 .flush = cifs_flush,
1001 .mmap = cifs_file_strict_mmap,
1002 .splice_read = generic_file_splice_read,
1003 .llseek = cifs_llseek,
1004 .unlocked_ioctl = cifs_ioctl,
1005 .clone_file_range = cifs_clone_file_range,
1006 .setlease = cifs_setlease,
1007 .fallocate = cifs_fallocate,
1008 };
1009
1010 const struct file_operations cifs_file_direct_ops = {
1011 /* BB reevaluate whether they can be done with directio, no cache */
1012 .read_iter = cifs_user_readv,
1013 .write_iter = cifs_user_writev,
1014 .open = cifs_open,
1015 .release = cifs_close,
1016 .lock = cifs_lock,
1017 .fsync = cifs_fsync,
1018 .flush = cifs_flush,
1019 .mmap = cifs_file_mmap,
1020 .splice_read = generic_file_splice_read,
1021 .unlocked_ioctl = cifs_ioctl,
1022 .clone_file_range = cifs_clone_file_range,
1023 .llseek = cifs_llseek,
1024 .setlease = cifs_setlease,
1025 .fallocate = cifs_fallocate,
1026 };
1027
1028 const struct file_operations cifs_file_nobrl_ops = {
1029 .read_iter = cifs_loose_read_iter,
1030 .write_iter = cifs_file_write_iter,
1031 .open = cifs_open,
1032 .release = cifs_close,
1033 .fsync = cifs_fsync,
1034 .flush = cifs_flush,
1035 .mmap = cifs_file_mmap,
1036 .splice_read = generic_file_splice_read,
1037 .llseek = cifs_llseek,
1038 .unlocked_ioctl = cifs_ioctl,
1039 .clone_file_range = cifs_clone_file_range,
1040 .setlease = cifs_setlease,
1041 .fallocate = cifs_fallocate,
1042 };
1043
1044 const struct file_operations cifs_file_strict_nobrl_ops = {
1045 .read_iter = cifs_strict_readv,
1046 .write_iter = cifs_strict_writev,
1047 .open = cifs_open,
1048 .release = cifs_close,
1049 .fsync = cifs_strict_fsync,
1050 .flush = cifs_flush,
1051 .mmap = cifs_file_strict_mmap,
1052 .splice_read = generic_file_splice_read,
1053 .llseek = cifs_llseek,
1054 .unlocked_ioctl = cifs_ioctl,
1055 .clone_file_range = cifs_clone_file_range,
1056 .setlease = cifs_setlease,
1057 .fallocate = cifs_fallocate,
1058 };
1059
1060 const struct file_operations cifs_file_direct_nobrl_ops = {
1061 /* BB reevaluate whether they can be done with directio, no cache */
1062 .read_iter = cifs_user_readv,
1063 .write_iter = cifs_user_writev,
1064 .open = cifs_open,
1065 .release = cifs_close,
1066 .fsync = cifs_fsync,
1067 .flush = cifs_flush,
1068 .mmap = cifs_file_mmap,
1069 .splice_read = generic_file_splice_read,
1070 .unlocked_ioctl = cifs_ioctl,
1071 .clone_file_range = cifs_clone_file_range,
1072 .llseek = cifs_llseek,
1073 .setlease = cifs_setlease,
1074 .fallocate = cifs_fallocate,
1075 };
1076
1077 const struct file_operations cifs_dir_ops = {
1078 .iterate_shared = cifs_readdir,
1079 .release = cifs_closedir,
1080 .read = generic_read_dir,
1081 .unlocked_ioctl = cifs_ioctl,
1082 .clone_file_range = cifs_clone_file_range,
1083 .llseek = generic_file_llseek,
1084 };
1085
1086 static void
1087 cifs_init_once(void *inode)
1088 {
1089 struct cifsInodeInfo *cifsi = inode;
1090
1091 inode_init_once(&cifsi->vfs_inode);
1092 init_rwsem(&cifsi->lock_sem);
1093 }
1094
1095 static int __init
1096 cifs_init_inodecache(void)
1097 {
1098 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1099 sizeof(struct cifsInodeInfo),
1100 0, (SLAB_RECLAIM_ACCOUNT|
1101 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1102 cifs_init_once);
1103 if (cifs_inode_cachep == NULL)
1104 return -ENOMEM;
1105
1106 return 0;
1107 }
1108
1109 static void
1110 cifs_destroy_inodecache(void)
1111 {
1112 /*
1113 * Make sure all delayed rcu free inodes are flushed before we
1114 * destroy cache.
1115 */
1116 rcu_barrier();
1117 kmem_cache_destroy(cifs_inode_cachep);
1118 }
1119
1120 static int
1121 cifs_init_request_bufs(void)
1122 {
1123 size_t max_hdr_size = MAX_CIFS_HDR_SIZE;
1124 #ifdef CONFIG_CIFS_SMB2
1125 /*
1126 * SMB2 maximum header size is bigger than CIFS one - no problems to
1127 * allocate some more bytes for CIFS.
1128 */
1129 max_hdr_size = MAX_SMB2_HDR_SIZE;
1130 #endif
1131 if (CIFSMaxBufSize < 8192) {
1132 /* Buffer size cannot be smaller than 2 * PATH_MAX since the maximum
1133 Unicode path name has to fit in any SMB/CIFS path-based frame */
1134 CIFSMaxBufSize = 8192;
1135 } else if (CIFSMaxBufSize > 1024*127) {
1136 CIFSMaxBufSize = 1024 * 127;
1137 } else {
1138 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1139 }
1140 /*
1141 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1142 CIFSMaxBufSize, CIFSMaxBufSize);
1143 */
1144 cifs_req_cachep = kmem_cache_create("cifs_request",
1145 CIFSMaxBufSize + max_hdr_size, 0,
1146 SLAB_HWCACHE_ALIGN, NULL);
1147 if (cifs_req_cachep == NULL)
1148 return -ENOMEM;
1149
1150 if (cifs_min_rcv < 1)
1151 cifs_min_rcv = 1;
1152 else if (cifs_min_rcv > 64) {
1153 cifs_min_rcv = 64;
1154 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1155 }
1156
1157 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1158 cifs_req_cachep);
1159
1160 if (cifs_req_poolp == NULL) {
1161 kmem_cache_destroy(cifs_req_cachep);
1162 return -ENOMEM;
1163 }
1164 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1165 almost all handle-based requests (but not write responses, nor is it
1166 sufficient for path-based requests). A smaller size would have
1167 been more efficient (compacting multiple slab items on one 4k page)
1168 for the case in which debug was on, but this larger size allows
1169 more SMBs to use the small buffer alloc and is still much more
1170 efficient to alloc one per page off the slab compared to the 17K (5-page)
1171 alloc of large cifs buffers, even when page debugging is on */
1172 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
1173 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1174 NULL);
1175 if (cifs_sm_req_cachep == NULL) {
1176 mempool_destroy(cifs_req_poolp);
1177 kmem_cache_destroy(cifs_req_cachep);
1178 return -ENOMEM;
1179 }
1180
1181 if (cifs_min_small < 2)
1182 cifs_min_small = 2;
1183 else if (cifs_min_small > 256) {
1184 cifs_min_small = 256;
1185 cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1186 }
1187
1188 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1189 cifs_sm_req_cachep);
1190
1191 if (cifs_sm_req_poolp == NULL) {
1192 mempool_destroy(cifs_req_poolp);
1193 kmem_cache_destroy(cifs_req_cachep);
1194 kmem_cache_destroy(cifs_sm_req_cachep);
1195 return -ENOMEM;
1196 }
1197
1198 return 0;
1199 }
1200
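/*
 * Clamping example (illustrative): loading the module with
 * CIFSMaxBufSize=60000 ends up with 60000 & 0x1FE00 == 59904, i.e. the
 * requested size rounded down to a 512-byte multiple inside the
 * 8192..130048 window advertised in the module parameter help text.
 */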
1201 static void
1202 cifs_destroy_request_bufs(void)
1203 {
1204 mempool_destroy(cifs_req_poolp);
1205 kmem_cache_destroy(cifs_req_cachep);
1206 mempool_destroy(cifs_sm_req_poolp);
1207 kmem_cache_destroy(cifs_sm_req_cachep);
1208 }
1209
1210 static int
1211 cifs_init_mids(void)
1212 {
1213 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1214 sizeof(struct mid_q_entry), 0,
1215 SLAB_HWCACHE_ALIGN, NULL);
1216 if (cifs_mid_cachep == NULL)
1217 return -ENOMEM;
1218
1219 /* 3 is a reasonable minimum number of simultaneous operations */
1220 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1221 if (cifs_mid_poolp == NULL) {
1222 kmem_cache_destroy(cifs_mid_cachep);
1223 return -ENOMEM;
1224 }
1225
1226 return 0;
1227 }
1228
1229 static void
1230 cifs_destroy_mids(void)
1231 {
1232 mempool_destroy(cifs_mid_poolp);
1233 kmem_cache_destroy(cifs_mid_cachep);
1234 }
1235
1236 static int __init
1237 init_cifs(void)
1238 {
1239 int rc = 0;
1240 cifs_proc_init();
1241 INIT_LIST_HEAD(&cifs_tcp_ses_list);
1242 #ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
1243 INIT_LIST_HEAD(&GlobalDnotifyReqList);
1244 INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
1245 #endif /* was needed for dnotify, and will be needed for inotify when VFS fix */
1246 /*
1247 * Initialize Global counters
1248 */
1249 atomic_set(&sesInfoAllocCount, 0);
1250 atomic_set(&tconInfoAllocCount, 0);
1251 atomic_set(&tcpSesAllocCount, 0);
1252 atomic_set(&tcpSesReconnectCount, 0);
1253 atomic_set(&tconInfoReconnectCount, 0);
1254
1255 atomic_set(&bufAllocCount, 0);
1256 atomic_set(&smBufAllocCount, 0);
1257 #ifdef CONFIG_CIFS_STATS2
1258 atomic_set(&totBufAllocCount, 0);
1259 atomic_set(&totSmBufAllocCount, 0);
1260 #endif /* CONFIG_CIFS_STATS2 */
1261
1262 atomic_set(&midCount, 0);
1263 GlobalCurrentXid = 0;
1264 GlobalTotalActiveXid = 0;
1265 GlobalMaxActiveXid = 0;
1266 spin_lock_init(&cifs_tcp_ses_lock);
1267 spin_lock_init(&cifs_file_list_lock);
1268 spin_lock_init(&GlobalMid_Lock);
1269
1270 get_random_bytes(&cifs_lock_secret, sizeof(cifs_lock_secret));
1271
1272 if (cifs_max_pending < 2) {
1273 cifs_max_pending = 2;
1274 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1275 } else if (cifs_max_pending > CIFS_MAX_REQ) {
1276 cifs_max_pending = CIFS_MAX_REQ;
1277 cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1278 CIFS_MAX_REQ);
1279 }
1280
1281 cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1282 if (!cifsiod_wq) {
1283 rc = -ENOMEM;
1284 goto out_clean_proc;
1285 }
1286
1287 rc = cifs_fscache_register();
1288 if (rc)
1289 goto out_destroy_wq;
1290
1291 rc = cifs_init_inodecache();
1292 if (rc)
1293 goto out_unreg_fscache;
1294
1295 rc = cifs_init_mids();
1296 if (rc)
1297 goto out_destroy_inodecache;
1298
1299 rc = cifs_init_request_bufs();
1300 if (rc)
1301 goto out_destroy_mids;
1302
1303 #ifdef CONFIG_CIFS_UPCALL
1304 rc = init_cifs_spnego();
1305 if (rc)
1306 goto out_destroy_request_bufs;
1307 #endif /* CONFIG_CIFS_UPCALL */
1308
1309 #ifdef CONFIG_CIFS_ACL
1310 rc = init_cifs_idmap();
1311 if (rc)
1312 goto out_register_key_type;
1313 #endif /* CONFIG_CIFS_ACL */
1314
1315 rc = register_filesystem(&cifs_fs_type);
1316 if (rc)
1317 goto out_init_cifs_idmap;
1318
1319 return 0;
1320
1321 out_init_cifs_idmap:
1322 #ifdef CONFIG_CIFS_ACL
1323 exit_cifs_idmap();
1324 out_register_key_type:
1325 #endif
1326 #ifdef CONFIG_CIFS_UPCALL
1327 exit_cifs_spnego();
1328 out_destroy_request_bufs:
1329 #endif
1330 cifs_destroy_request_bufs();
1331 out_destroy_mids:
1332 cifs_destroy_mids();
1333 out_destroy_inodecache:
1334 cifs_destroy_inodecache();
1335 out_unreg_fscache:
1336 cifs_fscache_unregister();
1337 out_destroy_wq:
1338 destroy_workqueue(cifsiod_wq);
1339 out_clean_proc:
1340 cifs_proc_clean();
1341 return rc;
1342 }
1343
1344 static void __exit
1345 exit_cifs(void)
1346 {
1347 cifs_dbg(NOISY, "exit_cifs\n");
1348 unregister_filesystem(&cifs_fs_type);
1349 cifs_dfs_release_automount_timer();
1350 #ifdef CONFIG_CIFS_ACL
1351 exit_cifs_idmap();
1352 #endif
1353 #ifdef CONFIG_CIFS_UPCALL
1354 unregister_key_type(&cifs_spnego_key_type);
1355 #endif
1356 cifs_destroy_request_bufs();
1357 cifs_destroy_mids();
1358 cifs_destroy_inodecache();
1359 cifs_fscache_unregister();
1360 destroy_workqueue(cifsiod_wq);
1361 cifs_proc_clean();
1362 }
1363
1364 MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
1365 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1366 MODULE_DESCRIPTION
1367 ("VFS to access servers complying with the SNIA CIFS Specification "
1368 "e.g. Samba and Windows");
1369 MODULE_VERSION(CIFS_VERSION);
1370 module_init(init_cifs)
1371 module_exit(exit_cifs)