1 /*
2 * fs/cifs/cifsfs.c
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * Common Internet FileSystem (CIFS) client
8 *
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24 /* Note that BB means BUGBUG (ie something to fix eventually) */
25
26 #include <linux/module.h>
27 #include <linux/fs.h>
28 #include <linux/mount.h>
29 #include <linux/slab.h>
30 #include <linux/init.h>
31 #include <linux/list.h>
32 #include <linux/seq_file.h>
33 #include <linux/vfs.h>
34 #include <linux/mempool.h>
35 #include <linux/delay.h>
36 #include <linux/kthread.h>
37 #include <linux/freezer.h>
38 #include <linux/namei.h>
39 #include <linux/random.h>
40 #include <linux/xattr.h>
41 #include <net/ipv6.h>
42 #include "cifsfs.h"
43 #include "cifspdu.h"
44 #define DECLARE_GLOBALS_HERE
45 #include "cifsglob.h"
46 #include "cifsproto.h"
47 #include "cifs_debug.h"
48 #include "cifs_fs_sb.h"
49 #include <linux/mm.h>
50 #include <linux/key-type.h>
51 #include "cifs_spnego.h"
52 #include "fscache.h"
53 #ifdef CONFIG_CIFS_SMB2
54 #include "smb2pdu.h"
55 #endif
56
57 int cifsFYI = 0;
58 bool traceSMB;
59 bool enable_oplocks = true;
60 bool linuxExtEnabled = true;
61 bool lookupCacheEnabled = true;
62 unsigned int global_secflags = CIFSSEC_DEF;
63 /* unsigned int ntlmv2_support = 0; */
64 unsigned int sign_CIFS_PDUs = 1;
65 static const struct super_operations cifs_super_ops;
66 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
67 module_param(CIFSMaxBufSize, uint, 0444);
68 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
69 "Default: 16384 Range: 8192 to 130048");
70 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
71 module_param(cifs_min_rcv, uint, 0444);
72 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
73 "1 to 64");
74 unsigned int cifs_min_small = 30;
75 module_param(cifs_min_small, uint, 0444);
76 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
77 "Range: 2 to 256");
78 unsigned int cifs_max_pending = CIFS_MAX_REQ;
79 module_param(cifs_max_pending, uint, 0444);
80 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
81 "Default: 32767 Range: 2 to 32767.");
82 module_param(enable_oplocks, bool, 0644);
83 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
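/*
 * Illustration (not part of the original source): the read-only (0444)
 * parameters above are set at module load time, while the writable (0644)
 * enable_oplocks can also be changed at runtime, e.g.
 *
 *   modprobe cifs CIFSMaxBufSize=130048 cifs_max_pending=256
 *   echo 0 > /sys/module/cifs/parameters/enable_oplocks
 */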
84
85 extern mempool_t *cifs_sm_req_poolp;
86 extern mempool_t *cifs_req_poolp;
87 extern mempool_t *cifs_mid_poolp;
88
89 struct workqueue_struct *cifsiod_wq;
90 __u32 cifs_lock_secret;
91
92 /*
93 * Bumps refcount for cifs super block.
94  * Note that it should only be called if a reference to the VFS super block is
95  * already held, e.g. in the context of open-type syscalls. Otherwise it can race with
96 * atomic_dec_and_test in deactivate_locked_super.
97 */
98 void
99 cifs_sb_active(struct super_block *sb)
100 {
101 struct cifs_sb_info *server = CIFS_SB(sb);
102
103 if (atomic_inc_return(&server->active) == 1)
104 atomic_inc(&sb->s_active);
105 }
106
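/*
 * Drops the reference taken by cifs_sb_active(); when the per-sb count
 * hits zero the matching VFS reference is released via deactivate_super().
 */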
107 void
108 cifs_sb_deactive(struct super_block *sb)
109 {
110 struct cifs_sb_info *server = CIFS_SB(sb);
111
112 if (atomic_dec_and_test(&server->active))
113 deactivate_super(sb);
114 }
115
116 static int
117 cifs_read_super(struct super_block *sb)
118 {
119 struct inode *inode;
120 struct cifs_sb_info *cifs_sb;
121 struct cifs_tcon *tcon;
122 int rc = 0;
123
124 cifs_sb = CIFS_SB(sb);
125 tcon = cifs_sb_master_tcon(cifs_sb);
126
127 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
128 sb->s_flags |= MS_POSIXACL;
129
130 if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
131 sb->s_maxbytes = MAX_LFS_FILESIZE;
132 else
133 sb->s_maxbytes = MAX_NON_LFS;
134
135 /* BB FIXME fix time_gran to be larger for LANMAN sessions */
136 sb->s_time_gran = 100;
137
138 sb->s_magic = CIFS_MAGIC_NUMBER;
139 sb->s_op = &cifs_super_ops;
140 sb->s_xattr = cifs_xattr_handlers;
141 sb->s_bdi = &cifs_sb->bdi;
142 sb->s_blocksize = CIFS_MAX_MSGSIZE;
143 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
144 inode = cifs_root_iget(sb);
145
146 if (IS_ERR(inode)) {
147 rc = PTR_ERR(inode);
148 goto out_no_root;
149 }
150
151 if (tcon->nocase)
152 sb->s_d_op = &cifs_ci_dentry_ops;
153 else
154 sb->s_d_op = &cifs_dentry_ops;
155
156 sb->s_root = d_make_root(inode);
157 if (!sb->s_root) {
158 rc = -ENOMEM;
159 goto out_no_root;
160 }
161
162 #ifdef CONFIG_CIFS_NFSD_EXPORT
163 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
164 cifs_dbg(FYI, "export ops supported\n");
165 sb->s_export_op = &cifs_export_ops;
166 }
167 #endif /* CONFIG_CIFS_NFSD_EXPORT */
168
169 return 0;
170
171 out_no_root:
172 cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
173 return rc;
174 }
175
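/* .kill_sb: release the anonymous superblock, then tear down the CIFS mount state */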
176 static void cifs_kill_sb(struct super_block *sb)
177 {
178 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
179 kill_anon_super(sb);
180 cifs_umount(cifs_sb);
181 }
182
183 static int
184 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
185 {
186 struct super_block *sb = dentry->d_sb;
187 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
188 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
189 struct TCP_Server_Info *server = tcon->ses->server;
190 unsigned int xid;
191 int rc = 0;
192
193 xid = get_xid();
194
195 /*
196 * PATH_MAX may be too long - it would presumably be total path,
197  * but note that some servers (including Samba 3) have a shorter
198 * maximum path.
199 *
200 * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO.
201 */
202 buf->f_namelen = PATH_MAX;
203 buf->f_files = 0; /* undefined */
204 buf->f_ffree = 0; /* unlimited */
205
206 if (server->ops->queryfs)
207 rc = server->ops->queryfs(xid, tcon, buf);
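	/* note: rc from ->queryfs is not propagated; statfs succeeds with the defaults above */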
208
209 free_xid(xid);
210 return 0;
211 }
212
213 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
214 {
215 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
216 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
217 struct TCP_Server_Info *server = tcon->ses->server;
218
219 if (server->ops->fallocate)
220 return server->ops->fallocate(file, tcon, mode, off, len);
221
222 return -EOPNOTSUPP;
223 }
224
225 static int cifs_permission(struct inode *inode, int mask)
226 {
227 struct cifs_sb_info *cifs_sb;
228
229 cifs_sb = CIFS_SB(inode->i_sb);
230
231 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
232 if ((mask & MAY_EXEC) && !execute_ok(inode))
233 return -EACCES;
234 else
235 return 0;
236 } else /* file mode might have been restricted at mount time
237 on the client (above and beyond ACL on servers) for
238 servers which do not support setting and viewing mode bits,
239 so allowing client to check permissions is useful */
240 return generic_permission(inode, mask);
241 }
242
243 static struct kmem_cache *cifs_inode_cachep;
244 static struct kmem_cache *cifs_req_cachep;
245 static struct kmem_cache *cifs_mid_cachep;
246 static struct kmem_cache *cifs_sm_req_cachep;
247 mempool_t *cifs_sm_req_poolp;
248 mempool_t *cifs_req_poolp;
249 mempool_t *cifs_mid_poolp;
250
251 static struct inode *
252 cifs_alloc_inode(struct super_block *sb)
253 {
254 struct cifsInodeInfo *cifs_inode;
255 cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
256 if (!cifs_inode)
257 return NULL;
258 cifs_inode->cifsAttrs = 0x20; /* default */
259 cifs_inode->time = 0;
260 /*
261 * Until the file is open and we have gotten oplock info back from the
262 * server, can not assume caching of file data or metadata.
263 */
264 cifs_set_oplock_level(cifs_inode, 0);
265 cifs_inode->flags = 0;
266 spin_lock_init(&cifs_inode->writers_lock);
267 cifs_inode->writers = 0;
268 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
269 cifs_inode->server_eof = 0;
270 cifs_inode->uniqueid = 0;
271 cifs_inode->createtime = 0;
272 cifs_inode->epoch = 0;
273 #ifdef CONFIG_CIFS_SMB2
274 generate_random_uuid(cifs_inode->lease_key);
275 #endif
276 /*
277 * Can not set i_flags here - they get immediately overwritten to zero
278 * by the VFS.
279 */
280 /* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
281 INIT_LIST_HEAD(&cifs_inode->openFileList);
282 INIT_LIST_HEAD(&cifs_inode->llist);
283 return &cifs_inode->vfs_inode;
284 }
285
286 static void cifs_i_callback(struct rcu_head *head)
287 {
288 struct inode *inode = container_of(head, struct inode, i_rcu);
289 kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
290 }
291
292 static void
293 cifs_destroy_inode(struct inode *inode)
294 {
295 call_rcu(&inode->i_rcu, cifs_i_callback);
296 }
297
298 static void
299 cifs_evict_inode(struct inode *inode)
300 {
301 truncate_inode_pages_final(&inode->i_data);
302 clear_inode(inode);
303 cifs_fscache_release_inode_cookie(inode);
304 }
305
306 static void
307 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
308 {
309 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
310 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
311
312 seq_puts(s, ",addr=");
313
314 switch (server->dstaddr.ss_family) {
315 case AF_INET:
316 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
317 break;
318 case AF_INET6:
319 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
320 if (sa6->sin6_scope_id)
321 seq_printf(s, "%%%u", sa6->sin6_scope_id);
322 break;
323 default:
324 seq_puts(s, "(unknown)");
325 }
326 }
327
328 static void
329 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
330 {
331 if (ses->sectype == Unspecified) {
332 if (ses->user_name == NULL)
333 seq_puts(s, ",sec=none");
334 return;
335 }
336
337 seq_puts(s, ",sec=");
338
339 switch (ses->sectype) {
340 case LANMAN:
341 seq_puts(s, "lanman");
342 break;
343 case NTLMv2:
344 seq_puts(s, "ntlmv2");
345 break;
346 case NTLM:
347 seq_puts(s, "ntlm");
348 break;
349 case Kerberos:
350 seq_puts(s, "krb5");
351 break;
352 case RawNTLMSSP:
353 seq_puts(s, "ntlmssp");
354 break;
355 default:
356 /* shouldn't ever happen */
357 seq_puts(s, "unknown");
358 break;
359 }
360
361 if (ses->sign)
362 seq_puts(s, "i");
363 }
364
365 static void
366 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
367 {
368 seq_puts(s, ",cache=");
369
370 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
371 seq_puts(s, "strict");
372 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
373 seq_puts(s, "none");
374 else
375 seq_puts(s, "loose");
376 }
377
378 static void
379 cifs_show_nls(struct seq_file *s, struct nls_table *cur)
380 {
381 struct nls_table *def;
382
383 /* Display iocharset= option if it's not default charset */
384 def = load_nls_default();
385 if (def != cur)
386 seq_printf(s, ",iocharset=%s", cur->charset);
387 unload_nls(def);
388 }
389
390 /*
391 * cifs_show_options() is for displaying mount options in /proc/mounts.
392 * Not all settable options are displayed but most of the important
393 * ones are.
394 */
395 static int
396 cifs_show_options(struct seq_file *s, struct dentry *root)
397 {
398 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
399 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
400 struct sockaddr *srcaddr;
401 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
402
403 seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
404 cifs_show_security(s, tcon->ses);
405 cifs_show_cache_flavor(s, cifs_sb);
406
407 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
408 seq_puts(s, ",multiuser");
409 else if (tcon->ses->user_name)
410 seq_show_option(s, "username", tcon->ses->user_name);
411
412 if (tcon->ses->domainName)
413 seq_show_option(s, "domain", tcon->ses->domainName);
414
415 if (srcaddr->sa_family != AF_UNSPEC) {
416 struct sockaddr_in *saddr4;
417 struct sockaddr_in6 *saddr6;
418 saddr4 = (struct sockaddr_in *)srcaddr;
419 saddr6 = (struct sockaddr_in6 *)srcaddr;
420 if (srcaddr->sa_family == AF_INET6)
421 seq_printf(s, ",srcaddr=%pI6c",
422 &saddr6->sin6_addr);
423 else if (srcaddr->sa_family == AF_INET)
424 seq_printf(s, ",srcaddr=%pI4",
425 &saddr4->sin_addr.s_addr);
426 else
427 seq_printf(s, ",srcaddr=BAD-AF:%i",
428 (int)(srcaddr->sa_family));
429 }
430
431 seq_printf(s, ",uid=%u",
432 from_kuid_munged(&init_user_ns, cifs_sb->mnt_uid));
433 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
434 seq_puts(s, ",forceuid");
435 else
436 seq_puts(s, ",noforceuid");
437
438 seq_printf(s, ",gid=%u",
439 from_kgid_munged(&init_user_ns, cifs_sb->mnt_gid));
440 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
441 seq_puts(s, ",forcegid");
442 else
443 seq_puts(s, ",noforcegid");
444
445 cifs_show_address(s, tcon->ses->server);
446
447 if (!tcon->unix_ext)
448 seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
449 cifs_sb->mnt_file_mode,
450 cifs_sb->mnt_dir_mode);
451
452 cifs_show_nls(s, cifs_sb->local_nls);
453
454 if (tcon->seal)
455 seq_puts(s, ",seal");
456 if (tcon->nocase)
457 seq_puts(s, ",nocase");
458 if (tcon->retry)
459 seq_puts(s, ",hard");
460 if (tcon->use_persistent)
461 seq_puts(s, ",persistenthandles");
462 else if (tcon->use_resilient)
463 seq_puts(s, ",resilienthandles");
464 if (tcon->unix_ext)
465 seq_puts(s, ",unix");
466 else
467 seq_puts(s, ",nounix");
468 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
469 seq_puts(s, ",posixpaths");
470 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
471 seq_puts(s, ",setuids");
472 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
473 seq_puts(s, ",idsfromsid");
474 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
475 seq_puts(s, ",serverino");
476 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
477 seq_puts(s, ",rwpidforward");
478 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
479 seq_puts(s, ",forcemand");
480 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
481 seq_puts(s, ",nouser_xattr");
482 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
483 seq_puts(s, ",mapchars");
484 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
485 seq_puts(s, ",mapposix");
486 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
487 seq_puts(s, ",sfu");
488 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
489 seq_puts(s, ",nobrl");
490 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
491 seq_puts(s, ",cifsacl");
492 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
493 seq_puts(s, ",dynperm");
494 if (root->d_sb->s_flags & MS_POSIXACL)
495 seq_puts(s, ",acl");
496 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
497 seq_puts(s, ",mfsymlinks");
498 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
499 seq_puts(s, ",fsc");
500 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
501 seq_puts(s, ",nostrictsync");
502 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
503 seq_puts(s, ",noperm");
504 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
505 seq_printf(s, ",backupuid=%u",
506 from_kuid_munged(&init_user_ns,
507 cifs_sb->mnt_backupuid));
508 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
509 seq_printf(s, ",backupgid=%u",
510 from_kgid_munged(&init_user_ns,
511 cifs_sb->mnt_backupgid));
512
513 seq_printf(s, ",rsize=%u", cifs_sb->rsize);
514 seq_printf(s, ",wsize=%u", cifs_sb->wsize);
515 seq_printf(s, ",echo_interval=%lu",
516 tcon->ses->server->echo_interval / HZ);
517 /* convert actimeo and display it in seconds */
518 seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
519
520 return 0;
521 }
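/*
 * Illustrative only: a resulting /proc/mounts entry might look like
 *
 *   //server/share /mnt cifs rw,vers=1.0,sec=ntlmssp,cache=strict,
 *   username=user,uid=0,noforceuid,gid=0,noforcegid,addr=192.0.2.10,
 *   file_mode=0755,dir_mode=0755,nounix,serverino,rsize=61440,wsize=65536,
 *   echo_interval=60,actimeo=1
 *
 * where the device, mount point, fs type and rw flag come from the VFS and
 * everything after them from cifs_show_options() above.
 */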
522
523 static void cifs_umount_begin(struct super_block *sb)
524 {
525 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
526 struct cifs_tcon *tcon;
527
528 if (cifs_sb == NULL)
529 return;
530
531 tcon = cifs_sb_master_tcon(cifs_sb);
532
533 spin_lock(&cifs_tcp_ses_lock);
534 if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
535 		/* we have other mounts to the same share, or we have already
536 		   tried to force this umount and woken up all waiting network
537 		   requests; nothing to do */
538 spin_unlock(&cifs_tcp_ses_lock);
539 return;
540 } else if (tcon->tc_count == 1)
541 tcon->tidStatus = CifsExiting;
542 spin_unlock(&cifs_tcp_ses_lock);
543
544 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
545 /* cancel_notify_requests(tcon); */
546 if (tcon->ses && tcon->ses->server) {
547 cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
548 wake_up_all(&tcon->ses->server->request_q);
549 wake_up_all(&tcon->ses->server->response_q);
550 msleep(1); /* yield */
551 /* we have to kick the requests once more */
552 wake_up_all(&tcon->ses->server->response_q);
553 msleep(1);
554 }
555
556 return;
557 }
558
559 #ifdef CONFIG_CIFS_STATS2
560 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
561 {
562 /* BB FIXME */
563 return 0;
564 }
565 #endif
566
567 static int cifs_remount(struct super_block *sb, int *flags, char *data)
568 {
569 sync_filesystem(sb);
570 *flags |= MS_NODIRATIME;
571 return 0;
572 }
573
574 static int cifs_drop_inode(struct inode *inode)
575 {
576 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
577
578 /* no serverino => unconditional eviction */
579 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
580 generic_drop_inode(inode);
581 }
582
583 static const struct super_operations cifs_super_ops = {
584 .statfs = cifs_statfs,
585 .alloc_inode = cifs_alloc_inode,
586 .destroy_inode = cifs_destroy_inode,
587 .drop_inode = cifs_drop_inode,
588 .evict_inode = cifs_evict_inode,
589 /* .delete_inode = cifs_delete_inode, */ /* Do not need above
590 function unless later we add lazy close of inodes or unless the
591 kernel forgets to call us with the same number of releases (closes)
592 as opens */
593 .show_options = cifs_show_options,
594 .umount_begin = cifs_umount_begin,
595 .remount_fs = cifs_remount,
596 #ifdef CONFIG_CIFS_STATS2
597 .show_stats = cifs_show_stats,
598 #endif
599 };
600
601 /*
602 * Get root dentry from superblock according to prefix path mount option.
603  * Return dentry with refcount + 1 on success and an ERR_PTR on failure.
604 */
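/*
 * Illustration (not in the original source): for a prefix path such as
 * "dir1\dir2", the loop below skips separators, looks up "dir1" under
 * sb->s_root, then "dir2" under that, and returns the final dentry.
 */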
605 static struct dentry *
606 cifs_get_root(struct smb_vol *vol, struct super_block *sb)
607 {
608 struct dentry *dentry;
609 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
610 char *full_path = NULL;
611 char *s, *p;
612 char sep;
613
614 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
615 return dget(sb->s_root);
616
617 full_path = cifs_build_path_to_root(vol, cifs_sb,
618 cifs_sb_master_tcon(cifs_sb));
619 if (full_path == NULL)
620 return ERR_PTR(-ENOMEM);
621
622 cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
623
624 sep = CIFS_DIR_SEP(cifs_sb);
625 dentry = dget(sb->s_root);
626 p = s = full_path;
627
628 do {
629 struct inode *dir = d_inode(dentry);
630 struct dentry *child;
631
632 if (!dir) {
633 dput(dentry);
634 dentry = ERR_PTR(-ENOENT);
635 break;
636 }
637 if (!S_ISDIR(dir->i_mode)) {
638 dput(dentry);
639 dentry = ERR_PTR(-ENOTDIR);
640 break;
641 }
642
643 /* skip separators */
644 while (*s == sep)
645 s++;
646 if (!*s)
647 break;
648 p = s++;
649 /* next separator */
650 while (*s && *s != sep)
651 s++;
652
653 child = lookup_one_len_unlocked(p, dentry, s - p);
654 dput(dentry);
655 dentry = child;
656 } while (!IS_ERR(dentry));
657 kfree(full_path);
658 return dentry;
659 }
660
661 static int cifs_set_super(struct super_block *sb, void *data)
662 {
663 struct cifs_mnt_data *mnt_data = data;
664 sb->s_fs_info = mnt_data->cifs_sb;
665 return set_anon_super(sb, NULL);
666 }
667
668 static struct dentry *
669 cifs_do_mount(struct file_system_type *fs_type,
670 int flags, const char *dev_name, void *data)
671 {
672 int rc;
673 struct super_block *sb;
674 struct cifs_sb_info *cifs_sb;
675 struct smb_vol *volume_info;
676 struct cifs_mnt_data mnt_data;
677 struct dentry *root;
678
679 cifs_dbg(FYI, "Devname: %s flags: %d\n", dev_name, flags);
680
681 volume_info = cifs_get_volume_info((char *)data, dev_name);
682 if (IS_ERR(volume_info))
683 return ERR_CAST(volume_info);
684
685 cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
686 if (cifs_sb == NULL) {
687 root = ERR_PTR(-ENOMEM);
688 goto out_nls;
689 }
690
691 cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
692 if (cifs_sb->mountdata == NULL) {
693 root = ERR_PTR(-ENOMEM);
694 goto out_free;
695 }
696
697 rc = cifs_setup_cifs_sb(volume_info, cifs_sb);
698 if (rc) {
699 root = ERR_PTR(rc);
700 goto out_free;
701 }
702
703 rc = cifs_mount(cifs_sb, volume_info);
704 if (rc) {
705 if (!(flags & MS_SILENT))
706 cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
707 rc);
708 root = ERR_PTR(rc);
709 goto out_free;
710 }
711
712 mnt_data.vol = volume_info;
713 mnt_data.cifs_sb = cifs_sb;
714 mnt_data.flags = flags;
715
716 /* BB should we make this contingent on mount parm? */
717 flags |= MS_NODIRATIME | MS_NOATIME;
718
719 sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
720 if (IS_ERR(sb)) {
721 root = ERR_CAST(sb);
722 cifs_umount(cifs_sb);
723 goto out;
724 }
725
726 if (sb->s_root) {
727 cifs_dbg(FYI, "Use existing superblock\n");
728 cifs_umount(cifs_sb);
729 } else {
730 rc = cifs_read_super(sb);
731 if (rc) {
732 root = ERR_PTR(rc);
733 goto out_super;
734 }
735
736 sb->s_flags |= MS_ACTIVE;
737 }
738
739 root = cifs_get_root(volume_info, sb);
740 if (IS_ERR(root))
741 goto out_super;
742
743 cifs_dbg(FYI, "dentry root is: %p\n", root);
744 goto out;
745
746 out_super:
747 deactivate_locked_super(sb);
748 out:
749 cifs_cleanup_volume_info(volume_info);
750 return root;
751
752 out_free:
753 kfree(cifs_sb->prepath);
754 kfree(cifs_sb->mountdata);
755 kfree(cifs_sb);
756 out_nls:
757 unload_nls(volume_info->local_nls);
758 goto out;
759 }
760
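/*
 * Default (cache=loose) read path: O_DIRECT opens bypass the page cache via
 * cifs_user_readv(); otherwise the mapping is revalidated and the generic
 * cached read path is used.
 */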
761 static ssize_t
762 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
763 {
764 ssize_t rc;
765 struct inode *inode = file_inode(iocb->ki_filp);
766
767 if (iocb->ki_filp->f_flags & O_DIRECT)
768 return cifs_user_readv(iocb, iter);
769
770 rc = cifs_revalidate_mapping(inode);
771 if (rc)
772 return rc;
773
774 return generic_file_read_iter(iocb, iter);
775 }
776
777 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
778 {
779 struct inode *inode = file_inode(iocb->ki_filp);
780 struct cifsInodeInfo *cinode = CIFS_I(inode);
781 ssize_t written;
782 int rc;
783
784 if (iocb->ki_filp->f_flags & O_DIRECT) {
785 written = cifs_user_writev(iocb, from);
786 if (written > 0 && CIFS_CACHE_READ(cinode)) {
787 cifs_zap_mapping(inode);
788 cifs_dbg(FYI,
789 "Set no oplock for inode=%p after a write operation\n",
790 inode);
791 cinode->oplock = 0;
792 }
793 return written;
794 }
795
796 written = cifs_get_writer(cinode);
797 if (written)
798 return written;
799
800 written = generic_file_write_iter(iocb, from);
801
802 if (CIFS_CACHE_WRITE(CIFS_I(inode)))
803 goto out;
804
805 rc = filemap_fdatawrite(inode->i_mapping);
806 if (rc)
807 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
808 rc, inode);
809
810 out:
811 cifs_put_writer(cinode);
812 return written;
813 }
814
815 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
816 {
817 /*
818 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
819 * the cached file length
820 */
821 if (whence != SEEK_SET && whence != SEEK_CUR) {
822 int rc;
823 struct inode *inode = file_inode(file);
824
825 /*
826 * We need to be sure that all dirty pages are written and the
827 * server has the newest file length.
828 */
829 if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
830 inode->i_mapping->nrpages != 0) {
831 rc = filemap_fdatawait(inode->i_mapping);
832 if (rc) {
833 mapping_set_error(inode->i_mapping, rc);
834 return rc;
835 }
836 }
837 /*
838 * Some applications poll for the file length in this strange
839 * way so we must seek to end on non-oplocked files by
840 * setting the revalidate time to zero.
841 */
842 CIFS_I(inode)->time = 0;
843
844 rc = cifs_revalidate_file_attr(file);
845 if (rc < 0)
846 return (loff_t)rc;
847 }
848 return generic_file_llseek(file, offset, whence);
849 }
850
851 static int
852 cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
853 {
854 /*
855 * Note that this is called by vfs setlease with i_lock held to
856 * protect *lease from going away.
857 */
858 struct inode *inode = file_inode(file);
859 struct cifsFileInfo *cfile = file->private_data;
860
861 if (!(S_ISREG(inode->i_mode)))
862 return -EINVAL;
863
864 /* Check if file is oplocked if this is request for new lease */
865 if (arg == F_UNLCK ||
866 ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
867 ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
868 return generic_setlease(file, arg, lease, priv);
869 else if (tlink_tcon(cfile->tlink)->local_lease &&
870 !CIFS_CACHE_READ(CIFS_I(inode)))
871 /*
872 * If the server claims to support oplock on this file, then we
873 * still need to check oplock even if the local_lease mount
874 * option is set, but there are servers which do not support
875 * oplock for which this mount option may be useful if the user
876 * knows that the file won't be changed on the server by anyone
877 * else.
878 */
879 return generic_setlease(file, arg, lease, priv);
880 else
881 return -EAGAIN;
882 }
883
884 struct file_system_type cifs_fs_type = {
885 .owner = THIS_MODULE,
886 .name = "cifs",
887 .mount = cifs_do_mount,
888 .kill_sb = cifs_kill_sb,
889 /* .fs_flags */
890 };
891 MODULE_ALIAS_FS("cifs");
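/*
 * Illustrative only: once this type is registered, a share is typically
 * mounted with something like
 *
 *   mount -t cifs //server/share /mnt -o username=user,sec=ntlmssp
 *
 * which reaches cifs_do_mount() above with the option string as 'data'.
 */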
892 const struct inode_operations cifs_dir_inode_ops = {
893 .create = cifs_create,
894 .atomic_open = cifs_atomic_open,
895 .lookup = cifs_lookup,
896 .getattr = cifs_getattr,
897 .unlink = cifs_unlink,
898 .link = cifs_hardlink,
899 .mkdir = cifs_mkdir,
900 .rmdir = cifs_rmdir,
901 .rename = cifs_rename2,
902 .permission = cifs_permission,
903 .setattr = cifs_setattr,
904 .symlink = cifs_symlink,
905 .mknod = cifs_mknod,
906 .listxattr = cifs_listxattr,
907 };
908
909 const struct inode_operations cifs_file_inode_ops = {
910 .setattr = cifs_setattr,
911 .getattr = cifs_getattr,
912 .permission = cifs_permission,
913 .listxattr = cifs_listxattr,
914 };
915
916 const struct inode_operations cifs_symlink_inode_ops = {
917 .readlink = generic_readlink,
918 .get_link = cifs_get_link,
919 .permission = cifs_permission,
920 .listxattr = cifs_listxattr,
921 };
922
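/*
 * Reached via the VFS clone path (e.g. the FICLONE/FICLONERANGE ioctls);
 * the actual server-side copy is done through ->duplicate_extents below.
 */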
923 static int cifs_clone_file_range(struct file *src_file, loff_t off,
924 struct file *dst_file, loff_t destoff, u64 len)
925 {
926 struct inode *src_inode = file_inode(src_file);
927 struct inode *target_inode = file_inode(dst_file);
928 struct cifsFileInfo *smb_file_src = src_file->private_data;
929 struct cifsFileInfo *smb_file_target = dst_file->private_data;
930 struct cifs_tcon *target_tcon = tlink_tcon(smb_file_target->tlink);
931 unsigned int xid;
932 int rc;
933
934 cifs_dbg(FYI, "clone range\n");
935
936 xid = get_xid();
937
938 if (!src_file->private_data || !dst_file->private_data) {
939 rc = -EBADF;
940 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
941 goto out;
942 }
943
944 /*
945 	 * Note: the cifs case is easier than btrfs since the server is responsible
946 	 * for checking proper open modes and file type, and if it wants it could
947 	 * even support copying a range where source == target
948 */
949 lock_two_nondirectories(target_inode, src_inode);
950
951 if (len == 0)
952 len = src_inode->i_size - off;
953
954 cifs_dbg(FYI, "about to flush pages\n");
955 /* should we flush first and last page first */
956 truncate_inode_pages_range(&target_inode->i_data, destoff,
957 PAGE_ALIGN(destoff + len)-1);
958
959 if (target_tcon->ses->server->ops->duplicate_extents)
960 rc = target_tcon->ses->server->ops->duplicate_extents(xid,
961 smb_file_src, smb_file_target, off, len, destoff);
962 else
963 rc = -EOPNOTSUPP;
964
965 /* force revalidate of size and timestamps of target file now
966 that target is updated on the server */
967 CIFS_I(target_inode)->time = 0;
968 /* although unlocking in the reverse order from locking is not
969 strictly necessary here it is a little cleaner to be consistent */
970 unlock_two_nondirectories(src_inode, target_inode);
971 out:
972 free_xid(xid);
973 return rc;
974 }
975
976 const struct file_operations cifs_file_ops = {
977 .read_iter = cifs_loose_read_iter,
978 .write_iter = cifs_file_write_iter,
979 .open = cifs_open,
980 .release = cifs_close,
981 .lock = cifs_lock,
982 .fsync = cifs_fsync,
983 .flush = cifs_flush,
984 .mmap = cifs_file_mmap,
985 .splice_read = generic_file_splice_read,
986 .llseek = cifs_llseek,
987 .unlocked_ioctl = cifs_ioctl,
988 .clone_file_range = cifs_clone_file_range,
989 .setlease = cifs_setlease,
990 .fallocate = cifs_fallocate,
991 };
992
993 const struct file_operations cifs_file_strict_ops = {
994 .read_iter = cifs_strict_readv,
995 .write_iter = cifs_strict_writev,
996 .open = cifs_open,
997 .release = cifs_close,
998 .lock = cifs_lock,
999 .fsync = cifs_strict_fsync,
1000 .flush = cifs_flush,
1001 .mmap = cifs_file_strict_mmap,
1002 .splice_read = generic_file_splice_read,
1003 .llseek = cifs_llseek,
1004 .unlocked_ioctl = cifs_ioctl,
1005 .clone_file_range = cifs_clone_file_range,
1006 .setlease = cifs_setlease,
1007 .fallocate = cifs_fallocate,
1008 };
1009
1010 const struct file_operations cifs_file_direct_ops = {
1011 /* BB reevaluate whether they can be done with directio, no cache */
1012 .read_iter = cifs_user_readv,
1013 .write_iter = cifs_user_writev,
1014 .open = cifs_open,
1015 .release = cifs_close,
1016 .lock = cifs_lock,
1017 .fsync = cifs_fsync,
1018 .flush = cifs_flush,
1019 .mmap = cifs_file_mmap,
1020 .splice_read = generic_file_splice_read,
1021 .unlocked_ioctl = cifs_ioctl,
1022 .clone_file_range = cifs_clone_file_range,
1023 .llseek = cifs_llseek,
1024 .setlease = cifs_setlease,
1025 .fallocate = cifs_fallocate,
1026 };
1027
1028 const struct file_operations cifs_file_nobrl_ops = {
1029 .read_iter = cifs_loose_read_iter,
1030 .write_iter = cifs_file_write_iter,
1031 .open = cifs_open,
1032 .release = cifs_close,
1033 .fsync = cifs_fsync,
1034 .flush = cifs_flush,
1035 .mmap = cifs_file_mmap,
1036 .splice_read = generic_file_splice_read,
1037 .llseek = cifs_llseek,
1038 .unlocked_ioctl = cifs_ioctl,
1039 .clone_file_range = cifs_clone_file_range,
1040 .setlease = cifs_setlease,
1041 .fallocate = cifs_fallocate,
1042 };
1043
1044 const struct file_operations cifs_file_strict_nobrl_ops = {
1045 .read_iter = cifs_strict_readv,
1046 .write_iter = cifs_strict_writev,
1047 .open = cifs_open,
1048 .release = cifs_close,
1049 .fsync = cifs_strict_fsync,
1050 .flush = cifs_flush,
1051 .mmap = cifs_file_strict_mmap,
1052 .splice_read = generic_file_splice_read,
1053 .llseek = cifs_llseek,
1054 .unlocked_ioctl = cifs_ioctl,
1055 .clone_file_range = cifs_clone_file_range,
1056 .setlease = cifs_setlease,
1057 .fallocate = cifs_fallocate,
1058 };
1059
1060 const struct file_operations cifs_file_direct_nobrl_ops = {
1061 /* BB reevaluate whether they can be done with directio, no cache */
1062 .read_iter = cifs_user_readv,
1063 .write_iter = cifs_user_writev,
1064 .open = cifs_open,
1065 .release = cifs_close,
1066 .fsync = cifs_fsync,
1067 .flush = cifs_flush,
1068 .mmap = cifs_file_mmap,
1069 .splice_read = generic_file_splice_read,
1070 .unlocked_ioctl = cifs_ioctl,
1071 .clone_file_range = cifs_clone_file_range,
1072 .llseek = cifs_llseek,
1073 .setlease = cifs_setlease,
1074 .fallocate = cifs_fallocate,
1075 };
1076
1077 const struct file_operations cifs_dir_ops = {
1078 .iterate_shared = cifs_readdir,
1079 .release = cifs_closedir,
1080 .read = generic_read_dir,
1081 .unlocked_ioctl = cifs_ioctl,
1082 .clone_file_range = cifs_clone_file_range,
1083 .llseek = generic_file_llseek,
1084 };
1085
1086 static void
1087 cifs_init_once(void *inode)
1088 {
1089 struct cifsInodeInfo *cifsi = inode;
1090
1091 inode_init_once(&cifsi->vfs_inode);
1092 init_rwsem(&cifsi->lock_sem);
1093 }
1094
1095 static int __init
1096 cifs_init_inodecache(void)
1097 {
1098 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1099 sizeof(struct cifsInodeInfo),
1100 0, (SLAB_RECLAIM_ACCOUNT|
1101 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1102 cifs_init_once);
1103 if (cifs_inode_cachep == NULL)
1104 return -ENOMEM;
1105
1106 return 0;
1107 }
1108
1109 static void
1110 cifs_destroy_inodecache(void)
1111 {
1112 /*
1113 * Make sure all delayed rcu free inodes are flushed before we
1114 * destroy cache.
1115 */
1116 rcu_barrier();
1117 kmem_cache_destroy(cifs_inode_cachep);
1118 }
1119
1120 static int
1121 cifs_init_request_bufs(void)
1122 {
1123 size_t max_hdr_size = MAX_CIFS_HDR_SIZE;
1124 #ifdef CONFIG_CIFS_SMB2
1125 /*
1126 * SMB2 maximum header size is bigger than CIFS one - no problems to
1127 * allocate some more bytes for CIFS.
1128 */
1129 max_hdr_size = MAX_SMB2_HDR_SIZE;
1130 #endif
1131 if (CIFSMaxBufSize < 8192) {
1132 /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1133 Unicode path name has to fit in any SMB/CIFS path based frames */
1134 CIFSMaxBufSize = 8192;
1135 } else if (CIFSMaxBufSize > 1024*127) {
1136 CIFSMaxBufSize = 1024 * 127;
1137 } else {
1138 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
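		/* e.g. (illustrative) a requested 17000 becomes 17000 & 0x1FE00 = 16896 */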
1139 }
1140 /*
1141 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1142 CIFSMaxBufSize, CIFSMaxBufSize);
1143 */
1144 cifs_req_cachep = kmem_cache_create("cifs_request",
1145 CIFSMaxBufSize + max_hdr_size, 0,
1146 SLAB_HWCACHE_ALIGN, NULL);
1147 if (cifs_req_cachep == NULL)
1148 return -ENOMEM;
1149
1150 if (cifs_min_rcv < 1)
1151 cifs_min_rcv = 1;
1152 else if (cifs_min_rcv > 64) {
1153 cifs_min_rcv = 64;
1154 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1155 }
1156
1157 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1158 cifs_req_cachep);
1159
1160 if (cifs_req_poolp == NULL) {
1161 kmem_cache_destroy(cifs_req_cachep);
1162 return -ENOMEM;
1163 }
1164 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1165 almost all handle based requests (but not write response, nor is it
1166 sufficient for path based requests). A smaller size would have
1167 been more efficient (compacting multiple slab items on one 4k page)
1168 for the case in which debug was on, but this larger size allows
1169 more SMBs to use small buffer alloc and is still much more
1170 efficient to alloc 1 per page off the slab compared to 17K (5page)
1171 alloc of large cifs buffers even when page debugging is on */
1172 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
1173 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1174 NULL);
1175 if (cifs_sm_req_cachep == NULL) {
1176 mempool_destroy(cifs_req_poolp);
1177 kmem_cache_destroy(cifs_req_cachep);
1178 return -ENOMEM;
1179 }
1180
1181 if (cifs_min_small < 2)
1182 cifs_min_small = 2;
1183 else if (cifs_min_small > 256) {
1184 cifs_min_small = 256;
1185 cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1186 }
1187
1188 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1189 cifs_sm_req_cachep);
1190
1191 if (cifs_sm_req_poolp == NULL) {
1192 mempool_destroy(cifs_req_poolp);
1193 kmem_cache_destroy(cifs_req_cachep);
1194 kmem_cache_destroy(cifs_sm_req_cachep);
1195 return -ENOMEM;
1196 }
1197
1198 return 0;
1199 }
1200
1201 static void
1202 cifs_destroy_request_bufs(void)
1203 {
1204 mempool_destroy(cifs_req_poolp);
1205 kmem_cache_destroy(cifs_req_cachep);
1206 mempool_destroy(cifs_sm_req_poolp);
1207 kmem_cache_destroy(cifs_sm_req_cachep);
1208 }
1209
1210 static int
1211 cifs_init_mids(void)
1212 {
1213 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1214 sizeof(struct mid_q_entry), 0,
1215 SLAB_HWCACHE_ALIGN, NULL);
1216 if (cifs_mid_cachep == NULL)
1217 return -ENOMEM;
1218
1219 /* 3 is a reasonable minimum number of simultaneous operations */
1220 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1221 if (cifs_mid_poolp == NULL) {
1222 kmem_cache_destroy(cifs_mid_cachep);
1223 return -ENOMEM;
1224 }
1225
1226 return 0;
1227 }
1228
1229 static void
1230 cifs_destroy_mids(void)
1231 {
1232 mempool_destroy(cifs_mid_poolp);
1233 kmem_cache_destroy(cifs_mid_cachep);
1234 }
1235
1236 static int __init
1237 init_cifs(void)
1238 {
1239 int rc = 0;
1240 cifs_proc_init();
1241 INIT_LIST_HEAD(&cifs_tcp_ses_list);
1242 #ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
1243 INIT_LIST_HEAD(&GlobalDnotifyReqList);
1244 INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
1245 #endif /* was needed for dnotify, and will be needed for inotify when the VFS fix lands */
1246 /*
1247 * Initialize Global counters
1248 */
1249 atomic_set(&sesInfoAllocCount, 0);
1250 atomic_set(&tconInfoAllocCount, 0);
1251 atomic_set(&tcpSesAllocCount, 0);
1252 atomic_set(&tcpSesReconnectCount, 0);
1253 atomic_set(&tconInfoReconnectCount, 0);
1254
1255 atomic_set(&bufAllocCount, 0);
1256 atomic_set(&smBufAllocCount, 0);
1257 #ifdef CONFIG_CIFS_STATS2
1258 atomic_set(&totBufAllocCount, 0);
1259 atomic_set(&totSmBufAllocCount, 0);
1260 #endif /* CONFIG_CIFS_STATS2 */
1261
1262 atomic_set(&midCount, 0);
1263 GlobalCurrentXid = 0;
1264 GlobalTotalActiveXid = 0;
1265 GlobalMaxActiveXid = 0;
1266 spin_lock_init(&cifs_tcp_ses_lock);
1267 spin_lock_init(&GlobalMid_Lock);
1268
1269 get_random_bytes(&cifs_lock_secret, sizeof(cifs_lock_secret));
1270
1271 if (cifs_max_pending < 2) {
1272 cifs_max_pending = 2;
1273 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1274 } else if (cifs_max_pending > CIFS_MAX_REQ) {
1275 cifs_max_pending = CIFS_MAX_REQ;
1276 cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1277 CIFS_MAX_REQ);
1278 }
1279
1280 cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1281 if (!cifsiod_wq) {
1282 rc = -ENOMEM;
1283 goto out_clean_proc;
1284 }
1285
1286 rc = cifs_fscache_register();
1287 if (rc)
1288 goto out_destroy_wq;
1289
1290 rc = cifs_init_inodecache();
1291 if (rc)
1292 goto out_unreg_fscache;
1293
1294 rc = cifs_init_mids();
1295 if (rc)
1296 goto out_destroy_inodecache;
1297
1298 rc = cifs_init_request_bufs();
1299 if (rc)
1300 goto out_destroy_mids;
1301
1302 #ifdef CONFIG_CIFS_UPCALL
1303 rc = init_cifs_spnego();
1304 if (rc)
1305 goto out_destroy_request_bufs;
1306 #endif /* CONFIG_CIFS_UPCALL */
1307
1308 #ifdef CONFIG_CIFS_ACL
1309 rc = init_cifs_idmap();
1310 if (rc)
1311 goto out_register_key_type;
1312 #endif /* CONFIG_CIFS_ACL */
1313
1314 rc = register_filesystem(&cifs_fs_type);
1315 if (rc)
1316 goto out_init_cifs_idmap;
1317
1318 return 0;
1319
1320 out_init_cifs_idmap:
1321 #ifdef CONFIG_CIFS_ACL
1322 exit_cifs_idmap();
1323 out_register_key_type:
1324 #endif
1325 #ifdef CONFIG_CIFS_UPCALL
1326 exit_cifs_spnego();
1327 out_destroy_request_bufs:
1328 #endif
1329 cifs_destroy_request_bufs();
1330 out_destroy_mids:
1331 cifs_destroy_mids();
1332 out_destroy_inodecache:
1333 cifs_destroy_inodecache();
1334 out_unreg_fscache:
1335 cifs_fscache_unregister();
1336 out_destroy_wq:
1337 destroy_workqueue(cifsiod_wq);
1338 out_clean_proc:
1339 cifs_proc_clean();
1340 return rc;
1341 }
1342
1343 static void __exit
1344 exit_cifs(void)
1345 {
1346 cifs_dbg(NOISY, "exit_cifs\n");
1347 unregister_filesystem(&cifs_fs_type);
1348 cifs_dfs_release_automount_timer();
1349 #ifdef CONFIG_CIFS_ACL
1350 exit_cifs_idmap();
1351 #endif
1352 #ifdef CONFIG_CIFS_UPCALL
1353 unregister_key_type(&cifs_spnego_key_type);
1354 #endif
1355 cifs_destroy_request_bufs();
1356 cifs_destroy_mids();
1357 cifs_destroy_inodecache();
1358 cifs_fscache_unregister();
1359 destroy_workqueue(cifsiod_wq);
1360 cifs_proc_clean();
1361 }
1362
1363 MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
1364 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1365 MODULE_DESCRIPTION
1366 ("VFS to access servers complying with the SNIA CIFS Specification "
1367 "e.g. Samba and Windows");
1368 MODULE_VERSION(CIFS_VERSION);
1369 module_init(init_cifs)
1370 module_exit(exit_cifs)