1 /*
2 * fs/cifs/cifsfs.c
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * Common Internet FileSystem (CIFS) client
8 *
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24 /* Note that BB means BUGBUG (ie something to fix eventually) */
25
26 #include <linux/module.h>
27 #include <linux/fs.h>
28 #include <linux/mount.h>
29 #include <linux/slab.h>
30 #include <linux/init.h>
31 #include <linux/list.h>
32 #include <linux/seq_file.h>
33 #include <linux/vfs.h>
34 #include <linux/mempool.h>
35 #include <linux/delay.h>
36 #include <linux/kthread.h>
37 #include <linux/freezer.h>
38 #include <linux/namei.h>
39 #include <linux/random.h>
40 #include <linux/xattr.h>
41 #include <net/ipv6.h>
42 #include "cifsfs.h"
43 #include "cifspdu.h"
44 #define DECLARE_GLOBALS_HERE
45 #include "cifsglob.h"
46 #include "cifsproto.h"
47 #include "cifs_debug.h"
48 #include "cifs_fs_sb.h"
49 #include <linux/mm.h>
50 #include <linux/key-type.h>
51 #include "cifs_spnego.h"
52 #include "fscache.h"
53 #ifdef CONFIG_CIFS_SMB2
54 #include "smb2pdu.h"
55 #endif
56
57 int cifsFYI = 0;
58 bool traceSMB;
59 bool enable_oplocks = true;
60 bool linuxExtEnabled = true;
61 bool lookupCacheEnabled = true;
62 unsigned int global_secflags = CIFSSEC_DEF;
63 /* unsigned int ntlmv2_support = 0; */
64 unsigned int sign_CIFS_PDUs = 1;
65 static const struct super_operations cifs_super_ops;
66 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
67 module_param(CIFSMaxBufSize, uint, 0444);
68 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
69 "Default: 16384 Range: 8192 to 130048");
70 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
71 module_param(cifs_min_rcv, uint, 0444);
72 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
73 "1 to 64");
74 unsigned int cifs_min_small = 30;
75 module_param(cifs_min_small, uint, 0444);
76 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
77 "Range: 2 to 256");
78 unsigned int cifs_max_pending = CIFS_MAX_REQ;
79 module_param(cifs_max_pending, uint, 0444);
80 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
81 "Default: 32767 Range: 2 to 32767.");
82 module_param(enable_oplocks, bool, 0644);
83 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
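/*
 * Illustrative examples only (not part of the original source): the
 * read-only (0444) parameters above, such as CIFSMaxBufSize or
 * cifs_max_pending, are set when the module is loaded, e.g.
 *
 *	modprobe cifs CIFSMaxBufSize=65536 cifs_max_pending=256
 *
 * while enable_oplocks (0644) can also be changed at runtime through
 * /sys/module/cifs/parameters/enable_oplocks.
 */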
84
85 extern mempool_t *cifs_sm_req_poolp;
86 extern mempool_t *cifs_req_poolp;
87 extern mempool_t *cifs_mid_poolp;
88
89 struct workqueue_struct *cifsiod_wq;
90 __u32 cifs_lock_secret;
91
92 /*
93 * Bumps refcount for cifs super block.
94  * Note that it should only be called if a reference to the VFS super block is
95 * already held, e.g. in open-type syscalls context. Otherwise it can race with
96 * atomic_dec_and_test in deactivate_locked_super.
97 */
98 void
99 cifs_sb_active(struct super_block *sb)
100 {
101 struct cifs_sb_info *server = CIFS_SB(sb);
102
103 if (atomic_inc_return(&server->active) == 1)
104 atomic_inc(&sb->s_active);
105 }
106
107 void
108 cifs_sb_deactive(struct super_block *sb)
109 {
110 struct cifs_sb_info *server = CIFS_SB(sb);
111
112 if (atomic_dec_and_test(&server->active))
113 deactivate_super(sb);
114 }
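/*
 * Minimal usage sketch (illustrative, not taken from elsewhere in this
 * file): asynchronous work that must not race with unmount brackets
 * itself with the pair above,
 *
 *	cifs_sb_active(inode->i_sb);
 *	... do or queue work that references the superblock ...
 *	cifs_sb_deactive(inode->i_sb);
 *
 * relying on the caller already holding a VFS superblock reference when
 * cifs_sb_active() is called, as noted in the comment above.
 */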
115
116 static int
117 cifs_read_super(struct super_block *sb)
118 {
119 struct inode *inode;
120 struct cifs_sb_info *cifs_sb;
121 struct cifs_tcon *tcon;
122 int rc = 0;
123
124 cifs_sb = CIFS_SB(sb);
125 tcon = cifs_sb_master_tcon(cifs_sb);
126
127 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
128 sb->s_flags |= MS_POSIXACL;
129
130 if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
131 sb->s_maxbytes = MAX_LFS_FILESIZE;
132 else
133 sb->s_maxbytes = MAX_NON_LFS;
134
135 /* BB FIXME fix time_gran to be larger for LANMAN sessions */
136 sb->s_time_gran = 100;
137
138 sb->s_magic = CIFS_MAGIC_NUMBER;
139 sb->s_op = &cifs_super_ops;
140 sb->s_xattr = cifs_xattr_handlers;
141 sb->s_bdi = &cifs_sb->bdi;
142 sb->s_blocksize = CIFS_MAX_MSGSIZE;
143 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
144 inode = cifs_root_iget(sb);
145
146 if (IS_ERR(inode)) {
147 rc = PTR_ERR(inode);
148 goto out_no_root;
149 }
150
151 if (tcon->nocase)
152 sb->s_d_op = &cifs_ci_dentry_ops;
153 else
154 sb->s_d_op = &cifs_dentry_ops;
155
156 sb->s_root = d_make_root(inode);
157 if (!sb->s_root) {
158 rc = -ENOMEM;
159 goto out_no_root;
160 }
161
162 #ifdef CONFIG_CIFS_NFSD_EXPORT
163 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
164 cifs_dbg(FYI, "export ops supported\n");
165 sb->s_export_op = &cifs_export_ops;
166 }
167 #endif /* CONFIG_CIFS_NFSD_EXPORT */
168
169 return 0;
170
171 out_no_root:
172 cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
173 return rc;
174 }
175
176 static void cifs_kill_sb(struct super_block *sb)
177 {
178 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
179 kill_anon_super(sb);
180 cifs_umount(cifs_sb);
181 }
182
183 static int
184 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
185 {
186 struct super_block *sb = dentry->d_sb;
187 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
188 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
189 struct TCP_Server_Info *server = tcon->ses->server;
190 unsigned int xid;
191 int rc = 0;
192
193 xid = get_xid();
194
195 /*
196 * PATH_MAX may be too long - it would presumably be total path,
197  * but note that some servers (including Samba 3) have a shorter
198 * maximum path.
199 *
200 * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO.
201 */
202 buf->f_namelen = PATH_MAX;
203 buf->f_files = 0; /* undefined */
204 buf->f_ffree = 0; /* unlimited */
205
206 if (server->ops->queryfs)
207 rc = server->ops->queryfs(xid, tcon, buf);
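	/*
	 * Note that the return code from ->queryfs is not propagated: the
	 * function returns 0 below, so statfs reports success with the
	 * defaults filled in above even if the server query fails.
	 */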
208
209 free_xid(xid);
210 return 0;
211 }
212
213 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
214 {
215 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
216 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
217 struct TCP_Server_Info *server = tcon->ses->server;
218
219 if (server->ops->fallocate)
220 return server->ops->fallocate(file, tcon, mode, off, len);
221
222 return -EOPNOTSUPP;
223 }
224
225 static int cifs_permission(struct inode *inode, int mask)
226 {
227 struct cifs_sb_info *cifs_sb;
228
229 cifs_sb = CIFS_SB(inode->i_sb);
230
231 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
232 if ((mask & MAY_EXEC) && !execute_ok(inode))
233 return -EACCES;
234 else
235 return 0;
236 } else /* file mode might have been restricted at mount time
237 on the client (above and beyond ACL on servers) for
238 servers which do not support setting and viewing mode bits,
239 so allowing client to check permissions is useful */
240 return generic_permission(inode, mask);
241 }
242
243 static struct kmem_cache *cifs_inode_cachep;
244 static struct kmem_cache *cifs_req_cachep;
245 static struct kmem_cache *cifs_mid_cachep;
246 static struct kmem_cache *cifs_sm_req_cachep;
247 mempool_t *cifs_sm_req_poolp;
248 mempool_t *cifs_req_poolp;
249 mempool_t *cifs_mid_poolp;
250
251 static struct inode *
252 cifs_alloc_inode(struct super_block *sb)
253 {
254 struct cifsInodeInfo *cifs_inode;
255 cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
256 if (!cifs_inode)
257 return NULL;
258 cifs_inode->cifsAttrs = 0x20; /* default */
259 cifs_inode->time = 0;
260 /*
261 * Until the file is open and we have gotten oplock info back from the
262 * server, can not assume caching of file data or metadata.
263 */
264 cifs_set_oplock_level(cifs_inode, 0);
265 cifs_inode->flags = 0;
266 spin_lock_init(&cifs_inode->writers_lock);
267 cifs_inode->writers = 0;
268 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
269 cifs_inode->server_eof = 0;
270 cifs_inode->uniqueid = 0;
271 cifs_inode->createtime = 0;
272 cifs_inode->epoch = 0;
273 #ifdef CONFIG_CIFS_SMB2
274 generate_random_uuid(cifs_inode->lease_key);
275 #endif
276 /*
277 * Can not set i_flags here - they get immediately overwritten to zero
278 * by the VFS.
279 */
280 /* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
281 INIT_LIST_HEAD(&cifs_inode->openFileList);
282 INIT_LIST_HEAD(&cifs_inode->llist);
283 return &cifs_inode->vfs_inode;
284 }
285
286 static void cifs_i_callback(struct rcu_head *head)
287 {
288 struct inode *inode = container_of(head, struct inode, i_rcu);
289 kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
290 }
291
292 static void
293 cifs_destroy_inode(struct inode *inode)
294 {
295 call_rcu(&inode->i_rcu, cifs_i_callback);
296 }
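/*
 * Freeing is deferred through call_rcu() above: the inode is only returned
 * to cifs_inode_cachep in cifs_i_callback() after an RCU grace period, so
 * lockless (RCU-walk) path lookups that may still be inspecting the inode
 * never touch freed memory.
 */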
297
298 static void
299 cifs_evict_inode(struct inode *inode)
300 {
301 truncate_inode_pages_final(&inode->i_data);
302 clear_inode(inode);
303 cifs_fscache_release_inode_cookie(inode);
304 }
305
306 static void
307 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
308 {
309 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
310 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
311
312 seq_puts(s, ",addr=");
313
314 switch (server->dstaddr.ss_family) {
315 case AF_INET:
316 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
317 break;
318 case AF_INET6:
319 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
320 if (sa6->sin6_scope_id)
321 seq_printf(s, "%%%u", sa6->sin6_scope_id);
322 break;
323 default:
324 seq_puts(s, "(unknown)");
325 }
326 }
327
328 static void
329 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
330 {
331 if (ses->sectype == Unspecified) {
332 if (ses->user_name == NULL)
333 seq_puts(s, ",sec=none");
334 return;
335 }
336
337 seq_puts(s, ",sec=");
338
339 switch (ses->sectype) {
340 case LANMAN:
341 seq_puts(s, "lanman");
342 break;
343 case NTLMv2:
344 seq_puts(s, "ntlmv2");
345 break;
346 case NTLM:
347 seq_puts(s, "ntlm");
348 break;
349 case Kerberos:
350 seq_puts(s, "krb5");
351 break;
352 case RawNTLMSSP:
353 seq_puts(s, "ntlmssp");
354 break;
355 default:
356 /* shouldn't ever happen */
357 seq_puts(s, "unknown");
358 break;
359 }
360
361 if (ses->sign)
362 seq_puts(s, "i");
363 }
364
365 static void
366 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
367 {
368 seq_puts(s, ",cache=");
369
370 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
371 seq_puts(s, "strict");
372 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
373 seq_puts(s, "none");
374 else
375 seq_puts(s, "loose");
376 }
377
378 static void
379 cifs_show_nls(struct seq_file *s, struct nls_table *cur)
380 {
381 struct nls_table *def;
382
383 /* Display iocharset= option if it's not default charset */
384 def = load_nls_default();
385 if (def != cur)
386 seq_printf(s, ",iocharset=%s", cur->charset);
387 unload_nls(def);
388 }
389
390 /*
391 * cifs_show_options() is for displaying mount options in /proc/mounts.
392 * Not all settable options are displayed but most of the important
393 * ones are.
394 */
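/*
 * The resulting /proc/mounts entry is one long line, roughly of the form
 * (wrapped here, values illustrative only):
 *
 *   //server/share /mnt cifs rw,vers=3.0,sec=ntlmssp,cache=strict,
 *   username=bob,uid=0,noforceuid,gid=0,noforcegid,addr=192.168.1.10,
 *   file_mode=0755,dir_mode=0755,rsize=1048576,wsize=1048576,
 *   echo_interval=60,actimeo=1 0 0
 *
 * where the device, mount point, filesystem type and rw/ro flag come from
 * the VFS, and the option list is assembled by the helpers below.
 */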
395 static int
396 cifs_show_options(struct seq_file *s, struct dentry *root)
397 {
398 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
399 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
400 struct sockaddr *srcaddr;
401 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
402
403 seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
404 cifs_show_security(s, tcon->ses);
405 cifs_show_cache_flavor(s, cifs_sb);
406
407 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
408 seq_puts(s, ",multiuser");
409 else if (tcon->ses->user_name)
410 seq_show_option(s, "username", tcon->ses->user_name);
411
412 if (tcon->ses->domainName)
413 seq_show_option(s, "domain", tcon->ses->domainName);
414
415 if (srcaddr->sa_family != AF_UNSPEC) {
416 struct sockaddr_in *saddr4;
417 struct sockaddr_in6 *saddr6;
418 saddr4 = (struct sockaddr_in *)srcaddr;
419 saddr6 = (struct sockaddr_in6 *)srcaddr;
420 if (srcaddr->sa_family == AF_INET6)
421 seq_printf(s, ",srcaddr=%pI6c",
422 &saddr6->sin6_addr);
423 else if (srcaddr->sa_family == AF_INET)
424 seq_printf(s, ",srcaddr=%pI4",
425 &saddr4->sin_addr.s_addr);
426 else
427 seq_printf(s, ",srcaddr=BAD-AF:%i",
428 (int)(srcaddr->sa_family));
429 }
430
431 seq_printf(s, ",uid=%u",
432 from_kuid_munged(&init_user_ns, cifs_sb->mnt_uid));
433 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
434 seq_puts(s, ",forceuid");
435 else
436 seq_puts(s, ",noforceuid");
437
438 seq_printf(s, ",gid=%u",
439 from_kgid_munged(&init_user_ns, cifs_sb->mnt_gid));
440 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
441 seq_puts(s, ",forcegid");
442 else
443 seq_puts(s, ",noforcegid");
444
445 cifs_show_address(s, tcon->ses->server);
446
447 if (!tcon->unix_ext)
448 seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
449 cifs_sb->mnt_file_mode,
450 cifs_sb->mnt_dir_mode);
451
452 cifs_show_nls(s, cifs_sb->local_nls);
453
454 if (tcon->seal)
455 seq_puts(s, ",seal");
456 if (tcon->nocase)
457 seq_puts(s, ",nocase");
458 if (tcon->retry)
459 seq_puts(s, ",hard");
460 if (tcon->use_persistent)
461 seq_puts(s, ",persistenthandles");
462 else if (tcon->use_resilient)
463 seq_puts(s, ",resilienthandles");
464 if (tcon->unix_ext)
465 seq_puts(s, ",unix");
466 else
467 seq_puts(s, ",nounix");
468 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
469 seq_puts(s, ",posixpaths");
470 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
471 seq_puts(s, ",setuids");
472 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
473 seq_puts(s, ",idsfromsid");
474 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
475 seq_puts(s, ",serverino");
476 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
477 seq_puts(s, ",rwpidforward");
478 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
479 seq_puts(s, ",forcemand");
480 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
481 seq_puts(s, ",nouser_xattr");
482 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
483 seq_puts(s, ",mapchars");
484 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
485 seq_puts(s, ",mapposix");
486 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
487 seq_puts(s, ",sfu");
488 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
489 seq_puts(s, ",nobrl");
490 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
491 seq_puts(s, ",cifsacl");
492 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
493 seq_puts(s, ",dynperm");
494 if (root->d_sb->s_flags & MS_POSIXACL)
495 seq_puts(s, ",acl");
496 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
497 seq_puts(s, ",mfsymlinks");
498 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
499 seq_puts(s, ",fsc");
500 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
501 seq_puts(s, ",nostrictsync");
502 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
503 seq_puts(s, ",noperm");
504 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
505 seq_printf(s, ",backupuid=%u",
506 from_kuid_munged(&init_user_ns,
507 cifs_sb->mnt_backupuid));
508 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
509 seq_printf(s, ",backupgid=%u",
510 from_kgid_munged(&init_user_ns,
511 cifs_sb->mnt_backupgid));
512
513 seq_printf(s, ",rsize=%u", cifs_sb->rsize);
514 seq_printf(s, ",wsize=%u", cifs_sb->wsize);
515 seq_printf(s, ",echo_interval=%lu",
516 tcon->ses->server->echo_interval / HZ);
517 /* convert actimeo and display it in seconds */
518 seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
519
520 return 0;
521 }
522
523 static void cifs_umount_begin(struct super_block *sb)
524 {
525 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
526 struct cifs_tcon *tcon;
527
528 if (cifs_sb == NULL)
529 return;
530
531 tcon = cifs_sb_master_tcon(cifs_sb);
532
533 spin_lock(&cifs_tcp_ses_lock);
534 if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
535 /* we have other mounts to same share or we have
536 already tried to force umount this and woken up
537 all waiting network requests, nothing to do */
538 spin_unlock(&cifs_tcp_ses_lock);
539 return;
540 } else if (tcon->tc_count == 1)
541 tcon->tidStatus = CifsExiting;
542 spin_unlock(&cifs_tcp_ses_lock);
543
544 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
545 /* cancel_notify_requests(tcon); */
546 if (tcon->ses && tcon->ses->server) {
547 cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
548 wake_up_all(&tcon->ses->server->request_q);
549 wake_up_all(&tcon->ses->server->response_q);
550 msleep(1); /* yield */
551 /* we have to kick the requests once more */
552 wake_up_all(&tcon->ses->server->response_q);
553 msleep(1);
554 }
555
556 return;
557 }
558
559 #ifdef CONFIG_CIFS_STATS2
560 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
561 {
562 /* BB FIXME */
563 return 0;
564 }
565 #endif
566
567 static int cifs_remount(struct super_block *sb, int *flags, char *data)
568 {
569 sync_filesystem(sb);
570 *flags |= MS_NODIRATIME;
571 return 0;
572 }
573
574 static int cifs_drop_inode(struct inode *inode)
575 {
576 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
577
578 /* no serverino => unconditional eviction */
579 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
580 generic_drop_inode(inode);
581 }
582
583 static const struct super_operations cifs_super_ops = {
584 .statfs = cifs_statfs,
585 .alloc_inode = cifs_alloc_inode,
586 .destroy_inode = cifs_destroy_inode,
587 .drop_inode = cifs_drop_inode,
588 .evict_inode = cifs_evict_inode,
589 /* .delete_inode = cifs_delete_inode, */ /* Do not need above
590 function unless later we add lazy close of inodes or unless the
591 kernel forgets to call us with the same number of releases (closes)
592 as opens */
593 .show_options = cifs_show_options,
594 .umount_begin = cifs_umount_begin,
595 .remount_fs = cifs_remount,
596 #ifdef CONFIG_CIFS_STATS2
597 .show_stats = cifs_show_stats,
598 #endif
599 };
600
601 /*
602 * Get root dentry from superblock according to prefix path mount option.
603  * Return dentry with refcount + 1 on success and an ERR_PTR on failure.
604 */
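/*
 * For example (illustrative): mounting //server/share/dir1/dir2 leaves
 * "dir1/dir2" (or "dir1\dir2", depending on CIFS_DIR_SEP) as the prefix
 * path, and the loop below walks it one component at a time with
 * lookup_one_len_unlocked(), starting from sb->s_root.
 */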
605 static struct dentry *
606 cifs_get_root(struct smb_vol *vol, struct super_block *sb)
607 {
608 struct dentry *dentry;
609 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
610 char *full_path = NULL;
611 char *s, *p;
612 char sep;
613
614 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
615 return dget(sb->s_root);
616
617 full_path = cifs_build_path_to_root(vol, cifs_sb,
618 cifs_sb_master_tcon(cifs_sb), 0);
619 if (full_path == NULL)
620 return ERR_PTR(-ENOMEM);
621
622 cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
623
624 sep = CIFS_DIR_SEP(cifs_sb);
625 dentry = dget(sb->s_root);
626 p = s = full_path;
627
628 do {
629 struct inode *dir = d_inode(dentry);
630 struct dentry *child;
631
632 if (!dir) {
633 dput(dentry);
634 dentry = ERR_PTR(-ENOENT);
635 break;
636 }
637 if (!S_ISDIR(dir->i_mode)) {
638 dput(dentry);
639 dentry = ERR_PTR(-ENOTDIR);
640 break;
641 }
642
643 /* skip separators */
644 while (*s == sep)
645 s++;
646 if (!*s)
647 break;
648 p = s++;
649 /* next separator */
650 while (*s && *s != sep)
651 s++;
652
653 child = lookup_one_len_unlocked(p, dentry, s - p);
654 dput(dentry);
655 dentry = child;
656 } while (!IS_ERR(dentry));
657 kfree(full_path);
658 return dentry;
659 }
660
661 static int cifs_set_super(struct super_block *sb, void *data)
662 {
663 struct cifs_mnt_data *mnt_data = data;
664 sb->s_fs_info = mnt_data->cifs_sb;
665 return set_anon_super(sb, NULL);
666 }
667
668 static struct dentry *
669 cifs_do_mount(struct file_system_type *fs_type,
670 int flags, const char *dev_name, void *data)
671 {
672 int rc;
673 struct super_block *sb;
674 struct cifs_sb_info *cifs_sb;
675 struct smb_vol *volume_info;
676 struct cifs_mnt_data mnt_data;
677 struct dentry *root;
678
679 cifs_dbg(FYI, "Devname: %s flags: %d\n", dev_name, flags);
680
681 volume_info = cifs_get_volume_info((char *)data, dev_name);
682 if (IS_ERR(volume_info))
683 return ERR_CAST(volume_info);
684
685 cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
686 if (cifs_sb == NULL) {
687 root = ERR_PTR(-ENOMEM);
688 goto out_nls;
689 }
690
691 cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
692 if (cifs_sb->mountdata == NULL) {
693 root = ERR_PTR(-ENOMEM);
694 goto out_free;
695 }
696
697 rc = cifs_setup_cifs_sb(volume_info, cifs_sb);
698 if (rc) {
699 root = ERR_PTR(rc);
700 goto out_free;
701 }
702
703 rc = cifs_mount(cifs_sb, volume_info);
704 if (rc) {
705 if (!(flags & MS_SILENT))
706 cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
707 rc);
708 root = ERR_PTR(rc);
709 goto out_free;
710 }
711
712 mnt_data.vol = volume_info;
713 mnt_data.cifs_sb = cifs_sb;
714 mnt_data.flags = flags;
715
716 /* BB should we make this contingent on mount parm? */
717 flags |= MS_NODIRATIME | MS_NOATIME;
718
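	/*
	 * sget() either allocates a fresh anonymous superblock (set up via
	 * cifs_set_super) or, when cifs_match_super() finds an existing
	 * superblock for an equivalent mount, returns that one; in the
	 * latter case the cifs_sb prepared above is torn down again below.
	 */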
719 sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
720 if (IS_ERR(sb)) {
721 root = ERR_CAST(sb);
722 cifs_umount(cifs_sb);
723 goto out;
724 }
725
726 if (sb->s_root) {
727 cifs_dbg(FYI, "Use existing superblock\n");
728 cifs_umount(cifs_sb);
729 } else {
730 rc = cifs_read_super(sb);
731 if (rc) {
732 root = ERR_PTR(rc);
733 goto out_super;
734 }
735
736 sb->s_flags |= MS_ACTIVE;
737 }
738
739 root = cifs_get_root(volume_info, sb);
740 if (IS_ERR(root))
741 goto out_super;
742
743 cifs_dbg(FYI, "dentry root is: %p\n", root);
744 goto out;
745
746 out_super:
747 deactivate_locked_super(sb);
748 out:
749 cifs_cleanup_volume_info(volume_info);
750 return root;
751
752 out_free:
753 kfree(cifs_sb->prepath);
754 kfree(cifs_sb->mountdata);
755 kfree(cifs_sb);
756 out_nls:
757 unload_nls(volume_info->local_nls);
758 goto out;
759 }
760
761 static ssize_t
762 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
763 {
764 ssize_t rc;
765 struct inode *inode = file_inode(iocb->ki_filp);
766
767 if (iocb->ki_filp->f_flags & O_DIRECT)
768 return cifs_user_readv(iocb, iter);
769
770 rc = cifs_revalidate_mapping(inode);
771 if (rc)
772 return rc;
773
774 return generic_file_read_iter(iocb, iter);
775 }
776
777 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
778 {
779 struct inode *inode = file_inode(iocb->ki_filp);
780 struct cifsInodeInfo *cinode = CIFS_I(inode);
781 ssize_t written;
782 int rc;
783
784 if (iocb->ki_filp->f_flags & O_DIRECT) {
785 written = cifs_user_writev(iocb, from);
786 if (written > 0 && CIFS_CACHE_READ(cinode)) {
787 cifs_zap_mapping(inode);
788 cifs_dbg(FYI,
789 "Set no oplock for inode=%p after a write operation\n",
790 inode);
791 cinode->oplock = 0;
792 }
793 return written;
794 }
795
796 written = cifs_get_writer(cinode);
797 if (written)
798 return written;
799
800 written = generic_file_write_iter(iocb, from);
801
802 if (CIFS_CACHE_WRITE(CIFS_I(inode)))
803 goto out;
804
805 rc = filemap_fdatawrite(inode->i_mapping);
806 if (rc)
807 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
808 rc, inode);
809
810 out:
811 cifs_put_writer(cinode);
812 return written;
813 }
814
815 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
816 {
817 /*
818 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
819 * the cached file length
820 */
821 if (whence != SEEK_SET && whence != SEEK_CUR) {
822 int rc;
823 struct inode *inode = file_inode(file);
824
825 /*
826 * We need to be sure that all dirty pages are written and the
827 * server has the newest file length.
828 */
829 if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
830 inode->i_mapping->nrpages != 0) {
831 rc = filemap_fdatawait(inode->i_mapping);
832 if (rc) {
833 mapping_set_error(inode->i_mapping, rc);
834 return rc;
835 }
836 }
837 /*
838 * Some applications poll for the file length in this strange
839 * way so we must seek to end on non-oplocked files by
840 * setting the revalidate time to zero.
841 */
842 CIFS_I(inode)->time = 0;
843
844 rc = cifs_revalidate_file_attr(file);
845 if (rc < 0)
846 return (loff_t)rc;
847 }
848 return generic_file_llseek(file, offset, whence);
849 }
850
851 static int
852 cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
853 {
854 /*
855 * Note that this is called by vfs setlease with i_lock held to
856 * protect *lease from going away.
857 */
858 struct inode *inode = file_inode(file);
859 struct cifsFileInfo *cfile = file->private_data;
860
861 if (!(S_ISREG(inode->i_mode)))
862 return -EINVAL;
863
864 /* Check if file is oplocked if this is request for new lease */
865 if (arg == F_UNLCK ||
866 ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
867 ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
868 return generic_setlease(file, arg, lease, priv);
869 else if (tlink_tcon(cfile->tlink)->local_lease &&
870 !CIFS_CACHE_READ(CIFS_I(inode)))
871 /*
872 * If the server claims to support oplock on this file, then we
873 * still need to check oplock even if the local_lease mount
874 * option is set, but there are servers which do not support
875 * oplock for which this mount option may be useful if the user
876 * knows that the file won't be changed on the server by anyone
877 * else.
878 */
879 return generic_setlease(file, arg, lease, priv);
880 else
881 return -EAGAIN;
882 }
883
884 struct file_system_type cifs_fs_type = {
885 .owner = THIS_MODULE,
886 .name = "cifs",
887 .mount = cifs_do_mount,
888 .kill_sb = cifs_kill_sb,
889 /* .fs_flags */
890 };
891 MODULE_ALIAS_FS("cifs");
892 const struct inode_operations cifs_dir_inode_ops = {
893 .create = cifs_create,
894 .atomic_open = cifs_atomic_open,
895 .lookup = cifs_lookup,
896 .getattr = cifs_getattr,
897 .unlink = cifs_unlink,
898 .link = cifs_hardlink,
899 .mkdir = cifs_mkdir,
900 .rmdir = cifs_rmdir,
901 .rename = cifs_rename2,
902 .permission = cifs_permission,
903 .setattr = cifs_setattr,
904 .symlink = cifs_symlink,
905 .mknod = cifs_mknod,
906 .listxattr = cifs_listxattr,
907 };
908
909 const struct inode_operations cifs_file_inode_ops = {
910 .setattr = cifs_setattr,
911 .getattr = cifs_getattr,
912 .permission = cifs_permission,
913 .listxattr = cifs_listxattr,
914 };
915
916 const struct inode_operations cifs_symlink_inode_ops = {
917 .get_link = cifs_get_link,
918 .permission = cifs_permission,
919 .listxattr = cifs_listxattr,
920 };
921
922 static int cifs_clone_file_range(struct file *src_file, loff_t off,
923 struct file *dst_file, loff_t destoff, u64 len)
924 {
925 struct inode *src_inode = file_inode(src_file);
926 struct inode *target_inode = file_inode(dst_file);
927 struct cifsFileInfo *smb_file_src = src_file->private_data;
928 struct cifsFileInfo *smb_file_target = dst_file->private_data;
929 struct cifs_tcon *target_tcon = tlink_tcon(smb_file_target->tlink);
930 unsigned int xid;
931 int rc;
932
933 cifs_dbg(FYI, "clone range\n");
934
935 xid = get_xid();
936
937 if (!src_file->private_data || !dst_file->private_data) {
938 rc = -EBADF;
939 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
940 goto out;
941 }
942
943 /*
944  * Note: the cifs case is easier than btrfs since the server is
945  * responsible for checking for proper open modes and file type, and if it
946  * wants, the server could even support copying a range where source = target
947 */
948 lock_two_nondirectories(target_inode, src_inode);
949
950 if (len == 0)
951 len = src_inode->i_size - off;
952
953 cifs_dbg(FYI, "about to flush pages\n");
954  /* should we flush the first and last pages first? */
955 truncate_inode_pages_range(&target_inode->i_data, destoff,
956 PAGE_ALIGN(destoff + len)-1);
957
958 if (target_tcon->ses->server->ops->duplicate_extents)
959 rc = target_tcon->ses->server->ops->duplicate_extents(xid,
960 smb_file_src, smb_file_target, off, len, destoff);
961 else
962 rc = -EOPNOTSUPP;
963
964 /* force revalidate of size and timestamps of target file now
965 that target is updated on the server */
966 CIFS_I(target_inode)->time = 0;
967 /* although unlocking in the reverse order from locking is not
968 strictly necessary here it is a little cleaner to be consistent */
969 unlock_two_nondirectories(src_inode, target_inode);
970 out:
971 free_xid(xid);
972 return rc;
973 }
974
975 const struct file_operations cifs_file_ops = {
976 .read_iter = cifs_loose_read_iter,
977 .write_iter = cifs_file_write_iter,
978 .open = cifs_open,
979 .release = cifs_close,
980 .lock = cifs_lock,
981 .fsync = cifs_fsync,
982 .flush = cifs_flush,
983 .mmap = cifs_file_mmap,
984 .splice_read = generic_file_splice_read,
985 .llseek = cifs_llseek,
986 .unlocked_ioctl = cifs_ioctl,
987 .clone_file_range = cifs_clone_file_range,
988 .setlease = cifs_setlease,
989 .fallocate = cifs_fallocate,
990 };
991
992 const struct file_operations cifs_file_strict_ops = {
993 .read_iter = cifs_strict_readv,
994 .write_iter = cifs_strict_writev,
995 .open = cifs_open,
996 .release = cifs_close,
997 .lock = cifs_lock,
998 .fsync = cifs_strict_fsync,
999 .flush = cifs_flush,
1000 .mmap = cifs_file_strict_mmap,
1001 .splice_read = generic_file_splice_read,
1002 .llseek = cifs_llseek,
1003 .unlocked_ioctl = cifs_ioctl,
1004 .clone_file_range = cifs_clone_file_range,
1005 .setlease = cifs_setlease,
1006 .fallocate = cifs_fallocate,
1007 };
1008
1009 const struct file_operations cifs_file_direct_ops = {
1010 /* BB reevaluate whether they can be done with directio, no cache */
1011 .read_iter = cifs_user_readv,
1012 .write_iter = cifs_user_writev,
1013 .open = cifs_open,
1014 .release = cifs_close,
1015 .lock = cifs_lock,
1016 .fsync = cifs_fsync,
1017 .flush = cifs_flush,
1018 .mmap = cifs_file_mmap,
1019 .splice_read = generic_file_splice_read,
1020 .unlocked_ioctl = cifs_ioctl,
1021 .clone_file_range = cifs_clone_file_range,
1022 .llseek = cifs_llseek,
1023 .setlease = cifs_setlease,
1024 .fallocate = cifs_fallocate,
1025 };
1026
1027 const struct file_operations cifs_file_nobrl_ops = {
1028 .read_iter = cifs_loose_read_iter,
1029 .write_iter = cifs_file_write_iter,
1030 .open = cifs_open,
1031 .release = cifs_close,
1032 .fsync = cifs_fsync,
1033 .flush = cifs_flush,
1034 .mmap = cifs_file_mmap,
1035 .splice_read = generic_file_splice_read,
1036 .llseek = cifs_llseek,
1037 .unlocked_ioctl = cifs_ioctl,
1038 .clone_file_range = cifs_clone_file_range,
1039 .setlease = cifs_setlease,
1040 .fallocate = cifs_fallocate,
1041 };
1042
1043 const struct file_operations cifs_file_strict_nobrl_ops = {
1044 .read_iter = cifs_strict_readv,
1045 .write_iter = cifs_strict_writev,
1046 .open = cifs_open,
1047 .release = cifs_close,
1048 .fsync = cifs_strict_fsync,
1049 .flush = cifs_flush,
1050 .mmap = cifs_file_strict_mmap,
1051 .splice_read = generic_file_splice_read,
1052 .llseek = cifs_llseek,
1053 .unlocked_ioctl = cifs_ioctl,
1054 .clone_file_range = cifs_clone_file_range,
1055 .setlease = cifs_setlease,
1056 .fallocate = cifs_fallocate,
1057 };
1058
1059 const struct file_operations cifs_file_direct_nobrl_ops = {
1060 /* BB reevaluate whether they can be done with directio, no cache */
1061 .read_iter = cifs_user_readv,
1062 .write_iter = cifs_user_writev,
1063 .open = cifs_open,
1064 .release = cifs_close,
1065 .fsync = cifs_fsync,
1066 .flush = cifs_flush,
1067 .mmap = cifs_file_mmap,
1068 .splice_read = generic_file_splice_read,
1069 .unlocked_ioctl = cifs_ioctl,
1070 .clone_file_range = cifs_clone_file_range,
1071 .llseek = cifs_llseek,
1072 .setlease = cifs_setlease,
1073 .fallocate = cifs_fallocate,
1074 };
1075
1076 const struct file_operations cifs_dir_ops = {
1077 .iterate_shared = cifs_readdir,
1078 .release = cifs_closedir,
1079 .read = generic_read_dir,
1080 .unlocked_ioctl = cifs_ioctl,
1081 .clone_file_range = cifs_clone_file_range,
1082 .llseek = generic_file_llseek,
1083 };
1084
1085 static void
1086 cifs_init_once(void *inode)
1087 {
1088 struct cifsInodeInfo *cifsi = inode;
1089
1090 inode_init_once(&cifsi->vfs_inode);
1091 init_rwsem(&cifsi->lock_sem);
1092 }
1093
1094 static int __init
1095 cifs_init_inodecache(void)
1096 {
1097 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1098 sizeof(struct cifsInodeInfo),
1099 0, (SLAB_RECLAIM_ACCOUNT|
1100 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1101 cifs_init_once);
1102 if (cifs_inode_cachep == NULL)
1103 return -ENOMEM;
1104
1105 return 0;
1106 }
1107
1108 static void
1109 cifs_destroy_inodecache(void)
1110 {
1111 /*
1112 * Make sure all delayed rcu free inodes are flushed before we
1113 * destroy cache.
1114 */
1115 rcu_barrier();
1116 kmem_cache_destroy(cifs_inode_cachep);
1117 }
1118
1119 static int
1120 cifs_init_request_bufs(void)
1121 {
1122 size_t max_hdr_size = MAX_CIFS_HDR_SIZE;
1123 #ifdef CONFIG_CIFS_SMB2
1124 /*
1125  * The SMB2 maximum header size is bigger than the CIFS one, so it is
1126  * no problem to allocate a few extra bytes for CIFS as well.
1127 */
1128 max_hdr_size = MAX_SMB2_HDR_SIZE;
1129 #endif
1130 if (CIFSMaxBufSize < 8192) {
1131 /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1132 Unicode path name has to fit in any SMB/CIFS path based frames */
1133 CIFSMaxBufSize = 8192;
1134 } else if (CIFSMaxBufSize > 1024*127) {
1135 CIFSMaxBufSize = 1024 * 127;
1136 } else {
1137 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1138 }
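	/*
	 * Worked example (illustrative): a module parameter of 9000 falls
	 * into the final branch and is rounded down to 9000 & 0x1FE00 = 8704,
	 * while 65536 is already a multiple of 512 below the cap and is left
	 * unchanged.
	 */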
1139 /*
1140 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1141 CIFSMaxBufSize, CIFSMaxBufSize);
1142 */
1143 cifs_req_cachep = kmem_cache_create("cifs_request",
1144 CIFSMaxBufSize + max_hdr_size, 0,
1145 SLAB_HWCACHE_ALIGN, NULL);
1146 if (cifs_req_cachep == NULL)
1147 return -ENOMEM;
1148
1149 if (cifs_min_rcv < 1)
1150 cifs_min_rcv = 1;
1151 else if (cifs_min_rcv > 64) {
1152 cifs_min_rcv = 64;
1153 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1154 }
1155
1156 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1157 cifs_req_cachep);
1158
1159 if (cifs_req_poolp == NULL) {
1160 kmem_cache_destroy(cifs_req_cachep);
1161 return -ENOMEM;
1162 }
1163 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1164 almost all handle based requests (but not write response, nor is it
1165 sufficient for path based requests). A smaller size would have
1166 been more efficient (compacting multiple slab items on one 4k page)
1167 for the case in which debug was on, but this larger size allows
1168 more SMBs to use small buffer alloc and is still much more
1169 efficient to alloc 1 per page off the slab compared to 17K (5page)
1170 alloc of large cifs buffers even when page debugging is on */
1171 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
1172 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1173 NULL);
1174 if (cifs_sm_req_cachep == NULL) {
1175 mempool_destroy(cifs_req_poolp);
1176 kmem_cache_destroy(cifs_req_cachep);
1177 return -ENOMEM;
1178 }
1179
1180 if (cifs_min_small < 2)
1181 cifs_min_small = 2;
1182 else if (cifs_min_small > 256) {
1183 cifs_min_small = 256;
1184 cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1185 }
1186
1187 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1188 cifs_sm_req_cachep);
1189
1190 if (cifs_sm_req_poolp == NULL) {
1191 mempool_destroy(cifs_req_poolp);
1192 kmem_cache_destroy(cifs_req_cachep);
1193 kmem_cache_destroy(cifs_sm_req_cachep);
1194 return -ENOMEM;
1195 }
1196
1197 return 0;
1198 }
1199
1200 static void
1201 cifs_destroy_request_bufs(void)
1202 {
1203 mempool_destroy(cifs_req_poolp);
1204 kmem_cache_destroy(cifs_req_cachep);
1205 mempool_destroy(cifs_sm_req_poolp);
1206 kmem_cache_destroy(cifs_sm_req_cachep);
1207 }
1208
1209 static int
1210 cifs_init_mids(void)
1211 {
1212 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1213 sizeof(struct mid_q_entry), 0,
1214 SLAB_HWCACHE_ALIGN, NULL);
1215 if (cifs_mid_cachep == NULL)
1216 return -ENOMEM;
1217
1218 /* 3 is a reasonable minimum number of simultaneous operations */
1219 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1220 if (cifs_mid_poolp == NULL) {
1221 kmem_cache_destroy(cifs_mid_cachep);
1222 return -ENOMEM;
1223 }
1224
1225 return 0;
1226 }
1227
1228 static void
1229 cifs_destroy_mids(void)
1230 {
1231 mempool_destroy(cifs_mid_poolp);
1232 kmem_cache_destroy(cifs_mid_cachep);
1233 }
1234
1235 static int __init
1236 init_cifs(void)
1237 {
1238 int rc = 0;
1239 cifs_proc_init();
1240 INIT_LIST_HEAD(&cifs_tcp_ses_list);
1241 #ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
1242 INIT_LIST_HEAD(&GlobalDnotifyReqList);
1243 INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
1244 #endif /* was needed for dnotify, and will be needed for inotify when the VFS is fixed */
1245 /*
1246 * Initialize Global counters
1247 */
1248 atomic_set(&sesInfoAllocCount, 0);
1249 atomic_set(&tconInfoAllocCount, 0);
1250 atomic_set(&tcpSesAllocCount, 0);
1251 atomic_set(&tcpSesReconnectCount, 0);
1252 atomic_set(&tconInfoReconnectCount, 0);
1253
1254 atomic_set(&bufAllocCount, 0);
1255 atomic_set(&smBufAllocCount, 0);
1256 #ifdef CONFIG_CIFS_STATS2
1257 atomic_set(&totBufAllocCount, 0);
1258 atomic_set(&totSmBufAllocCount, 0);
1259 #endif /* CONFIG_CIFS_STATS2 */
1260
1261 atomic_set(&midCount, 0);
1262 GlobalCurrentXid = 0;
1263 GlobalTotalActiveXid = 0;
1264 GlobalMaxActiveXid = 0;
1265 spin_lock_init(&cifs_tcp_ses_lock);
1266 spin_lock_init(&GlobalMid_Lock);
1267
1268 get_random_bytes(&cifs_lock_secret, sizeof(cifs_lock_secret));
1269
1270 if (cifs_max_pending < 2) {
1271 cifs_max_pending = 2;
1272 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1273 } else if (cifs_max_pending > CIFS_MAX_REQ) {
1274 cifs_max_pending = CIFS_MAX_REQ;
1275 cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1276 CIFS_MAX_REQ);
1277 }
1278
1279 cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1280 if (!cifsiod_wq) {
1281 rc = -ENOMEM;
1282 goto out_clean_proc;
1283 }
1284
1285 rc = cifs_fscache_register();
1286 if (rc)
1287 goto out_destroy_wq;
1288
1289 rc = cifs_init_inodecache();
1290 if (rc)
1291 goto out_unreg_fscache;
1292
1293 rc = cifs_init_mids();
1294 if (rc)
1295 goto out_destroy_inodecache;
1296
1297 rc = cifs_init_request_bufs();
1298 if (rc)
1299 goto out_destroy_mids;
1300
1301 #ifdef CONFIG_CIFS_UPCALL
1302 rc = init_cifs_spnego();
1303 if (rc)
1304 goto out_destroy_request_bufs;
1305 #endif /* CONFIG_CIFS_UPCALL */
1306
1307 #ifdef CONFIG_CIFS_ACL
1308 rc = init_cifs_idmap();
1309 if (rc)
1310 goto out_register_key_type;
1311 #endif /* CONFIG_CIFS_ACL */
1312
1313 rc = register_filesystem(&cifs_fs_type);
1314 if (rc)
1315 goto out_init_cifs_idmap;
1316
1317 return 0;
1318
1319 out_init_cifs_idmap:
1320 #ifdef CONFIG_CIFS_ACL
1321 exit_cifs_idmap();
1322 out_register_key_type:
1323 #endif
1324 #ifdef CONFIG_CIFS_UPCALL
1325 exit_cifs_spnego();
1326 out_destroy_request_bufs:
1327 #endif
1328 cifs_destroy_request_bufs();
1329 out_destroy_mids:
1330 cifs_destroy_mids();
1331 out_destroy_inodecache:
1332 cifs_destroy_inodecache();
1333 out_unreg_fscache:
1334 cifs_fscache_unregister();
1335 out_destroy_wq:
1336 destroy_workqueue(cifsiod_wq);
1337 out_clean_proc:
1338 cifs_proc_clean();
1339 return rc;
1340 }
1341
1342 static void __exit
1343 exit_cifs(void)
1344 {
1345 cifs_dbg(NOISY, "exit_cifs\n");
1346 unregister_filesystem(&cifs_fs_type);
1347 cifs_dfs_release_automount_timer();
1348 #ifdef CONFIG_CIFS_ACL
1349 exit_cifs_idmap();
1350 #endif
1351 #ifdef CONFIG_CIFS_UPCALL
1352 unregister_key_type(&cifs_spnego_key_type);
1353 #endif
1354 cifs_destroy_request_bufs();
1355 cifs_destroy_mids();
1356 cifs_destroy_inodecache();
1357 cifs_fscache_unregister();
1358 destroy_workqueue(cifsiod_wq);
1359 cifs_proc_clean();
1360 }
1361
1362 MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
1363 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1364 MODULE_DESCRIPTION
1365 ("VFS to access servers complying with the SNIA CIFS Specification "
1366 "e.g. Samba and Windows");
1367 MODULE_VERSION(CIFS_VERSION);
1368 module_init(init_cifs)
1369 module_exit(exit_cifs)