1 /*
2 * fs/cifs/cifsfs.c
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * Common Internet FileSystem (CIFS) client
8 *
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24 /* Note that BB means BUGBUG (i.e. something to fix eventually) */
25
26 #include <linux/module.h>
27 #include <linux/fs.h>
28 #include <linux/mount.h>
29 #include <linux/slab.h>
30 #include <linux/init.h>
31 #include <linux/list.h>
32 #include <linux/seq_file.h>
33 #include <linux/vfs.h>
34 #include <linux/mempool.h>
35 #include <linux/delay.h>
36 #include <linux/kthread.h>
37 #include <linux/freezer.h>
38 #include <linux/namei.h>
39 #include <linux/random.h>
40 #include <linux/xattr.h>
41 #include <net/ipv6.h>
42 #include "cifsfs.h"
43 #include "cifspdu.h"
44 #define DECLARE_GLOBALS_HERE
45 #include "cifsglob.h"
46 #include "cifsproto.h"
47 #include "cifs_debug.h"
48 #include "cifs_fs_sb.h"
49 #include <linux/mm.h>
50 #include <linux/key-type.h>
51 #include "cifs_spnego.h"
52 #include "fscache.h"
53 #ifdef CONFIG_CIFS_SMB2
54 #include "smb2pdu.h"
55 #endif
56
57 int cifsFYI = 0;
58 bool traceSMB;
59 bool enable_oplocks = true;
60 bool linuxExtEnabled = true;
61 bool lookupCacheEnabled = true;
62 unsigned int global_secflags = CIFSSEC_DEF;
63 /* unsigned int ntlmv2_support = 0; */
64 unsigned int sign_CIFS_PDUs = 1;
65 static const struct super_operations cifs_super_ops;
66 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
67 module_param(CIFSMaxBufSize, uint, 0);
68 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
69 "Default: 16384 Range: 8192 to 130048");
70 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
71 module_param(cifs_min_rcv, uint, 0);
72 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
73 "1 to 64");
74 unsigned int cifs_min_small = 30;
75 module_param(cifs_min_small, uint, 0);
76 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
77 "Range: 2 to 256");
78 unsigned int cifs_max_pending = CIFS_MAX_REQ;
79 module_param(cifs_max_pending, uint, 0444);
80 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
81 "Default: 32767 Range: 2 to 32767.");
82 module_param(enable_oplocks, bool, 0644);
83 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
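/*
 * These module parameters can be set when the module is loaded, for example:
 *   modprobe cifs cifs_max_pending=256 enable_oplocks=N
 * Parameters declared with a writable mode (such as enable_oplocks, 0644) can
 * also be changed at runtime via /sys/module/cifs/parameters/.
 */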
84
85 extern mempool_t *cifs_sm_req_poolp;
86 extern mempool_t *cifs_req_poolp;
87 extern mempool_t *cifs_mid_poolp;
88
89 struct workqueue_struct *cifsiod_wq;
90 __u32 cifs_lock_secret;
91
92 /*
93 * Bumps refcount for cifs super block.
94  * Note that it should only be called if a reference to the VFS super block is
95  * already held, e.g. in the context of open-type syscalls. Otherwise it can race with
96 * atomic_dec_and_test in deactivate_locked_super.
97 */
98 void
99 cifs_sb_active(struct super_block *sb)
100 {
101 struct cifs_sb_info *server = CIFS_SB(sb);
102
103 if (atomic_inc_return(&server->active) == 1)
104 atomic_inc(&sb->s_active);
105 }
106
107 void
108 cifs_sb_deactive(struct super_block *sb)
109 {
110 struct cifs_sb_info *server = CIFS_SB(sb);
111
112 if (atomic_dec_and_test(&server->active))
113 deactivate_super(sb);
114 }
115
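/*
 * Fill in the CIFS-specific fields of a freshly allocated superblock: size
 * limits based on the negotiated large-file capability, block size, super and
 * xattr operations, and the dentry operations (case-insensitive when the tcon
 * was mounted with nocase). Finishes by instantiating the root inode/dentry.
 */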
116 static int
117 cifs_read_super(struct super_block *sb)
118 {
119 struct inode *inode;
120 struct cifs_sb_info *cifs_sb;
121 struct cifs_tcon *tcon;
122 int rc = 0;
123
124 cifs_sb = CIFS_SB(sb);
125 tcon = cifs_sb_master_tcon(cifs_sb);
126
127 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
128 sb->s_flags |= MS_POSIXACL;
129
130 if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
131 sb->s_maxbytes = MAX_LFS_FILESIZE;
132 else
133 sb->s_maxbytes = MAX_NON_LFS;
134
135 /* BB FIXME fix time_gran to be larger for LANMAN sessions */
136 sb->s_time_gran = 100;
137
138 sb->s_magic = CIFS_MAGIC_NUMBER;
139 sb->s_op = &cifs_super_ops;
140 sb->s_xattr = cifs_xattr_handlers;
141 sb->s_bdi = &cifs_sb->bdi;
142 sb->s_blocksize = CIFS_MAX_MSGSIZE;
143 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
144 inode = cifs_root_iget(sb);
145
146 if (IS_ERR(inode)) {
147 rc = PTR_ERR(inode);
148 goto out_no_root;
149 }
150
151 if (tcon->nocase)
152 sb->s_d_op = &cifs_ci_dentry_ops;
153 else
154 sb->s_d_op = &cifs_dentry_ops;
155
156 sb->s_root = d_make_root(inode);
157 if (!sb->s_root) {
158 rc = -ENOMEM;
159 goto out_no_root;
160 }
161
162 #ifdef CONFIG_CIFS_NFSD_EXPORT
163 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
164 cifs_dbg(FYI, "export ops supported\n");
165 sb->s_export_op = &cifs_export_ops;
166 }
167 #endif /* CONFIG_CIFS_NFSD_EXPORT */
168
169 return 0;
170
171 out_no_root:
172 cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
173 return rc;
174 }
175
176 static void cifs_kill_sb(struct super_block *sb)
177 {
178 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
179 kill_anon_super(sb);
180 cifs_umount(cifs_sb);
181 }
182
183 static int
184 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
185 {
186 struct super_block *sb = dentry->d_sb;
187 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
188 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
189 struct TCP_Server_Info *server = tcon->ses->server;
190 unsigned int xid;
191 int rc = 0;
192
193 xid = get_xid();
194
195 /*
196  * PATH_MAX may be too long - it would presumably be the total path,
197  * but note that some servers (including Samba 3) have a shorter
198 * maximum path.
199 *
200 * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO.
201 */
202 buf->f_namelen = PATH_MAX;
203 buf->f_files = 0; /* undefined */
204 buf->f_ffree = 0; /* unlimited */
205
206 if (server->ops->queryfs)
207 rc = server->ops->queryfs(xid, tcon, buf);
208
209 free_xid(xid);
210 return 0;
211 }
212
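/*
 * fallocate is simply delegated to the protocol-specific handler when the
 * server operations table provides one; otherwise the operation is not
 * supported for this dialect.
 */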
213 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
214 {
215 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
216 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
217 struct TCP_Server_Info *server = tcon->ses->server;
218
219 if (server->ops->fallocate)
220 return server->ops->fallocate(file, tcon, mode, off, len);
221
222 return -EOPNOTSUPP;
223 }
224
225 static int cifs_permission(struct inode *inode, int mask)
226 {
227 struct cifs_sb_info *cifs_sb;
228
229 cifs_sb = CIFS_SB(inode->i_sb);
230
231 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
232 if ((mask & MAY_EXEC) && !execute_ok(inode))
233 return -EACCES;
234 else
235 return 0;
236 	} else /* the file mode might have been restricted at mount time
237 		on the client (above and beyond the ACL on the server) for
238 		servers which do not support setting and viewing mode bits,
239 		so allowing the client to check permissions is useful */
240 return generic_permission(inode, mask);
241 }
242
243 static struct kmem_cache *cifs_inode_cachep;
244 static struct kmem_cache *cifs_req_cachep;
245 static struct kmem_cache *cifs_mid_cachep;
246 static struct kmem_cache *cifs_sm_req_cachep;
247 mempool_t *cifs_sm_req_poolp;
248 mempool_t *cifs_req_poolp;
249 mempool_t *cifs_mid_poolp;
250
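/*
 * Allocate a cifsInodeInfo from the inode slab cache and reset the per-inode
 * state. No caching of data or metadata is assumed (oplock level 0) until
 * the server grants an oplock/lease on open.
 */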
251 static struct inode *
252 cifs_alloc_inode(struct super_block *sb)
253 {
254 struct cifsInodeInfo *cifs_inode;
255 cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
256 if (!cifs_inode)
257 return NULL;
258 	cifs_inode->cifsAttrs = 0x20;	/* default is ATTR_ARCHIVE (0x20) */
259 cifs_inode->time = 0;
260 /*
261 * Until the file is open and we have gotten oplock info back from the
262  * server, we cannot assume caching of file data or metadata.
263 */
264 cifs_set_oplock_level(cifs_inode, 0);
265 cifs_inode->flags = 0;
266 spin_lock_init(&cifs_inode->writers_lock);
267 cifs_inode->writers = 0;
268 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
269 cifs_inode->server_eof = 0;
270 cifs_inode->uniqueid = 0;
271 cifs_inode->createtime = 0;
272 cifs_inode->epoch = 0;
273 #ifdef CONFIG_CIFS_SMB2
274 get_random_bytes(cifs_inode->lease_key, SMB2_LEASE_KEY_SIZE);
275 #endif
276 /*
277 * Can not set i_flags here - they get immediately overwritten to zero
278 * by the VFS.
279 */
280 /* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
281 INIT_LIST_HEAD(&cifs_inode->openFileList);
282 INIT_LIST_HEAD(&cifs_inode->llist);
283 return &cifs_inode->vfs_inode;
284 }
285
286 static void cifs_i_callback(struct rcu_head *head)
287 {
288 struct inode *inode = container_of(head, struct inode, i_rcu);
289 kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
290 }
291
292 static void
293 cifs_destroy_inode(struct inode *inode)
294 {
295 call_rcu(&inode->i_rcu, cifs_i_callback);
296 }
297
298 static void
299 cifs_evict_inode(struct inode *inode)
300 {
301 truncate_inode_pages_final(&inode->i_data);
302 clear_inode(inode);
303 cifs_fscache_release_inode_cookie(inode);
304 }
305
306 static void
307 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
308 {
309 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
310 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
311
312 seq_puts(s, ",addr=");
313
314 switch (server->dstaddr.ss_family) {
315 case AF_INET:
316 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
317 break;
318 case AF_INET6:
319 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
320 if (sa6->sin6_scope_id)
321 seq_printf(s, "%%%u", sa6->sin6_scope_id);
322 break;
323 default:
324 seq_puts(s, "(unknown)");
325 }
326 }
327
328 static void
329 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
330 {
331 if (ses->sectype == Unspecified) {
332 if (ses->user_name == NULL)
333 seq_puts(s, ",sec=none");
334 return;
335 }
336
337 seq_puts(s, ",sec=");
338
339 switch (ses->sectype) {
340 case LANMAN:
341 seq_puts(s, "lanman");
342 break;
343 case NTLMv2:
344 seq_puts(s, "ntlmv2");
345 break;
346 case NTLM:
347 seq_puts(s, "ntlm");
348 break;
349 case Kerberos:
350 seq_puts(s, "krb5");
351 break;
352 case RawNTLMSSP:
353 seq_puts(s, "ntlmssp");
354 break;
355 default:
356 /* shouldn't ever happen */
357 seq_puts(s, "unknown");
358 break;
359 }
360
361 	if (ses->sign)
362 		seq_puts(s, "i");	/* e.g. "sec=ntlmv2i" when signing is required */
363 }
364
365 static void
366 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
367 {
368 seq_puts(s, ",cache=");
369
370 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
371 seq_puts(s, "strict");
372 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
373 seq_puts(s, "none");
374 else
375 seq_puts(s, "loose");
376 }
377
378 static void
379 cifs_show_nls(struct seq_file *s, struct nls_table *cur)
380 {
381 struct nls_table *def;
382
383 /* Display iocharset= option if it's not default charset */
384 def = load_nls_default();
385 if (def != cur)
386 seq_printf(s, ",iocharset=%s", cur->charset);
387 unload_nls(def);
388 }
389
390 /*
391 * cifs_show_options() is for displaying mount options in /proc/mounts.
392 * Not all settable options are displayed but most of the important
393 * ones are.
394 */
395 static int
396 cifs_show_options(struct seq_file *s, struct dentry *root)
397 {
398 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
399 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
400 struct sockaddr *srcaddr;
401 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
402
403 seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
404 cifs_show_security(s, tcon->ses);
405 cifs_show_cache_flavor(s, cifs_sb);
406
407 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
408 seq_puts(s, ",multiuser");
409 else if (tcon->ses->user_name)
410 seq_show_option(s, "username", tcon->ses->user_name);
411
412 if (tcon->ses->domainName)
413 seq_show_option(s, "domain", tcon->ses->domainName);
414
415 if (srcaddr->sa_family != AF_UNSPEC) {
416 struct sockaddr_in *saddr4;
417 struct sockaddr_in6 *saddr6;
418 saddr4 = (struct sockaddr_in *)srcaddr;
419 saddr6 = (struct sockaddr_in6 *)srcaddr;
420 if (srcaddr->sa_family == AF_INET6)
421 seq_printf(s, ",srcaddr=%pI6c",
422 &saddr6->sin6_addr);
423 else if (srcaddr->sa_family == AF_INET)
424 seq_printf(s, ",srcaddr=%pI4",
425 &saddr4->sin_addr.s_addr);
426 else
427 seq_printf(s, ",srcaddr=BAD-AF:%i",
428 (int)(srcaddr->sa_family));
429 }
430
431 seq_printf(s, ",uid=%u",
432 from_kuid_munged(&init_user_ns, cifs_sb->mnt_uid));
433 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
434 seq_puts(s, ",forceuid");
435 else
436 seq_puts(s, ",noforceuid");
437
438 seq_printf(s, ",gid=%u",
439 from_kgid_munged(&init_user_ns, cifs_sb->mnt_gid));
440 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
441 seq_puts(s, ",forcegid");
442 else
443 seq_puts(s, ",noforcegid");
444
445 cifs_show_address(s, tcon->ses->server);
446
447 if (!tcon->unix_ext)
448 seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
449 cifs_sb->mnt_file_mode,
450 cifs_sb->mnt_dir_mode);
451
452 cifs_show_nls(s, cifs_sb->local_nls);
453
454 if (tcon->seal)
455 seq_puts(s, ",seal");
456 if (tcon->nocase)
457 seq_puts(s, ",nocase");
458 if (tcon->retry)
459 seq_puts(s, ",hard");
460 if (tcon->use_persistent)
461 seq_puts(s, ",persistenthandles");
462 else if (tcon->use_resilient)
463 seq_puts(s, ",resilienthandles");
464 if (tcon->unix_ext)
465 seq_puts(s, ",unix");
466 else
467 seq_puts(s, ",nounix");
468 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
469 seq_puts(s, ",posixpaths");
470 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
471 seq_puts(s, ",setuids");
472 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
473 seq_puts(s, ",serverino");
474 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
475 seq_puts(s, ",rwpidforward");
476 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
477 seq_puts(s, ",forcemand");
478 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
479 seq_puts(s, ",nouser_xattr");
480 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
481 seq_puts(s, ",mapchars");
482 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
483 seq_puts(s, ",mapposix");
484 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
485 seq_puts(s, ",sfu");
486 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
487 seq_puts(s, ",nobrl");
488 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
489 seq_puts(s, ",cifsacl");
490 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
491 seq_puts(s, ",dynperm");
492 if (root->d_sb->s_flags & MS_POSIXACL)
493 seq_puts(s, ",acl");
494 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
495 seq_puts(s, ",mfsymlinks");
496 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
497 seq_puts(s, ",fsc");
498 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
499 seq_puts(s, ",nostrictsync");
500 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
501 seq_puts(s, ",noperm");
502 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
503 seq_printf(s, ",backupuid=%u",
504 from_kuid_munged(&init_user_ns,
505 cifs_sb->mnt_backupuid));
506 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
507 seq_printf(s, ",backupgid=%u",
508 from_kgid_munged(&init_user_ns,
509 cifs_sb->mnt_backupgid));
510
511 seq_printf(s, ",rsize=%u", cifs_sb->rsize);
512 seq_printf(s, ",wsize=%u", cifs_sb->wsize);
513 seq_printf(s, ",echo_interval=%lu",
514 tcon->ses->server->echo_interval / HZ);
515 /* convert actimeo and display it in seconds */
516 seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
517
518 return 0;
519 }
520
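/*
 * Called for a forced unmount (umount -f). If this is the only mount of the
 * share, mark the tcon as exiting and wake up any threads blocked waiting for
 * responses so the unmount can make progress.
 */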
521 static void cifs_umount_begin(struct super_block *sb)
522 {
523 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
524 struct cifs_tcon *tcon;
525
526 if (cifs_sb == NULL)
527 return;
528
529 tcon = cifs_sb_master_tcon(cifs_sb);
530
531 spin_lock(&cifs_tcp_ses_lock);
532 if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
533 /* we have other mounts to same share or we have
534 already tried to force umount this and woken up
535 all waiting network requests, nothing to do */
536 spin_unlock(&cifs_tcp_ses_lock);
537 return;
538 } else if (tcon->tc_count == 1)
539 tcon->tidStatus = CifsExiting;
540 spin_unlock(&cifs_tcp_ses_lock);
541
542 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
543 /* cancel_notify_requests(tcon); */
544 if (tcon->ses && tcon->ses->server) {
545 cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
546 wake_up_all(&tcon->ses->server->request_q);
547 wake_up_all(&tcon->ses->server->response_q);
548 msleep(1); /* yield */
549 /* we have to kick the requests once more */
550 wake_up_all(&tcon->ses->server->response_q);
551 msleep(1);
552 }
553
554 return;
555 }
556
557 #ifdef CONFIG_CIFS_STATS2
558 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
559 {
560 /* BB FIXME */
561 return 0;
562 }
563 #endif
564
565 static int cifs_remount(struct super_block *sb, int *flags, char *data)
566 {
567 sync_filesystem(sb);
568 *flags |= MS_NODIRATIME;
569 return 0;
570 }
571
572 static int cifs_drop_inode(struct inode *inode)
573 {
574 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
575
576 /* no serverino => unconditional eviction */
577 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
578 generic_drop_inode(inode);
579 }
580
581 static const struct super_operations cifs_super_ops = {
582 .statfs = cifs_statfs,
583 .alloc_inode = cifs_alloc_inode,
584 .destroy_inode = cifs_destroy_inode,
585 .drop_inode = cifs_drop_inode,
586 .evict_inode = cifs_evict_inode,
587 /* .delete_inode = cifs_delete_inode, */ /* Do not need above
588 function unless later we add lazy close of inodes or unless the
589 kernel forgets to call us with the same number of releases (closes)
590 as opens */
591 .show_options = cifs_show_options,
592 .umount_begin = cifs_umount_begin,
593 .remount_fs = cifs_remount,
594 #ifdef CONFIG_CIFS_STATS2
595 .show_stats = cifs_show_stats,
596 #endif
597 };
598
599 /*
600 * Get root dentry from superblock according to prefix path mount option.
601 * Return dentry with refcount + 1 on success and NULL otherwise.
602 */
603 static struct dentry *
604 cifs_get_root(struct smb_vol *vol, struct super_block *sb)
605 {
606 struct dentry *dentry;
607 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
608 char *full_path = NULL;
609 char *s, *p;
610 char sep;
611
612 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
613 return dget(sb->s_root);
614
615 full_path = cifs_build_path_to_root(vol, cifs_sb,
616 cifs_sb_master_tcon(cifs_sb));
617 if (full_path == NULL)
618 return ERR_PTR(-ENOMEM);
619
620 cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
621
622 sep = CIFS_DIR_SEP(cifs_sb);
623 dentry = dget(sb->s_root);
624 p = s = full_path;
625
626 do {
627 struct inode *dir = d_inode(dentry);
628 struct dentry *child;
629
630 if (!dir) {
631 dput(dentry);
632 dentry = ERR_PTR(-ENOENT);
633 break;
634 }
635 if (!S_ISDIR(dir->i_mode)) {
636 dput(dentry);
637 dentry = ERR_PTR(-ENOTDIR);
638 break;
639 }
640
641 /* skip separators */
642 while (*s == sep)
643 s++;
644 if (!*s)
645 break;
646 p = s++;
647 /* next separator */
648 while (*s && *s != sep)
649 s++;
650
651 child = lookup_one_len_unlocked(p, dentry, s - p);
652 dput(dentry);
653 dentry = child;
654 } while (!IS_ERR(dentry));
655 kfree(full_path);
656 return dentry;
657 }
658
659 static int cifs_set_super(struct super_block *sb, void *data)
660 {
661 struct cifs_mnt_data *mnt_data = data;
662 sb->s_fs_info = mnt_data->cifs_sb;
663 return set_anon_super(sb, NULL);
664 }
665
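/*
 * Mount entry point: parse the mount options into an smb_vol, allocate and
 * set up the cifs_sb, establish the session/tree connection via cifs_mount(),
 * then find or create a matching superblock with sget(). For a brand new
 * superblock cifs_read_super() is run; finally the root dentry is resolved,
 * honoring any prefix path in the UNC.
 */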
666 static struct dentry *
667 cifs_do_mount(struct file_system_type *fs_type,
668 int flags, const char *dev_name, void *data)
669 {
670 int rc;
671 struct super_block *sb;
672 struct cifs_sb_info *cifs_sb;
673 struct smb_vol *volume_info;
674 struct cifs_mnt_data mnt_data;
675 struct dentry *root;
676
677 cifs_dbg(FYI, "Devname: %s flags: %d\n", dev_name, flags);
678
679 volume_info = cifs_get_volume_info((char *)data, dev_name);
680 if (IS_ERR(volume_info))
681 return ERR_CAST(volume_info);
682
683 cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
684 if (cifs_sb == NULL) {
685 root = ERR_PTR(-ENOMEM);
686 goto out_nls;
687 }
688
689 cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
690 if (cifs_sb->mountdata == NULL) {
691 root = ERR_PTR(-ENOMEM);
692 goto out_free;
693 }
694
695 rc = cifs_setup_cifs_sb(volume_info, cifs_sb);
696 if (rc) {
697 root = ERR_PTR(rc);
698 goto out_free;
699 }
700
701 rc = cifs_mount(cifs_sb, volume_info);
702 if (rc) {
703 if (!(flags & MS_SILENT))
704 cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
705 rc);
706 root = ERR_PTR(rc);
707 goto out_free;
708 }
709
710 mnt_data.vol = volume_info;
711 mnt_data.cifs_sb = cifs_sb;
712 mnt_data.flags = flags;
713
714 /* BB should we make this contingent on mount parm? */
715 flags |= MS_NODIRATIME | MS_NOATIME;
716
717 sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
718 if (IS_ERR(sb)) {
719 root = ERR_CAST(sb);
720 cifs_umount(cifs_sb);
721 goto out;
722 }
723
724 if (sb->s_root) {
725 cifs_dbg(FYI, "Use existing superblock\n");
726 cifs_umount(cifs_sb);
727 } else {
728 rc = cifs_read_super(sb);
729 if (rc) {
730 root = ERR_PTR(rc);
731 goto out_super;
732 }
733
734 sb->s_flags |= MS_ACTIVE;
735 }
736
737 root = cifs_get_root(volume_info, sb);
738 if (IS_ERR(root))
739 goto out_super;
740
741 cifs_dbg(FYI, "dentry root is: %p\n", root);
742 goto out;
743
744 out_super:
745 deactivate_locked_super(sb);
746 out:
747 cifs_cleanup_volume_info(volume_info);
748 return root;
749
750 out_free:
751 kfree(cifs_sb->prepath);
752 kfree(cifs_sb->mountdata);
753 kfree(cifs_sb);
754 out_nls:
755 unload_nls(volume_info->local_nls);
756 goto out;
757 }
758
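/*
 * "loose" cache read path: O_DIRECT opens bypass the page cache entirely,
 * otherwise the mapping is revalidated against the server before falling
 * through to the generic cached read.
 */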
759 static ssize_t
760 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
761 {
762 ssize_t rc;
763 struct inode *inode = file_inode(iocb->ki_filp);
764
765 if (iocb->ki_filp->f_flags & O_DIRECT)
766 return cifs_user_readv(iocb, iter);
767
768 rc = cifs_revalidate_mapping(inode);
769 if (rc)
770 return rc;
771
772 return generic_file_read_iter(iocb, iter);
773 }
774
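/*
 * Cached write path. O_DIRECT writes go straight to the server and, if we
 * held a read oplock, the now-stale cached pages are zapped. Otherwise take
 * a writer reference, perform a generic cached write and, when we do not hold
 * a write oplock, immediately start writeback so the server sees the data.
 */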
775 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
776 {
777 struct inode *inode = file_inode(iocb->ki_filp);
778 struct cifsInodeInfo *cinode = CIFS_I(inode);
779 ssize_t written;
780 int rc;
781
782 if (iocb->ki_filp->f_flags & O_DIRECT) {
783 written = cifs_user_writev(iocb, from);
784 if (written > 0 && CIFS_CACHE_READ(cinode)) {
785 cifs_zap_mapping(inode);
786 cifs_dbg(FYI,
787 "Set no oplock for inode=%p after a write operation\n",
788 inode);
789 cinode->oplock = 0;
790 }
791 return written;
792 }
793
794 written = cifs_get_writer(cinode);
795 if (written)
796 return written;
797
798 written = generic_file_write_iter(iocb, from);
799
800 if (CIFS_CACHE_WRITE(CIFS_I(inode)))
801 goto out;
802
803 rc = filemap_fdatawrite(inode->i_mapping);
804 if (rc)
805 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
806 rc, inode);
807
808 out:
809 cifs_put_writer(cinode);
810 return written;
811 }
812
813 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
814 {
815 /*
816 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
817 * the cached file length
818 */
819 if (whence != SEEK_SET && whence != SEEK_CUR) {
820 int rc;
821 struct inode *inode = file_inode(file);
822
823 /*
824 * We need to be sure that all dirty pages are written and the
825 * server has the newest file length.
826 */
827 if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
828 inode->i_mapping->nrpages != 0) {
829 rc = filemap_fdatawait(inode->i_mapping);
830 if (rc) {
831 mapping_set_error(inode->i_mapping, rc);
832 return rc;
833 }
834 }
835 /*
836 * Some applications poll for the file length in this strange
837 * way so we must seek to end on non-oplocked files by
838 * setting the revalidate time to zero.
839 */
840 CIFS_I(inode)->time = 0;
841
842 rc = cifs_revalidate_file_attr(file);
843 if (rc < 0)
844 return (loff_t)rc;
845 }
846 return generic_file_llseek(file, offset, whence);
847 }
848
849 static int
850 cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
851 {
852 /*
853 * Note that this is called by vfs setlease with i_lock held to
854 * protect *lease from going away.
855 */
856 struct inode *inode = file_inode(file);
857 struct cifsFileInfo *cfile = file->private_data;
858
859 if (!(S_ISREG(inode->i_mode)))
860 return -EINVAL;
861
862 /* Check if file is oplocked if this is request for new lease */
863 if (arg == F_UNLCK ||
864 ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
865 ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
866 return generic_setlease(file, arg, lease, priv);
867 else if (tlink_tcon(cfile->tlink)->local_lease &&
868 !CIFS_CACHE_READ(CIFS_I(inode)))
869 /*
870 * If the server claims to support oplock on this file, then we
871 * still need to check oplock even if the local_lease mount
872 * option is set, but there are servers which do not support
873 * oplock for which this mount option may be useful if the user
874 * knows that the file won't be changed on the server by anyone
875 * else.
876 */
877 return generic_setlease(file, arg, lease, priv);
878 else
879 return -EAGAIN;
880 }
881
882 struct file_system_type cifs_fs_type = {
883 .owner = THIS_MODULE,
884 .name = "cifs",
885 .mount = cifs_do_mount,
886 .kill_sb = cifs_kill_sb,
887 /* .fs_flags */
888 };
889 MODULE_ALIAS_FS("cifs");
890 const struct inode_operations cifs_dir_inode_ops = {
891 .create = cifs_create,
892 .atomic_open = cifs_atomic_open,
893 .lookup = cifs_lookup,
894 .getattr = cifs_getattr,
895 .unlink = cifs_unlink,
896 .link = cifs_hardlink,
897 .mkdir = cifs_mkdir,
898 .rmdir = cifs_rmdir,
899 .rename = cifs_rename2,
900 .permission = cifs_permission,
901 .setattr = cifs_setattr,
902 .symlink = cifs_symlink,
903 .mknod = cifs_mknod,
904 .listxattr = cifs_listxattr,
905 };
906
907 const struct inode_operations cifs_file_inode_ops = {
908 .setattr = cifs_setattr,
909 .getattr = cifs_getattr,
910 .permission = cifs_permission,
911 .listxattr = cifs_listxattr,
912 };
913
914 const struct inode_operations cifs_symlink_inode_ops = {
915 .readlink = generic_readlink,
916 .get_link = cifs_get_link,
917 .permission = cifs_permission,
918 .listxattr = cifs_listxattr,
919 };
920
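/*
 * Implement clone_file_range by asking the server to duplicate the extents of
 * the source file into the destination; cached pages covering the destination
 * range are dropped first, and the target's attributes are marked stale so
 * the new size/timestamps are fetched from the server.
 */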
921 static int cifs_clone_file_range(struct file *src_file, loff_t off,
922 struct file *dst_file, loff_t destoff, u64 len)
923 {
924 struct inode *src_inode = file_inode(src_file);
925 struct inode *target_inode = file_inode(dst_file);
926 struct cifsFileInfo *smb_file_src = src_file->private_data;
927 struct cifsFileInfo *smb_file_target = dst_file->private_data;
928 struct cifs_tcon *target_tcon = tlink_tcon(smb_file_target->tlink);
929 unsigned int xid;
930 int rc;
931
932 cifs_dbg(FYI, "clone range\n");
933
934 xid = get_xid();
935
936 if (!src_file->private_data || !dst_file->private_data) {
937 rc = -EBADF;
938 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
939 goto out;
940 }
941
942 /*
943 	 * Note: the cifs case is easier than btrfs since the server is responsible
944 	 * for checking for proper open modes and file type, and if it wants the
945 	 * server could even support copying a range where source = target
946 */
947 lock_two_nondirectories(target_inode, src_inode);
948
949 if (len == 0)
950 len = src_inode->i_size - off;
951
952 cifs_dbg(FYI, "about to flush pages\n");
953 	/* should we flush the first and last pages first? */
954 truncate_inode_pages_range(&target_inode->i_data, destoff,
955 PAGE_ALIGN(destoff + len)-1);
956
957 if (target_tcon->ses->server->ops->duplicate_extents)
958 rc = target_tcon->ses->server->ops->duplicate_extents(xid,
959 smb_file_src, smb_file_target, off, len, destoff);
960 else
961 rc = -EOPNOTSUPP;
962
963 /* force revalidate of size and timestamps of target file now
964 that target is updated on the server */
965 CIFS_I(target_inode)->time = 0;
966 /* although unlocking in the reverse order from locking is not
967 strictly necessary here it is a little cleaner to be consistent */
968 unlock_two_nondirectories(src_inode, target_inode);
969 out:
970 free_xid(xid);
971 return rc;
972 }
973
974 const struct file_operations cifs_file_ops = {
975 .read_iter = cifs_loose_read_iter,
976 .write_iter = cifs_file_write_iter,
977 .open = cifs_open,
978 .release = cifs_close,
979 .lock = cifs_lock,
980 .fsync = cifs_fsync,
981 .flush = cifs_flush,
982 .mmap = cifs_file_mmap,
983 .splice_read = generic_file_splice_read,
984 .llseek = cifs_llseek,
985 .unlocked_ioctl = cifs_ioctl,
986 .clone_file_range = cifs_clone_file_range,
987 .setlease = cifs_setlease,
988 .fallocate = cifs_fallocate,
989 };
990
991 const struct file_operations cifs_file_strict_ops = {
992 .read_iter = cifs_strict_readv,
993 .write_iter = cifs_strict_writev,
994 .open = cifs_open,
995 .release = cifs_close,
996 .lock = cifs_lock,
997 .fsync = cifs_strict_fsync,
998 .flush = cifs_flush,
999 .mmap = cifs_file_strict_mmap,
1000 .splice_read = generic_file_splice_read,
1001 .llseek = cifs_llseek,
1002 .unlocked_ioctl = cifs_ioctl,
1003 .clone_file_range = cifs_clone_file_range,
1004 .setlease = cifs_setlease,
1005 .fallocate = cifs_fallocate,
1006 };
1007
1008 const struct file_operations cifs_file_direct_ops = {
1009 /* BB reevaluate whether they can be done with directio, no cache */
1010 .read_iter = cifs_user_readv,
1011 .write_iter = cifs_user_writev,
1012 .open = cifs_open,
1013 .release = cifs_close,
1014 .lock = cifs_lock,
1015 .fsync = cifs_fsync,
1016 .flush = cifs_flush,
1017 .mmap = cifs_file_mmap,
1018 .splice_read = generic_file_splice_read,
1019 .unlocked_ioctl = cifs_ioctl,
1020 .clone_file_range = cifs_clone_file_range,
1021 .llseek = cifs_llseek,
1022 .setlease = cifs_setlease,
1023 .fallocate = cifs_fallocate,
1024 };
1025
1026 const struct file_operations cifs_file_nobrl_ops = {
1027 .read_iter = cifs_loose_read_iter,
1028 .write_iter = cifs_file_write_iter,
1029 .open = cifs_open,
1030 .release = cifs_close,
1031 .fsync = cifs_fsync,
1032 .flush = cifs_flush,
1033 .mmap = cifs_file_mmap,
1034 .splice_read = generic_file_splice_read,
1035 .llseek = cifs_llseek,
1036 .unlocked_ioctl = cifs_ioctl,
1037 .clone_file_range = cifs_clone_file_range,
1038 .setlease = cifs_setlease,
1039 .fallocate = cifs_fallocate,
1040 };
1041
1042 const struct file_operations cifs_file_strict_nobrl_ops = {
1043 .read_iter = cifs_strict_readv,
1044 .write_iter = cifs_strict_writev,
1045 .open = cifs_open,
1046 .release = cifs_close,
1047 .fsync = cifs_strict_fsync,
1048 .flush = cifs_flush,
1049 .mmap = cifs_file_strict_mmap,
1050 .splice_read = generic_file_splice_read,
1051 .llseek = cifs_llseek,
1052 .unlocked_ioctl = cifs_ioctl,
1053 .clone_file_range = cifs_clone_file_range,
1054 .setlease = cifs_setlease,
1055 .fallocate = cifs_fallocate,
1056 };
1057
1058 const struct file_operations cifs_file_direct_nobrl_ops = {
1059 /* BB reevaluate whether they can be done with directio, no cache */
1060 .read_iter = cifs_user_readv,
1061 .write_iter = cifs_user_writev,
1062 .open = cifs_open,
1063 .release = cifs_close,
1064 .fsync = cifs_fsync,
1065 .flush = cifs_flush,
1066 .mmap = cifs_file_mmap,
1067 .splice_read = generic_file_splice_read,
1068 .unlocked_ioctl = cifs_ioctl,
1069 .clone_file_range = cifs_clone_file_range,
1070 .llseek = cifs_llseek,
1071 .setlease = cifs_setlease,
1072 .fallocate = cifs_fallocate,
1073 };
1074
1075 const struct file_operations cifs_dir_ops = {
1076 .iterate_shared = cifs_readdir,
1077 .release = cifs_closedir,
1078 .read = generic_read_dir,
1079 .unlocked_ioctl = cifs_ioctl,
1080 .clone_file_range = cifs_clone_file_range,
1081 .llseek = generic_file_llseek,
1082 };
1083
1084 static void
1085 cifs_init_once(void *inode)
1086 {
1087 struct cifsInodeInfo *cifsi = inode;
1088
1089 inode_init_once(&cifsi->vfs_inode);
1090 init_rwsem(&cifsi->lock_sem);
1091 }
1092
1093 static int __init
1094 cifs_init_inodecache(void)
1095 {
1096 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1097 sizeof(struct cifsInodeInfo),
1098 0, (SLAB_RECLAIM_ACCOUNT|
1099 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1100 cifs_init_once);
1101 if (cifs_inode_cachep == NULL)
1102 return -ENOMEM;
1103
1104 return 0;
1105 }
1106
1107 static void
1108 cifs_destroy_inodecache(void)
1109 {
1110 /*
1111 * Make sure all delayed rcu free inodes are flushed before we
1112 * destroy cache.
1113 */
1114 rcu_barrier();
1115 kmem_cache_destroy(cifs_inode_cachep);
1116 }
1117
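/*
 * Create the slab caches and mempools used for SMB request buffers: a "large"
 * cache sized for CIFSMaxBufSize plus the biggest protocol header, and a
 * "small" cache (MAX_CIFS_SMALL_BUFFER_SIZE) that covers most requests. The
 * cifs_min_rcv and cifs_min_small module parameters are clamped to their
 * documented ranges here.
 */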
1118 static int
1119 cifs_init_request_bufs(void)
1120 {
1121 size_t max_hdr_size = MAX_CIFS_HDR_SIZE;
1122 #ifdef CONFIG_CIFS_SMB2
1123 /*
1124 * SMB2 maximum header size is bigger than CIFS one - no problems to
1125 * allocate some more bytes for CIFS.
1126 */
1127 max_hdr_size = MAX_SMB2_HDR_SIZE;
1128 #endif
1129 if (CIFSMaxBufSize < 8192) {
1130 		/* Buffer size cannot be smaller than 2 * PATH_MAX since the maximum
1131 		   Unicode path name has to fit in any SMB/CIFS path-based frame */
1132 CIFSMaxBufSize = 8192;
1133 } else if (CIFSMaxBufSize > 1024*127) {
1134 CIFSMaxBufSize = 1024 * 127;
1135 } else {
1136 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1137 }
1138 /*
1139 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1140 CIFSMaxBufSize, CIFSMaxBufSize);
1141 */
1142 cifs_req_cachep = kmem_cache_create("cifs_request",
1143 CIFSMaxBufSize + max_hdr_size, 0,
1144 SLAB_HWCACHE_ALIGN, NULL);
1145 if (cifs_req_cachep == NULL)
1146 return -ENOMEM;
1147
1148 if (cifs_min_rcv < 1)
1149 cifs_min_rcv = 1;
1150 else if (cifs_min_rcv > 64) {
1151 cifs_min_rcv = 64;
1152 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1153 }
1154
1155 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1156 cifs_req_cachep);
1157
1158 if (cifs_req_poolp == NULL) {
1159 kmem_cache_destroy(cifs_req_cachep);
1160 return -ENOMEM;
1161 }
1162 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1163 almost all handle based requests (but not write response, nor is it
1164 sufficient for path based requests). A smaller size would have
1165 been more efficient (compacting multiple slab items on one 4k page)
1166 for the case in which debug was on, but this larger size allows
1167 more SMBs to use small buffer alloc and is still much more
1168 efficient to alloc 1 per page off the slab compared to 17K (5page)
1169 alloc of large cifs buffers even when page debugging is on */
1170 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
1171 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1172 NULL);
1173 if (cifs_sm_req_cachep == NULL) {
1174 mempool_destroy(cifs_req_poolp);
1175 kmem_cache_destroy(cifs_req_cachep);
1176 return -ENOMEM;
1177 }
1178
1179 if (cifs_min_small < 2)
1180 cifs_min_small = 2;
1181 else if (cifs_min_small > 256) {
1182 cifs_min_small = 256;
1183 cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1184 }
1185
1186 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1187 cifs_sm_req_cachep);
1188
1189 if (cifs_sm_req_poolp == NULL) {
1190 mempool_destroy(cifs_req_poolp);
1191 kmem_cache_destroy(cifs_req_cachep);
1192 kmem_cache_destroy(cifs_sm_req_cachep);
1193 return -ENOMEM;
1194 }
1195
1196 return 0;
1197 }
1198
1199 static void
1200 cifs_destroy_request_bufs(void)
1201 {
1202 mempool_destroy(cifs_req_poolp);
1203 kmem_cache_destroy(cifs_req_cachep);
1204 mempool_destroy(cifs_sm_req_poolp);
1205 kmem_cache_destroy(cifs_sm_req_cachep);
1206 }
1207
1208 static int
1209 cifs_init_mids(void)
1210 {
1211 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1212 sizeof(struct mid_q_entry), 0,
1213 SLAB_HWCACHE_ALIGN, NULL);
1214 if (cifs_mid_cachep == NULL)
1215 return -ENOMEM;
1216
1217 /* 3 is a reasonable minimum number of simultaneous operations */
1218 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1219 if (cifs_mid_poolp == NULL) {
1220 kmem_cache_destroy(cifs_mid_cachep);
1221 return -ENOMEM;
1222 }
1223
1224 return 0;
1225 }
1226
1227 static void
1228 cifs_destroy_mids(void)
1229 {
1230 mempool_destroy(cifs_mid_poolp);
1231 kmem_cache_destroy(cifs_mid_cachep);
1232 }
1233
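/*
 * Module initialization: set up /proc entries and global counters, create the
 * cifsiod workqueue, register with fscache, create the inode/mid/request
 * caches, register the spnego and idmap key types when configured, and
 * finally register the filesystem. Failures unwind in reverse order via the
 * labels below.
 */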
1234 static int __init
1235 init_cifs(void)
1236 {
1237 int rc = 0;
1238 cifs_proc_init();
1239 INIT_LIST_HEAD(&cifs_tcp_ses_list);
1240 #ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
1241 INIT_LIST_HEAD(&GlobalDnotifyReqList);
1242 INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
1243 #endif /* was needed for dnotify, and will be needed for inotify when VFS fix */
1244 /*
1245 * Initialize Global counters
1246 */
1247 atomic_set(&sesInfoAllocCount, 0);
1248 atomic_set(&tconInfoAllocCount, 0);
1249 atomic_set(&tcpSesAllocCount, 0);
1250 atomic_set(&tcpSesReconnectCount, 0);
1251 atomic_set(&tconInfoReconnectCount, 0);
1252
1253 atomic_set(&bufAllocCount, 0);
1254 atomic_set(&smBufAllocCount, 0);
1255 #ifdef CONFIG_CIFS_STATS2
1256 atomic_set(&totBufAllocCount, 0);
1257 atomic_set(&totSmBufAllocCount, 0);
1258 #endif /* CONFIG_CIFS_STATS2 */
1259
1260 atomic_set(&midCount, 0);
1261 GlobalCurrentXid = 0;
1262 GlobalTotalActiveXid = 0;
1263 GlobalMaxActiveXid = 0;
1264 spin_lock_init(&cifs_tcp_ses_lock);
1265 spin_lock_init(&cifs_file_list_lock);
1266 spin_lock_init(&GlobalMid_Lock);
1267
1268 get_random_bytes(&cifs_lock_secret, sizeof(cifs_lock_secret));
1269
1270 if (cifs_max_pending < 2) {
1271 cifs_max_pending = 2;
1272 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1273 } else if (cifs_max_pending > CIFS_MAX_REQ) {
1274 cifs_max_pending = CIFS_MAX_REQ;
1275 cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1276 CIFS_MAX_REQ);
1277 }
1278
1279 cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1280 if (!cifsiod_wq) {
1281 rc = -ENOMEM;
1282 goto out_clean_proc;
1283 }
1284
1285 rc = cifs_fscache_register();
1286 if (rc)
1287 goto out_destroy_wq;
1288
1289 rc = cifs_init_inodecache();
1290 if (rc)
1291 goto out_unreg_fscache;
1292
1293 rc = cifs_init_mids();
1294 if (rc)
1295 goto out_destroy_inodecache;
1296
1297 rc = cifs_init_request_bufs();
1298 if (rc)
1299 goto out_destroy_mids;
1300
1301 #ifdef CONFIG_CIFS_UPCALL
1302 rc = init_cifs_spnego();
1303 if (rc)
1304 goto out_destroy_request_bufs;
1305 #endif /* CONFIG_CIFS_UPCALL */
1306
1307 #ifdef CONFIG_CIFS_ACL
1308 rc = init_cifs_idmap();
1309 if (rc)
1310 goto out_register_key_type;
1311 #endif /* CONFIG_CIFS_ACL */
1312
1313 rc = register_filesystem(&cifs_fs_type);
1314 if (rc)
1315 goto out_init_cifs_idmap;
1316
1317 return 0;
1318
1319 out_init_cifs_idmap:
1320 #ifdef CONFIG_CIFS_ACL
1321 exit_cifs_idmap();
1322 out_register_key_type:
1323 #endif
1324 #ifdef CONFIG_CIFS_UPCALL
1325 exit_cifs_spnego();
1326 out_destroy_request_bufs:
1327 #endif
1328 cifs_destroy_request_bufs();
1329 out_destroy_mids:
1330 cifs_destroy_mids();
1331 out_destroy_inodecache:
1332 cifs_destroy_inodecache();
1333 out_unreg_fscache:
1334 cifs_fscache_unregister();
1335 out_destroy_wq:
1336 destroy_workqueue(cifsiod_wq);
1337 out_clean_proc:
1338 cifs_proc_clean();
1339 return rc;
1340 }
1341
1342 static void __exit
1343 exit_cifs(void)
1344 {
1345 cifs_dbg(NOISY, "exit_cifs\n");
1346 unregister_filesystem(&cifs_fs_type);
1347 cifs_dfs_release_automount_timer();
1348 #ifdef CONFIG_CIFS_ACL
1349 exit_cifs_idmap();
1350 #endif
1351 #ifdef CONFIG_CIFS_UPCALL
1352 unregister_key_type(&cifs_spnego_key_type);
1353 #endif
1354 cifs_destroy_request_bufs();
1355 cifs_destroy_mids();
1356 cifs_destroy_inodecache();
1357 cifs_fscache_unregister();
1358 destroy_workqueue(cifsiod_wq);
1359 cifs_proc_clean();
1360 }
1361
1362 MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
1363 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1364 MODULE_DESCRIPTION
1365 ("VFS to access servers complying with the SNIA CIFS Specification "
1366 "e.g. Samba and Windows");
1367 MODULE_VERSION(CIFS_VERSION);
1368 module_init(init_cifs)
1369 module_exit(exit_cifs)