/*
 * fs/cifs/cifsfs.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *
 * Common Internet FileSystem (CIFS) client
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Note that BB means BUGBUG (ie something to fix eventually) */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/vfs.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/namei.h>
#include <linux/random.h>
#include <linux/uuid.h>
#include <linux/xattr.h>
#include <net/ipv6.h>
#include "cifsfs.h"
#include "cifspdu.h"
#define DECLARE_GLOBALS_HERE
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include <linux/mm.h>
#include <linux/key-type.h>
#include "cifs_spnego.h"
#include "fscache.h"
#include "smb2pdu.h"

int cifsFYI = 0;
bool traceSMB;
bool enable_oplocks = true;
bool linuxExtEnabled = true;
bool lookupCacheEnabled = true;
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
		 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, uint, 0444);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
		 "1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, uint, 0444);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
		 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
		 "Default: 32767 Range: 2 to 32767.");
module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

struct workqueue_struct *cifsiod_wq;
struct workqueue_struct *cifsoplockd_wq;
__u32 cifs_lock_secret;

/*
 * Bumps refcount for cifs super block.
 * Note that it should only be called if a reference to the VFS super block is
 * already held, e.g. in open-type syscalls context. Otherwise it can race with
 * atomic_dec_and_test in deactivate_locked_super.
 */
void
cifs_sb_active(struct super_block *sb)
{
	struct cifs_sb_info *server = CIFS_SB(sb);

	if (atomic_inc_return(&server->active) == 1)
		atomic_inc(&sb->s_active);
}

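/*
 * Counterpart to cifs_sb_active(): drops the cifs_sb refcount and, once the
 * last user is gone, releases the VFS superblock reference via
 * deactivate_super().
 */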
void
cifs_sb_deactive(struct super_block *sb)
{
	struct cifs_sb_info *server = CIFS_SB(sb);

	if (atomic_dec_and_test(&server->active))
		deactivate_super(sb);
}

static int
cifs_read_super(struct super_block *sb)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	int rc = 0;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= MS_POSIXACL;

	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/* BB FIXME fix time_gran to be larger for LANMAN sessions */
	sb->s_time_gran = 100;

	sb->s_magic = CIFS_MAGIC_NUMBER;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	if (rc)
		goto out_no_root;
	/* tune readahead according to rsize */
	sb->s_bdi->ra_pages = cifs_sb->rsize / PAGE_SIZE;

	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		goto out_no_root;
	}

	if (tcon->nocase)
		sb->s_d_op = &cifs_ci_dentry_ops;
	else
		sb->s_d_op = &cifs_dentry_ops;

	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_NFSD_EXPORT
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* CONFIG_CIFS_NFSD_EXPORT */

	return 0;

out_no_root:
	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
	return rc;
}

static void cifs_kill_sb(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	kill_anon_super(sb);
	cifs_umount(cifs_sb);
}

static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int xid;
	int rc = 0;

	xid = get_xid();

	/*
	 * PATH_MAX may be too long - it would presumably be total path,
	 * but note that some servers (including Samba 3) have a shorter
	 * maximum path.
	 *
	 * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO.
	 */
	buf->f_namelen = PATH_MAX;
	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

	if (server->ops->queryfs)
		rc = server->ops->queryfs(xid, tcon, buf);

	free_xid(xid);
	return 0;
}

static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
{
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;

	if (server->ops->fallocate)
		return server->ops->fallocate(file, tcon, mode, off, len);

	return -EOPNOTSUPP;
}

static int cifs_permission(struct inode *inode, int mask)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(inode->i_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
		if ((mask & MAY_EXEC) && !execute_ok(inode))
			return -EACCES;
		else
			return 0;
	} else /* file mode might have been restricted at mount time
		  on the client (above and beyond ACL on servers) for
		  servers which do not support setting and viewing mode bits,
		  so allowing client to check permissions is useful */
		return generic_permission(inode, mask);
}

static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;

static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	cifs_inode->time = 0;
	/*
	 * Until the file is open and we have gotten oplock info back from the
	 * server, can not assume caching of file data or metadata.
	 */
	cifs_set_oplock_level(cifs_inode, 0);
	cifs_inode->flags = 0;
	spin_lock_init(&cifs_inode->writers_lock);
	cifs_inode->writers = 0;
	cifs_inode->vfs_inode.i_blkbits = 14;	/* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->server_eof = 0;
	cifs_inode->uniqueid = 0;
	cifs_inode->createtime = 0;
	cifs_inode->epoch = 0;
	generate_random_uuid(cifs_inode->lease_key);

	/*
	 * Can not set i_flags here - they get immediately overwritten to zero
	 * by the VFS.
	 */
	/* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	INIT_LIST_HEAD(&cifs_inode->llist);
	return &cifs_inode->vfs_inode;
}

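/*
 * Inodes are freed through an RCU callback so that lockless path walkers
 * (RCU-walk) can still safely dereference a cifsInodeInfo that is being
 * torn down concurrently.
 */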
static void cifs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
}

static void
cifs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, cifs_i_callback);
}

static void
cifs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
	cifs_fscache_release_inode_cookie(inode);
}

static void
cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
{
	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;

	seq_puts(s, ",addr=");

	switch (server->dstaddr.ss_family) {
	case AF_INET:
		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
		break;
	case AF_INET6:
		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
		if (sa6->sin6_scope_id)
			seq_printf(s, "%%%u", sa6->sin6_scope_id);
		break;
	default:
		seq_puts(s, "(unknown)");
	}
}

static void
cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
{
	if (ses->sectype == Unspecified) {
		if (ses->user_name == NULL)
			seq_puts(s, ",sec=none");
		return;
	}

	seq_puts(s, ",sec=");

	switch (ses->sectype) {
	case LANMAN:
		seq_puts(s, "lanman");
		break;
	case NTLMv2:
		seq_puts(s, "ntlmv2");
		break;
	case NTLM:
		seq_puts(s, "ntlm");
		break;
	case Kerberos:
		seq_puts(s, "krb5");
		break;
	case RawNTLMSSP:
		seq_puts(s, "ntlmssp");
		break;
	default:
		/* shouldn't ever happen */
		seq_puts(s, "unknown");
		break;
	}

	if (ses->sign)
		seq_puts(s, "i");
}

static void
cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
{
	seq_puts(s, ",cache=");

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
		seq_puts(s, "strict");
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
		seq_puts(s, "none");
	else
		seq_puts(s, "loose");
}

static void
cifs_show_nls(struct seq_file *s, struct nls_table *cur)
{
	struct nls_table *def;

	/* Display iocharset= option if it's not default charset */
	def = load_nls_default();
	if (def != cur)
		seq_printf(s, ",iocharset=%s", cur->charset);
	unload_nls(def);
}

/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
static int
cifs_show_options(struct seq_file *s, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct sockaddr *srcaddr;
	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;

	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
	cifs_show_security(s, tcon->ses);
	cifs_show_cache_flavor(s, cifs_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
		seq_puts(s, ",multiuser");
	else if (tcon->ses->user_name)
		seq_show_option(s, "username", tcon->ses->user_name);

	if (tcon->ses->domainName)
		seq_show_option(s, "domain", tcon->ses->domainName);

	if (srcaddr->sa_family != AF_UNSPEC) {
		struct sockaddr_in *saddr4;
		struct sockaddr_in6 *saddr6;
		saddr4 = (struct sockaddr_in *)srcaddr;
		saddr6 = (struct sockaddr_in6 *)srcaddr;
		if (srcaddr->sa_family == AF_INET6)
			seq_printf(s, ",srcaddr=%pI6c",
				   &saddr6->sin6_addr);
		else if (srcaddr->sa_family == AF_INET)
			seq_printf(s, ",srcaddr=%pI4",
				   &saddr4->sin_addr.s_addr);
		else
			seq_printf(s, ",srcaddr=BAD-AF:%i",
				   (int)(srcaddr->sa_family));
	}

	seq_printf(s, ",uid=%u",
		   from_kuid_munged(&init_user_ns, cifs_sb->mnt_uid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
		seq_puts(s, ",forceuid");
	else
		seq_puts(s, ",noforceuid");

	seq_printf(s, ",gid=%u",
		   from_kgid_munged(&init_user_ns, cifs_sb->mnt_gid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
		seq_puts(s, ",forcegid");
	else
		seq_puts(s, ",noforcegid");

	cifs_show_address(s, tcon->ses->server);

	if (!tcon->unix_ext)
		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
			   cifs_sb->mnt_file_mode,
			   cifs_sb->mnt_dir_mode);

	cifs_show_nls(s, cifs_sb->local_nls);

	if (tcon->seal)
		seq_puts(s, ",seal");
	if (tcon->nocase)
		seq_puts(s, ",nocase");
	if (tcon->retry)
		seq_puts(s, ",hard");
	if (tcon->use_persistent)
		seq_puts(s, ",persistenthandles");
	else if (tcon->use_resilient)
		seq_puts(s, ",resilienthandles");
	if (tcon->unix_ext)
		seq_puts(s, ",unix");
	else
		seq_puts(s, ",nounix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
		seq_puts(s, ",posixpaths");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
		seq_puts(s, ",setuids");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
		seq_puts(s, ",idsfromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
		seq_puts(s, ",serverino");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		seq_puts(s, ",rwpidforward");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
		seq_puts(s, ",forcemand");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
		seq_puts(s, ",nouser_xattr");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_puts(s, ",mapchars");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
		seq_puts(s, ",mapposix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
		seq_puts(s, ",sfu");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
		seq_puts(s, ",nobrl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
		seq_puts(s, ",cifsacl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
		seq_puts(s, ",dynperm");
	if (root->d_sb->s_flags & MS_POSIXACL)
		seq_puts(s, ",acl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
		seq_puts(s, ",mfsymlinks");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
		seq_puts(s, ",fsc");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
		seq_puts(s, ",nostrictsync");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
		seq_puts(s, ",noperm");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
		seq_printf(s, ",backupuid=%u",
			   from_kuid_munged(&init_user_ns,
					    cifs_sb->mnt_backupuid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
		seq_printf(s, ",backupgid=%u",
			   from_kgid_munged(&init_user_ns,
					    cifs_sb->mnt_backupgid));

	seq_printf(s, ",rsize=%u", cifs_sb->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->wsize);
	seq_printf(s, ",echo_interval=%lu",
		   tcon->ses->server->echo_interval / HZ);
	/* convert actimeo and display it in seconds */
	seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);

	return 0;
}

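/*
 * Called for a forced unmount ("umount -f"): marks the tcon as exiting (when
 * this is the only mount of the share) and wakes any threads blocked waiting
 * on server responses so the unmount can make progress.
 */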
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb_master_tcon(cifs_sb);

	spin_lock(&cifs_tcp_ses_lock);
	if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
		/* we have other mounts to same share or we have
		   already tried to force umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	} else if (tcon->tc_count == 1)
		tcon->tidStatus = CifsExiting;
	spin_unlock(&cifs_tcp_ses_lock);

	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}

	return;
}

#ifdef CONFIG_CIFS_STATS2
static int cifs_show_stats(struct seq_file *s, struct dentry *root)
{
	/* BB FIXME */
	return 0;
}
#endif

static int cifs_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);
	*flags |= MS_NODIRATIME;
	return 0;
}

static int cifs_drop_inode(struct inode *inode)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	/* no serverino => unconditional eviction */
	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
		generic_drop_inode(inode);
}

static const struct super_operations cifs_super_ops = {
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.destroy_inode = cifs_destroy_inode,
	.drop_inode = cifs_drop_inode,
	.evict_inode = cifs_evict_inode,
/*	.delete_inode = cifs_delete_inode, */ /* Do not need above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin = cifs_umount_begin,
	.remount_fs = cifs_remount,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};

/*
 * Get root dentry from superblock according to prefix path mount option.
 * Return dentry with refcount + 1 on success and NULL otherwise.
 */
static struct dentry *
cifs_get_root(struct smb_vol *vol, struct super_block *sb)
{
	struct dentry *dentry;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *full_path = NULL;
	char *s, *p;
	char sep;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
		return dget(sb->s_root);

	full_path = cifs_build_path_to_root(vol, cifs_sb,
					    cifs_sb_master_tcon(cifs_sb), 0);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);

	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(sb->s_root);
	p = s = full_path;

	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!dir) {
			dput(dentry);
			dentry = ERR_PTR(-ENOENT);
			break;
		}
		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_one_len_unlocked(p, dentry, s - p);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	kfree(full_path);
	return dentry;
}

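/*
 * sget() "set" callback: attaches the preallocated cifs_sb_info to the newly
 * created superblock before cifs_read_super() fills in the rest.
 */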
static int cifs_set_super(struct super_block *sb, void *data)
{
	struct cifs_mnt_data *mnt_data = data;
	sb->s_fs_info = mnt_data->cifs_sb;
	return set_anon_super(sb, NULL);
}

static struct dentry *
cifs_do_mount(struct file_system_type *fs_type,
	      int flags, const char *dev_name, void *data)
{
	int rc;
	struct super_block *sb;
	struct cifs_sb_info *cifs_sb;
	struct smb_vol *volume_info;
	struct cifs_mnt_data mnt_data;
	struct dentry *root;

	cifs_dbg(FYI, "Devname: %s flags: %d\n", dev_name, flags);

	volume_info = cifs_get_volume_info((char *)data, dev_name);
	if (IS_ERR(volume_info))
		return ERR_CAST(volume_info);

	cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
	if (cifs_sb == NULL) {
		root = ERR_PTR(-ENOMEM);
		goto out_nls;
	}

	cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
	if (cifs_sb->mountdata == NULL) {
		root = ERR_PTR(-ENOMEM);
		goto out_free;
	}

	rc = cifs_setup_cifs_sb(volume_info, cifs_sb);
	if (rc) {
		root = ERR_PTR(rc);
		goto out_free;
	}

	rc = cifs_mount(cifs_sb, volume_info);
	if (rc) {
		if (!(flags & MS_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
				 rc);
		root = ERR_PTR(rc);
		goto out_free;
	}

	mnt_data.vol = volume_info;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	/* BB should we make this contingent on mount parm? */
	flags |= MS_NODIRATIME | MS_NOATIME;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
	if (IS_ERR(sb)) {
		root = ERR_CAST(sb);
		cifs_umount(cifs_sb);
		goto out;
	}

	if (sb->s_root) {
		cifs_dbg(FYI, "Use existing superblock\n");
		cifs_umount(cifs_sb);
	} else {
		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			goto out_super;
		}

		sb->s_flags |= MS_ACTIVE;
	}

	root = cifs_get_root(volume_info, sb);
	if (IS_ERR(root))
		goto out_super;

	cifs_dbg(FYI, "dentry root is: %p\n", root);
	goto out;

out_super:
	deactivate_locked_super(sb);
out:
	cifs_cleanup_volume_info(volume_info);
	return root;

out_free:
	kfree(cifs_sb->prepath);
	kfree(cifs_sb->mountdata);
	kfree(cifs_sb);
out_nls:
	unload_nls(volume_info->local_nls);
	goto out;
}

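/*
 * "loose" cache mode read path: data is served from the page cache after
 * revalidating the mapping; files opened with O_DIRECT bypass the cache and
 * go straight to cifs_user_readv().
 */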
static ssize_t
cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t rc;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_filp->f_flags & O_DIRECT)
		return cifs_user_readv(iocb, iter);

	rc = cifs_revalidate_mapping(inode);
	if (rc)
		return rc;

	return generic_file_read_iter(iocb, iter);
}

static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	ssize_t written;
	int rc;

	if (iocb->ki_filp->f_flags & O_DIRECT) {
		written = cifs_user_writev(iocb, from);
		if (written > 0 && CIFS_CACHE_READ(cinode)) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI,
				 "Set no oplock for inode=%p after a write operation\n",
				 inode);
			cinode->oplock = 0;
		}
		return written;
	}

	written = cifs_get_writer(cinode);
	if (written)
		return written;

	written = generic_file_write_iter(iocb, from);

	if (CIFS_CACHE_WRITE(CIFS_I(inode)))
		goto out;

	rc = filemap_fdatawrite(inode->i_mapping);
	if (rc)
		cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
			 rc, inode);

out:
	cifs_put_writer(cinode);
	return written;
}

static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		int rc;
		struct inode *inode = file_inode(file);

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			if (rc) {
				mapping_set_error(inode->i_mapping, rc);
				return rc;
			}
		}
		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;

		rc = cifs_revalidate_file_attr(file);
		if (rc < 0)
			return (loff_t)rc;
	}
	return generic_file_llseek(file, offset, whence);
}

static int
cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
{
	/*
	 * Note that this is called by vfs setlease with i_lock held to
	 * protect *lease from going away.
	 */
	struct inode *inode = file_inode(file);
	struct cifsFileInfo *cfile = file->private_data;

	if (!(S_ISREG(inode->i_mode)))
		return -EINVAL;

	/* Check if file is oplocked if this is request for new lease */
	if (arg == F_UNLCK ||
	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
		return generic_setlease(file, arg, lease, priv);
	else if (tlink_tcon(cfile->tlink)->local_lease &&
		 !CIFS_CACHE_READ(CIFS_I(inode)))
		/*
		 * If the server claims to support oplock on this file, then we
		 * still need to check oplock even if the local_lease mount
		 * option is set, but there are servers which do not support
		 * oplock for which this mount option may be useful if the user
		 * knows that the file won't be changed on the server by anyone
		 * else.
		 */
		return generic_setlease(file, arg, lease, priv);
	else
		return -EAGAIN;
}

struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.mount = cifs_do_mount,
	.kill_sb = cifs_kill_sb,
	/* .fs_flags */
};
MODULE_ALIAS_FS("cifs");
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod = cifs_mknod,
	.listxattr = cifs_listxattr,
};

const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};

const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};

static int cifs_clone_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, u64 len)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsFileInfo *smb_file_src = src_file->private_data;
	struct cifsFileInfo *smb_file_target = dst_file->private_data;
	struct cifs_tcon *target_tcon = tlink_tcon(smb_file_target->tlink);
	unsigned int xid;
	int rc;

	cifs_dbg(FYI, "clone range\n");

	xid = get_xid();

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	if (len == 0)
		len = src_inode->i_size - off;

	cifs_dbg(FYI, "about to flush pages\n");
	/* should we flush first and last page first */
	truncate_inode_pages_range(&target_inode->i_data, destoff,
				   PAGE_ALIGN(destoff + len)-1);

	if (target_tcon->ses->server->ops->duplicate_extents)
		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
			smb_file_src, smb_file_target, off, len, destoff);
	else
		rc = -EOPNOTSUPP;

	/* force revalidate of size and timestamps of target file now
	   that target is updated on the server */
	CIFS_I(target_inode)->time = 0;
	/* although unlocking in the reverse order from locking is not
	   strictly necessary here it is a little cleaner to be consistent */
	unlock_two_nondirectories(src_inode, target_inode);
out:
	free_xid(xid);
	return rc;
}

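/*
 * Common helper for server-side copy: rejects copies within the same inode
 * and across different SMB sessions (-EXDEV), then asks the server to do the
 * transfer via the copychunk_range operation.
 */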
ssize_t cifs_file_copychunk_range(unsigned int xid,
				struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;
	ssize_t rc;

	cifs_dbg(FYI, "copychunk range\n");

	if (src_inode == target_inode) {
		rc = -EINVAL;
		goto out;
	}

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	rc = -EXDEV;
	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(VFS, "source and target of copy not on same server\n");
		goto out;
	}

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");
	/* should we flush first and last page first */
	truncate_inode_pages(&target_inode->i_data, 0);

	if (target_tcon->ses->server->ops->copychunk_range)
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);
	else
		rc = -EOPNOTSUPP;

	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
	CIFS_I(target_inode)->time = 0;
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
	unlock_two_nondirectories(src_inode, target_inode);

out:
	return rc;
}

static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
{
	unsigned int xid = get_xid();
	ssize_t rc;

	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
					len, flags);
	free_xid(xid);
	return rc;
}

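/*
 * Several file_operations variants follow; cifs selects one of them per file
 * based on the mount options: "strict" cache mode, direct I/O, and nobrl
 * (no byte-range locking) each get their own table.
 */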
const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.llseek = cifs_llseek,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.clone_file_range = cifs_clone_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = generic_file_splice_read,
	.llseek = cifs_llseek,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.clone_file_range = cifs_clone_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_direct_ops = {
	/* BB reevaluate whether they can be done with directio, no cache */
	.read_iter = cifs_user_readv,
	.write_iter = cifs_user_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.clone_file_range = cifs_clone_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.llseek = cifs_llseek,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.clone_file_range = cifs_clone_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = generic_file_splice_read,
	.llseek = cifs_llseek,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.clone_file_range = cifs_clone_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_direct_nobrl_ops = {
	/* BB reevaluate whether they can be done with directio, no cache */
	.read_iter = cifs_user_readv,
	.write_iter = cifs_user_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.clone_file_range = cifs_clone_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read = generic_read_dir,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.clone_file_range = cifs_clone_file_range,
	.llseek = generic_file_llseek,
};

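/*
 * Slab constructor: runs once when a cifsInodeInfo object is first created in
 * the cache (not on every allocation), so only state that survives reuse is
 * initialized here.
 */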
static void
cifs_init_once(void *inode)
{
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->vfs_inode);
	init_rwsem(&cifsi->lock_sem);
}

static int __init
cifs_init_inodecache(void)
{
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof(struct cifsInodeInfo),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD|SLAB_ACCOUNT),
					      cifs_init_once);
	if (cifs_inode_cachep == NULL)
		return -ENOMEM;

	return 0;
}

static void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}

static int
cifs_init_request_bufs(void)
{
	/*
	 * SMB2 maximum header size is bigger than CIFS one - no problems to
	 * allocate some more bytes for CIFS.
	 */
	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;

	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	   Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*
	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
		 CIFSMaxBufSize, CIFSMaxBufSize);
*/
	cifs_req_cachep = kmem_cache_create("cifs_request",
					    CIFSMaxBufSize + max_hdr_size, 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	almost all handle based requests (but not write response, nor is it
	sufficient for path based requests). A smaller size would have
	been more efficient (compacting multiple slab items on one 4k page)
	for the case in which debug was on, but this larger size allows
	more SMBs to use small buffer alloc and is still much more
	efficient to alloc 1 per page off the slab compared to 17K (5page)
	alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}

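/*
 * "mids" (multiplex IDs) track requests that are in flight to the server;
 * each outstanding SMB gets a mid_q_entry allocated from this slab/mempool.
 */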
static int
cifs_init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof(struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
}

static int __init
init_cifs(void)
{
	int rc = 0;
	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
#ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
	INIT_LIST_HEAD(&GlobalDnotifyReqList);
	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
#endif /* was needed for dnotify, and will be needed for inotify when VFS fix */
	/*
	 * Initialize Global counters
	 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&bufAllocCount, 0);
	atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&totBufAllocCount, 0);
	atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	spin_lock_init(&cifs_tcp_ses_lock);
	spin_lock_init(&GlobalMid_Lock);

	cifs_lock_secret = get_random_u32();

	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
	} else if (cifs_max_pending > CIFS_MAX_REQ) {
		cifs_max_pending = CIFS_MAX_REQ;
		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
			 CIFS_MAX_REQ);
	}

	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsiod_wq) {
		rc = -ENOMEM;
		goto out_clean_proc;
	}

	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsoplockd_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsiod_wq;
	}

	rc = cifs_fscache_register();
	if (rc)
		goto out_destroy_cifsoplockd_wq;

	rc = cifs_init_inodecache();
	if (rc)
		goto out_unreg_fscache;

	rc = cifs_init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

#ifdef CONFIG_CIFS_UPCALL
	rc = init_cifs_spnego();
	if (rc)
		goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_UPCALL */

#ifdef CONFIG_CIFS_ACL
	rc = init_cifs_idmap();
	if (rc)
		goto out_register_key_type;
#endif /* CONFIG_CIFS_ACL */

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_init_cifs_idmap;

	return 0;

out_init_cifs_idmap:
#ifdef CONFIG_CIFS_ACL
	exit_cifs_idmap();
out_register_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
out_destroy_request_bufs:
#endif
	cifs_destroy_request_bufs();
out_destroy_mids:
	cifs_destroy_mids();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_unreg_fscache:
	cifs_fscache_unregister();
out_destroy_cifsoplockd_wq:
	destroy_workqueue(cifsoplockd_wq);
out_destroy_cifsiod_wq:
	destroy_workqueue(cifsiod_wq);
out_clean_proc:
	cifs_proc_clean();
	return rc;
}

static void __exit
exit_cifs(void)
{
	cifs_dbg(NOISY, "exit_cifs\n");
	unregister_filesystem(&cifs_fs_type);
	cifs_dfs_release_automount_timer();
#ifdef CONFIG_CIFS_ACL
	exit_cifs_idmap();
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
#endif
	cifs_destroy_request_bufs();
	cifs_destroy_mids();
	cifs_destroy_inodecache();
	cifs_fscache_unregister();
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}

MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
    ("VFS to access servers complying with the SNIA CIFS Specification "
     "e.g. Samba and Windows");
MODULE_VERSION(CIFS_VERSION);
MODULE_SOFTDEP("pre: arc4");
MODULE_SOFTDEP("pre: des");
MODULE_SOFTDEP("pre: ecb");
MODULE_SOFTDEP("pre: hmac");
MODULE_SOFTDEP("pre: md4");
MODULE_SOFTDEP("pre: md5");
MODULE_SOFTDEP("pre: nls");
MODULE_SOFTDEP("pre: aes");
MODULE_SOFTDEP("pre: cmac");
MODULE_SOFTDEP("pre: sha256");
MODULE_SOFTDEP("pre: aead2");
MODULE_SOFTDEP("pre: ccm");
module_init(init_cifs)
module_exit(exit_cifs)