/*
 * fs/cifs/cifsfs.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *
 * Common Internet FileSystem (CIFS) client
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Note that BB means BUGBUG (ie something to fix eventually) */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/vfs.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include "cifsfs.h"
#include "cifspdu.h"
#define DECLARE_GLOBALS_HERE
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include <linux/mm.h>
#include <linux/key-type.h>
#include "dns_resolve.h"
#include "cifs_spnego.h"
#define CIFS_MAGIC_NUMBER 0xFF534D42	/* the first four bytes of SMB PDUs */

#ifdef CONFIG_CIFS_QUOTA
static struct quotactl_ops cifs_quotactl_ops;
#endif /* QUOTA */

int cifsFYI = 0;
int cifsERROR = 1;
int traceSMB = 0;
unsigned int oplockEnabled = 1;
unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
unsigned int extended_security = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
extern struct task_struct *oplockThread; /* remove sparse warning */
struct task_struct *oplockThread = NULL;
/* extern struct task_struct * dnotifyThread; remove sparse warning */
static struct task_struct *dnotifyThread = NULL;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, int, 0);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
		 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, int, 0);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
		 "1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, int, 0);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
		 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
		 "Default: 50 Range: 2 to 256");
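
/*
 * The module parameters above are normally tuned at module load time.
 * Illustrative example only (the values shown happen to be the documented
 * defaults, not a recommendation):
 *
 *	modprobe cifs CIFSMaxBufSize=16384 cifs_max_pending=50
 */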

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

extern struct kmem_cache *cifs_oplock_cachep;

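/*
 * cifs_read_super() fills in the superblock for a new mount: it allocates
 * the per-superblock cifs_sb_info, optionally keeps a copy of the mount
 * options for DFS submounts, calls cifs_mount() to contact the server, and
 * then instantiates the root dentry from the root inode.
 */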
static int
cifs_read_super(struct super_block *sb, void *data,
		const char *devname, int silent)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	int rc = 0;

	/* BB should we make this contingent on mount parm? */
	sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
	sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
	cifs_sb = CIFS_SB(sb);
	if (cifs_sb == NULL)
		return -ENOMEM;

#ifdef CONFIG_CIFS_DFS_UPCALL
	/* copy mount params to sb for use in submounts */
	/* BB: should we move this after the mount so we
	 * do not have to do the copy on failed mounts?
	 * BB: Maybe it is better to do the simple copy before the
	 * complex operation (mount), and on failure just exit instead
	 * of doing the mount and then attempting to undo it if this
	 * copy fails? */
	if (data) {
		int len = strlen(data);
		cifs_sb->mountdata = kzalloc(len + 1, GFP_KERNEL);
		if (cifs_sb->mountdata == NULL) {
			kfree(sb->s_fs_info);
			sb->s_fs_info = NULL;
			return -ENOMEM;
		}
		strncpy(cifs_sb->mountdata, data, len + 1);
		cifs_sb->mountdata[len] = '\0';
	}
#endif

	rc = cifs_mount(sb, cifs_sb, data, devname);

	if (rc) {
		if (!silent)
			cERROR(1,
			       ("cifs_mount failed w/return code = %d", rc));
		goto out_mount_failed;
	}

	sb->s_magic = CIFS_MAGIC_NUMBER;
	sb->s_op = &cifs_super_ops;
/*	if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
		sb->s_blocksize =
			cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
#ifdef CONFIG_CIFS_QUOTA
	sb->s_qcop = &cifs_quotactl_ops;
#endif
	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_iget(sb, ROOT_I);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		inode = NULL;
		goto out_no_root;
	}

	sb->s_root = d_alloc_root(inode);

	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_EXPERIMENTAL
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cFYI(1, ("export ops supported"));
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* EXPERIMENTAL */

	return 0;

out_no_root:
	cERROR(1, ("cifs_read_super: get root inode failed"));
	if (inode)
		iput(inode);

	cifs_umount(sb, cifs_sb);

out_mount_failed:
	if (cifs_sb) {
#ifdef CONFIG_CIFS_DFS_UPCALL
		if (cifs_sb->mountdata) {
			kfree(cifs_sb->mountdata);
			cifs_sb->mountdata = NULL;
		}
#endif
		if (cifs_sb->local_nls)
			unload_nls(cifs_sb->local_nls);
		kfree(cifs_sb);
	}
	return rc;
}

static void
cifs_put_super(struct super_block *sb)
{
	int rc = 0;
	struct cifs_sb_info *cifs_sb;

	cFYI(1, ("In cifs_put_super"));
	cifs_sb = CIFS_SB(sb);
	if (cifs_sb == NULL) {
		cFYI(1, ("Empty cifs superblock info passed to unmount"));
		return;
	}
	rc = cifs_umount(sb, cifs_sb);
	if (rc)
		cERROR(1, ("cifs_umount failed with return code %d", rc));
#ifdef CONFIG_CIFS_DFS_UPCALL
	if (cifs_sb->mountdata) {
		kfree(cifs_sb->mountdata);
		cifs_sb->mountdata = NULL;
	}
#endif

	unload_nls(cifs_sb->local_nls);
	kfree(cifs_sb);
	return;
}

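/*
 * cifs_statfs() fills in filesystem statistics for statfs(2). It tries the
 * POSIX QFS info call first (when the server advertises the CIFS Unix/POSIX
 * extensions), then the NT-level QFSInfo, and finally the legacy level-one
 * call for very old servers. Note that it returns 0 even if every call
 * fails.
 */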
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *tcon = cifs_sb->tcon;
	int rc = -EOPNOTSUPP;
	int xid;

	xid = GetXid();

	buf->f_type = CIFS_MAGIC_NUMBER;

	/*
	 * PATH_MAX may be too long - it would presumably be the total path,
	 * but note that some servers (including Samba 3) have a shorter
	 * maximum path.
	 *
	 * Instead we could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO.
	 */
	buf->f_namelen = PATH_MAX;
	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

	/*
	 * We could add a second check for a QFS Unix capability bit
	 */
	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_POSIX_EXTENSIONS & le64_to_cpu(tcon->fsUnixInfo.Capability)))
		rc = CIFSSMBQFSPosixInfo(xid, tcon, buf);

	/*
	 * Only need to call the old QFSInfo if the newer call failed,
	 * e.g. against OS/2.
	 */
	if (rc && (tcon->ses->capabilities & CAP_NT_SMBS))
		rc = CIFSSMBQFSInfo(xid, tcon, buf);

	/*
	 * Some old Windows servers also do not support level 103; retry with
	 * the older level-one call if the old server failed the previous call
	 * or we bypassed it because we detected that this was an older LANMAN
	 * session.
	 */
	if (rc)
		rc = SMBOldQFSInfo(xid, tcon, buf);

	FreeXid(xid);
	return 0;
}

static int cifs_permission(struct inode *inode, int mask)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(inode->i_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
		if ((mask & MAY_EXEC) && !execute_ok(inode))
			return -EACCES;
		else
			return 0;
	} else /* file mode might have been restricted at mount time
		  on the client (above and beyond ACL on servers) for
		  servers which do not support setting and viewing mode bits,
		  so allowing client to check permissions is useful */
		return generic_permission(inode, mask, NULL);
}

static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
struct kmem_cache *cifs_oplock_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;

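/*
 * cifs_alloc_inode() allocates the CIFS-specific inode structure from the
 * inode slab cache. The client-side caching flags start out false: nothing
 * may be cached until the file is opened and the server grants an oplock.
 */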
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	atomic_set(&cifs_inode->inUse, 0);
	cifs_inode->time = 0;
	cifs_inode->write_behind_rc = 0;
	/* Until the file is open and we have gotten oplock
	   info back from the server, can not assume caching of
	   file data or metadata */
	cifs_inode->clientCanCacheRead = false;
	cifs_inode->clientCanCacheAll = false;
	cifs_inode->delete_pending = false;
	cifs_inode->vfs_inode.i_blkbits = 14;	/* 2**14 = CIFS_MAX_MSGSIZE */

	/* Can not set i_flags here - they get immediately overwritten
	   to zero by the VFS */
/*	cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;*/
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	return &cifs_inode->vfs_inode;
}

static void
cifs_destroy_inode(struct inode *inode)
{
	kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
}

/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
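/*
 * An illustrative (not literal) /proc/mounts entry produced by the code
 * below might end with options such as:
 *
 *	,unc=\\server\share,username=guest,rsize=16384,wsize=57344
 *
 * Exactly which options appear depends on the flags set in
 * cifs_sb->mnt_cifs_flags; the values above are examples only.
 */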
static int
cifs_show_options(struct seq_file *s, struct vfsmount *m)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(m->mnt_sb);

	if (cifs_sb) {
		if (cifs_sb->tcon) {
			/* BB add prepath to mount options displayed */
			seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
			if (cifs_sb->tcon->ses) {
				if (cifs_sb->tcon->ses->userName)
					seq_printf(s, ",username=%s",
						   cifs_sb->tcon->ses->userName);
				if (cifs_sb->tcon->ses->domainName)
					seq_printf(s, ",domain=%s",
						   cifs_sb->tcon->ses->domainName);
			}
			if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) ||
			    !(cifs_sb->tcon->unix_ext))
				seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
			if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) ||
			    !(cifs_sb->tcon->unix_ext))
				seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
			if (!cifs_sb->tcon->unix_ext) {
				seq_printf(s, ",file_mode=0%o,dir_mode=0%o",
					   cifs_sb->mnt_file_mode,
					   cifs_sb->mnt_dir_mode);
			}
			if (cifs_sb->tcon->seal)
				seq_printf(s, ",seal");
			if (cifs_sb->tcon->nocase)
				seq_printf(s, ",nocase");
			if (cifs_sb->tcon->retry)
				seq_printf(s, ",hard");
		}
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
			seq_printf(s, ",posixpaths");
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
			seq_printf(s, ",setuids");
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
			seq_printf(s, ",serverino");
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
			seq_printf(s, ",directio");
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
			seq_printf(s, ",nouser_xattr");
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
			seq_printf(s, ",mapchars");
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
			seq_printf(s, ",sfu");
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			seq_printf(s, ",nobrl");
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
			seq_printf(s, ",cifsacl");
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
			seq_printf(s, ",dynperm");
		if (m->mnt_sb->s_flags & MS_POSIXACL)
			seq_printf(s, ",acl");

		seq_printf(s, ",rsize=%d", cifs_sb->rsize);
		seq_printf(s, ",wsize=%d", cifs_sb->wsize);
	}
	return 0;
}

#ifdef CONFIG_CIFS_QUOTA
int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
		    struct fs_disk_quota *pdquota)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;


	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
		    struct fs_disk_quota *pdquota)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("get type: 0x%x id: %d", quota_type, qid));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb) {
		pTcon = cifs_sb->tcon;
	} else {
		return -EIO;
	}
	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("pqstats %p", qstats));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

static struct quotactl_ops cifs_quotactl_ops = {
	.set_xquota = cifs_xquota_set,
	.get_xquota = cifs_xquota_get,
	.set_xstate = cifs_xstate_set,
	.get_xstate = cifs_xstate_get,
};
#endif

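/*
 * cifs_umount_begin() is called for a forced unmount. If this mount is the
 * last user of the tree connection it marks the tcon as exiting, then wakes
 * up any threads blocked on the server request/response queues so they can
 * error out instead of waiting for replies.
 */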
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb->tcon;
	if (tcon == NULL)
		return;
	down(&tcon->tconSem);
	if (atomic_read(&tcon->useCount) == 1)
		tcon->tidStatus = CifsExiting;
	up(&tcon->tconSem);

	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cFYI(1, ("wake up tasks now - umount begin not complete"));
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}
	/* BB FIXME - finish add checks for tidStatus BB */

	return;
}

#ifdef CONFIG_CIFS_STATS2
static int cifs_show_stats(struct seq_file *s, struct vfsmount *mnt)
{
	/* BB FIXME */
	return 0;
}
#endif

static int cifs_remount(struct super_block *sb, int *flags, char *data)
{
	*flags |= MS_NODIRATIME;
	return 0;
}

static const struct super_operations cifs_super_ops = {
	.put_super = cifs_put_super,
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.destroy_inode = cifs_destroy_inode,
/*	.drop_inode = generic_delete_inode,
	.delete_inode = cifs_delete_inode, */  /* Do not need above two
	functions unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin = cifs_umount_begin,
	.remount_fs = cifs_remount,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};

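/*
 * cifs_get_sb() is the mount entry point registered in cifs_fs_type. It
 * always creates a fresh anonymous superblock (no superblock sharing) and
 * then hands it to cifs_read_super() to do the real work.
 */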
static int
cifs_get_sb(struct file_system_type *fs_type,
	    int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	int rc;
	struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);

	cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));

	if (IS_ERR(sb))
		return PTR_ERR(sb);

	sb->s_flags = flags;

	rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
	if (rc) {
		up_write(&sb->s_umount);
		deactivate_super(sb);
		return rc;
	}
	sb->s_flags |= MS_ACTIVE;
	return simple_set_mnt(mnt, sb);
}

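/*
 * cifs_file_aio_write() wraps generic_file_aio_write() and, when the client
 * does not hold an exclusive (write) oplock on the file, starts writeback
 * immediately so that the data is not cached only locally.
 */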
static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	ssize_t written;

	written = generic_file_aio_write(iocb, iov, nr_segs, pos);
	if (!CIFS_I(inode)->clientCanCacheAll)
		filemap_fdatawrite(inode->i_mapping);
	return written;
}

static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
{
	/* origin == SEEK_END => we must revalidate the cached file length */
	if (origin == SEEK_END) {
		int retval;

		/* some applications poll for the file length in this strange
		   way so we must seek to end on non-oplocked files by
		   setting the revalidate time to zero */
		CIFS_I(file->f_path.dentry->d_inode)->time = 0;

		retval = cifs_revalidate(file->f_path.dentry);
		if (retval < 0)
			return (loff_t)retval;
	}
	return generic_file_llseek_unlocked(file, offset, origin);
}

#ifdef CONFIG_CIFS_EXPERIMENTAL
static int cifs_setlease(struct file *file, long arg, struct file_lock **lease)
{
	/* note that this is called by vfs setlease with the BKL held
	   although I doubt that BKL is needed here in cifs */
	struct inode *inode = file->f_path.dentry->d_inode;

	if (!(S_ISREG(inode->i_mode)))
		return -EINVAL;

	/* check if file is oplocked */
	if (((arg == F_RDLCK) &&
	     (CIFS_I(inode)->clientCanCacheRead)) ||
	    ((arg == F_WRLCK) &&
	     (CIFS_I(inode)->clientCanCacheAll)))
		return generic_setlease(file, arg, lease);
	else if (CIFS_SB(inode->i_sb)->tcon->local_lease &&
		 !CIFS_I(inode)->clientCanCacheRead)
		/* If the server claims to support oplock on this
		   file, then we still need to check oplock even
		   if the local_lease mount option is set, but there
		   are servers which do not support oplock for which
		   this mount option may be useful if the user
		   knows that the file won't be changed on the server
		   by anyone else */
		return generic_setlease(file, arg, lease);
	else
		return -EAGAIN;
}
#endif

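/*
 * cifs_fs_type is what gets registered with the VFS in init_cifs(); "cifs"
 * is the name used with mount -t cifs. The anonymous superblocks created in
 * cifs_get_sb() are torn down via kill_anon_super().
 */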
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.get_sb = cifs_get_sb,
	.kill_sb = kill_anon_super,
	/*  .fs_flags */
};
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename,
	.permission = cifs_permission,
/*	revalidate:cifs_revalidate,   */
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod = cifs_mknod,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};

const struct inode_operations cifs_file_inode_ops = {
/*	revalidate:cifs_revalidate, */
	.setattr = cifs_setattr,
	.getattr = cifs_getattr, /* do we need this anymore? */
	.rename = cifs_rename,
	.permission = cifs_permission,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};

const struct inode_operations cifs_symlink_inode_ops = {
	.readlink = generic_readlink,
	.follow_link = cifs_follow_link,
	.put_link = cifs_put_link,
	.permission = cifs_permission,
	/* BB add the following two eventually */
	/* revalidate: cifs_revalidate,
	   setattr:    cifs_notify_change, *//* BB do we need notify change */
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};

const struct file_operations cifs_file_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
	.unlocked_ioctl	= cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
	.setlease = cifs_setlease,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_file_direct_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.splice_read = generic_file_splice_read,
#ifdef CONFIG_CIFS_POSIX
	.unlocked_ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
	.setlease = cifs_setlease,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};
const struct file_operations cifs_file_nobrl_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
	.unlocked_ioctl	= cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
	.setlease = cifs_setlease,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_file_direct_nobrl_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.splice_read = generic_file_splice_read,
#ifdef CONFIG_CIFS_POSIX
	.unlocked_ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
	.setlease = cifs_setlease,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_dir_ops = {
	.readdir = cifs_readdir,
	.release = cifs_closedir,
	.read = generic_read_dir,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
	.unlocked_ioctl = cifs_ioctl,
	.llseek = generic_file_llseek,
};

static void
cifs_init_once(void *inode)
{
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->vfs_inode);
	INIT_LIST_HEAD(&cifsi->lockList);
}

static int
cifs_init_inodecache(void)
{
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof(struct cifsInodeInfo),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD),
					      cifs_init_once);
	if (cifs_inode_cachep == NULL)
		return -ENOMEM;

	return 0;
}

static void
cifs_destroy_inodecache(void)
{
	kmem_cache_destroy(cifs_inode_cachep);
}

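/*
 * cifs_init_request_bufs() clamps CIFSMaxBufSize into its supported range
 * and then creates two slab caches backed by mempools: one for full-size
 * request buffers and one for small (MAX_CIFS_SMALL_BUFFER_SIZE) buffers
 * used by most handle-based requests.
 */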
static int
cifs_init_request_bufs(void)
{
	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	   Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00;	/* Round size to even 512 byte mult */
	}
/*	cERROR(1, ("CIFSMaxBufSize %d 0x%x", CIFSMaxBufSize, CIFSMaxBufSize)); */
	cifs_req_cachep = kmem_cache_create("cifs_request",
					    CIFSMaxBufSize +
					    MAX_CIFS_HDR_SIZE, 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cERROR(1, ("cifs_min_rcv set to maximum (64)"));
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	   almost all handle based requests (but not write response, nor is it
	   sufficient for path based requests).  A smaller size would have
	   been more efficient (compacting multiple slab items on one 4k page)
	   for the case in which debug was on, but this larger size allows
	   more SMBs to use small buffer alloc and is still much more
	   efficient to alloc 1 per page off the slab compared to 17K (5page)
	   alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cFYI(1, ("cifs_min_small set to maximum (256)"));
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}

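/*
 * cifs_init_mids() sets up the caches for multiplex-id (mid) entries, which
 * track outstanding SMB requests, and for oplock queue entries.
 */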
static int
cifs_init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof(struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
					       sizeof(struct oplock_q_entry), 0,
					       SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_oplock_cachep == NULL) {
		mempool_destroy(cifs_mid_poolp);
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
	kmem_cache_destroy(cifs_oplock_cachep);
}

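/*
 * cifs_oplock_thread() is the "cifsoplockd" kernel thread. It loops waiting
 * for entries on GlobalOplock_Q; for each one it flushes the inode's dirty
 * pages (and invalidates cached data when the read oplock is also lost) and
 * then sends the oplock release back to the server via CIFSSMBLock with
 * LOCKING_ANDX_OPLOCK_RELEASE.
 */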
static int cifs_oplock_thread(void *dummyarg)
{
	struct oplock_q_entry *oplock_item;
	struct cifsTconInfo *pTcon;
	struct inode *inode;
	__u16 netfid;
	int rc, waitrc = 0;

	set_freezable();
	do {
		if (try_to_freeze())
			continue;

		spin_lock(&GlobalMid_Lock);
		if (list_empty(&GlobalOplock_Q)) {
			spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(39*HZ);
		} else {
			oplock_item = list_entry(GlobalOplock_Q.next,
						 struct oplock_q_entry, qhead);
			cFYI(1, ("found oplock item to write out"));
			pTcon = oplock_item->tcon;
			inode = oplock_item->pinode;
			netfid = oplock_item->netfid;
			spin_unlock(&GlobalMid_Lock);
			DeleteOplockQEntry(oplock_item);
			/* can not grab inode sem here since it would
			   deadlock when oplock received on delete
			   since vfs_unlink holds the i_mutex across
			   the call */
			/* mutex_lock(&inode->i_mutex); */
			if (S_ISREG(inode->i_mode)) {
#ifdef CONFIG_CIFS_EXPERIMENTAL
				if (CIFS_I(inode)->clientCanCacheAll == 0)
					break_lease(inode, FMODE_READ);
				else if (CIFS_I(inode)->clientCanCacheRead == 0)
					break_lease(inode, FMODE_WRITE);
#endif
				rc = filemap_fdatawrite(inode->i_mapping);
				if (CIFS_I(inode)->clientCanCacheRead == 0) {
					waitrc = filemap_fdatawait(
							      inode->i_mapping);
					invalidate_remote_inode(inode);
				}
				if (rc == 0)
					rc = waitrc;
			} else
				rc = 0;
			/* mutex_unlock(&inode->i_mutex); */
			if (rc)
				CIFS_I(inode)->write_behind_rc = rc;
			cFYI(1, ("Oplock flush inode %p rc %d",
				 inode, rc));

			/* releasing stale oplock after recent reconnect
			   of smb session using a now incorrect file
			   handle is not a data integrity issue but do
			   not bother sending an oplock release if session
			   to server still is disconnected since oplock
			   already released by the server in that case */
			if (!pTcon->need_reconnect) {
				rc = CIFSSMBLock(0, pTcon, netfid,
						 0 /* len */ , 0 /* offset */, 0,
						 0, LOCKING_ANDX_OPLOCK_RELEASE,
						 false /* wait flag */);
				cFYI(1, ("Oplock release rc = %d", rc));
			}
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(1);  /* yield in case q were corrupt */
		}
	} while (!kthread_should_stop());

	return 0;
}

static int cifs_dnotify_thread(void *dummyarg)
{
	struct list_head *tmp;
	struct cifsSesInfo *ses;

	do {
		if (try_to_freeze())
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(15*HZ);
		read_lock(&GlobalSMBSeslock);
		/* check whether any requests are stuck waiting; if so, wake
		   the response queue so the blocked thread can wake up and
		   error out */
		list_for_each(tmp, &GlobalSMBSessionList) {
			ses = list_entry(tmp, struct cifsSesInfo,
					 cifsSessionList);
			if (ses->server && atomic_read(&ses->server->inFlight))
				wake_up_all(&ses->server->response_q);
		}
		read_unlock(&GlobalSMBSeslock);
	} while (!kthread_should_stop());

	return 0;
}

static int __init
init_cifs(void)
{
	int rc = 0;
	cifs_proc_init();
	INIT_LIST_HEAD(&global_cifs_sock_list);
	INIT_LIST_HEAD(&GlobalSMBSessionList); /* BB to be removed by jl */
	INIT_LIST_HEAD(&GlobalTreeConnectionList); /* BB to be removed by jl */
	INIT_LIST_HEAD(&GlobalOplock_Q);
#ifdef CONFIG_CIFS_EXPERIMENTAL
	INIT_LIST_HEAD(&GlobalDnotifyReqList);
	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
#endif
	/*
	 * Initialize Global counters
	 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&bufAllocCount, 0);
	atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&totBufAllocCount, 0);
	atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	memset(Local_System_Name, 0, 15);
	rwlock_init(&GlobalSMBSeslock);
	spin_lock_init(&GlobalMid_Lock);

	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cFYI(1, ("cifs_max_pending set to min of 2"));
	} else if (cifs_max_pending > 256) {
		cifs_max_pending = 256;
		cFYI(1, ("cifs_max_pending set to max of 256"));
	}

	rc = cifs_init_inodecache();
	if (rc)
		goto out_clean_proc;

	rc = cifs_init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_destroy_request_bufs;
#ifdef CONFIG_CIFS_UPCALL
	rc = register_key_type(&cifs_spnego_key_type);
	if (rc)
		goto out_unregister_filesystem;
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	rc = register_key_type(&key_type_dns_resolver);
	if (rc)
		goto out_unregister_key_type;
#endif
	oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
	if (IS_ERR(oplockThread)) {
		rc = PTR_ERR(oplockThread);
		cERROR(1, ("error %d create oplock thread", rc));
		goto out_unregister_dfs_key_type;
	}

	dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
	if (IS_ERR(dnotifyThread)) {
		rc = PTR_ERR(dnotifyThread);
		cERROR(1, ("error %d create dnotify thread", rc));
		goto out_stop_oplock_thread;
	}

	return 0;

out_stop_oplock_thread:
	kthread_stop(oplockThread);
out_unregister_dfs_key_type:
#ifdef CONFIG_CIFS_DFS_UPCALL
	unregister_key_type(&key_type_dns_resolver);
out_unregister_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	unregister_key_type(&cifs_spnego_key_type);
out_unregister_filesystem:
#endif
	unregister_filesystem(&cifs_fs_type);
out_destroy_request_bufs:
	cifs_destroy_request_bufs();
out_destroy_mids:
	cifs_destroy_mids();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_clean_proc:
	cifs_proc_clean();
	return rc;
}

static void __exit
exit_cifs(void)
{
	cFYI(DBG2, ("exit_cifs"));
	cifs_proc_clean();
#ifdef CONFIG_CIFS_DFS_UPCALL
	cifs_dfs_release_automount_timer();
	unregister_key_type(&key_type_dns_resolver);
#endif
#ifdef CONFIG_CIFS_UPCALL
	unregister_key_type(&cifs_spnego_key_type);
#endif
	unregister_filesystem(&cifs_fs_type);
	cifs_destroy_inodecache();
	cifs_destroy_mids();
	cifs_destroy_request_bufs();
	kthread_stop(oplockThread);
	kthread_stop(dnotifyThread);
}

MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
    ("VFS to access servers complying with the SNIA CIFS Specification "
     "e.g. Samba and Windows");
MODULE_VERSION(CIFS_VERSION);
module_init(init_cifs)
module_exit(exit_cifs)