/*
 * fs/ceph/super.c
 *
 * Source: mirror_ubuntu-bionic-kernel.git (git.proxmox.com), at commit
 * "ceph: fix use-after-free in ceph_statfs()".
 */
1
2 #include <linux/ceph/ceph_debug.h>
3
4 #include <linux/backing-dev.h>
5 #include <linux/ctype.h>
6 #include <linux/fs.h>
7 #include <linux/inet.h>
8 #include <linux/in6.h>
9 #include <linux/module.h>
10 #include <linux/mount.h>
11 #include <linux/parser.h>
12 #include <linux/sched.h>
13 #include <linux/seq_file.h>
14 #include <linux/slab.h>
15 #include <linux/statfs.h>
16 #include <linux/string.h>
17
18 #include "super.h"
19 #include "mds_client.h"
20 #include "cache.h"
21
22 #include <linux/ceph/ceph_features.h>
23 #include <linux/ceph/decode.h>
24 #include <linux/ceph/mon_client.h>
25 #include <linux/ceph/auth.h>
26 #include <linux/ceph/debugfs.h>
27
28 /*
29 * Ceph superblock operations
30 *
31 * Handle the basics of mounting, unmounting.
32 */
33
34 /*
35 * super ops
36 */
37 static void ceph_put_super(struct super_block *s)
38 {
39 struct ceph_fs_client *fsc = ceph_sb_to_client(s);
40
41 dout("put_super\n");
42 ceph_mdsc_close_sessions(fsc->mdsc);
43 }
44
/*
 * statfs: fetch cluster-wide (or single-data-pool) usage from the
 * monitors and translate it into struct kstatfs for userspace.
 * Returns 0 on success or a negative errno from the mon request.
 */
static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry));
	struct ceph_mon_client *monc = &fsc->client->monc;
	struct ceph_statfs st;
	u64 fsid;
	int err;
	u64 data_pool;

	/*
	 * With exactly one data pool, request that pool's stats;
	 * otherwise fall back to whole-cluster stats (CEPH_NOPOOL).
	 */
	if (fsc->mdsc->mdsmap->m_num_data_pg_pools == 1) {
		data_pool = fsc->mdsc->mdsmap->m_data_pg_pools[0];
	} else {
		data_pool = CEPH_NOPOOL;
	}

	dout("statfs\n");
	err = ceph_monc_do_statfs(monc, data_pool, &st);
	if (err < 0)
		return err;

	/* fill in kstatfs */
	buf->f_type = CEPH_SUPER_MAGIC; /* ?? */

	/*
	 * express utilization in terms of large blocks to avoid
	 * overflow on 32-bit machines.
	 *
	 * NOTE: for the time being, we make bsize == frsize to humor
	 * not-yet-ancient versions of glibc that are broken.
	 * Someday, we will probably want to report a real block
	 * size... whatever that may mean for a network file system!
	 */
	buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
	buf->f_frsize = 1 << CEPH_BLOCK_SHIFT;
	/* st counts kilobytes; shift down to CEPH_BLOCK-sized units */
	buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
	buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
	buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);

	buf->f_files = le64_to_cpu(st.num_objects);
	buf->f_ffree = -1;
	buf->f_namelen = NAME_MAX;

	/* Must convert the fsid, for consistent values across arches */
	/*
	 * Hold monc->mutex while reading monmap so it cannot be freed
	 * under us (this locking is the "use-after-free in
	 * ceph_statfs()" fix this tree carries).
	 */
	mutex_lock(&monc->mutex);
	fsid = le64_to_cpu(*(__le64 *)(&monc->monmap->fsid)) ^
	       le64_to_cpu(*((__le64 *)&monc->monmap->fsid + 1));
	mutex_unlock(&monc->mutex);

	buf->f_fsid.val[0] = fsid & 0xffffffff;
	buf->f_fsid.val[1] = fsid >> 32;

	return 0;
}
98
99
100 static int ceph_sync_fs(struct super_block *sb, int wait)
101 {
102 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
103
104 if (!wait) {
105 dout("sync_fs (non-blocking)\n");
106 ceph_flush_dirty_caps(fsc->mdsc);
107 dout("sync_fs (non-blocking) done\n");
108 return 0;
109 }
110
111 dout("sync_fs (blocking)\n");
112 ceph_osdc_sync(&fsc->client->osdc);
113 ceph_mdsc_sync(fsc->mdsc);
114 dout("sync_fs (blocking) done\n");
115 return 0;
116 }
117
118 /*
119 * mount options
120 */
/*
 * Mount option token ids.  Ordering is load-bearing: tokens before
 * Opt_last_int take an integer argument, tokens between Opt_last_int
 * and Opt_last_string take a string argument, and the remainder are
 * bare flags (see parse_fsopt_token()).
 */
enum {
	Opt_wsize,
	Opt_rsize,
	Opt_rasize,
	Opt_caps_wanted_delay_min,
	Opt_caps_wanted_delay_max,
	Opt_readdir_max_entries,
	Opt_readdir_max_bytes,
	Opt_congestion_kb,
	Opt_last_int,
	/* int args above */
	Opt_snapdirname,
	Opt_mds_namespace,
	Opt_fscache_uniq,
	Opt_last_string,
	/* string args above */
	Opt_dirstat,
	Opt_nodirstat,
	Opt_rbytes,
	Opt_norbytes,
	Opt_asyncreaddir,
	Opt_noasyncreaddir,
	Opt_dcache,
	Opt_nodcache,
	Opt_ino32,
	Opt_noino32,
	Opt_fscache,
	Opt_nofscache,
	Opt_poolperm,
	Opt_nopoolperm,
	Opt_require_active_mds,
	Opt_norequire_active_mds,
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	Opt_acl,
#endif
	Opt_noacl,
};
158
/* match_token() patterns mapping option strings to the token ids above */
static match_table_t fsopt_tokens = {
	{Opt_wsize, "wsize=%d"},
	{Opt_rsize, "rsize=%d"},
	{Opt_rasize, "rasize=%d"},
	{Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
	{Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
	{Opt_readdir_max_entries, "readdir_max_entries=%d"},
	{Opt_readdir_max_bytes, "readdir_max_bytes=%d"},
	{Opt_congestion_kb, "write_congestion_kb=%d"},
	/* int args above */
	{Opt_snapdirname, "snapdirname=%s"},
	{Opt_mds_namespace, "mds_namespace=%s"},
	{Opt_fscache_uniq, "fsc=%s"},
	/* string args above */
	{Opt_dirstat, "dirstat"},
	{Opt_nodirstat, "nodirstat"},
	{Opt_rbytes, "rbytes"},
	{Opt_norbytes, "norbytes"},
	{Opt_asyncreaddir, "asyncreaddir"},
	{Opt_noasyncreaddir, "noasyncreaddir"},
	{Opt_dcache, "dcache"},
	{Opt_nodcache, "nodcache"},
	{Opt_ino32, "ino32"},
	{Opt_noino32, "noino32"},
	{Opt_fscache, "fsc"},
	{Opt_nofscache, "nofsc"},
	{Opt_poolperm, "poolperm"},
	{Opt_nopoolperm, "nopoolperm"},
	{Opt_require_active_mds, "require_active_mds"},
	{Opt_norequire_active_mds, "norequire_active_mds"},
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	{Opt_acl, "acl"},
#endif
	{Opt_noacl, "noacl"},
	{-1, NULL}
};
195
196 static int parse_fsopt_token(char *c, void *private)
197 {
198 struct ceph_mount_options *fsopt = private;
199 substring_t argstr[MAX_OPT_ARGS];
200 int token, intval, ret;
201
202 token = match_token((char *)c, fsopt_tokens, argstr);
203 if (token < 0)
204 return -EINVAL;
205
206 if (token < Opt_last_int) {
207 ret = match_int(&argstr[0], &intval);
208 if (ret < 0) {
209 pr_err("bad mount option arg (not int) "
210 "at '%s'\n", c);
211 return ret;
212 }
213 dout("got int token %d val %d\n", token, intval);
214 } else if (token > Opt_last_int && token < Opt_last_string) {
215 dout("got string token %d val %s\n", token,
216 argstr[0].from);
217 } else {
218 dout("got token %d\n", token);
219 }
220
221 switch (token) {
222 case Opt_snapdirname:
223 kfree(fsopt->snapdir_name);
224 fsopt->snapdir_name = kstrndup(argstr[0].from,
225 argstr[0].to-argstr[0].from,
226 GFP_KERNEL);
227 if (!fsopt->snapdir_name)
228 return -ENOMEM;
229 break;
230 case Opt_mds_namespace:
231 kfree(fsopt->mds_namespace);
232 fsopt->mds_namespace = kstrndup(argstr[0].from,
233 argstr[0].to-argstr[0].from,
234 GFP_KERNEL);
235 if (!fsopt->mds_namespace)
236 return -ENOMEM;
237 break;
238 case Opt_fscache_uniq:
239 kfree(fsopt->fscache_uniq);
240 fsopt->fscache_uniq = kstrndup(argstr[0].from,
241 argstr[0].to-argstr[0].from,
242 GFP_KERNEL);
243 if (!fsopt->fscache_uniq)
244 return -ENOMEM;
245 fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
246 break;
247 /* misc */
248 case Opt_wsize:
249 if (intval < PAGE_SIZE || intval > CEPH_MAX_WRITE_SIZE)
250 return -EINVAL;
251 fsopt->wsize = ALIGN(intval, PAGE_SIZE);
252 break;
253 case Opt_rsize:
254 if (intval < PAGE_SIZE || intval > CEPH_MAX_READ_SIZE)
255 return -EINVAL;
256 fsopt->rsize = ALIGN(intval, PAGE_SIZE);
257 break;
258 case Opt_rasize:
259 if (intval < 0)
260 return -EINVAL;
261 fsopt->rasize = ALIGN(intval, PAGE_SIZE);
262 break;
263 case Opt_caps_wanted_delay_min:
264 if (intval < 1)
265 return -EINVAL;
266 fsopt->caps_wanted_delay_min = intval;
267 break;
268 case Opt_caps_wanted_delay_max:
269 if (intval < 1)
270 return -EINVAL;
271 fsopt->caps_wanted_delay_max = intval;
272 break;
273 case Opt_readdir_max_entries:
274 if (intval < 1)
275 return -EINVAL;
276 fsopt->max_readdir = intval;
277 break;
278 case Opt_readdir_max_bytes:
279 if (intval < PAGE_SIZE && intval != 0)
280 return -EINVAL;
281 fsopt->max_readdir_bytes = intval;
282 break;
283 case Opt_congestion_kb:
284 if (intval < 1024) /* at least 1M */
285 return -EINVAL;
286 fsopt->congestion_kb = intval;
287 break;
288 case Opt_dirstat:
289 fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
290 break;
291 case Opt_nodirstat:
292 fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
293 break;
294 case Opt_rbytes:
295 fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
296 break;
297 case Opt_norbytes:
298 fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
299 break;
300 case Opt_asyncreaddir:
301 fsopt->flags &= ~CEPH_MOUNT_OPT_NOASYNCREADDIR;
302 break;
303 case Opt_noasyncreaddir:
304 fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
305 break;
306 case Opt_dcache:
307 fsopt->flags |= CEPH_MOUNT_OPT_DCACHE;
308 break;
309 case Opt_nodcache:
310 fsopt->flags &= ~CEPH_MOUNT_OPT_DCACHE;
311 break;
312 case Opt_ino32:
313 fsopt->flags |= CEPH_MOUNT_OPT_INO32;
314 break;
315 case Opt_noino32:
316 fsopt->flags &= ~CEPH_MOUNT_OPT_INO32;
317 break;
318 case Opt_fscache:
319 fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
320 break;
321 case Opt_nofscache:
322 fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
323 break;
324 case Opt_poolperm:
325 fsopt->flags &= ~CEPH_MOUNT_OPT_NOPOOLPERM;
326 printk ("pool perm");
327 break;
328 case Opt_nopoolperm:
329 fsopt->flags |= CEPH_MOUNT_OPT_NOPOOLPERM;
330 break;
331 case Opt_require_active_mds:
332 fsopt->flags &= ~CEPH_MOUNT_OPT_MOUNTWAIT;
333 break;
334 case Opt_norequire_active_mds:
335 fsopt->flags |= CEPH_MOUNT_OPT_MOUNTWAIT;
336 break;
337 #ifdef CONFIG_CEPH_FS_POSIX_ACL
338 case Opt_acl:
339 fsopt->sb_flags |= SB_POSIXACL;
340 break;
341 #endif
342 case Opt_noacl:
343 fsopt->sb_flags &= ~SB_POSIXACL;
344 break;
345 default:
346 BUG_ON(token);
347 }
348 return 0;
349 }
350
351 static void destroy_mount_options(struct ceph_mount_options *args)
352 {
353 dout("destroy_mount_options %p\n", args);
354 kfree(args->snapdir_name);
355 kfree(args->mds_namespace);
356 kfree(args->server_path);
357 kfree(args->fscache_uniq);
358 kfree(args);
359 }
360
/*
 * strcmp that tolerates NULL: two NULLs compare equal, and a non-NULL
 * string orders before a NULL one.
 */
static int strcmp_null(const char *s1, const char *s2)
{
	if (s1 && s2)
		return strcmp(s1, s2);
	if (s1)
		return -1;
	if (s2)
		return 1;
	return 0;
}
371
/*
 * Compare proposed mount options against an existing fs client's,
 * deciding whether a superblock can be shared.  Returns 0 on a full
 * match, nonzero otherwise.
 */
static int compare_mount_options(struct ceph_mount_options *new_fsopt,
				 struct ceph_options *new_opt,
				 struct ceph_fs_client *fsc)
{
	struct ceph_mount_options *fsopt1 = new_fsopt;
	struct ceph_mount_options *fsopt2 = fsc->mount_options;
	/*
	 * Everything up to the first pointer member (snapdir_name)
	 * is plain scalar data and can be compared bitwise; the
	 * strings after it are compared individually below.
	 */
	int ofs = offsetof(struct ceph_mount_options, snapdir_name);
	int ret;

	ret = memcmp(fsopt1, fsopt2, ofs);
	if (ret)
		return ret;

	ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
	if (ret)
		return ret;
	ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace);
	if (ret)
		return ret;
	ret = strcmp_null(fsopt1->server_path, fsopt2->server_path);
	if (ret)
		return ret;
	ret = strcmp_null(fsopt1->fscache_uniq, fsopt2->fscache_uniq);
	if (ret)
		return ret;

	/* finally, the libceph-level options (monitors, fsid, ...) */
	return ceph_compare_options(new_opt, fsc->client);
}
400
/*
 * Parse the mount options and device name into a new
 * ceph_mount_options (*pfsopt) and libceph ceph_options (*popt).
 * On success both are owned by the caller; on error everything
 * allocated here is freed and a negative errno is returned.
 */
static int parse_mount_options(struct ceph_mount_options **pfsopt,
			       struct ceph_options **popt,
			       int flags, char *options,
			       const char *dev_name)
{
	struct ceph_mount_options *fsopt;
	const char *dev_name_end;
	int err;

	if (!dev_name || !*dev_name)
		return -EINVAL;

	fsopt = kzalloc(sizeof(*fsopt), GFP_KERNEL);
	if (!fsopt)
		return -ENOMEM;

	dout("parse_mount_options %p, dev_name '%s'\n", fsopt, dev_name);

	/* defaults; individual options below may override them */
	fsopt->sb_flags = flags;
	fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;

	fsopt->wsize = CEPH_MAX_WRITE_SIZE;
	fsopt->rsize = CEPH_MAX_READ_SIZE;
	fsopt->rasize = CEPH_RASIZE_DEFAULT;
	fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
	if (!fsopt->snapdir_name) {
		err = -ENOMEM;
		goto out;
	}

	fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
	fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
	fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
	fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
	fsopt->congestion_kb = default_congestion_kb();

	/*
	 * Distinguish the server list from the path in "dev_name".
	 * Internally we do not include the leading '/' in the path.
	 *
	 * "dev_name" will look like:
	 *   <server_spec>[,<server_spec>...]:[<path>]
	 * where
	 *   <server_spec> is <ip>[:<port>]
	 *   <path> is optional, but if present must begin with '/'
	 */
	dev_name_end = strchr(dev_name, '/');
	if (dev_name_end) {
		/* skip a bare trailing '/'; keep only non-empty paths */
		if (strlen(dev_name_end) > 1) {
			/* note: stored WITH the leading '/' — the mount
			 * path skips it (see ceph_real_mount) */
			fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
			if (!fsopt->server_path) {
				err = -ENOMEM;
				goto out;
			}
		}
	} else {
		dev_name_end = dev_name + strlen(dev_name);
	}
	err = -EINVAL;
	dev_name_end--;		/* back up to ':' separator */
	if (dev_name_end < dev_name || *dev_name_end != ':') {
		pr_err("device name is missing path (no : separator in %s)\n",
		       dev_name);
		goto out;
	}
	dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
	if (fsopt->server_path)
		dout("server path '%s'\n", fsopt->server_path);

	/* libceph parses its own options; ours come back via the callback */
	*popt = ceph_parse_options(options, dev_name, dev_name_end,
				   parse_fsopt_token, (void *)fsopt);
	if (IS_ERR(*popt)) {
		err = PTR_ERR(*popt);
		goto out;
	}

	/* success */
	*pfsopt = fsopt;
	return 0;

out:
	destroy_mount_options(fsopt);
	return err;
}
485
/**
 * ceph_show_options - Show mount options in /proc/mounts
 * @m: seq_file to write to
 * @root: root of that (sub)tree
 *
 * Only options that differ from their defaults are emitted, so the
 * line stays short for a plain mount.
 */
static int ceph_show_options(struct seq_file *m, struct dentry *root)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb);
	struct ceph_mount_options *fsopt = fsc->mount_options;
	size_t pos;
	int ret;

	/* a comma between MNT/MS and client options */
	seq_putc(m, ',');
	pos = m->count;

	ret = ceph_print_client_options(m, fsc->client);
	if (ret)
		return ret;

	/* retract our comma if no client options */
	if (m->count == pos)
		m->count--;

	if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT)
		seq_puts(m, ",dirstat");
	if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES))
		seq_puts(m, ",rbytes");
	if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR)
		seq_puts(m, ",noasyncreaddir");
	if ((fsopt->flags & CEPH_MOUNT_OPT_DCACHE) == 0)
		seq_puts(m, ",nodcache");
	if (fsopt->flags & CEPH_MOUNT_OPT_FSCACHE) {
		if (fsopt->fscache_uniq)
			seq_printf(m, ",fsc=%s", fsopt->fscache_uniq);
		else
			seq_puts(m, ",fsc");
	}
	if (fsopt->flags & CEPH_MOUNT_OPT_NOPOOLPERM)
		seq_puts(m, ",nopoolperm");

#ifdef CONFIG_CEPH_FS_POSIX_ACL
	if (fsopt->sb_flags & SB_POSIXACL)
		seq_puts(m, ",acl");
	else
		seq_puts(m, ",noacl");
#endif

	if (fsopt->mds_namespace)
		seq_printf(m, ",mds_namespace=%s", fsopt->mds_namespace);
	/*
	 * NOTE(review): wsize defaults to CEPH_MAX_WRITE_SIZE (nonzero,
	 * set in parse_mount_options), so unlike rsize/rasize below this
	 * condition is always true and wsize is printed on every mount —
	 * confirm whether "!= CEPH_MAX_WRITE_SIZE" was intended.
	 */
	if (fsopt->wsize)
		seq_printf(m, ",wsize=%d", fsopt->wsize);
	if (fsopt->rsize != CEPH_MAX_READ_SIZE)
		seq_printf(m, ",rsize=%d", fsopt->rsize);
	if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
		seq_printf(m, ",rasize=%d", fsopt->rasize);
	if (fsopt->congestion_kb != default_congestion_kb())
		seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
	if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
		seq_printf(m, ",caps_wanted_delay_min=%d",
			   fsopt->caps_wanted_delay_min);
	if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
		seq_printf(m, ",caps_wanted_delay_max=%d",
			   fsopt->caps_wanted_delay_max);
	if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
		seq_printf(m, ",readdir_max_entries=%d", fsopt->max_readdir);
	if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
		seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
	if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
		seq_show_option(m, "snapdirname", fsopt->snapdir_name);

	return 0;
}
559
560 /*
561 * handle any mon messages the standard library doesn't understand.
562 * return error if we don't either.
563 */
564 static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
565 {
566 struct ceph_fs_client *fsc = client->private;
567 int type = le16_to_cpu(msg->hdr.type);
568
569 switch (type) {
570 case CEPH_MSG_MDS_MAP:
571 ceph_mdsc_handle_mdsmap(fsc->mdsc, msg);
572 return 0;
573 case CEPH_MSG_FS_MAP_USER:
574 ceph_mdsc_handle_fsmap(fsc->mdsc, msg);
575 return 0;
576 default:
577 return -1;
578 }
579 }
580
/*
 * create a new fs client
 *
 * Allocates the ceph_fs_client, its libceph client, the writeback/
 * invalidate/truncate workqueues and the pagevec mempool.  Takes
 * ownership of @fsopt and @opt only on success; on error the caller
 * still owns them.  Returns the client or an ERR_PTR.
 */
static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
					       struct ceph_options *opt)
{
	struct ceph_fs_client *fsc;
	int page_count;
	size_t size;
	int err = -ENOMEM;

	fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
	if (!fsc)
		return ERR_PTR(-ENOMEM);

	fsc->client = ceph_create_client(opt, fsc);
	if (IS_ERR(fsc->client)) {
		err = PTR_ERR(fsc->client);
		goto fail;
	}
	fsc->client->extra_mon_dispatch = extra_mon_dispatch;

	/*
	 * Without an explicit mds_namespace we can subscribe to the
	 * mdsmap directly; with one we first need the fsmap to resolve
	 * the namespace name.
	 */
	if (!fsopt->mds_namespace) {
		ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
				   0, true);
	} else {
		ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_FSMAP,
				   0, false);
	}

	fsc->mount_options = fsopt;

	fsc->sb = NULL;
	fsc->mount_state = CEPH_MOUNT_MOUNTING;

	atomic_long_set(&fsc->writeback_count, 0);

	err = -ENOMEM;
	/*
	 * The number of concurrent works can be high but they don't need
	 * to be processed in parallel, limit concurrency.
	 */
	fsc->wb_wq = alloc_workqueue("ceph-writeback", 0, 1);
	if (!fsc->wb_wq)
		goto fail_client;
	fsc->pg_inv_wq = alloc_workqueue("ceph-pg-invalid", 0, 1);
	if (!fsc->pg_inv_wq)
		goto fail_wb_wq;
	fsc->trunc_wq = alloc_workqueue("ceph-trunc", 0, 1);
	if (!fsc->trunc_wq)
		goto fail_pg_inv_wq;

	/* set up mempools */
	err = -ENOMEM;
	page_count = fsc->mount_options->wsize >> PAGE_SHIFT;
	size = sizeof (struct page *) * (page_count ? page_count : 1);
	fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size);
	if (!fsc->wb_pagevec_pool)
		goto fail_trunc_wq;

	/* caps */
	fsc->min_caps = fsopt->max_readdir;

	return fsc;

	/* unwind in reverse order of acquisition */
fail_trunc_wq:
	destroy_workqueue(fsc->trunc_wq);
fail_pg_inv_wq:
	destroy_workqueue(fsc->pg_inv_wq);
fail_wb_wq:
	destroy_workqueue(fsc->wb_wq);
fail_client:
	ceph_destroy_client(fsc->client);
fail:
	kfree(fsc);
	return ERR_PTR(err);
}
658
/*
 * Tear down a ceph_fs_client from create_fs_client(): workqueues,
 * mempool, mount options, then the libceph client itself.
 */
static void destroy_fs_client(struct ceph_fs_client *fsc)
{
	dout("destroy_fs_client %p\n", fsc);

	destroy_workqueue(fsc->wb_wq);
	destroy_workqueue(fsc->pg_inv_wq);
	destroy_workqueue(fsc->trunc_wq);

	mempool_destroy(fsc->wb_pagevec_pool);

	destroy_mount_options(fsc->mount_options);

	ceph_destroy_client(fsc->client);

	kfree(fsc);
	/* logs only the (now stale) pointer value, no dereference */
	dout("destroy_fs_client %p done\n", fsc);
}
676
/*
 * caches
 *
 * Slab caches for cephfs' core in-memory objects; created in
 * init_caches() and destroyed in destroy_caches().
 */
struct kmem_cache *ceph_inode_cachep;
struct kmem_cache *ceph_cap_cachep;
struct kmem_cache *ceph_cap_flush_cachep;
struct kmem_cache *ceph_dentry_cachep;
struct kmem_cache *ceph_file_cachep;
685
686 static void ceph_inode_init_once(void *foo)
687 {
688 struct ceph_inode_info *ci = foo;
689 inode_init_once(&ci->vfs_inode);
690 }
691
/*
 * Create all cephfs slab caches and register with fscache.
 * On any failure, caches created so far are destroyed in reverse
 * order via the goto chain.  Returns 0 or a negative errno.
 */
static int __init init_caches(void)
{
	int error = -ENOMEM;

	ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
				      sizeof(struct ceph_inode_info),
				      __alignof__(struct ceph_inode_info),
				      SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
				      SLAB_ACCOUNT, ceph_inode_init_once);
	if (!ceph_inode_cachep)
		return -ENOMEM;

	ceph_cap_cachep = KMEM_CACHE(ceph_cap,
				     SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
	if (!ceph_cap_cachep)
		goto bad_cap;
	ceph_cap_flush_cachep = KMEM_CACHE(ceph_cap_flush,
					   SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
	if (!ceph_cap_flush_cachep)
		goto bad_cap_flush;

	ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
					SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
	if (!ceph_dentry_cachep)
		goto bad_dentry;

	ceph_file_cachep = KMEM_CACHE(ceph_file_info, SLAB_MEM_SPREAD);
	if (!ceph_file_cachep)
		goto bad_file;

	error = ceph_fscache_register();
	if (error)
		goto bad_fscache;

	return 0;

	/* unwind in reverse order of creation */
bad_fscache:
	kmem_cache_destroy(ceph_file_cachep);
bad_file:
	kmem_cache_destroy(ceph_dentry_cachep);
bad_dentry:
	kmem_cache_destroy(ceph_cap_flush_cachep);
bad_cap_flush:
	kmem_cache_destroy(ceph_cap_cachep);
bad_cap:
	kmem_cache_destroy(ceph_inode_cachep);
	return error;
}
740
/* Destroy all slab caches created by init_caches(). */
static void destroy_caches(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();

	kmem_cache_destroy(ceph_inode_cachep);
	kmem_cache_destroy(ceph_cap_cachep);
	kmem_cache_destroy(ceph_cap_flush_cachep);
	kmem_cache_destroy(ceph_dentry_cachep);
	kmem_cache_destroy(ceph_file_cachep);

	ceph_fscache_unregister();
}
757
758
759 /*
760 * ceph_umount_begin - initiate forced umount. Tear down down the
761 * mount, skipping steps that may hang while waiting for server(s).
762 */
763 static void ceph_umount_begin(struct super_block *sb)
764 {
765 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
766
767 dout("ceph_umount_begin - starting forced umount\n");
768 if (!fsc)
769 return;
770 fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
771 ceph_mdsc_force_umount(fsc->mdsc);
772 return;
773 }
774
/* VFS superblock operations; installed on the sb in ceph_set_super() */
static const struct super_operations ceph_super_ops = {
	.alloc_inode	= ceph_alloc_inode,
	.destroy_inode	= ceph_destroy_inode,
	.write_inode    = ceph_write_inode,
	.drop_inode	= ceph_drop_inode,
	.sync_fs        = ceph_sync_fs,
	.put_super	= ceph_put_super,
	.show_options   = ceph_show_options,
	.statfs		= ceph_statfs,
	.umount_begin   = ceph_umount_begin,
};
786
/*
 * Bootstrap mount by opening the root directory.  Note the mount
 * @started time from caller, and time out if this takes too long.
 *
 * Issues a GETATTR on the root (or @path) via the MDS and wraps the
 * resulting inode in a root dentry.  Returns the dentry or ERR_PTR.
 */
static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
				       const char *path,
				       unsigned long started)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req = NULL;
	int err;
	struct dentry *root;

	/* open dir */
	dout("open_root_inode opening '%s'\n", path);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_path1 = kstrdup(path, GFP_NOFS);
	if (!req->r_path1) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}

	req->r_ino1.ino = CEPH_INO_ROOT;
	req->r_ino1.snap = CEPH_NOSNAP;
	req->r_started = started;
	req->r_timeout = fsc->client->options->mount_timeout;
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_num_caps = 2;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (err == 0) {
		struct inode *inode = req->r_target_inode;
		/* steal the inode reference from the request so that
		 * putting the request below won't drop it */
		req->r_target_inode = NULL;
		dout("open_root_inode success\n");
		root = d_make_root(inode);
		if (!root) {
			root = ERR_PTR(-ENOMEM);
			goto out;
		}
		dout("open_root_inode success, root dentry is %p\n", root);
	} else {
		root = ERR_PTR(err);
	}
out:
	ceph_mdsc_put_request(req);
	return root;
}
835
836
837
838
/*
 * mount: join the ceph cluster, and open root directory.
 *
 * Serialized by client->mount_mutex.  If the superblock already has a
 * root (sharing an existing mount) we just take another reference;
 * otherwise open the session, set up fscache/debugfs, and look up the
 * root dentry.  Returns the root dentry or an ERR_PTR.
 */
static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
{
	int err;
	unsigned long started = jiffies;  /* note the start time */
	struct dentry *root;

	dout("mount start %p\n", fsc);
	mutex_lock(&fsc->client->mount_mutex);

	if (!fsc->sb->s_root) {
		const char *path;
		err = __ceph_open_session(fsc->client, started);
		if (err < 0)
			goto out;

		/* setup fscache */
		if (fsc->mount_options->flags & CEPH_MOUNT_OPT_FSCACHE) {
			err = ceph_fscache_register_fs(fsc);
			if (err < 0)
				goto out;
		}

		/* server_path is stored with its leading '/'; skip it */
		if (!fsc->mount_options->server_path) {
			path = "";
			dout("mount opening path \\t\n");
		} else {
			path = fsc->mount_options->server_path + 1;
			dout("mount opening path %s\n", path);
		}

		err = ceph_fs_debugfs_init(fsc);
		if (err < 0)
			goto out;

		root = open_root_dentry(fsc, path, started);
		if (IS_ERR(root)) {
			err = PTR_ERR(root);
			goto out;
		}
		fsc->sb->s_root = dget(root);
	} else {
		root = dget(fsc->sb->s_root);
	}

	fsc->mount_state = CEPH_MOUNT_MOUNTED;
	dout("mount success\n");
	mutex_unlock(&fsc->client->mount_mutex);
	return root;

out:
	mutex_unlock(&fsc->client->mount_mutex);
	return ERR_PTR(err);
}
895
/*
 * Initialize a freshly allocated superblock; @data is the
 * ceph_fs_client passed through sget().  Returns 0 or a negative
 * errno, unwinding the sb <-> fsc linkage on failure.
 */
static int ceph_set_super(struct super_block *s, void *data)
{
	struct ceph_fs_client *fsc = data;
	int ret;

	dout("set_super %p data %p\n", s, data);

	s->s_flags = fsc->mount_options->sb_flags;
	s->s_maxbytes = 1ULL << 40;  /* temp value until we get mdsmap */

	s->s_xattr = ceph_xattr_handlers;
	s->s_fs_info = fsc;
	fsc->sb = s;

	s->s_op = &ceph_super_ops;
	s->s_d_op = &ceph_dentry_ops;
	s->s_export_op = &ceph_export_ops;

	s->s_time_gran = 1000;  /* 1000 ns == 1 us */

	ret = set_anon_super(s, NULL);  /* what is that second arg for? */
	if (ret != 0)
		goto fail;

	return ret;

fail:
	/* undo the linkage established above */
	s->s_fs_info = NULL;
	fsc->sb = NULL;
	return ret;
}
927
/*
 * share superblock if same fs AND options
 *
 * sget() comparison callback: returns 1 if @sb can be reused for the
 * mount described by @data (a candidate ceph_fs_client), 0 otherwise.
 */
static int ceph_compare_super(struct super_block *sb, void *data)
{
	struct ceph_fs_client *new = data;
	struct ceph_mount_options *fsopt = new->mount_options;
	struct ceph_options *opt = new->client->options;
	struct ceph_fs_client *other = ceph_sb_to_client(sb);

	dout("ceph_compare_super %p\n", sb);

	if (compare_mount_options(fsopt, opt, other)) {
		dout("monitor(s)/mount options don't match\n");
		return 0;
	}
	/* an explicitly requested fsid must match the existing cluster */
	if ((opt->flags & CEPH_OPT_FSID) &&
	    ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
		dout("fsid doesn't match\n");
		return 0;
	}
	if (fsopt->sb_flags != other->mount_options->sb_flags) {
		dout("flags differ\n");
		return 0;
	}
	return 1;
}
955
/*
 * construct our own bdi so we can control readahead, etc.
 */

/* monotonically increasing suffix for the per-sb "ceph-%ld" bdi name */
static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
960
/*
 * Create the backing_dev_info for @sb and size its readahead and I/O
 * windows from the rasize/rsize mount options.  Returns 0 or the
 * error from super_setup_bdi_name().
 */
static int ceph_setup_bdi(struct super_block *sb, struct ceph_fs_client *fsc)
{
	int err;

	err = super_setup_bdi_name(sb, "ceph-%ld",
				   atomic_long_inc_return(&bdi_seq));
	if (err)
		return err;

	/* set ra_pages based on rasize mount option? */
	sb->s_bdi->ra_pages = fsc->mount_options->rasize >> PAGE_SHIFT;

	/* set io_pages based on max osd read size */
	sb->s_bdi->io_pages = fsc->mount_options->rsize >> PAGE_SHIFT;

	return 0;
}
978
/*
 * Mount entry point: parse options, create (or find and share) the fs
 * client and superblock, then perform the real mount.  Returns the
 * root dentry or an ERR_PTR; all intermediate state is torn down on
 * every error path.
 */
static struct dentry *ceph_mount(struct file_system_type *fs_type,
				 int flags, const char *dev_name, void *data)
{
	struct super_block *sb;
	struct ceph_fs_client *fsc;
	struct dentry *res;
	int err;
	int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
	struct ceph_mount_options *fsopt = NULL;
	struct ceph_options *opt = NULL;

	dout("ceph_mount\n");

#ifdef CONFIG_CEPH_FS_POSIX_ACL
	flags |= SB_POSIXACL;
#endif
	err = parse_mount_options(&fsopt, &opt, flags, data, dev_name);
	if (err < 0) {
		res = ERR_PTR(err);
		goto out_final;
	}

	/* create client (which we may/may not use) */
	fsc = create_fs_client(fsopt, opt);
	if (IS_ERR(fsc)) {
		res = ERR_CAST(fsc);
		/* create_fs_client didn't take ownership on failure */
		destroy_mount_options(fsopt);
		ceph_destroy_options(opt);
		goto out_final;
	}

	err = ceph_mdsc_init(fsc);
	if (err < 0) {
		res = ERR_PTR(err);
		goto out;
	}

	/* NOSHARE disables superblock sharing entirely */
	if (ceph_test_opt(fsc->client, NOSHARE))
		compare_super = NULL;
	sb = sget(fs_type, compare_super, ceph_set_super, flags, fsc);
	if (IS_ERR(sb)) {
		res = ERR_CAST(sb);
		goto out;
	}

	if (ceph_sb_to_client(sb) != fsc) {
		/* sget() matched an existing sb; drop our candidate */
		ceph_mdsc_destroy(fsc);
		destroy_fs_client(fsc);
		fsc = ceph_sb_to_client(sb);
		dout("get_sb got existing client %p\n", fsc);
	} else {
		dout("get_sb using new client %p\n", fsc);
		err = ceph_setup_bdi(sb, fsc);
		if (err < 0) {
			res = ERR_PTR(err);
			goto out_splat;
		}
	}

	res = ceph_real_mount(fsc);
	if (IS_ERR(res))
		goto out_splat;
	dout("root %p inode %p ino %llx.%llx\n", res,
	     d_inode(res), ceph_vinop(d_inode(res)));
	return res;

out_splat:
	/* sb is attached; killing it tears down fsc via ceph_kill_sb */
	ceph_mdsc_close_sessions(fsc->mdsc);
	deactivate_locked_super(sb);
	goto out_final;

out:
	ceph_mdsc_destroy(fsc);
	destroy_fs_client(fsc);
out_final:
	dout("ceph_mount fail %ld\n", PTR_ERR(res));
	return res;
}
1057
/*
 * Tear down the superblock: flush MDS state, run the generic VFS
 * shutdown, then destroy the fs client and release the anonymous
 * device number (saved before the sb is gone).
 */
static void ceph_kill_sb(struct super_block *s)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(s);
	dev_t dev = s->s_dev;

	dout("kill_sb %p\n", s);

	ceph_mdsc_pre_umount(fsc->mdsc);
	generic_shutdown_super(s);

	fsc->client->extra_mon_dispatch = NULL;
	ceph_fs_debugfs_cleanup(fsc);

	ceph_fscache_unregister_fs(fsc);

	ceph_mdsc_destroy(fsc);

	destroy_fs_client(fsc);
	free_anon_bdev(dev);
}
1078
/* "ceph" filesystem type; mount/unmount entry points for the VFS */
static struct file_system_type ceph_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ceph",
	.mount		= ceph_mount,
	.kill_sb	= ceph_kill_sb,
	/* the filesystem performs d_move itself on rename */
	.fs_flags	= FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("ceph");
1087
/*
 * Module init: create slab caches and register the filesystem type,
 * unwinding the caches if registration fails.
 */
static int __init init_ceph(void)
{
	int ret = init_caches();
	if (ret)
		goto out;

	ceph_flock_init();
	ceph_xattr_init();
	ret = register_filesystem(&ceph_fs_type);
	if (ret)
		goto out_xattr;

	pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);

	return 0;

out_xattr:
	ceph_xattr_exit();
	destroy_caches();
out:
	return ret;
}
1110
/* Module exit: unregister the filesystem before tearing down caches. */
static void __exit exit_ceph(void)
{
	dout("exit_ceph\n");
	unregister_filesystem(&ceph_fs_type);
	ceph_xattr_exit();
	destroy_caches();
}
1118
/* module entry/exit wiring and metadata */
module_init(init_ceph);
module_exit(exit_ceph);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
MODULE_DESCRIPTION("Ceph filesystem for Linux");
MODULE_LICENSE("GPL");