#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/inet.h>
#include <linux/in6.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/parser.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/string.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>

/*
 * Ceph superblock operations
 *
 * Handle the basics of mounting, unmounting.
 */

/*
 * super ops
 */
static void ceph_put_super(struct super_block *s)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(s);

	dout("put_super\n");
	ceph_mdsc_close_sessions(fsc->mdsc);
}

static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry));
	struct ceph_mon_client *monc = &fsc->client->monc;
	struct ceph_statfs st;
	u64 fsid;
	int err;
	u64 data_pool;

	if (fsc->mdsc->mdsmap->m_num_data_pg_pools == 1) {
		data_pool = fsc->mdsc->mdsmap->m_data_pg_pools[0];
	} else {
		data_pool = CEPH_NOPOOL;
	}

	dout("statfs\n");
	err = ceph_monc_do_statfs(monc, data_pool, &st);
	if (err < 0)
		return err;

	/* fill in kstatfs */
	buf->f_type = CEPH_SUPER_MAGIC;  /* ?? */

	/*
	 * express utilization in terms of large blocks to avoid
	 * overflow on 32-bit machines.
	 *
	 * NOTE: for the time being, we make bsize == frsize to humor
	 * not-yet-ancient versions of glibc that are broken.
	 * Someday, we will probably want to report a real block
	 * size... whatever that may mean for a network file system!
	 */
	buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
	buf->f_frsize = 1 << CEPH_BLOCK_SHIFT;
	buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
	buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
	buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);

	buf->f_files = le64_to_cpu(st.num_objects);
	buf->f_ffree = -1;
	buf->f_namelen = NAME_MAX;

	/* Must convert the fsid, for consistent values across arches */
	mutex_lock(&monc->mutex);
	fsid = le64_to_cpu(*(__le64 *)(&monc->monmap->fsid)) ^
	       le64_to_cpu(*((__le64 *)&monc->monmap->fsid + 1));
	mutex_unlock(&monc->mutex);

	buf->f_fsid.val[0] = fsid & 0xffffffff;
	buf->f_fsid.val[1] = fsid >> 32;

	return 0;
}
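
/*
 * Editor's note: a minimal userspace sketch (not kernel code) of how the
 * values filled in above appear through statvfs(3); the mountpoint
 * "/mnt/ceph" is hypothetical.  With CEPH_BLOCK_SHIFT == 22 the reported
 * block size is 4 MiB, so a cluster with 1 TiB of space shows up as
 * 1 TiB / 4 MiB = 262144 in f_blocks.
 *
 *	#include <stdio.h>
 *	#include <sys/statvfs.h>
 *
 *	int main(void)
 *	{
 *		struct statvfs vfs;
 *
 *		if (statvfs("/mnt/ceph", &vfs) != 0)	// hypothetical mountpoint
 *			return 1;
 *		// f_bsize and f_frsize both mirror 1 << CEPH_BLOCK_SHIFT
 *		printf("bsize %lu, blocks %llu, free %llu\n",
 *		       vfs.f_bsize,
 *		       (unsigned long long)vfs.f_blocks,
 *		       (unsigned long long)vfs.f_bfree);
 *		return 0;
 *	}
 */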

static int ceph_sync_fs(struct super_block *sb, int wait)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);

	if (!wait) {
		dout("sync_fs (non-blocking)\n");
		ceph_flush_dirty_caps(fsc->mdsc);
		dout("sync_fs (non-blocking) done\n");
		return 0;
	}

	dout("sync_fs (blocking)\n");
	ceph_osdc_sync(&fsc->client->osdc);
	ceph_mdsc_sync(fsc->mdsc);
	dout("sync_fs (blocking) done\n");
	return 0;
}
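
/*
 * Editor's note: a minimal userspace sketch (not kernel code) showing how
 * the blocking path above (wait != 0) is reached via syncfs(2); the
 * mountpoint "/mnt/ceph" is hypothetical.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int flush_ceph_mount(void)
 *	{
 *		int fd = open("/mnt/ceph", O_RDONLY | O_DIRECTORY);	// hypothetical mountpoint
 *		int ret;
 *
 *		if (fd < 0)
 *			return -1;
 *		ret = syncfs(fd);	// ends up calling ->sync_fs with wait == 1
 *		close(fd);
 *		return ret;
 *	}
 */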

/*
 * mount options
 */
enum {
	Opt_wsize,
	Opt_rsize,
	Opt_rasize,
	Opt_caps_wanted_delay_min,
	Opt_caps_wanted_delay_max,
	Opt_readdir_max_entries,
	Opt_readdir_max_bytes,
	Opt_congestion_kb,
	Opt_last_int,
	/* int args above */
	Opt_snapdirname,
	Opt_mds_namespace,
	Opt_fscache_uniq,
	Opt_last_string,
	/* string args above */
	Opt_dirstat,
	Opt_nodirstat,
	Opt_rbytes,
	Opt_norbytes,
	Opt_asyncreaddir,
	Opt_noasyncreaddir,
	Opt_dcache,
	Opt_nodcache,
	Opt_ino32,
	Opt_noino32,
	Opt_fscache,
	Opt_nofscache,
	Opt_poolperm,
	Opt_nopoolperm,
	Opt_require_active_mds,
	Opt_norequire_active_mds,
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	Opt_acl,
#endif
	Opt_noacl,
};

static match_table_t fsopt_tokens = {
	{Opt_wsize, "wsize=%d"},
	{Opt_rsize, "rsize=%d"},
	{Opt_rasize, "rasize=%d"},
	{Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
	{Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
	{Opt_readdir_max_entries, "readdir_max_entries=%d"},
	{Opt_readdir_max_bytes, "readdir_max_bytes=%d"},
	{Opt_congestion_kb, "write_congestion_kb=%d"},
	/* int args above */
	{Opt_snapdirname, "snapdirname=%s"},
	{Opt_mds_namespace, "mds_namespace=%s"},
	{Opt_fscache_uniq, "fsc=%s"},
	/* string args above */
	{Opt_dirstat, "dirstat"},
	{Opt_nodirstat, "nodirstat"},
	{Opt_rbytes, "rbytes"},
	{Opt_norbytes, "norbytes"},
	{Opt_asyncreaddir, "asyncreaddir"},
	{Opt_noasyncreaddir, "noasyncreaddir"},
	{Opt_dcache, "dcache"},
	{Opt_nodcache, "nodcache"},
	{Opt_ino32, "ino32"},
	{Opt_noino32, "noino32"},
	{Opt_fscache, "fsc"},
	{Opt_nofscache, "nofsc"},
	{Opt_poolperm, "poolperm"},
	{Opt_nopoolperm, "nopoolperm"},
	{Opt_require_active_mds, "require_active_mds"},
	{Opt_norequire_active_mds, "norequire_active_mds"},
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	{Opt_acl, "acl"},
#endif
	{Opt_noacl, "noacl"},
	{-1, NULL}
};
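
/*
 * Editor's note: a worked example of the table above, with hypothetical
 * values.  A mount data string such as
 *
 *	"rsize=4194304,nodcache,fsc=myfs,mds_namespace=cephfs_a"
 *
 * is split on commas by ceph_parse_options() in libceph; each piece that
 * libceph does not recognize itself is handed to parse_fsopt_token()
 * below, where match_token() maps it against fsopt_tokens: Opt_rsize with
 * int argument 4194304, Opt_nodcache, Opt_fscache_uniq with string
 * "myfs", and Opt_mds_namespace with string "cephfs_a".
 */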

static int parse_fsopt_token(char *c, void *private)
{
	struct ceph_mount_options *fsopt = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token((char *)c, fsopt_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_snapdirname:
		kfree(fsopt->snapdir_name);
		fsopt->snapdir_name = kstrndup(argstr[0].from,
					       argstr[0].to-argstr[0].from,
					       GFP_KERNEL);
		if (!fsopt->snapdir_name)
			return -ENOMEM;
		break;
	case Opt_mds_namespace:
		kfree(fsopt->mds_namespace);
		fsopt->mds_namespace = kstrndup(argstr[0].from,
						argstr[0].to-argstr[0].from,
						GFP_KERNEL);
		if (!fsopt->mds_namespace)
			return -ENOMEM;
		break;
	case Opt_fscache_uniq:
#ifdef CONFIG_CEPH_FSCACHE
		kfree(fsopt->fscache_uniq);
		fsopt->fscache_uniq = kstrndup(argstr[0].from,
					       argstr[0].to-argstr[0].from,
					       GFP_KERNEL);
		if (!fsopt->fscache_uniq)
			return -ENOMEM;
		fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
		break;
#else
		pr_err("fscache support is disabled\n");
		return -EINVAL;
#endif
	case Opt_wsize:
		if (intval < (int)PAGE_SIZE || intval > CEPH_MAX_WRITE_SIZE)
			return -EINVAL;
		fsopt->wsize = ALIGN(intval, PAGE_SIZE);
		break;
	case Opt_rsize:
		if (intval < (int)PAGE_SIZE || intval > CEPH_MAX_READ_SIZE)
			return -EINVAL;
		fsopt->rsize = ALIGN(intval, PAGE_SIZE);
		break;
	case Opt_rasize:
		if (intval < 0)
			return -EINVAL;
		fsopt->rasize = ALIGN(intval, PAGE_SIZE);
		break;
	case Opt_caps_wanted_delay_min:
		if (intval < 1)
			return -EINVAL;
		fsopt->caps_wanted_delay_min = intval;
		break;
	case Opt_caps_wanted_delay_max:
		if (intval < 1)
			return -EINVAL;
		fsopt->caps_wanted_delay_max = intval;
		break;
	case Opt_readdir_max_entries:
		if (intval < 1)
			return -EINVAL;
		fsopt->max_readdir = intval;
		break;
	case Opt_readdir_max_bytes:
		if (intval < (int)PAGE_SIZE && intval != 0)
			return -EINVAL;
		fsopt->max_readdir_bytes = intval;
		break;
	case Opt_congestion_kb:
		if (intval < 1024) /* at least 1M */
			return -EINVAL;
		fsopt->congestion_kb = intval;
		break;
	case Opt_dirstat:
		fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
		break;
	case Opt_nodirstat:
		fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
		break;
	case Opt_rbytes:
		fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
		break;
	case Opt_norbytes:
		fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
		break;
	case Opt_asyncreaddir:
		fsopt->flags &= ~CEPH_MOUNT_OPT_NOASYNCREADDIR;
		break;
	case Opt_noasyncreaddir:
		fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
		break;
	case Opt_dcache:
		fsopt->flags |= CEPH_MOUNT_OPT_DCACHE;
		break;
	case Opt_nodcache:
		fsopt->flags &= ~CEPH_MOUNT_OPT_DCACHE;
		break;
	case Opt_ino32:
		fsopt->flags |= CEPH_MOUNT_OPT_INO32;
		break;
	case Opt_noino32:
		fsopt->flags &= ~CEPH_MOUNT_OPT_INO32;
		break;
	case Opt_fscache:
#ifdef CONFIG_CEPH_FSCACHE
		fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
		break;
#else
		pr_err("fscache support is disabled\n");
		return -EINVAL;
#endif
	case Opt_nofscache:
		fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
		break;
	case Opt_poolperm:
		fsopt->flags &= ~CEPH_MOUNT_OPT_NOPOOLPERM;
		break;
	case Opt_nopoolperm:
		fsopt->flags |= CEPH_MOUNT_OPT_NOPOOLPERM;
		break;
	case Opt_require_active_mds:
		fsopt->flags &= ~CEPH_MOUNT_OPT_MOUNTWAIT;
		break;
	case Opt_norequire_active_mds:
		fsopt->flags |= CEPH_MOUNT_OPT_MOUNTWAIT;
		break;
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	case Opt_acl:
		fsopt->sb_flags |= SB_POSIXACL;
		break;
#endif
	case Opt_noacl:
		fsopt->sb_flags &= ~SB_POSIXACL;
		break;
	default:
		BUG_ON(token);
	}
	return 0;
}

static void destroy_mount_options(struct ceph_mount_options *args)
{
	dout("destroy_mount_options %p\n", args);
	kfree(args->snapdir_name);
	kfree(args->mds_namespace);
	kfree(args->server_path);
	kfree(args->fscache_uniq);
	kfree(args);
}

static int strcmp_null(const char *s1, const char *s2)
{
	if (!s1 && !s2)
		return 0;
	if (s1 && !s2)
		return -1;
	if (!s1 && s2)
		return 1;
	return strcmp(s1, s2);
}

static int compare_mount_options(struct ceph_mount_options *new_fsopt,
				 struct ceph_options *new_opt,
				 struct ceph_fs_client *fsc)
{
	struct ceph_mount_options *fsopt1 = new_fsopt;
	struct ceph_mount_options *fsopt2 = fsc->mount_options;
	int ofs = offsetof(struct ceph_mount_options, snapdir_name);
	int ret;

	ret = memcmp(fsopt1, fsopt2, ofs);
	if (ret)
		return ret;

	ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
	if (ret)
		return ret;
	ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace);
	if (ret)
		return ret;
	ret = strcmp_null(fsopt1->server_path, fsopt2->server_path);
	if (ret)
		return ret;
	ret = strcmp_null(fsopt1->fscache_uniq, fsopt2->fscache_uniq);
	if (ret)
		return ret;

	return ceph_compare_options(new_opt, fsc->client);
}
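
/*
 * Editor's note: the memcmp() above relies on the field order of
 * struct ceph_mount_options in super.h, where every plain integer option
 * precedes the first string pointer, roughly (sketched from memory,
 * illustrative only):
 *
 *	struct ceph_mount_options {
 *		int flags;
 *		int sb_flags;
 *		int wsize, rsize, rasize;
 *		...			// remaining int knobs
 *		char *snapdir_name;	// first pointer member
 *		...
 *	};
 *
 * so the bytes up to offsetof(..., snapdir_name) cover all scalar options
 * in one comparison, and only the string members need the NULL-tolerant
 * strcmp_null() calls above.
 */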

static int parse_mount_options(struct ceph_mount_options **pfsopt,
			       struct ceph_options **popt,
			       int flags, char *options,
			       const char *dev_name)
{
	struct ceph_mount_options *fsopt;
	const char *dev_name_end;
	int err;

	if (!dev_name || !*dev_name)
		return -EINVAL;

	fsopt = kzalloc(sizeof(*fsopt), GFP_KERNEL);
	if (!fsopt)
		return -ENOMEM;

	dout("parse_mount_options %p, dev_name '%s'\n", fsopt, dev_name);

	fsopt->sb_flags = flags;
	fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;

	fsopt->wsize = CEPH_MAX_WRITE_SIZE;
	fsopt->rsize = CEPH_MAX_READ_SIZE;
	fsopt->rasize = CEPH_RASIZE_DEFAULT;
	fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
	if (!fsopt->snapdir_name) {
		err = -ENOMEM;
		goto out;
	}

	fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
	fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
	fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
	fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
	fsopt->congestion_kb = default_congestion_kb();

	/*
	 * Distinguish the server list from the path in "dev_name".
	 * Internally we do not include the leading '/' in the path.
	 *
	 * "dev_name" will look like:
	 *     <server_spec>[,<server_spec>...]:[<path>]
	 * where
	 *     <server_spec> is <ip>[:<port>]
	 *     <path> is optional, but if present must begin with '/'
	 */
	dev_name_end = strchr(dev_name, '/');
	if (dev_name_end) {
		if (strlen(dev_name_end) > 1) {
			fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
			if (!fsopt->server_path) {
				err = -ENOMEM;
				goto out;
			}
		}
	} else {
		dev_name_end = dev_name + strlen(dev_name);
	}
	err = -EINVAL;
	dev_name_end--;		/* back up to ':' separator */
	if (dev_name_end < dev_name || *dev_name_end != ':') {
		pr_err("device name is missing path (no : separator in %s)\n",
		       dev_name);
		goto out;
	}
	dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
	if (fsopt->server_path)
		dout("server path '%s'\n", fsopt->server_path);

	*popt = ceph_parse_options(options, dev_name, dev_name_end,
				   parse_fsopt_token, (void *)fsopt);
	if (IS_ERR(*popt)) {
		err = PTR_ERR(*popt);
		goto out;
	}

	/* success */
	*pfsopt = fsopt;
	return 0;

out:
	destroy_mount_options(fsopt);
	return err;
}
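
/*
 * Editor's note: a worked example of the dev_name split above, with
 * hypothetical addresses.  Given
 *
 *	dev_name = "192.168.0.1:6789,192.168.0.2:6789:/backups"
 *
 * strchr() finds the '/' of "/backups", so fsopt->server_path becomes
 * "/backups"; after backing up over the ':' separator, the monitor list
 * passed to ceph_parse_options() is "192.168.0.1:6789,192.168.0.2:6789".
 * A bare "mon:6789:/" leaves server_path NULL, and ceph_real_mount()
 * later opens the default root path "".
 */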

/**
 * ceph_show_options - Show mount options in /proc/mounts
 * @m: seq_file to write to
 * @root: root of that (sub)tree
 */
static int ceph_show_options(struct seq_file *m, struct dentry *root)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb);
	struct ceph_mount_options *fsopt = fsc->mount_options;
	size_t pos;
	int ret;

	/* a comma between MNT/MS and client options */
	seq_putc(m, ',');
	pos = m->count;

	ret = ceph_print_client_options(m, fsc->client);
	if (ret)
		return ret;

	/* retract our comma if no client options */
	if (m->count == pos)
		m->count--;

	if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT)
		seq_puts(m, ",dirstat");
	if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES))
		seq_puts(m, ",rbytes");
	if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR)
		seq_puts(m, ",noasyncreaddir");
	if ((fsopt->flags & CEPH_MOUNT_OPT_DCACHE) == 0)
		seq_puts(m, ",nodcache");
	if (fsopt->flags & CEPH_MOUNT_OPT_FSCACHE) {
		if (fsopt->fscache_uniq)
			seq_printf(m, ",fsc=%s", fsopt->fscache_uniq);
		else
			seq_puts(m, ",fsc");
	}
	if (fsopt->flags & CEPH_MOUNT_OPT_NOPOOLPERM)
		seq_puts(m, ",nopoolperm");

#ifdef CONFIG_CEPH_FS_POSIX_ACL
	if (fsopt->sb_flags & SB_POSIXACL)
		seq_puts(m, ",acl");
	else
		seq_puts(m, ",noacl");
#endif

	if (fsopt->mds_namespace)
		seq_printf(m, ",mds_namespace=%s", fsopt->mds_namespace);
	if (fsopt->wsize)
		seq_printf(m, ",wsize=%d", fsopt->wsize);
	if (fsopt->rsize != CEPH_MAX_READ_SIZE)
		seq_printf(m, ",rsize=%d", fsopt->rsize);
	if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
		seq_printf(m, ",rasize=%d", fsopt->rasize);
	if (fsopt->congestion_kb != default_congestion_kb())
		seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
	if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
		seq_printf(m, ",caps_wanted_delay_min=%d",
			   fsopt->caps_wanted_delay_min);
	if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
		seq_printf(m, ",caps_wanted_delay_max=%d",
			   fsopt->caps_wanted_delay_max);
	if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
		seq_printf(m, ",readdir_max_entries=%d", fsopt->max_readdir);
	if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
		seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
	if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
		seq_show_option(m, "snapdirname", fsopt->snapdir_name);

	return 0;
}
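
/*
 * Editor's note: an illustrative (hypothetical) /proc/mounts line for a
 * mount made with "-o name=admin,rsize=4194304,noasyncreaddir" might look
 * roughly like
 *
 *	192.168.0.1:6789:/ /mnt/ceph ceph rw,relatime,name=admin,noasyncreaddir,acl,rsize=4194304 0 0
 *
 * where "name=admin" comes from ceph_print_client_options() and the rest
 * from the checks above; exact contents and ordering depend on the kernel
 * configuration and client options, so treat this purely as an example.
 */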

/*
 * handle any mon messages the standard library doesn't understand.
 * return error if we don't either.
 */
static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
{
	struct ceph_fs_client *fsc = client->private;
	int type = le16_to_cpu(msg->hdr.type);

	switch (type) {
	case CEPH_MSG_MDS_MAP:
		ceph_mdsc_handle_mdsmap(fsc->mdsc, msg);
		return 0;
	case CEPH_MSG_FS_MAP_USER:
		ceph_mdsc_handle_fsmap(fsc->mdsc, msg);
		return 0;
	default:
		return -1;
	}
}

/*
 * create a new fs client
 *
 * Success or not, this function consumes @fsopt and @opt.
 */
static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
					       struct ceph_options *opt)
{
	struct ceph_fs_client *fsc;
	int page_count;
	size_t size;
	int err;

	fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
	if (!fsc) {
		err = -ENOMEM;
		goto fail;
	}

	fsc->client = ceph_create_client(opt, fsc);
	if (IS_ERR(fsc->client)) {
		err = PTR_ERR(fsc->client);
		goto fail;
	}
	opt = NULL; /* fsc->client now owns this */

	fsc->client->extra_mon_dispatch = extra_mon_dispatch;

	if (!fsopt->mds_namespace) {
		ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
				   0, true);
	} else {
		ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_FSMAP,
				   0, false);
	}

	fsc->mount_options = fsopt;

	fsc->sb = NULL;
	fsc->mount_state = CEPH_MOUNT_MOUNTING;

	atomic_long_set(&fsc->writeback_count, 0);

	err = -ENOMEM;
	/*
	 * The number of concurrent works can be high but they don't need
	 * to be processed in parallel, limit concurrency.
	 */
	fsc->inode_wq = alloc_workqueue("ceph-inode", WQ_UNBOUND, 0);
	if (!fsc->inode_wq)
		goto fail_client;
	fsc->cap_wq = alloc_workqueue("ceph-cap", 0, 1);
	if (!fsc->cap_wq)
		goto fail_inode_wq;

	/* set up mempools */
	err = -ENOMEM;
	page_count = fsc->mount_options->wsize >> PAGE_SHIFT;
	size = sizeof (struct page *) * (page_count ? page_count : 1);
	fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size);
	if (!fsc->wb_pagevec_pool)
		goto fail_cap_wq;

	/* caps */
	fsc->min_caps = fsopt->max_readdir;

	return fsc;

fail_cap_wq:
	destroy_workqueue(fsc->cap_wq);
fail_inode_wq:
	destroy_workqueue(fsc->inode_wq);
fail_client:
	ceph_destroy_client(fsc->client);
fail:
	kfree(fsc);
	if (opt)
		ceph_destroy_options(opt);
	destroy_mount_options(fsopt);
	return ERR_PTR(err);
}
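
/*
 * Editor's note on the mempool sizing above, assuming 4 KiB pages, a
 * 64-bit kernel, and the default wsize of CEPH_MAX_WRITE_SIZE (16 MiB in
 * this tree's super.h, quoted from memory):
 *
 *	page_count = 16 MiB >> PAGE_SHIFT         = 4096 pages
 *	size       = 4096 * sizeof(struct page *) = 32 KiB per element
 *
 * so the pool of 10 preallocated elements reserves roughly 320 KiB for
 * the page vectors used by writeback when regular allocations fail.
 */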

static void flush_fs_workqueues(struct ceph_fs_client *fsc)
{
	flush_workqueue(fsc->inode_wq);
	flush_workqueue(fsc->cap_wq);
}

static void destroy_fs_client(struct ceph_fs_client *fsc)
{
	dout("destroy_fs_client %p\n", fsc);

	destroy_workqueue(fsc->inode_wq);
	destroy_workqueue(fsc->cap_wq);

	mempool_destroy(fsc->wb_pagevec_pool);

	destroy_mount_options(fsc->mount_options);

	ceph_destroy_client(fsc->client);

	kfree(fsc);
	dout("destroy_fs_client %p done\n", fsc);
}

/*
 * caches
 */
struct kmem_cache *ceph_inode_cachep;
struct kmem_cache *ceph_cap_cachep;
struct kmem_cache *ceph_cap_flush_cachep;
struct kmem_cache *ceph_dentry_cachep;
struct kmem_cache *ceph_file_cachep;

static void ceph_inode_init_once(void *foo)
{
	struct ceph_inode_info *ci = foo;
	inode_init_once(&ci->vfs_inode);
}

static int __init init_caches(void)
{
	int error = -ENOMEM;

	ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
				      sizeof(struct ceph_inode_info),
				      __alignof__(struct ceph_inode_info),
				      SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
				      SLAB_ACCOUNT, ceph_inode_init_once);
	if (!ceph_inode_cachep)
		return -ENOMEM;

	ceph_cap_cachep = KMEM_CACHE(ceph_cap,
				     SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
	if (!ceph_cap_cachep)
		goto bad_cap;
	ceph_cap_flush_cachep = KMEM_CACHE(ceph_cap_flush,
					   SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
	if (!ceph_cap_flush_cachep)
		goto bad_cap_flush;

	ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
					SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
	if (!ceph_dentry_cachep)
		goto bad_dentry;

	ceph_file_cachep = KMEM_CACHE(ceph_file_info, SLAB_MEM_SPREAD);
	if (!ceph_file_cachep)
		goto bad_file;

	error = ceph_fscache_register();
	if (error)
		goto bad_fscache;

	return 0;

bad_fscache:
	kmem_cache_destroy(ceph_file_cachep);
bad_file:
	kmem_cache_destroy(ceph_dentry_cachep);
bad_dentry:
	kmem_cache_destroy(ceph_cap_flush_cachep);
bad_cap_flush:
	kmem_cache_destroy(ceph_cap_cachep);
bad_cap:
	kmem_cache_destroy(ceph_inode_cachep);
	return error;
}
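
/*
 * Editor's note: KMEM_CACHE() above is the standard <linux/slab.h>
 * helper; it expands to roughly
 *
 *	kmem_cache_create(#name, sizeof(struct name),
 *			  __alignof__(struct name), flags, NULL)
 *
 * i.e. the same call that is spelled out by hand for ceph_inode_cachep,
 * which cannot use the macro because it also passes the
 * ceph_inode_init_once() constructor.
 */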

static void destroy_caches(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();

	kmem_cache_destroy(ceph_inode_cachep);
	kmem_cache_destroy(ceph_cap_cachep);
	kmem_cache_destroy(ceph_cap_flush_cachep);
	kmem_cache_destroy(ceph_dentry_cachep);
	kmem_cache_destroy(ceph_file_cachep);

	ceph_fscache_unregister();
}

/*
 * ceph_umount_begin - initiate forced umount.  Tear down the
 * mount, skipping steps that may hang while waiting for server(s).
 */
static void ceph_umount_begin(struct super_block *sb)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);

	dout("ceph_umount_begin - starting forced umount\n");
	if (!fsc)
		return;
	fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
	ceph_mdsc_force_umount(fsc->mdsc);
	return;
}

static int ceph_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);
	return 0;
}

static const struct super_operations ceph_super_ops = {
	.alloc_inode	= ceph_alloc_inode,
	.destroy_inode	= ceph_destroy_inode,
	.write_inode	= ceph_write_inode,
	.drop_inode	= ceph_drop_inode,
	.evict_inode	= ceph_evict_inode,
	.sync_fs	= ceph_sync_fs,
	.put_super	= ceph_put_super,
	.remount_fs	= ceph_remount,
	.show_options	= ceph_show_options,
	.statfs		= ceph_statfs,
	.umount_begin	= ceph_umount_begin,
};

/*
 * Bootstrap mount by opening the root directory.  Note the mount
 * @started time from caller, and time out if this takes too long.
 */
static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
				       const char *path,
				       unsigned long started)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req = NULL;
	int err;
	struct dentry *root;

	/* open dir */
	dout("open_root_inode opening '%s'\n", path);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_path1 = kstrdup(path, GFP_NOFS);
	if (!req->r_path1) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}

	req->r_ino1.ino = CEPH_INO_ROOT;
	req->r_ino1.snap = CEPH_NOSNAP;
	req->r_started = started;
	req->r_timeout = fsc->client->options->mount_timeout;
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_num_caps = 2;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (err == 0) {
		struct inode *inode = req->r_target_inode;
		req->r_target_inode = NULL;
		dout("open_root_inode success\n");
		root = d_make_root(inode);
		if (!root) {
			root = ERR_PTR(-ENOMEM);
			goto out;
		}
		dout("open_root_inode success, root dentry is %p\n", root);
	} else {
		root = ERR_PTR(err);
	}
out:
	ceph_mdsc_put_request(req);
	return root;
}

/*
 * mount: join the ceph cluster, and open root directory.
 */
static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
{
	int err;
	unsigned long started = jiffies;  /* note the start time */
	struct dentry *root;

	dout("mount start %p\n", fsc);
	mutex_lock(&fsc->client->mount_mutex);

	if (!fsc->sb->s_root) {
		const char *path;
		err = __ceph_open_session(fsc->client, started);
		if (err < 0)
			goto out;

		/* setup fscache */
		if (fsc->mount_options->flags & CEPH_MOUNT_OPT_FSCACHE) {
			err = ceph_fscache_register_fs(fsc);
			if (err < 0)
				goto out;
		}

		if (!fsc->mount_options->server_path) {
			path = "";
			dout("mount opening path \\t\n");
		} else {
			path = fsc->mount_options->server_path + 1;
			dout("mount opening path %s\n", path);
		}

		err = ceph_fs_debugfs_init(fsc);
		if (err < 0)
			goto out;

		root = open_root_dentry(fsc, path, started);
		if (IS_ERR(root)) {
			err = PTR_ERR(root);
			goto out;
		}
		fsc->sb->s_root = dget(root);
	} else {
		root = dget(fsc->sb->s_root);
	}

	fsc->mount_state = CEPH_MOUNT_MOUNTED;
	dout("mount success\n");
	mutex_unlock(&fsc->client->mount_mutex);
	return root;

out:
	mutex_unlock(&fsc->client->mount_mutex);
	return ERR_PTR(err);
}

static int ceph_set_super(struct super_block *s, void *data)
{
	struct ceph_fs_client *fsc = data;
	int ret;

	dout("set_super %p data %p\n", s, data);

	s->s_flags = fsc->mount_options->sb_flags;
	s->s_maxbytes = 1ULL << 40;  /* temp value until we get mdsmap */

	s->s_xattr = ceph_xattr_handlers;
	s->s_fs_info = fsc;
	fsc->sb = s;

	s->s_op = &ceph_super_ops;
	s->s_d_op = &ceph_dentry_ops;
	s->s_export_op = &ceph_export_ops;

	s->s_time_gran = 1000;  /* 1000 ns == 1 us */

	ret = set_anon_super(s, NULL);  /* what is that second arg for? */
	if (ret != 0)
		goto fail;

	return ret;

fail:
	s->s_fs_info = NULL;
	fsc->sb = NULL;
	return ret;
}

/*
 * share superblock if same fs AND options
 */
static int ceph_compare_super(struct super_block *sb, void *data)
{
	struct ceph_fs_client *new = data;
	struct ceph_mount_options *fsopt = new->mount_options;
	struct ceph_options *opt = new->client->options;
	struct ceph_fs_client *other = ceph_sb_to_client(sb);

	dout("ceph_compare_super %p\n", sb);

	if (compare_mount_options(fsopt, opt, other)) {
		dout("monitor(s)/mount options don't match\n");
		return 0;
	}
	if ((opt->flags & CEPH_OPT_FSID) &&
	    ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
		dout("fsid doesn't match\n");
		return 0;
	}
	if (fsopt->sb_flags != other->mount_options->sb_flags) {
		dout("flags differ\n");
		return 0;
	}
	return 1;
}

/*
 * construct our own bdi so we can control readahead, etc.
 */
static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

static int ceph_setup_bdi(struct super_block *sb, struct ceph_fs_client *fsc)
{
	int err;

	err = super_setup_bdi_name(sb, "ceph-%ld",
				   atomic_long_inc_return(&bdi_seq));
	if (err)
		return err;

	/* set ra_pages based on rasize mount option? */
	sb->s_bdi->ra_pages = fsc->mount_options->rasize >> PAGE_SHIFT;

	/* set io_pages based on max osd read size */
	sb->s_bdi->io_pages = fsc->mount_options->rsize >> PAGE_SHIFT;

	return 0;
}
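
/*
 * Editor's note: with 4 KiB pages, the default rasize of
 * CEPH_RASIZE_DEFAULT (8 MiB in this tree's super.h, quoted from memory)
 * gives sb->s_bdi->ra_pages = 8 MiB >> 12 = 2048 pages, while a mount
 * with "-o rasize=4194304" would halve that to 1024.
 */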

static struct dentry *ceph_mount(struct file_system_type *fs_type,
				 int flags, const char *dev_name, void *data)
{
	struct super_block *sb;
	struct ceph_fs_client *fsc;
	struct dentry *res;
	int err;
	int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
	struct ceph_mount_options *fsopt = NULL;
	struct ceph_options *opt = NULL;

	dout("ceph_mount\n");

#ifdef CONFIG_CEPH_FS_POSIX_ACL
	flags |= SB_POSIXACL;
#endif
	err = parse_mount_options(&fsopt, &opt, flags, data, dev_name);
	if (err < 0) {
		res = ERR_PTR(err);
		goto out_final;
	}

	/* create client (which we may/may not use) */
	fsc = create_fs_client(fsopt, opt);
	if (IS_ERR(fsc)) {
		res = ERR_CAST(fsc);
		goto out_final;
	}

	err = ceph_mdsc_init(fsc);
	if (err < 0) {
		res = ERR_PTR(err);
		goto out;
	}

	if (ceph_test_opt(fsc->client, NOSHARE))
		compare_super = NULL;
	sb = sget(fs_type, compare_super, ceph_set_super, flags, fsc);
	if (IS_ERR(sb)) {
		res = ERR_CAST(sb);
		goto out;
	}

	if (ceph_sb_to_client(sb) != fsc) {
		ceph_mdsc_destroy(fsc);
		destroy_fs_client(fsc);
		fsc = ceph_sb_to_client(sb);
		dout("get_sb got existing client %p\n", fsc);
	} else {
		dout("get_sb using new client %p\n", fsc);
		err = ceph_setup_bdi(sb, fsc);
		if (err < 0) {
			res = ERR_PTR(err);
			goto out_splat;
		}
	}

	res = ceph_real_mount(fsc);
	if (IS_ERR(res))
		goto out_splat;
	dout("root %p inode %p ino %llx.%llx\n", res,
	     d_inode(res), ceph_vinop(d_inode(res)));
	return res;

out_splat:
	ceph_mdsc_close_sessions(fsc->mdsc);
	deactivate_locked_super(sb);
	goto out_final;

out:
	ceph_mdsc_destroy(fsc);
	destroy_fs_client(fsc);
out_final:
	dout("ceph_mount fail %ld\n", PTR_ERR(res));
	return res;
}
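
/*
 * Editor's note: a minimal userspace sketch (not kernel code) of reaching
 * the mount entry point above; monitor address, mountpoint and secret are
 * hypothetical.  The equivalent command line would be roughly
 * "mount -t ceph 192.168.0.1:6789:/ /mnt/ceph -o name=admin,secret=...".
 *
 *	#include <stdio.h>
 *	#include <sys/mount.h>
 *
 *	int main(void)
 *	{
 *		// hypothetical credentials and options
 *		const char *opts = "name=admin,secret=EXAMPLEKEY,rsize=4194304";
 *
 *		// hypothetical monitor address and mountpoint
 *		if (mount("192.168.0.1:6789:/", "/mnt/ceph", "ceph", 0, opts)) {
 *			perror("mount");
 *			return 1;
 *		}
 *		return 0;
 *	}
 */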

static void ceph_kill_sb(struct super_block *s)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(s);
	dev_t dev = s->s_dev;

	dout("kill_sb %p\n", s);

	ceph_mdsc_pre_umount(fsc->mdsc);
	flush_fs_workqueues(fsc);

	generic_shutdown_super(s);

	fsc->client->extra_mon_dispatch = NULL;
	ceph_fs_debugfs_cleanup(fsc);

	ceph_fscache_unregister_fs(fsc);

	ceph_mdsc_destroy(fsc);

	destroy_fs_client(fsc);
	free_anon_bdev(dev);
}

static struct file_system_type ceph_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ceph",
	.mount		= ceph_mount,
	.kill_sb	= ceph_kill_sb,
	.fs_flags	= FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("ceph");

static int __init init_ceph(void)
{
	int ret = init_caches();
	if (ret)
		goto out;

	ceph_flock_init();
	ceph_xattr_init();
	ret = register_filesystem(&ceph_fs_type);
	if (ret)
		goto out_xattr;

	pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);

	return 0;

out_xattr:
	ceph_xattr_exit();
	destroy_caches();
out:
	return ret;
}

static void __exit exit_ceph(void)
{
	dout("exit_ceph\n");
	unregister_filesystem(&ceph_fs_type);
	ceph_xattr_exit();
	destroy_caches();
}

module_init(init_ceph);
module_exit(exit_ceph);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
MODULE_DESCRIPTION("Ceph filesystem for Linux");
MODULE_LICENSE("GPL");