#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/inet.h>
#include <linux/in6.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/parser.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/string.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/decode.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>

/*
 * Ceph superblock operations
 *
 * Handle the basics of mounting, unmounting.
 */

/*
 * super ops
 */
static void ceph_put_super(struct super_block *s)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(s);

        dout("put_super\n");
        ceph_mdsc_close_sessions(fsc->mdsc);

        /*
         * ensure we release the bdi before put_anon_super releases
         * the device name.
         */
        if (s->s_bdi == &fsc->backing_dev_info) {
                bdi_unregister(&fsc->backing_dev_info);
                s->s_bdi = NULL;
        }

        return;
}

static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct ceph_fs_client *fsc = ceph_inode_to_client(dentry->d_inode);
        struct ceph_monmap *monmap = fsc->client->monc.monmap;
        struct ceph_statfs st;
        u64 fsid;
        int err;

        dout("statfs\n");
        err = ceph_monc_do_statfs(&fsc->client->monc, &st);
        if (err < 0)
                return err;

        /* fill in kstatfs */
        buf->f_type = CEPH_SUPER_MAGIC; /* ?? */

        /*
         * express utilization in terms of large blocks to avoid
         * overflow on 32-bit machines.
         */
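        /*
         * st.kb and st.kb_avail are reported in KB; shifting by
         * (CEPH_BLOCK_SHIFT - 10) rescales those counts into units of
         * f_bsize (1 << CEPH_BLOCK_SHIFT bytes).
         */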
        buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
        buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
        buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
        buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);

        buf->f_files = le64_to_cpu(st.num_objects);
        buf->f_ffree = -1;
        buf->f_namelen = NAME_MAX;
        buf->f_frsize = PAGE_CACHE_SIZE;

        /* leave fsid little-endian, regardless of host endianness */
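        /* fold the 16-byte cluster fsid into 64 bits by xor'ing its halves */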
        fsid = *(u64 *)(&monmap->fsid) ^ *((u64 *)&monmap->fsid + 1);
        buf->f_fsid.val[0] = fsid & 0xffffffff;
        buf->f_fsid.val[1] = fsid >> 32;

        return 0;
}

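/*
 * sync_fs: if @wait is zero, just start writeback of dirty caps; otherwise
 * wait for in-flight OSD and MDS operations to be flushed as well.
 */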
static int ceph_sync_fs(struct super_block *sb, int wait)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);

        if (!wait) {
                dout("sync_fs (non-blocking)\n");
                ceph_flush_dirty_caps(fsc->mdsc);
                dout("sync_fs (non-blocking) done\n");
                return 0;
        }

        dout("sync_fs (blocking)\n");
        ceph_osdc_sync(&fsc->client->osdc);
        ceph_mdsc_sync(fsc->mdsc);
        dout("sync_fs (blocking) done\n");
        return 0;
}

/*
 * mount options
 */
enum {
        Opt_wsize,
        Opt_rsize,
        Opt_rasize,
        Opt_caps_wanted_delay_min,
        Opt_caps_wanted_delay_max,
        Opt_cap_release_safety,
        Opt_readdir_max_entries,
        Opt_readdir_max_bytes,
        Opt_congestion_kb,
        Opt_last_int,
        /* int args above */
        Opt_snapdirname,
        Opt_last_string,
        /* string args above */
        Opt_dirstat,
        Opt_nodirstat,
        Opt_rbytes,
        Opt_norbytes,
        Opt_noasyncreaddir,
        Opt_ino32,
};

static match_table_t fsopt_tokens = {
        {Opt_wsize, "wsize=%d"},
        {Opt_rsize, "rsize=%d"},
        {Opt_rasize, "rasize=%d"},
        {Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
        {Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
        {Opt_cap_release_safety, "cap_release_safety=%d"},
        {Opt_readdir_max_entries, "readdir_max_entries=%d"},
        {Opt_readdir_max_bytes, "readdir_max_bytes=%d"},
        {Opt_congestion_kb, "write_congestion_kb=%d"},
        /* int args above */
        {Opt_snapdirname, "snapdirname=%s"},
        /* string args above */
        {Opt_dirstat, "dirstat"},
        {Opt_nodirstat, "nodirstat"},
        {Opt_rbytes, "rbytes"},
        {Opt_norbytes, "norbytes"},
        {Opt_noasyncreaddir, "noasyncreaddir"},
        {Opt_ino32, "ino32"},
        {-1, NULL}
};

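/*
 * Tokens below Opt_last_int take an integer argument and tokens between
 * Opt_last_int and Opt_last_string take a string argument;
 * parse_fsopt_token() relies on that ordering of the enum above.
 */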
static int parse_fsopt_token(char *c, void *private)
{
        struct ceph_mount_options *fsopt = private;
        substring_t argstr[MAX_OPT_ARGS];
        int token, intval, ret;

        token = match_token((char *)c, fsopt_tokens, argstr);
        if (token < 0)
                return -EINVAL;

        if (token < Opt_last_int) {
                ret = match_int(&argstr[0], &intval);
                if (ret < 0) {
                        pr_err("bad mount option arg (not int) "
                               "at '%s'\n", c);
                        return ret;
                }
                dout("got int token %d val %d\n", token, intval);
        } else if (token > Opt_last_int && token < Opt_last_string) {
                dout("got string token %d val %s\n", token,
                     argstr[0].from);
        } else {
                dout("got token %d\n", token);
        }

        switch (token) {
        case Opt_snapdirname:
                kfree(fsopt->snapdir_name);
                fsopt->snapdir_name = kstrndup(argstr[0].from,
                                               argstr[0].to-argstr[0].from,
                                               GFP_KERNEL);
                if (!fsopt->snapdir_name)
                        return -ENOMEM;
                break;

                /* misc */
        case Opt_wsize:
                fsopt->wsize = intval;
                break;
        case Opt_rsize:
                fsopt->rsize = intval;
                break;
        case Opt_rasize:
                fsopt->rasize = intval;
                break;
        case Opt_caps_wanted_delay_min:
                fsopt->caps_wanted_delay_min = intval;
                break;
        case Opt_caps_wanted_delay_max:
                fsopt->caps_wanted_delay_max = intval;
                break;
        case Opt_readdir_max_entries:
                fsopt->max_readdir = intval;
                break;
        case Opt_readdir_max_bytes:
                fsopt->max_readdir_bytes = intval;
                break;
        case Opt_congestion_kb:
                fsopt->congestion_kb = intval;
                break;
        case Opt_dirstat:
                fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
                break;
        case Opt_nodirstat:
                fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
                break;
        case Opt_rbytes:
                fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
                break;
        case Opt_norbytes:
                fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
                break;
        case Opt_noasyncreaddir:
                fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
                break;
        case Opt_ino32:
                fsopt->flags |= CEPH_MOUNT_OPT_INO32;
                break;
        default:
                BUG_ON(token);
        }
        return 0;
}

static void destroy_mount_options(struct ceph_mount_options *args)
{
        dout("destroy_mount_options %p\n", args);
        kfree(args->snapdir_name);
        kfree(args);
}

static int strcmp_null(const char *s1, const char *s2)
{
        if (!s1 && !s2)
                return 0;
        if (s1 && !s2)
                return -1;
        if (!s1 && s2)
                return 1;
        return strcmp(s1, s2);
}

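/*
 * Compare two sets of mount options: memcmp() covers the flat fields that
 * precede snapdir_name in struct ceph_mount_options; the snapdir name and
 * the generic libceph options are then compared separately.
 */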
static int compare_mount_options(struct ceph_mount_options *new_fsopt,
                                 struct ceph_options *new_opt,
                                 struct ceph_fs_client *fsc)
{
        struct ceph_mount_options *fsopt1 = new_fsopt;
        struct ceph_mount_options *fsopt2 = fsc->mount_options;
        int ofs = offsetof(struct ceph_mount_options, snapdir_name);
        int ret;

        ret = memcmp(fsopt1, fsopt2, ofs);
        if (ret)
                return ret;

        ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
        if (ret)
                return ret;

        return ceph_compare_options(new_opt, fsc->client);
}

static int parse_mount_options(struct ceph_mount_options **pfsopt,
                               struct ceph_options **popt,
                               int flags, char *options,
                               const char *dev_name,
                               const char **path)
{
        struct ceph_mount_options *fsopt;
        const char *dev_name_end;
        int err = -ENOMEM;

        fsopt = kzalloc(sizeof(*fsopt), GFP_KERNEL);
        if (!fsopt)
                return -ENOMEM;

        dout("parse_mount_options %p, dev_name '%s'\n", fsopt, dev_name);

        fsopt->sb_flags = flags;
        fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;

        fsopt->rsize = CEPH_RSIZE_DEFAULT;
        fsopt->rasize = CEPH_RASIZE_DEFAULT;
        fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
        fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
        fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
        fsopt->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT;
        fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
        fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
        fsopt->congestion_kb = default_congestion_kb();

        /* ip1[:port1][,ip2[:port2]...]:/subdir/in/fs */
        err = -EINVAL;
        if (!dev_name)
                goto out;
        *path = strstr(dev_name, ":/");
        if (*path == NULL) {
                pr_err("device name is missing path (no :/ in %s)\n",
                       dev_name);
                goto out;
        }
        dev_name_end = *path;
        dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);

        /* path on server */
        *path += 2;
        dout("server path '%s'\n", *path);

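        /*
         * ceph_parse_options() consumes the monitor address list (the part
         * of dev_name before dev_name_end) and the generic libceph options;
         * options it does not recognize are handed to parse_fsopt_token().
         */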
        err = ceph_parse_options(popt, options, dev_name, dev_name_end,
                                 parse_fsopt_token, (void *)fsopt);
        if (err)
                goto out;

        /* success */
        *pfsopt = fsopt;
        return 0;

out:
        destroy_mount_options(fsopt);
        return err;
}

/**
 * ceph_show_options - Show mount options in /proc/mounts
 * @m: seq_file to write to
 * @mnt: mount descriptor
 */
static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(mnt->mnt_sb);
        struct ceph_mount_options *fsopt = fsc->mount_options;
        struct ceph_options *opt = fsc->client->options;

        if (opt->flags & CEPH_OPT_FSID)
                seq_printf(m, ",fsid=%pU", &opt->fsid);
        if (opt->flags & CEPH_OPT_NOSHARE)
                seq_puts(m, ",noshare");
        if (opt->flags & CEPH_OPT_NOCRC)
                seq_puts(m, ",nocrc");

        if (opt->name)
                seq_printf(m, ",name=%s", opt->name);
        if (opt->key)
                seq_puts(m, ",secret=<hidden>");

        if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
                seq_printf(m, ",mount_timeout=%d", opt->mount_timeout);
        if (opt->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT)
                seq_printf(m, ",osd_idle_ttl=%d", opt->osd_idle_ttl);
        if (opt->osd_timeout != CEPH_OSD_TIMEOUT_DEFAULT)
                seq_printf(m, ",osdtimeout=%d", opt->osd_timeout);
        if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
                seq_printf(m, ",osdkeepalivetimeout=%d",
                           opt->osd_keepalive_timeout);

        if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT)
                seq_puts(m, ",dirstat");
        if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES) == 0)
                seq_puts(m, ",norbytes");
        if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR)
                seq_puts(m, ",noasyncreaddir");

        if (fsopt->wsize)
                seq_printf(m, ",wsize=%d", fsopt->wsize);
        if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
                seq_printf(m, ",rsize=%d", fsopt->rsize);
        if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
                seq_printf(m, ",rasize=%d", fsopt->rasize);
        if (fsopt->congestion_kb != default_congestion_kb())
                seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
        if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
                seq_printf(m, ",caps_wanted_delay_min=%d",
                           fsopt->caps_wanted_delay_min);
        if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
                seq_printf(m, ",caps_wanted_delay_max=%d",
                           fsopt->caps_wanted_delay_max);
        if (fsopt->cap_release_safety != CEPH_CAP_RELEASE_SAFETY_DEFAULT)
                seq_printf(m, ",cap_release_safety=%d",
                           fsopt->cap_release_safety);
        if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
                seq_printf(m, ",readdir_max_entries=%d", fsopt->max_readdir);
        if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
                seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
        if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
                seq_printf(m, ",snapdirname=%s", fsopt->snapdir_name);
        return 0;
}

/*
 * handle any mon messages the standard library doesn't understand.
 * return error if we don't either.
 */
static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
{
        struct ceph_fs_client *fsc = client->private;
        int type = le16_to_cpu(msg->hdr.type);

        switch (type) {
        case CEPH_MSG_MDS_MAP:
                ceph_mdsc_handle_map(fsc->mdsc, msg);
                return 0;

        default:
                return -1;
        }
}

/*
 * create a new fs client
 */
static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
                                               struct ceph_options *opt)
{
        struct ceph_fs_client *fsc;
        const unsigned supported_features =
                CEPH_FEATURE_FLOCK |
                CEPH_FEATURE_DIRLAYOUTHASH;
        const unsigned required_features = 0;
        int err = -ENOMEM;

        fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
        if (!fsc)
                return ERR_PTR(-ENOMEM);

        fsc->client = ceph_create_client(opt, fsc, supported_features,
                                         required_features);
        if (IS_ERR(fsc->client)) {
                err = PTR_ERR(fsc->client);
                goto fail;
        }
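        /*
         * Hook in our monitor dispatch callback so MDS map messages reach
         * this fs client, and ask the monitors for mdsmap updates.
         */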
        fsc->client->extra_mon_dispatch = extra_mon_dispatch;
        fsc->client->monc.want_mdsmap = 1;

        fsc->mount_options = fsopt;

        fsc->sb = NULL;
        fsc->mount_state = CEPH_MOUNT_MOUNTING;

        atomic_long_set(&fsc->writeback_count, 0);

        err = bdi_init(&fsc->backing_dev_info);
        if (err < 0)
                goto fail_client;

        err = -ENOMEM;
        /*
         * The number of concurrent works can be high but they don't need
         * to be processed in parallel, limit concurrency.
         */
        fsc->wb_wq = alloc_workqueue("ceph-writeback", 0, 1);
        if (fsc->wb_wq == NULL)
                goto fail_bdi;
        fsc->pg_inv_wq = alloc_workqueue("ceph-pg-invalid", 0, 1);
        if (fsc->pg_inv_wq == NULL)
                goto fail_wb_wq;
        fsc->trunc_wq = alloc_workqueue("ceph-trunc", 0, 1);
        if (fsc->trunc_wq == NULL)
                goto fail_pg_inv_wq;

        /* set up mempools */
        err = -ENOMEM;
        fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10,
                fsc->mount_options->wsize >> PAGE_CACHE_SHIFT);
        if (!fsc->wb_pagevec_pool)
                goto fail_trunc_wq;

        /* caps */
        fsc->min_caps = fsopt->max_readdir;

        return fsc;

fail_trunc_wq:
        destroy_workqueue(fsc->trunc_wq);
fail_pg_inv_wq:
        destroy_workqueue(fsc->pg_inv_wq);
fail_wb_wq:
        destroy_workqueue(fsc->wb_wq);
fail_bdi:
        bdi_destroy(&fsc->backing_dev_info);
fail_client:
        ceph_destroy_client(fsc->client);
fail:
        kfree(fsc);
        return ERR_PTR(err);
}

static void destroy_fs_client(struct ceph_fs_client *fsc)
{
        dout("destroy_fs_client %p\n", fsc);

        destroy_workqueue(fsc->wb_wq);
        destroy_workqueue(fsc->pg_inv_wq);
        destroy_workqueue(fsc->trunc_wq);

        bdi_destroy(&fsc->backing_dev_info);

        mempool_destroy(fsc->wb_pagevec_pool);

        destroy_mount_options(fsc->mount_options);

        ceph_fs_debugfs_cleanup(fsc);

        ceph_destroy_client(fsc->client);

        kfree(fsc);
        dout("destroy_fs_client %p done\n", fsc);
}

/*
 * caches
 */
struct kmem_cache *ceph_inode_cachep;
struct kmem_cache *ceph_cap_cachep;
struct kmem_cache *ceph_dentry_cachep;
struct kmem_cache *ceph_file_cachep;

static void ceph_inode_init_once(void *foo)
{
        struct ceph_inode_info *ci = foo;
        inode_init_once(&ci->vfs_inode);
}

static int __init init_caches(void)
{
        ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
                                      sizeof(struct ceph_inode_info),
                                      __alignof__(struct ceph_inode_info),
                                      (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
                                      ceph_inode_init_once);
        if (ceph_inode_cachep == NULL)
                return -ENOMEM;

        ceph_cap_cachep = KMEM_CACHE(ceph_cap,
                                     SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
        if (ceph_cap_cachep == NULL)
                goto bad_cap;

        ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
                                        SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
        if (ceph_dentry_cachep == NULL)
                goto bad_dentry;

        ceph_file_cachep = KMEM_CACHE(ceph_file_info,
                                      SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
        if (ceph_file_cachep == NULL)
                goto bad_file;

        return 0;

bad_file:
        kmem_cache_destroy(ceph_dentry_cachep);
bad_dentry:
        kmem_cache_destroy(ceph_cap_cachep);
bad_cap:
        kmem_cache_destroy(ceph_inode_cachep);
        return -ENOMEM;
}

static void destroy_caches(void)
{
        kmem_cache_destroy(ceph_inode_cachep);
        kmem_cache_destroy(ceph_cap_cachep);
        kmem_cache_destroy(ceph_dentry_cachep);
        kmem_cache_destroy(ceph_file_cachep);
}

/*
 * ceph_umount_begin - initiate forced umount.  Tear down the mount,
 * skipping steps that may hang while waiting for server(s).
 */
static void ceph_umount_begin(struct super_block *sb)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);

        dout("ceph_umount_begin - starting forced umount\n");
        if (!fsc)
                return;
        fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
        return;
}

static const struct super_operations ceph_super_ops = {
        .alloc_inode    = ceph_alloc_inode,
        .destroy_inode  = ceph_destroy_inode,
        .write_inode    = ceph_write_inode,
        .sync_fs        = ceph_sync_fs,
        .put_super      = ceph_put_super,
        .show_options   = ceph_show_options,
        .statfs         = ceph_statfs,
        .umount_begin   = ceph_umount_begin,
};

/*
 * Bootstrap mount by opening the root directory.  Note the mount
 * @started time from caller, and time out if this takes too long.
 */
static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
                                       const char *path,
                                       unsigned long started)
{
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req = NULL;
        int err;
        struct dentry *root;

        /* open dir */
        dout("open_root_inode opening '%s'\n", path);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
        if (IS_ERR(req))
                return ERR_CAST(req);
        req->r_path1 = kstrdup(path, GFP_NOFS);
        req->r_ino1.ino = CEPH_INO_ROOT;
        req->r_ino1.snap = CEPH_NOSNAP;
        req->r_started = started;
        req->r_timeout = fsc->client->options->mount_timeout * HZ;
        req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
        req->r_num_caps = 2;
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        if (err == 0) {
                dout("open_root_inode success\n");
                if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT &&
                    fsc->sb->s_root == NULL)
                        root = d_alloc_root(req->r_target_inode);
                else
                        root = d_obtain_alias(req->r_target_inode);
                ceph_init_dentry(root);
                req->r_target_inode = NULL;
                dout("open_root_inode success, root dentry is %p\n", root);
        } else {
                root = ERR_PTR(err);
        }
        ceph_mdsc_put_request(req);
        return root;
}


/*
 * mount: join the ceph cluster, and open root directory.
 */
static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
                                      const char *path)
{
        int err;
        unsigned long started = jiffies;  /* note the start time */
        struct dentry *root;
        int first = 0;   /* first vfsmount for this super_block */

        dout("mount start\n");
        mutex_lock(&fsc->client->mount_mutex);

        err = __ceph_open_session(fsc->client, started);
        if (err < 0)
                goto out;

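        /*
         * Open the real root of the filesystem first; if a subpath was given
         * in the device name it is opened in a second step further down.
         */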
        dout("mount opening root\n");
        root = open_root_dentry(fsc, "", started);
        if (IS_ERR(root)) {
                err = PTR_ERR(root);
                goto out;
        }
        if (fsc->sb->s_root) {
                dput(root);
        } else {
                fsc->sb->s_root = root;
                first = 1;

                err = ceph_fs_debugfs_init(fsc);
                if (err < 0)
                        goto fail;
        }

        if (path[0] == 0) {
                dget(root);
        } else {
                dout("mount opening base mountpoint\n");
                root = open_root_dentry(fsc, path, started);
                if (IS_ERR(root)) {
                        err = PTR_ERR(root);
                        goto fail;
                }
        }

        fsc->mount_state = CEPH_MOUNT_MOUNTED;
        dout("mount success\n");
        mutex_unlock(&fsc->client->mount_mutex);
        return root;

out:
        mutex_unlock(&fsc->client->mount_mutex);
        return ERR_PTR(err);

fail:
        if (first) {
                dput(fsc->sb->s_root);
                fsc->sb->s_root = NULL;
        }
        goto out;
}

static int ceph_set_super(struct super_block *s, void *data)
{
        struct ceph_fs_client *fsc = data;
        int ret;

        dout("set_super %p data %p\n", s, data);

        s->s_flags = fsc->mount_options->sb_flags;
        s->s_maxbytes = 1ULL << 40;  /* temp value until we get mdsmap */

        s->s_fs_info = fsc;
        fsc->sb = s;

        s->s_op = &ceph_super_ops;
        s->s_export_op = &ceph_export_ops;

        s->s_time_gran = 1000;  /* 1000 ns == 1 us */

        ret = set_anon_super(s, NULL);  /* what is that second arg for? */
        if (ret != 0)
                goto fail;

        return ret;

fail:
        s->s_fs_info = NULL;
        fsc->sb = NULL;
        return ret;
}

/*
 * share superblock if same fs AND options
 */
static int ceph_compare_super(struct super_block *sb, void *data)
{
        struct ceph_fs_client *new = data;
        struct ceph_mount_options *fsopt = new->mount_options;
        struct ceph_options *opt = new->client->options;
        struct ceph_fs_client *other = ceph_sb_to_client(sb);

        dout("ceph_compare_super %p\n", sb);

        if (compare_mount_options(fsopt, opt, other)) {
                dout("monitor(s)/mount options don't match\n");
                return 0;
        }
        if ((opt->flags & CEPH_OPT_FSID) &&
            ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
                dout("fsid doesn't match\n");
                return 0;
        }
        if (fsopt->sb_flags != other->mount_options->sb_flags) {
                dout("flags differ\n");
                return 0;
        }
        return 1;
}

/*
 * construct our own bdi so we can control readahead, etc.
 */
static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

static int ceph_register_bdi(struct super_block *sb,
                             struct ceph_fs_client *fsc)
{
        int err;

        /* set ra_pages based on rasize mount option? */
        if (fsc->mount_options->rasize >= PAGE_CACHE_SIZE)
                fsc->backing_dev_info.ra_pages =
                        (fsc->mount_options->rasize + PAGE_CACHE_SIZE - 1)
                        >> PAGE_SHIFT;
        else
                fsc->backing_dev_info.ra_pages =
                        default_backing_dev_info.ra_pages;

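        /*
         * each client gets a unique "ceph-%d" bdi name, numbered from the
         * global bdi_seq counter
         */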
        err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%d",
                           atomic_long_inc_return(&bdi_seq));
        if (!err)
                sb->s_bdi = &fsc->backing_dev_info;
        return err;
}

static struct dentry *ceph_mount(struct file_system_type *fs_type,
                       int flags, const char *dev_name, void *data)
{
        struct super_block *sb;
        struct ceph_fs_client *fsc;
        struct dentry *res;
        int err;
        int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
        const char *path = NULL;
        struct ceph_mount_options *fsopt = NULL;
        struct ceph_options *opt = NULL;

        dout("ceph_mount\n");
        err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path);
        if (err < 0) {
                res = ERR_PTR(err);
                goto out_final;
        }

        /* create client (which we may/may not use) */
        fsc = create_fs_client(fsopt, opt);
        if (IS_ERR(fsc)) {
                res = ERR_CAST(fsc);
                destroy_mount_options(fsopt);
                ceph_destroy_options(opt);
                goto out_final;
        }

        err = ceph_mdsc_init(fsc);
        if (err < 0) {
                res = ERR_PTR(err);
                goto out;
        }

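        /*
         * With "noshare" there is no comparison function, so sget() always
         * allocates a fresh superblock; otherwise an existing superblock
         * with matching fsid/options is reused and the fsc created above is
         * torn down again below.
         */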
        if (ceph_test_opt(fsc->client, NOSHARE))
                compare_super = NULL;
        sb = sget(fs_type, compare_super, ceph_set_super, fsc);
        if (IS_ERR(sb)) {
                res = ERR_CAST(sb);
                goto out;
        }

        if (ceph_sb_to_client(sb) != fsc) {
                ceph_mdsc_destroy(fsc);
                destroy_fs_client(fsc);
                fsc = ceph_sb_to_client(sb);
                dout("get_sb got existing client %p\n", fsc);
        } else {
                dout("get_sb using new client %p\n", fsc);
                err = ceph_register_bdi(sb, fsc);
                if (err < 0) {
                        res = ERR_PTR(err);
                        goto out_splat;
                }
        }

        res = ceph_real_mount(fsc, path);
        if (IS_ERR(res))
                goto out_splat;
        dout("root %p inode %p ino %llx.%llx\n", res,
             res->d_inode, ceph_vinop(res->d_inode));
        return res;

out_splat:
        ceph_mdsc_close_sessions(fsc->mdsc);
        deactivate_locked_super(sb);
        goto out_final;

out:
        ceph_mdsc_destroy(fsc);
        destroy_fs_client(fsc);
out_final:
        dout("ceph_mount fail %ld\n", PTR_ERR(res));
        return res;
}

static void ceph_kill_sb(struct super_block *s)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(s);
        dout("kill_sb %p\n", s);
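        /*
         * stop in-flight MDS work before kill_anon_super() makes the sb
         * read-only and ends up calling ceph_put_super()
         */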
        ceph_mdsc_pre_umount(fsc->mdsc);
        kill_anon_super(s);    /* will call put_super after sb is r/o */
        ceph_mdsc_destroy(fsc);
        destroy_fs_client(fsc);
}

static struct file_system_type ceph_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "ceph",
        .mount          = ceph_mount,
        .kill_sb        = ceph_kill_sb,
        .fs_flags       = FS_RENAME_DOES_D_MOVE,
};

#define _STRINGIFY(x) #x
#define STRINGIFY(x) _STRINGIFY(x)

static int __init init_ceph(void)
{
        int ret = init_caches();
        if (ret)
                goto out;

        ret = register_filesystem(&ceph_fs_type);
        if (ret)
                goto out_icache;

        pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);

        return 0;

out_icache:
        destroy_caches();
out:
        return ret;
}

static void __exit exit_ceph(void)
{
        dout("exit_ceph\n");
        unregister_filesystem(&ceph_fs_type);
        destroy_caches();
}

module_init(init_ceph);
module_exit(exit_ceph);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
MODULE_DESCRIPTION("Ceph filesystem for Linux");
MODULE_LICENSE("GPL");