/* fs/ocfs2/dlmfs/dlmfs.c */
1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * dlmfs.c
5 *
6 * Code which implements the kernel side of a minimal userspace
7 * interface to our DLM. This file handles the virtual file system
8 * used for communication with userspace. Credit should go to ramfs,
9 * which was a template for the fs side of this module.
10 *
11 * Copyright (C) 2003, 2004 Oracle. All rights reserved.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public
15 * License as published by the Free Software Foundation; either
16 * version 2 of the License, or (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public
24 * License along with this program; if not, write to the
25 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
26 * Boston, MA 021110-1307, USA.
27 */
28
29 /* Simple VFS hooks based on: */
30 /*
31 * Resizable simple ram filesystem for Linux.
32 *
33 * Copyright (C) 2000 Linus Torvalds.
34 * 2000 Transmeta Corp.
35 */
36
37 #include <linux/module.h>
38 #include <linux/fs.h>
39 #include <linux/pagemap.h>
40 #include <linux/types.h>
41 #include <linux/slab.h>
42 #include <linux/highmem.h>
43 #include <linux/init.h>
44 #include <linux/string.h>
45 #include <linux/backing-dev.h>
46 #include <linux/poll.h>
47
48 #include <asm/uaccess.h>
49
50 #include "stackglue.h"
51 #include "userdlm.h"
52 #include "dlmfsver.h"
53
54 #define MLOG_MASK_PREFIX ML_DLMFS
55 #include "cluster/masklog.h"
56
57
58 static const struct super_operations dlmfs_ops;
59 static const struct file_operations dlmfs_file_operations;
60 static const struct inode_operations dlmfs_dir_inode_operations;
61 static const struct inode_operations dlmfs_root_inode_operations;
62 static const struct inode_operations dlmfs_file_inode_operations;
63 static struct kmem_cache *dlmfs_inode_cache;
64
65 struct workqueue_struct *user_dlm_worker;
66
67
68
69 /*
70 * These are the ABI capabilities of dlmfs.
71 *
72 * Over time, dlmfs has added some features that were not part of the
73 * initial ABI. Unfortunately, some of these features are not detectable
74 * via standard usage. For example, Linux's default poll always returns
75 * POLLIN, so there is no way for a caller of poll(2) to know when dlmfs
76 * added poll support. Instead, we provide this list of new capabilities.
77 *
78 * Capabilities is a read-only attribute. We do it as a module parameter
79 * so we can discover it whether dlmfs is built in, loaded, or even not
80 * loaded.
81 *
82 * The ABI features are local to this machine's dlmfs mount. This is
83 * distinct from the locking protocol, which is concerned with inter-node
84 * interaction.
85 *
86 * Capabilities:
87 * - bast : POLLIN against the file descriptor of a held lock
88 * signifies a bast fired on the lock.
89 */
90 #define DLMFS_CAPABILITIES "bast stackglue"
91 extern int param_set_dlmfs_capabilities(const char *val,
92 struct kernel_param *kp)
93 {
94 printk(KERN_ERR "%s: readonly parameter\n", kp->name);
95 return -EINVAL;
96 }
97 static int param_get_dlmfs_capabilities(char *buffer,
98 struct kernel_param *kp)
99 {
100 return strlcpy(buffer, DLMFS_CAPABILITIES,
101 strlen(DLMFS_CAPABILITIES) + 1);
102 }
103 module_param_call(capabilities, param_set_dlmfs_capabilities,
104 param_get_dlmfs_capabilities, NULL, 0444);
105 MODULE_PARM_DESC(capabilities, DLMFS_CAPABILITIES);
106
107
108 /*
109 * decodes a set of open flags into a valid lock level and a set of flags.
110 * returns < 0 if we have invalid flags
111 * flags which mean something to us:
112 * O_RDONLY -> PRMODE level
113 * O_WRONLY -> EXMODE level
114 *
115 * O_NONBLOCK -> NOQUEUE
116 */
117 static int dlmfs_decode_open_flags(int open_flags,
118 int *level,
119 int *flags)
120 {
121 if (open_flags & (O_WRONLY|O_RDWR))
122 *level = DLM_LOCK_EX;
123 else
124 *level = DLM_LOCK_PR;
125
126 *flags = 0;
127 if (open_flags & O_NONBLOCK)
128 *flags |= DLM_LKF_NOQUEUE;
129
130 return 0;
131 }
132
133 static int dlmfs_file_open(struct inode *inode,
134 struct file *file)
135 {
136 int status, level, flags;
137 struct dlmfs_filp_private *fp = NULL;
138 struct dlmfs_inode_private *ip;
139
140 if (S_ISDIR(inode->i_mode))
141 BUG();
142
143 mlog(0, "open called on inode %lu, flags 0x%x\n", inode->i_ino,
144 file->f_flags);
145
146 status = dlmfs_decode_open_flags(file->f_flags, &level, &flags);
147 if (status < 0)
148 goto bail;
149
150 /* We don't want to honor O_APPEND at read/write time as it
151 * doesn't make sense for LVB writes. */
152 file->f_flags &= ~O_APPEND;
153
154 fp = kmalloc(sizeof(*fp), GFP_NOFS);
155 if (!fp) {
156 status = -ENOMEM;
157 goto bail;
158 }
159 fp->fp_lock_level = level;
160
161 ip = DLMFS_I(inode);
162
163 status = user_dlm_cluster_lock(&ip->ip_lockres, level, flags);
164 if (status < 0) {
165 /* this is a strange error to return here but I want
166 * to be able userspace to be able to distinguish a
167 * valid lock request from one that simply couldn't be
168 * granted. */
169 if (flags & DLM_LKF_NOQUEUE && status == -EAGAIN)
170 status = -ETXTBSY;
171 kfree(fp);
172 goto bail;
173 }
174
175 file->private_data = fp;
176 bail:
177 return status;
178 }
179
180 static int dlmfs_file_release(struct inode *inode,
181 struct file *file)
182 {
183 int level, status;
184 struct dlmfs_inode_private *ip = DLMFS_I(inode);
185 struct dlmfs_filp_private *fp =
186 (struct dlmfs_filp_private *) file->private_data;
187
188 if (S_ISDIR(inode->i_mode))
189 BUG();
190
191 mlog(0, "close called on inode %lu\n", inode->i_ino);
192
193 status = 0;
194 if (fp) {
195 level = fp->fp_lock_level;
196 if (level != DLM_LOCK_IV)
197 user_dlm_cluster_unlock(&ip->ip_lockres, level);
198
199 kfree(fp);
200 file->private_data = NULL;
201 }
202
203 return 0;
204 }
205
206 /*
207 * We do ->setattr() just to override size changes. Our size is the size
208 * of the LVB and nothing else.
209 */
210 static int dlmfs_file_setattr(struct dentry *dentry, struct iattr *attr)
211 {
212 int error;
213 struct inode *inode = dentry->d_inode;
214
215 attr->ia_valid &= ~ATTR_SIZE;
216 error = inode_change_ok(inode, attr);
217 if (error)
218 return error;
219
220 setattr_copy(inode, attr);
221 mark_inode_dirty(inode);
222 return 0;
223 }
224
225 static unsigned int dlmfs_file_poll(struct file *file, poll_table *wait)
226 {
227 int event = 0;
228 struct inode *inode = file->f_path.dentry->d_inode;
229 struct dlmfs_inode_private *ip = DLMFS_I(inode);
230
231 poll_wait(file, &ip->ip_lockres.l_event, wait);
232
233 spin_lock(&ip->ip_lockres.l_lock);
234 if (ip->ip_lockres.l_flags & USER_LOCK_BLOCKED)
235 event = POLLIN | POLLRDNORM;
236 spin_unlock(&ip->ip_lockres.l_lock);
237
238 return event;
239 }
240
241 static ssize_t dlmfs_file_read(struct file *filp,
242 char __user *buf,
243 size_t count,
244 loff_t *ppos)
245 {
246 int bytes_left;
247 ssize_t readlen, got;
248 char *lvb_buf;
249 struct inode *inode = filp->f_path.dentry->d_inode;
250
251 mlog(0, "inode %lu, count = %zu, *ppos = %llu\n",
252 inode->i_ino, count, *ppos);
253
254 if (*ppos >= i_size_read(inode))
255 return 0;
256
257 if (!count)
258 return 0;
259
260 if (!access_ok(VERIFY_WRITE, buf, count))
261 return -EFAULT;
262
263 /* don't read past the lvb */
264 if ((count + *ppos) > i_size_read(inode))
265 readlen = i_size_read(inode) - *ppos;
266 else
267 readlen = count;
268
269 lvb_buf = kmalloc(readlen, GFP_NOFS);
270 if (!lvb_buf)
271 return -ENOMEM;
272
273 got = user_dlm_read_lvb(inode, lvb_buf, readlen);
274 if (got) {
275 BUG_ON(got != readlen);
276 bytes_left = __copy_to_user(buf, lvb_buf, readlen);
277 readlen -= bytes_left;
278 } else
279 readlen = 0;
280
281 kfree(lvb_buf);
282
283 *ppos = *ppos + readlen;
284
285 mlog(0, "read %zd bytes\n", readlen);
286 return readlen;
287 }
288
289 static ssize_t dlmfs_file_write(struct file *filp,
290 const char __user *buf,
291 size_t count,
292 loff_t *ppos)
293 {
294 int bytes_left;
295 ssize_t writelen;
296 char *lvb_buf;
297 struct inode *inode = filp->f_path.dentry->d_inode;
298
299 mlog(0, "inode %lu, count = %zu, *ppos = %llu\n",
300 inode->i_ino, count, *ppos);
301
302 if (*ppos >= i_size_read(inode))
303 return -ENOSPC;
304
305 if (!count)
306 return 0;
307
308 if (!access_ok(VERIFY_READ, buf, count))
309 return -EFAULT;
310
311 /* don't write past the lvb */
312 if ((count + *ppos) > i_size_read(inode))
313 writelen = i_size_read(inode) - *ppos;
314 else
315 writelen = count - *ppos;
316
317 lvb_buf = kmalloc(writelen, GFP_NOFS);
318 if (!lvb_buf)
319 return -ENOMEM;
320
321 bytes_left = copy_from_user(lvb_buf, buf, writelen);
322 writelen -= bytes_left;
323 if (writelen)
324 user_dlm_write_lvb(inode, lvb_buf, writelen);
325
326 kfree(lvb_buf);
327
328 *ppos = *ppos + writelen;
329 mlog(0, "wrote %zd bytes\n", writelen);
330 return writelen;
331 }
332
333 static void dlmfs_init_once(void *foo)
334 {
335 struct dlmfs_inode_private *ip =
336 (struct dlmfs_inode_private *) foo;
337
338 ip->ip_conn = NULL;
339 ip->ip_parent = NULL;
340
341 inode_init_once(&ip->ip_vfs_inode);
342 }
343
344 static struct inode *dlmfs_alloc_inode(struct super_block *sb)
345 {
346 struct dlmfs_inode_private *ip;
347
348 ip = kmem_cache_alloc(dlmfs_inode_cache, GFP_NOFS);
349 if (!ip)
350 return NULL;
351
352 return &ip->ip_vfs_inode;
353 }
354
355 static void dlmfs_destroy_inode(struct inode *inode)
356 {
357 kmem_cache_free(dlmfs_inode_cache, DLMFS_I(inode));
358 }
359
360 static void dlmfs_evict_inode(struct inode *inode)
361 {
362 int status;
363 struct dlmfs_inode_private *ip;
364
365 end_writeback(inode);
366
367 mlog(0, "inode %lu\n", inode->i_ino);
368
369 ip = DLMFS_I(inode);
370
371 if (S_ISREG(inode->i_mode)) {
372 status = user_dlm_destroy_lock(&ip->ip_lockres);
373 if (status < 0)
374 mlog_errno(status);
375 iput(ip->ip_parent);
376 goto clear_fields;
377 }
378
379 mlog(0, "we're a directory, ip->ip_conn = 0x%p\n", ip->ip_conn);
380 /* we must be a directory. If required, lets unregister the
381 * dlm context now. */
382 if (ip->ip_conn)
383 user_dlm_unregister(ip->ip_conn);
384 clear_fields:
385 ip->ip_parent = NULL;
386 ip->ip_conn = NULL;
387 }
388
/*
 * dlmfs pages never hit disk: no readahead and no dirty/writeback
 * accounting are wanted on this bdi.
 */
static struct backing_dev_info dlmfs_backing_dev_info = {
	.name		= "ocfs2-dlmfs",
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
394
395 static struct inode *dlmfs_get_root_inode(struct super_block *sb)
396 {
397 struct inode *inode = new_inode(sb);
398 int mode = S_IFDIR | 0755;
399 struct dlmfs_inode_private *ip;
400
401 if (inode) {
402 ip = DLMFS_I(inode);
403
404 inode->i_mode = mode;
405 inode->i_uid = current_fsuid();
406 inode->i_gid = current_fsgid();
407 inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
408 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
409 inc_nlink(inode);
410
411 inode->i_fop = &simple_dir_operations;
412 inode->i_op = &dlmfs_root_inode_operations;
413 }
414
415 return inode;
416 }
417
418 static struct inode *dlmfs_get_inode(struct inode *parent,
419 struct dentry *dentry,
420 int mode)
421 {
422 struct super_block *sb = parent->i_sb;
423 struct inode * inode = new_inode(sb);
424 struct dlmfs_inode_private *ip;
425
426 if (!inode)
427 return NULL;
428
429 inode->i_mode = mode;
430 inode->i_uid = current_fsuid();
431 inode->i_gid = current_fsgid();
432 inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
433 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
434
435 ip = DLMFS_I(inode);
436 ip->ip_conn = DLMFS_I(parent)->ip_conn;
437
438 switch (mode & S_IFMT) {
439 default:
440 /* for now we don't support anything other than
441 * directories and regular files. */
442 BUG();
443 break;
444 case S_IFREG:
445 inode->i_op = &dlmfs_file_inode_operations;
446 inode->i_fop = &dlmfs_file_operations;
447
448 i_size_write(inode, DLM_LVB_LEN);
449
450 user_dlm_lock_res_init(&ip->ip_lockres, dentry);
451
452 /* released at clear_inode time, this insures that we
453 * get to drop the dlm reference on each lock *before*
454 * we call the unregister code for releasing parent
455 * directories. */
456 ip->ip_parent = igrab(parent);
457 BUG_ON(!ip->ip_parent);
458 break;
459 case S_IFDIR:
460 inode->i_op = &dlmfs_dir_inode_operations;
461 inode->i_fop = &simple_dir_operations;
462
463 /* directory inodes start off with i_nlink ==
464 * 2 (for "." entry) */
465 inc_nlink(inode);
466 break;
467 }
468
469 if (parent->i_mode & S_ISGID) {
470 inode->i_gid = parent->i_gid;
471 if (S_ISDIR(mode))
472 inode->i_mode |= S_ISGID;
473 }
474
475 return inode;
476 }
477
478 /*
479 * File creation. Allocate an inode, and we're done..
480 */
481 /* SMP-safe */
482 static int dlmfs_mkdir(struct inode * dir,
483 struct dentry * dentry,
484 int mode)
485 {
486 int status;
487 struct inode *inode = NULL;
488 struct qstr *domain = &dentry->d_name;
489 struct dlmfs_inode_private *ip;
490 struct ocfs2_cluster_connection *conn;
491
492 mlog(0, "mkdir %.*s\n", domain->len, domain->name);
493
494 /* verify that we have a proper domain */
495 if (domain->len >= GROUP_NAME_MAX) {
496 status = -EINVAL;
497 mlog(ML_ERROR, "invalid domain name for directory.\n");
498 goto bail;
499 }
500
501 inode = dlmfs_get_inode(dir, dentry, mode | S_IFDIR);
502 if (!inode) {
503 status = -ENOMEM;
504 mlog_errno(status);
505 goto bail;
506 }
507
508 ip = DLMFS_I(inode);
509
510 conn = user_dlm_register(domain);
511 if (IS_ERR(conn)) {
512 status = PTR_ERR(conn);
513 mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n",
514 status, domain->len, domain->name);
515 goto bail;
516 }
517 ip->ip_conn = conn;
518
519 inc_nlink(dir);
520 d_instantiate(dentry, inode);
521 dget(dentry); /* Extra count - pin the dentry in core */
522
523 status = 0;
524 bail:
525 if (status < 0)
526 iput(inode);
527 return status;
528 }
529
530 static int dlmfs_create(struct inode *dir,
531 struct dentry *dentry,
532 int mode,
533 struct nameidata *nd)
534 {
535 int status = 0;
536 struct inode *inode;
537 struct qstr *name = &dentry->d_name;
538
539 mlog(0, "create %.*s\n", name->len, name->name);
540
541 /* verify name is valid and doesn't contain any dlm reserved
542 * characters */
543 if (name->len >= USER_DLM_LOCK_ID_MAX_LEN ||
544 name->name[0] == '$') {
545 status = -EINVAL;
546 mlog(ML_ERROR, "invalid lock name, %.*s\n", name->len,
547 name->name);
548 goto bail;
549 }
550
551 inode = dlmfs_get_inode(dir, dentry, mode | S_IFREG);
552 if (!inode) {
553 status = -ENOMEM;
554 mlog_errno(status);
555 goto bail;
556 }
557
558 d_instantiate(dentry, inode);
559 dget(dentry); /* Extra count - pin the dentry in core */
560 bail:
561 return status;
562 }
563
564 static int dlmfs_unlink(struct inode *dir,
565 struct dentry *dentry)
566 {
567 int status;
568 struct inode *inode = dentry->d_inode;
569
570 mlog(0, "unlink inode %lu\n", inode->i_ino);
571
572 /* if there are no current holders, or none that are waiting
573 * to acquire a lock, this basically destroys our lockres. */
574 status = user_dlm_destroy_lock(&DLMFS_I(inode)->ip_lockres);
575 if (status < 0) {
576 mlog(ML_ERROR, "unlink %.*s, error %d from destroy\n",
577 dentry->d_name.len, dentry->d_name.name, status);
578 goto bail;
579 }
580 status = simple_unlink(dir, dentry);
581 bail:
582 return status;
583 }
584
585 static int dlmfs_fill_super(struct super_block * sb,
586 void * data,
587 int silent)
588 {
589 struct inode * inode;
590 struct dentry * root;
591
592 sb->s_maxbytes = MAX_LFS_FILESIZE;
593 sb->s_blocksize = PAGE_CACHE_SIZE;
594 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
595 sb->s_magic = DLMFS_MAGIC;
596 sb->s_op = &dlmfs_ops;
597 inode = dlmfs_get_root_inode(sb);
598 if (!inode)
599 return -ENOMEM;
600
601 root = d_alloc_root(inode);
602 if (!root) {
603 iput(inode);
604 return -ENOMEM;
605 }
606 sb->s_root = root;
607 return 0;
608 }
609
/* Lock files: open/release take and drop the cluster lock, read/write
 * access the LVB, poll reports basts. */
static const struct file_operations dlmfs_file_operations = {
	.open		= dlmfs_file_open,
	.release	= dlmfs_file_release,
	.poll		= dlmfs_file_poll,
	.read		= dlmfs_file_read,
	.write		= dlmfs_file_write,
};

/* Domain directories: lock files can be created/unlinked here, but no
 * nested mkdir. */
static const struct inode_operations dlmfs_dir_inode_operations = {
	.create		= dlmfs_create,
	.lookup		= simple_lookup,
	.unlink		= dlmfs_unlink,
};

/* this way we can restrict mkdir to only the toplevel of the fs. */
static const struct inode_operations dlmfs_root_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= dlmfs_mkdir,
	.rmdir		= simple_rmdir,
};

/* generic_delete_inode: never cache unreferenced inodes, so dlm state
 * is torn down (dlmfs_evict_inode) as soon as the last ref drops. */
static const struct super_operations dlmfs_ops = {
	.statfs		= simple_statfs,
	.alloc_inode	= dlmfs_alloc_inode,
	.destroy_inode	= dlmfs_destroy_inode,
	.evict_inode	= dlmfs_evict_inode,
	.drop_inode	= generic_delete_inode,
};

/* Lock files only override setattr (to refuse size changes). */
static const struct inode_operations dlmfs_file_inode_operations = {
	.getattr	= simple_getattr,
	.setattr	= dlmfs_file_setattr,
};
643
/* Mount entry point: dlmfs has no backing device, so use the nodev
 * superblock helper with our fill_super callback. */
static int dlmfs_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_nodev(fs_type, flags, data, dlmfs_fill_super, mnt);
}

/* kill_litter_super: drop the pinned dentries (see the dget() calls in
 * create/mkdir) before tearing down the superblock. */
static struct file_system_type dlmfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ocfs2_dlmfs",
	.get_sb		= dlmfs_get_sb,
	.kill_sb	= kill_litter_super,
};
656
657 static int __init init_dlmfs_fs(void)
658 {
659 int status;
660 int cleanup_inode = 0, cleanup_worker = 0;
661
662 dlmfs_print_version();
663
664 status = bdi_init(&dlmfs_backing_dev_info);
665 if (status)
666 return status;
667
668 dlmfs_inode_cache = kmem_cache_create("dlmfs_inode_cache",
669 sizeof(struct dlmfs_inode_private),
670 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
671 SLAB_MEM_SPREAD),
672 dlmfs_init_once);
673 if (!dlmfs_inode_cache) {
674 status = -ENOMEM;
675 goto bail;
676 }
677 cleanup_inode = 1;
678
679 user_dlm_worker = create_singlethread_workqueue("user_dlm");
680 if (!user_dlm_worker) {
681 status = -ENOMEM;
682 goto bail;
683 }
684 cleanup_worker = 1;
685
686 user_dlm_set_locking_protocol();
687 status = register_filesystem(&dlmfs_fs_type);
688 bail:
689 if (status) {
690 if (cleanup_inode)
691 kmem_cache_destroy(dlmfs_inode_cache);
692 if (cleanup_worker)
693 destroy_workqueue(user_dlm_worker);
694 bdi_destroy(&dlmfs_backing_dev_info);
695 } else
696 printk("OCFS2 User DLM kernel interface loaded\n");
697 return status;
698 }
699
/*
 * Module teardown: unregister the filesystem first so no new mounts or
 * lock files appear, then tear down the worker, the inode cache, and
 * the backing_dev_info — the reverse of init_dlmfs_fs().
 */
static void __exit exit_dlmfs_fs(void)
{
	unregister_filesystem(&dlmfs_fs_type);

	/* drain any queued bast/unlock work before destroying */
	flush_workqueue(user_dlm_worker);
	destroy_workqueue(user_dlm_worker);

	kmem_cache_destroy(dlmfs_inode_cache);

	bdi_destroy(&dlmfs_backing_dev_info);
}
711
712 MODULE_AUTHOR("Oracle");
713 MODULE_LICENSE("GPL");
714
715 module_init(init_dlmfs_fs)
716 module_exit(exit_dlmfs_fs)