/*
 * linux/fs/hfsplus/super.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/vfs.h>
#include <linux/nls.h>

static struct inode *hfsplus_alloc_inode(struct super_block *sb);
static void hfsplus_destroy_inode(struct inode *inode);

#include "hfsplus_fs.h"
#include "xattr.h"

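/*
 * Read one of the special (system) files -- extents overflow, catalog,
 * allocation bitmap, startup, or attributes -- whose fork locations live
 * directly in the volume header rather than in the catalog.  The B-tree
 * files get the btree address-space operations; unknown CNIDs are
 * rejected with -EIO.
 */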
static int hfsplus_system_read_inode(struct inode *inode)
{
	struct hfsplus_vh *vhdr = HFSPLUS_SB(inode->i_sb)->s_vhdr;

	switch (inode->i_ino) {
	case HFSPLUS_EXT_CNID:
		hfsplus_inode_read_fork(inode, &vhdr->ext_file);
		inode->i_mapping->a_ops = &hfsplus_btree_aops;
		break;
	case HFSPLUS_CAT_CNID:
		hfsplus_inode_read_fork(inode, &vhdr->cat_file);
		inode->i_mapping->a_ops = &hfsplus_btree_aops;
		break;
	case HFSPLUS_ALLOC_CNID:
		hfsplus_inode_read_fork(inode, &vhdr->alloc_file);
		inode->i_mapping->a_ops = &hfsplus_aops;
		break;
	case HFSPLUS_START_CNID:
		hfsplus_inode_read_fork(inode, &vhdr->start_file);
		break;
	case HFSPLUS_ATTR_CNID:
		hfsplus_inode_read_fork(inode, &vhdr->attr_file);
		inode->i_mapping->a_ops = &hfsplus_btree_aops;
		break;
	default:
		return -EIO;
	}

	return 0;
}

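/*
 * Look up or create the in-core inode for a catalog node ID.  Newly
 * created inodes are initialised here and then filled in either from the
 * catalog B-tree (root directory and user files) or from the volume
 * header's fork data via hfsplus_system_read_inode().
 */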
struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino)
{
	struct hfs_find_data fd;
	struct inode *inode;
	int err;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	INIT_LIST_HEAD(&HFSPLUS_I(inode)->open_dir_list);
	spin_lock_init(&HFSPLUS_I(inode)->open_dir_lock);
	mutex_init(&HFSPLUS_I(inode)->extents_lock);
	HFSPLUS_I(inode)->flags = 0;
	HFSPLUS_I(inode)->extent_state = 0;
	HFSPLUS_I(inode)->rsrc_inode = NULL;
	atomic_set(&HFSPLUS_I(inode)->opencnt, 0);

	if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
	    inode->i_ino == HFSPLUS_ROOT_CNID) {
		err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
		if (!err) {
			err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
			if (!err)
				err = hfsplus_cat_read_inode(inode, &fd);
			hfs_find_exit(&fd);
		}
	} else {
		err = hfsplus_system_read_inode(inode);
	}

	if (err) {
		iget_failed(inode);
		return ERR_PTR(err);
	}

	unlock_new_inode(inode);
	return inode;
}

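/*
 * Write back a special (system) file: update its fork data in the volume
 * header, flag the backup volume header for rewrite if the size changed,
 * and flush the corresponding B-tree, if any.
 */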
static int hfsplus_system_write_inode(struct inode *inode)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
	struct hfsplus_vh *vhdr = sbi->s_vhdr;
	struct hfsplus_fork_raw *fork;
	struct hfs_btree *tree = NULL;

	switch (inode->i_ino) {
	case HFSPLUS_EXT_CNID:
		fork = &vhdr->ext_file;
		tree = sbi->ext_tree;
		break;
	case HFSPLUS_CAT_CNID:
		fork = &vhdr->cat_file;
		tree = sbi->cat_tree;
		break;
	case HFSPLUS_ALLOC_CNID:
		fork = &vhdr->alloc_file;
		break;
	case HFSPLUS_START_CNID:
		fork = &vhdr->start_file;
		break;
	case HFSPLUS_ATTR_CNID:
		fork = &vhdr->attr_file;
		tree = sbi->attr_tree;
		break;
	default:
		return -EIO;
	}

	if (fork->total_size != cpu_to_be64(inode->i_size)) {
		set_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags);
		hfsplus_mark_mdb_dirty(inode->i_sb);
	}
	hfsplus_inode_write_fork(inode, fork);
	if (tree) {
		int err = hfs_btree_write(tree);

		if (err) {
			pr_err("b-tree write err: %d, ino %lu\n",
			       err, inode->i_ino);
			return err;
		}
	}
	return 0;
}

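/*
 * ->write_inode callback: flush any cached extents, then write either the
 * catalog record (root directory and user files) or the volume header
 * fork data (system files).
 */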
static int hfsplus_write_inode(struct inode *inode,
			       struct writeback_control *wbc)
{
	int err;

	hfs_dbg(INODE, "hfsplus_write_inode: %lu\n", inode->i_ino);

	err = hfsplus_ext_write_extent(inode);
	if (err)
		return err;

	if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
	    inode->i_ino == HFSPLUS_ROOT_CNID)
		return hfsplus_cat_write_inode(inode);
	else
		return hfsplus_system_write_inode(inode);
}

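/*
 * ->evict_inode callback: drop the page cache and, for a resource-fork
 * inode, detach it from its owning data-fork inode and release that
 * reference.
 */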
static void hfsplus_evict_inode(struct inode *inode)
{
	hfs_dbg(INODE, "hfsplus_evict_inode: %lu\n", inode->i_ino);
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
	if (HFSPLUS_IS_RSRC(inode)) {
		HFSPLUS_I(HFSPLUS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
		iput(HFSPLUS_I(inode)->rsrc_inode);
	}
}

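/*
 * ->sync_fs callback: write back the metadata inodes, refresh the volume
 * header counters, and submit the primary (and, when flagged, backup)
 * volume header, followed by a block-device flush unless barriers are
 * disabled.
 */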
static int hfsplus_sync_fs(struct super_block *sb, int wait)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_vh *vhdr = sbi->s_vhdr;
	int write_backup = 0;
	int error, error2;

	if (!wait)
		return 0;

	hfs_dbg(SUPER, "hfsplus_sync_fs\n");

	/*
	 * Explicitly write out the special metadata inodes.
	 *
	 * While these special inodes are marked as hashed and written
	 * out periodically by the flusher threads, we redirty them
	 * during writeout of normal inodes, and thus this livelock
	 * prevents us from getting the latest state to disk.
	 */
	error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping);
	error2 = filemap_write_and_wait(sbi->ext_tree->inode->i_mapping);
	if (!error)
		error = error2;
	if (sbi->attr_tree) {
		error2 =
		    filemap_write_and_wait(sbi->attr_tree->inode->i_mapping);
		if (!error)
			error = error2;
	}
	error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
	if (!error)
		error = error2;

	mutex_lock(&sbi->vh_mutex);
	mutex_lock(&sbi->alloc_mutex);
	vhdr->free_blocks = cpu_to_be32(sbi->free_blocks);
	vhdr->next_cnid = cpu_to_be32(sbi->next_cnid);
	vhdr->folder_count = cpu_to_be32(sbi->folder_count);
	vhdr->file_count = cpu_to_be32(sbi->file_count);

	if (test_and_clear_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags)) {
		memcpy(sbi->s_backup_vhdr, sbi->s_vhdr, sizeof(*sbi->s_vhdr));
		write_backup = 1;
	}

	error2 = hfsplus_submit_bio(sb,
				    sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
				    sbi->s_vhdr_buf, NULL, REQ_OP_WRITE,
				    REQ_SYNC);
	if (!error)
		error = error2;
	if (!write_backup)
		goto out;

	error2 = hfsplus_submit_bio(sb,
				    sbi->part_start + sbi->sect_count - 2,
				    sbi->s_backup_vhdr_buf, NULL, REQ_OP_WRITE,
				    REQ_SYNC);
	if (!error)
		error = error2;
out:
	mutex_unlock(&sbi->alloc_mutex);
	mutex_unlock(&sbi->vh_mutex);

	if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
		blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);

	return error;
}

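/*
 * Deferred work item that performs the delayed sync scheduled by
 * hfsplus_mark_mdb_dirty().
 */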
static void delayed_sync_fs(struct work_struct *work)
{
	int err;
	struct hfsplus_sb_info *sbi;

	sbi = container_of(work, struct hfsplus_sb_info, sync_work.work);

	spin_lock(&sbi->work_lock);
	sbi->work_queued = 0;
	spin_unlock(&sbi->work_lock);

	err = hfsplus_sync_fs(sbi->alloc_file->i_sb, 1);
	if (err)
		pr_err("delayed sync fs err %d\n", err);
}

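/*
 * Schedule a delayed writeback of the dirty volume header (and other
 * metadata) unless the filesystem is read-only or a sync is already
 * queued.
 */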
void hfsplus_mark_mdb_dirty(struct super_block *sb)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	unsigned long delay;

	if (sb->s_flags & MS_RDONLY)
		return;

	spin_lock(&sbi->work_lock);
	if (!sbi->work_queued) {
		delay = msecs_to_jiffies(dirty_writeback_interval * 10);
		queue_delayed_work(system_long_wq, &sbi->sync_work, delay);
		sbi->work_queued = 1;
	}
	spin_unlock(&sbi->work_lock);
}

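/*
 * ->put_super callback: cancel pending sync work, mark the volume as
 * cleanly unmounted on a read-write mount, and release the B-trees,
 * special inodes, volume header buffers and NLS table.
 */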
static void hfsplus_put_super(struct super_block *sb)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);

	hfs_dbg(SUPER, "hfsplus_put_super\n");

	cancel_delayed_work_sync(&sbi->sync_work);

	if (!(sb->s_flags & MS_RDONLY) && sbi->s_vhdr) {
		struct hfsplus_vh *vhdr = sbi->s_vhdr;

		vhdr->modify_date = hfsp_now2mt();
		vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_UNMNT);
		vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_INCNSTNT);

		hfsplus_sync_fs(sb, 1);
	}

	hfs_btree_close(sbi->attr_tree);
	hfs_btree_close(sbi->cat_tree);
	hfs_btree_close(sbi->ext_tree);
	iput(sbi->alloc_file);
	iput(sbi->hidden_dir);
	kfree(sbi->s_vhdr_buf);
	kfree(sbi->s_backup_vhdr_buf);
	unload_nls(sbi->nls);
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
}

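/*
 * ->statfs callback: report sizes in units of sb->s_blocksize (allocation
 * block counts scaled by fs_shift) and derive the free inode count from
 * the remaining CNID space.
 */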
static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

	buf->f_type = HFSPLUS_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = sbi->total_blocks << sbi->fs_shift;
	buf->f_bfree = sbi->free_blocks << sbi->fs_shift;
	buf->f_bavail = buf->f_bfree;
	buf->f_files = 0xFFFFFFFF;
	buf->f_ffree = 0xFFFFFFFF - sbi->next_cnid;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);
	buf->f_namelen = HFSPLUS_MAX_STRLEN;

	return 0;
}

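/*
 * ->remount_fs callback: allow switching to read-only freely, but refuse
 * to go read-write if the volume was not cleanly unmounted, and honour
 * the soft-lock and journaled flags unless the "force" option is given.
 */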
static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);
	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
		return 0;
	if (!(*flags & MS_RDONLY)) {
		struct hfsplus_vh *vhdr = HFSPLUS_SB(sb)->s_vhdr;
		int force = 0;

		if (!hfsplus_parse_options_remount(data, &force))
			return -EINVAL;

		if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
			pr_warn("filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. leaving read-only.\n");
			sb->s_flags |= MS_RDONLY;
			*flags |= MS_RDONLY;
		} else if (force) {
			/* nothing */
		} else if (vhdr->attributes &
				cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
			pr_warn("filesystem is marked locked, leaving read-only.\n");
			sb->s_flags |= MS_RDONLY;
			*flags |= MS_RDONLY;
		} else if (vhdr->attributes &
				cpu_to_be32(HFSPLUS_VOL_JOURNALED)) {
			pr_warn("filesystem is marked journaled, leaving read-only.\n");
			sb->s_flags |= MS_RDONLY;
			*flags |= MS_RDONLY;
		}
	}
	return 0;
}

static const struct super_operations hfsplus_sops = {
	.alloc_inode	= hfsplus_alloc_inode,
	.destroy_inode	= hfsplus_destroy_inode,
	.write_inode	= hfsplus_write_inode,
	.evict_inode	= hfsplus_evict_inode,
	.put_super	= hfsplus_put_super,
	.sync_fs	= hfsplus_sync_fs,
	.statfs		= hfsplus_statfs,
	.remount_fs	= hfsplus_remount,
	.show_options	= hfsplus_show_options,
};

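/*
 * Fill in the superblock at mount time: read and validate the volume
 * header, open the extents, catalog and (optional) attributes B-trees,
 * load the allocation file and root directory, locate or create the
 * hidden metadata directory, and mark the volume inconsistent/mounted
 * for read-write mounts.
 */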
static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
{
	struct hfsplus_vh *vhdr;
	struct hfsplus_sb_info *sbi;
	hfsplus_cat_entry entry;
	struct hfs_find_data fd;
	struct inode *root, *inode;
	struct qstr str;
	struct nls_table *nls = NULL;
	u64 last_fs_block, last_fs_page;
	int err;

	err = -ENOMEM;
	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		goto out;

	sb->s_fs_info = sbi;
	mutex_init(&sbi->alloc_mutex);
	mutex_init(&sbi->vh_mutex);
	spin_lock_init(&sbi->work_lock);
	INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs);
	hfsplus_fill_defaults(sbi);

	err = -EINVAL;
	if (!hfsplus_parse_options(data, sbi)) {
		pr_err("unable to parse mount options\n");
		goto out_unload_nls;
	}

	/* temporarily use utf8 to correctly find the hidden dir below */
	nls = sbi->nls;
	sbi->nls = load_nls("utf8");
	if (!sbi->nls) {
		pr_err("unable to load nls for utf8\n");
		goto out_unload_nls;
	}

	/* Grab the volume header */
	if (hfsplus_read_wrapper(sb)) {
		if (!silent)
			pr_warn("unable to find HFS+ superblock\n");
		goto out_unload_nls;
	}
	vhdr = sbi->s_vhdr;

	/* Copy parts of the volume header into the superblock */
	sb->s_magic = HFSPLUS_VOLHEAD_SIG;
	if (be16_to_cpu(vhdr->version) < HFSPLUS_MIN_VERSION ||
	    be16_to_cpu(vhdr->version) > HFSPLUS_CURRENT_VERSION) {
		pr_err("wrong filesystem version\n");
		goto out_free_vhdr;
	}
	sbi->total_blocks = be32_to_cpu(vhdr->total_blocks);
	sbi->free_blocks = be32_to_cpu(vhdr->free_blocks);
	sbi->next_cnid = be32_to_cpu(vhdr->next_cnid);
	sbi->file_count = be32_to_cpu(vhdr->file_count);
	sbi->folder_count = be32_to_cpu(vhdr->folder_count);
	sbi->data_clump_blocks =
		be32_to_cpu(vhdr->data_clump_sz) >> sbi->alloc_blksz_shift;
	if (!sbi->data_clump_blocks)
		sbi->data_clump_blocks = 1;
	sbi->rsrc_clump_blocks =
		be32_to_cpu(vhdr->rsrc_clump_sz) >> sbi->alloc_blksz_shift;
	if (!sbi->rsrc_clump_blocks)
		sbi->rsrc_clump_blocks = 1;

	err = -EFBIG;
	last_fs_block = sbi->total_blocks - 1;
	last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >>
			PAGE_SHIFT;

	if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) ||
	    (last_fs_page > (pgoff_t)(~0ULL))) {
		pr_err("filesystem size too large\n");
		goto out_free_vhdr;
	}

	/* Set up operations so we can load metadata */
	sb->s_op = &hfsplus_sops;
	sb->s_maxbytes = MAX_LFS_FILESIZE;

	if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
		pr_warn("Filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. mounting read-only.\n");
		sb->s_flags |= MS_RDONLY;
	} else if (test_and_clear_bit(HFSPLUS_SB_FORCE, &sbi->flags)) {
		/* nothing */
	} else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
		pr_warn("Filesystem is marked locked, mounting read-only.\n");
		sb->s_flags |= MS_RDONLY;
	} else if ((vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) &&
			!(sb->s_flags & MS_RDONLY)) {
		pr_warn("write access to a journaled filesystem is not supported, use the force option at your own risk, mounting read-only.\n");
		sb->s_flags |= MS_RDONLY;
	}

	err = -EINVAL;

	/* Load metadata objects (B*Trees) */
	sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
	if (!sbi->ext_tree) {
		pr_err("failed to load extents file\n");
		goto out_free_vhdr;
	}
	sbi->cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID);
	if (!sbi->cat_tree) {
		pr_err("failed to load catalog file\n");
		goto out_close_ext_tree;
	}
	atomic_set(&sbi->attr_tree_state, HFSPLUS_EMPTY_ATTR_TREE);
	if (vhdr->attr_file.total_blocks != 0) {
		sbi->attr_tree = hfs_btree_open(sb, HFSPLUS_ATTR_CNID);
		if (!sbi->attr_tree) {
			pr_err("failed to load attributes file\n");
			goto out_close_cat_tree;
		}
		atomic_set(&sbi->attr_tree_state, HFSPLUS_VALID_ATTR_TREE);
	}
	sb->s_xattr = hfsplus_xattr_handlers;

	inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID);
	if (IS_ERR(inode)) {
		pr_err("failed to load allocation file\n");
		err = PTR_ERR(inode);
		goto out_close_attr_tree;
	}
	sbi->alloc_file = inode;

	/* Load the root directory */
	root = hfsplus_iget(sb, HFSPLUS_ROOT_CNID);
	if (IS_ERR(root)) {
		pr_err("failed to load root directory\n");
		err = PTR_ERR(root);
		goto out_put_alloc_file;
	}

	sb->s_d_op = &hfsplus_dentry_operations;
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		err = -ENOMEM;
		goto out_put_alloc_file;
	}

	str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1;
	str.name = HFSP_HIDDENDIR_NAME;
	err = hfs_find_init(sbi->cat_tree, &fd);
	if (err)
		goto out_put_root;
	err = hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str);
	if (unlikely(err < 0))
		goto out_put_root;
	if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
		hfs_find_exit(&fd);
		if (entry.type != cpu_to_be16(HFSPLUS_FOLDER))
			goto out_put_root;
		inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
		if (IS_ERR(inode)) {
			err = PTR_ERR(inode);
			goto out_put_root;
		}
		sbi->hidden_dir = inode;
	} else
		hfs_find_exit(&fd);

	if (!(sb->s_flags & MS_RDONLY)) {
		/*
		 * H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused
		 * all three are registered with Apple for our use
		 */
		vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION);
		vhdr->modify_date = hfsp_now2mt();
		be32_add_cpu(&vhdr->write_count, 1);
		vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
		vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
		hfsplus_sync_fs(sb, 1);

		if (!sbi->hidden_dir) {
			mutex_lock(&sbi->vh_mutex);
			sbi->hidden_dir = hfsplus_new_inode(sb, S_IFDIR);
			if (!sbi->hidden_dir) {
				mutex_unlock(&sbi->vh_mutex);
				err = -ENOMEM;
				goto out_put_root;
			}
			err = hfsplus_create_cat(sbi->hidden_dir->i_ino, root,
						 &str, sbi->hidden_dir);
			if (err) {
				mutex_unlock(&sbi->vh_mutex);
				goto out_put_hidden_dir;
			}

			err = hfsplus_init_inode_security(sbi->hidden_dir,
							  root, &str);
			if (err == -EOPNOTSUPP)
				err = 0; /* Operation is not supported. */
			else if (err) {
				/*
				 * Try to delete anyway without
				 * error analysis.
				 */
				hfsplus_delete_cat(sbi->hidden_dir->i_ino,
						   root, &str);
				mutex_unlock(&sbi->vh_mutex);
				goto out_put_hidden_dir;
			}

			mutex_unlock(&sbi->vh_mutex);
			hfsplus_mark_inode_dirty(sbi->hidden_dir,
						 HFSPLUS_I_CAT_DIRTY);
		}
	}

	unload_nls(sbi->nls);
	sbi->nls = nls;
	return 0;

out_put_hidden_dir:
	iput(sbi->hidden_dir);
out_put_root:
	dput(sb->s_root);
	sb->s_root = NULL;
out_put_alloc_file:
	iput(sbi->alloc_file);
out_close_attr_tree:
	hfs_btree_close(sbi->attr_tree);
out_close_cat_tree:
	hfs_btree_close(sbi->cat_tree);
out_close_ext_tree:
	hfs_btree_close(sbi->ext_tree);
out_free_vhdr:
	kfree(sbi->s_vhdr_buf);
	kfree(sbi->s_backup_vhdr_buf);
out_unload_nls:
	unload_nls(sbi->nls);
	unload_nls(nls);
	kfree(sbi);
out:
	return err;
}

MODULE_AUTHOR("Brad Boyer");
MODULE_DESCRIPTION("Extended Macintosh Filesystem");
MODULE_LICENSE("GPL");

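/*
 * Inode allocation and destruction: hfsplus_inode_info objects come from
 * a dedicated slab cache and are freed through an RCU callback so that
 * concurrent RCU-walk path lookups remain safe.
 */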
static struct kmem_cache *hfsplus_inode_cachep;

static struct inode *hfsplus_alloc_inode(struct super_block *sb)
{
	struct hfsplus_inode_info *i;

	i = kmem_cache_alloc(hfsplus_inode_cachep, GFP_KERNEL);
	return i ? &i->vfs_inode : NULL;
}

static void hfsplus_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(hfsplus_inode_cachep, HFSPLUS_I(inode));
}

static void hfsplus_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, hfsplus_i_callback);
}

#define HFSPLUS_INODE_SIZE	sizeof(struct hfsplus_inode_info)

static struct dentry *hfsplus_mount(struct file_system_type *fs_type,
				    int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, hfsplus_fill_super);
}

static struct file_system_type hfsplus_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "hfsplus",
	.mount		= hfsplus_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("hfsplus");

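/* Slab constructor: initialise the embedded VFS inode once per object. */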
static void hfsplus_init_once(void *p)
{
	struct hfsplus_inode_info *i = p;

	inode_init_once(&i->vfs_inode);
}

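/*
 * Module init/exit: create the inode and attribute-tree caches and
 * register the filesystem; tear everything down in reverse on exit.
 */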
static int __init init_hfsplus_fs(void)
{
	int err;

	hfsplus_inode_cachep = kmem_cache_create("hfsplus_icache",
		HFSPLUS_INODE_SIZE, 0, SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT,
		hfsplus_init_once);
	if (!hfsplus_inode_cachep)
		return -ENOMEM;
	err = hfsplus_create_attr_tree_cache();
	if (err)
		goto destroy_inode_cache;
	err = register_filesystem(&hfsplus_fs_type);
	if (err)
		goto destroy_attr_tree_cache;
	return 0;

destroy_attr_tree_cache:
	hfsplus_destroy_attr_tree_cache();

destroy_inode_cache:
	kmem_cache_destroy(hfsplus_inode_cachep);

	return err;
}

static void __exit exit_hfsplus_fs(void)
{
	unregister_filesystem(&hfsplus_fs_type);

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	hfsplus_destroy_attr_tree_cache();
	kmem_cache_destroy(hfsplus_inode_cachep);
}

module_init(init_hfsplus_fs)
module_exit(exit_hfsplus_fs)