/*
 * linux/fs/ext4/super.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/parser.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/crc16.h>
#include <linux/dax.h>
#include <linux/cleancache.h>
#include <linux/uaccess.h>

#include <linux/kthread.h>
#include <linux/freezer.h>

#include "ext4.h"
#include "ext4_extents.h"       /* Needed for trace points definition */
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "mballoc.h"
#include "fsmap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>

static struct ext4_lazy_init *ext4_li_info;
static struct mutex ext4_li_mtx;
static struct ratelimit_state ext4_mount_msg_ratelimit;

static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
                             unsigned long journal_devnum);
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
static int ext4_commit_super(struct super_block *sb, int sync);
static void ext4_mark_recovery_complete(struct super_block *sb,
                                        struct ext4_super_block *es);
static void ext4_clear_journal_err(struct super_block *sb,
                                   struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
                                 const char *dev_name, void *data);
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
static int ext4_feature_set_ok(struct super_block *sb, int readonly);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
static struct inode *ext4_get_journal_inode(struct super_block *sb,
                                            unsigned int journal_inum);

/*
 * Lock ordering
 *
 * Note the difference between i_mmap_sem (EXT4_I(inode)->i_mmap_sem) and
 * i_mmap_rwsem (inode->i_mmap_rwsem)!
 *
 * page fault path:
 * mmap_sem -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
 *   page lock -> i_data_sem (rw)
 *
 * buffered write path:
 * sb_start_write -> i_mutex -> mmap_sem
 * sb_start_write -> i_mutex -> transaction start -> page lock ->
 *   i_data_sem (rw)
 *
 * truncate:
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (w) -> i_mmap_sem (w) ->
 *   i_mmap_rwsem (w) -> page lock
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (w) -> i_mmap_sem (w) ->
 *   transaction start -> i_data_sem (rw)
 *
 * direct IO:
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (r) -> mmap_sem
 * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (r) ->
 *   transaction start -> i_data_sem (rw)
 *
 * writepages:
 * transaction start -> page lock(s) -> i_data_sem (rw)
 */

#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static struct file_system_type ext2_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "ext2",
        .mount          = ext4_mount,
        .kill_sb        = kill_block_super,
        .fs_flags       = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif


static struct file_system_type ext3_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "ext3",
        .mount          = ext4_mount,
        .kill_sb        = kill_block_super,
        .fs_flags       = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)

static int ext4_verify_csum_type(struct super_block *sb,
                                 struct ext4_super_block *es)
{
        if (!ext4_has_feature_metadata_csum(sb))
                return 1;

        return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
}

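/*
 * The superblock checksum covers every byte of the superblock up to
 * (but not including) the s_checksum field itself; the crc32c is
 * seeded with ~0.
 */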
static __le32 ext4_superblock_csum(struct super_block *sb,
                                   struct ext4_super_block *es)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        int offset = offsetof(struct ext4_super_block, s_checksum);
        __u32 csum;

        csum = ext4_chksum(sbi, ~0, (char *)es, offset);

        return cpu_to_le32(csum);
}

static int ext4_superblock_csum_verify(struct super_block *sb,
                                       struct ext4_super_block *es)
{
        if (!ext4_has_metadata_csum(sb))
                return 1;

        return es->s_checksum == ext4_superblock_csum(sb, es);
}

void ext4_superblock_csum_set(struct super_block *sb)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;

        if (!ext4_has_metadata_csum(sb))
                return;

        es->s_checksum = ext4_superblock_csum(sb, es);
}

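/*
 * Allocation helpers: try kmalloc first, and quietly (__GFP_NOWARN)
 * fall back to vmalloc when a physically contiguous buffer of the
 * requested size isn't available.
 */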
void *ext4_kvmalloc(size_t size, gfp_t flags)
{
        void *ret;

        ret = kmalloc(size, flags | __GFP_NOWARN);
        if (!ret)
                ret = __vmalloc(size, flags, PAGE_KERNEL);
        return ret;
}

void *ext4_kvzalloc(size_t size, gfp_t flags)
{
        void *ret;

        ret = kzalloc(size, flags | __GFP_NOWARN);
        if (!ret)
                ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
        return ret;
}

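/*
 * Block group descriptor accessors.  With 64bit-sized group
 * descriptors (EXT4_MIN_DESC_SIZE_64BIT or larger) each on-disk field
 * is split into _lo/_hi halves; the getters below stitch the halves
 * together and the corresponding setters split them back apart.
 */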
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
                               struct ext4_group_desc *bg)
{
        return le32_to_cpu(bg->bg_block_bitmap_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
                               struct ext4_group_desc *bg)
{
        return le32_to_cpu(bg->bg_inode_bitmap_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_table(struct super_block *sb,
                              struct ext4_group_desc *bg)
{
        return le32_to_cpu(bg->bg_inode_table_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
}

__u32 ext4_free_group_clusters(struct super_block *sb,
                               struct ext4_group_desc *bg)
{
        return le16_to_cpu(bg->bg_free_blocks_count_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
}

__u32 ext4_free_inodes_count(struct super_block *sb,
                             struct ext4_group_desc *bg)
{
        return le16_to_cpu(bg->bg_free_inodes_count_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
}

__u32 ext4_used_dirs_count(struct super_block *sb,
                           struct ext4_group_desc *bg)
{
        return le16_to_cpu(bg->bg_used_dirs_count_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
}

__u32 ext4_itable_unused_count(struct super_block *sb,
                               struct ext4_group_desc *bg)
{
        return le16_to_cpu(bg->bg_itable_unused_lo) |
                (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
                 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
}

void ext4_block_bitmap_set(struct super_block *sb,
                           struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
        bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_bitmap_set(struct super_block *sb,
                           struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
        bg->bg_inode_bitmap_lo = cpu_to_le32((u32)blk);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_table_set(struct super_block *sb,
                          struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
        bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}

void ext4_free_group_clusters_set(struct super_block *sb,
                                  struct ext4_group_desc *bg, __u32 count)
{
        bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
}

void ext4_free_inodes_set(struct super_block *sb,
                          struct ext4_group_desc *bg, __u32 count)
{
        bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
}

void ext4_used_dirs_set(struct super_block *sb,
                        struct ext4_group_desc *bg, __u32 count)
{
        bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
}

void ext4_itable_unused_set(struct super_block *sb,
                            struct ext4_group_desc *bg, __u32 count)
{
        bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
        if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
                bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}

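/*
 * Record an error in the on-disk superblock.  The most recent error is
 * always overwritten, while the first error is captured only once, so
 * both ends of the error history survive for post-mortem analysis.
 */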
static void __save_error_info(struct super_block *sb, const char *func,
                              unsigned int line)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;

        EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
        if (bdev_read_only(sb->s_bdev))
                return;
        es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
        es->s_last_error_time = cpu_to_le32(get_seconds());
        strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
        es->s_last_error_line = cpu_to_le32(line);
        if (!es->s_first_error_time) {
                es->s_first_error_time = es->s_last_error_time;
                strncpy(es->s_first_error_func, func,
                        sizeof(es->s_first_error_func));
                es->s_first_error_line = cpu_to_le32(line);
                es->s_first_error_ino = es->s_last_error_ino;
                es->s_first_error_block = es->s_last_error_block;
        }
        /*
         * Start the daily error reporting function if it hasn't been
         * started already
         */
        if (!es->s_error_count)
                mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ);
        le32_add_cpu(&es->s_error_count, 1);
}

static void save_error_info(struct super_block *sb, const char *func,
                            unsigned int line)
{
        __save_error_info(sb, func, line);
        ext4_commit_super(sb, 1);
}

/*
 * The del_gendisk() function uninitializes the disk-specific data
 * structures, including the bdi structure, without telling anyone
 * else.  Once this happens, any attempt to call mark_buffer_dirty()
 * (for example, by ext4_commit_super) will cause a kernel OOPS.
 * This is a kludge to prevent these oopses until we can put in a proper
 * hook in del_gendisk() to inform the VFS and file system layers.
 */
static int block_device_ejected(struct super_block *sb)
{
        struct inode *bd_inode = sb->s_bdev->bd_inode;
        struct backing_dev_info *bdi = inode_to_bdi(bd_inode);

        return bdi->dev == NULL;
}

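/*
 * Called by jbd2 when a transaction commits: drain the callbacks
 * queued on the transaction's t_private_list, dropping s_md_lock
 * across each invocation.
 */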
static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
        struct super_block *sb = journal->j_private;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        int error = is_journal_aborted(journal);
        struct ext4_journal_cb_entry *jce;

        BUG_ON(txn->t_state == T_FINISHED);
        spin_lock(&sbi->s_md_lock);
        while (!list_empty(&txn->t_private_list)) {
                jce = list_entry(txn->t_private_list.next,
                                 struct ext4_journal_cb_entry, jce_list);
                list_del_init(&jce->jce_list);
                spin_unlock(&sbi->s_md_lock);
                jce->jce_func(sb, jce, error);
                spin_lock(&sbi->s_md_lock);
        }
        spin_unlock(&sbi->s_md_lock);
}

/* Deal with the reporting of failure conditions on a filesystem such as
 * inconsistencies detected or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock.  That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead.  On recovery, the journal will complain about
 * that error until we've noted it down and cleared it.
 */

static void ext4_handle_error(struct super_block *sb)
{
        if (sb->s_flags & MS_RDONLY)
                return;

        if (!test_opt(sb, ERRORS_CONT)) {
                journal_t *journal = EXT4_SB(sb)->s_journal;

                EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
                if (journal)
                        jbd2_journal_abort(journal, -EIO);
        }
        if (test_opt(sb, ERRORS_RO)) {
                ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
                /*
                 * Make sure updated value of ->s_mount_flags will be visible
                 * before ->s_flags update
                 */
                smp_wmb();
                sb->s_flags |= MS_RDONLY;
        }
        if (test_opt(sb, ERRORS_PANIC)) {
                if (EXT4_SB(sb)->s_journal &&
                    !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
                        return;
                panic("EXT4-fs (device %s): panic forced after error\n",
                      sb->s_id);
        }
}

#define ext4_error_ratelimit(sb) \
        ___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state), \
                     "EXT4-fs error")

void __ext4_error(struct super_block *sb, const char *function,
                  unsigned int line, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
                return;

        if (ext4_error_ratelimit(sb)) {
                va_start(args, fmt);
                vaf.fmt = fmt;
                vaf.va = &args;
                printk(KERN_CRIT
                       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
                       sb->s_id, function, line, current->comm, &vaf);
                va_end(args);
        }
        save_error_info(sb, function, line);
        ext4_handle_error(sb);
}

void __ext4_error_inode(struct inode *inode, const char *function,
                        unsigned int line, ext4_fsblk_t block,
                        const char *fmt, ...)
{
        va_list args;
        struct va_format vaf;
        struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return;

        es->s_last_error_ino = cpu_to_le32(inode->i_ino);
        es->s_last_error_block = cpu_to_le64(block);
        if (ext4_error_ratelimit(inode->i_sb)) {
                va_start(args, fmt);
                vaf.fmt = fmt;
                vaf.va = &args;
                if (block)
                        printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
                               "inode #%lu: block %llu: comm %s: %pV\n",
                               inode->i_sb->s_id, function, line, inode->i_ino,
                               block, current->comm, &vaf);
                else
                        printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
                               "inode #%lu: comm %s: %pV\n",
                               inode->i_sb->s_id, function, line, inode->i_ino,
                               current->comm, &vaf);
                va_end(args);
        }
        save_error_info(inode->i_sb, function, line);
        ext4_handle_error(inode->i_sb);
}

void __ext4_error_file(struct file *file, const char *function,
                       unsigned int line, ext4_fsblk_t block,
                       const char *fmt, ...)
{
        va_list args;
        struct va_format vaf;
        struct ext4_super_block *es;
        struct inode *inode = file_inode(file);
        char pathname[80], *path;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return;

        es = EXT4_SB(inode->i_sb)->s_es;
        es->s_last_error_ino = cpu_to_le32(inode->i_ino);
        if (ext4_error_ratelimit(inode->i_sb)) {
                path = file_path(file, pathname, sizeof(pathname));
                if (IS_ERR(path))
                        path = "(unknown)";
                va_start(args, fmt);
                vaf.fmt = fmt;
                vaf.va = &args;
                if (block)
                        printk(KERN_CRIT
                               "EXT4-fs error (device %s): %s:%d: inode #%lu: "
                               "block %llu: comm %s: path %s: %pV\n",
                               inode->i_sb->s_id, function, line, inode->i_ino,
                               block, current->comm, path, &vaf);
                else
                        printk(KERN_CRIT
                               "EXT4-fs error (device %s): %s:%d: inode #%lu: "
                               "comm %s: path %s: %pV\n",
                               inode->i_sb->s_id, function, line, inode->i_ino,
                               current->comm, path, &vaf);
                va_end(args);
        }
        save_error_info(inode->i_sb, function, line);
        ext4_handle_error(inode->i_sb);
}

const char *ext4_decode_error(struct super_block *sb, int errno,
                              char nbuf[16])
{
        char *errstr = NULL;

        switch (errno) {
        case -EFSCORRUPTED:
                errstr = "Corrupt filesystem";
                break;
        case -EFSBADCRC:
                errstr = "Filesystem failed CRC";
                break;
        case -EIO:
                errstr = "IO failure";
                break;
        case -ENOMEM:
                errstr = "Out of memory";
                break;
        case -EROFS:
                if (!sb || (EXT4_SB(sb)->s_journal &&
                            EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
                        errstr = "Journal has aborted";
                else
                        errstr = "Readonly filesystem";
                break;
        default:
                /* If the caller passed in an extra buffer for unknown
                 * errors, textualise them now.  Else we just return
                 * NULL. */
                if (nbuf) {
                        /* Check for truncated error codes... */
                        if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
                                errstr = nbuf;
                }
                break;
        }

        return errstr;
}

/* __ext4_std_error decodes expected errors from journaling functions
 * automatically and invokes the appropriate error response. */

void __ext4_std_error(struct super_block *sb, const char *function,
                      unsigned int line, int errno)
{
        char nbuf[16];
        const char *errstr;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
                return;

        /* Special case: if the error is EROFS, and we're not already
         * inside a transaction, then there's really no point in logging
         * an error. */
        if (errno == -EROFS && journal_current_handle() == NULL &&
            (sb->s_flags & MS_RDONLY))
                return;

        if (ext4_error_ratelimit(sb)) {
                errstr = ext4_decode_error(sb, errno, nbuf);
                printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
                       sb->s_id, function, line, errstr);
        }

        save_error_info(sb, function, line);
        ext4_handle_error(sb);
}

/*
 * ext4_abort is a much stronger failure handler than ext4_error.  The
 * abort function may be used to deal with unrecoverable failures such
 * as journal IO errors or ENOMEM at a critical moment in log management.
 *
 * We unconditionally force the filesystem into an ABORT|READONLY state,
 * unless the error response on the fs has been set to panic in which
 * case we take the easy way out and panic immediately.
 */

void __ext4_abort(struct super_block *sb, const char *function,
                  unsigned int line, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
                return;

        save_error_info(sb, function, line);
        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: %pV\n",
               sb->s_id, function, line, &vaf);
        va_end(args);

        if ((sb->s_flags & MS_RDONLY) == 0) {
                ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
                EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
                /*
                 * Make sure updated value of ->s_mount_flags will be visible
                 * before ->s_flags update
                 */
                smp_wmb();
                sb->s_flags |= MS_RDONLY;
                if (EXT4_SB(sb)->s_journal)
                        jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
                save_error_info(sb, function, line);
        }
        if (test_opt(sb, ERRORS_PANIC)) {
                if (EXT4_SB(sb)->s_journal &&
                    !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
                        return;
                panic("EXT4-fs panic from previous error\n");
        }
}

void __ext4_msg(struct super_block *sb,
                const char *prefix, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
                return;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
        va_end(args);
}

#define ext4_warning_ratelimit(sb) \
        ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state), \
                     "EXT4-fs warning")

void __ext4_warning(struct super_block *sb, const char *function,
                    unsigned int line, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        if (!ext4_warning_ratelimit(sb))
                return;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
               sb->s_id, function, line, &vaf);
        va_end(args);
}

void __ext4_warning_inode(const struct inode *inode, const char *function,
                          unsigned int line, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        if (!ext4_warning_ratelimit(inode->i_sb))
                return;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
               "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
               function, line, inode->i_ino, current->comm, &vaf);
        va_end(args);
}

void __ext4_grp_locked_error(const char *function, unsigned int line,
                             struct super_block *sb, ext4_group_t grp,
                             unsigned long ino, ext4_fsblk_t block,
                             const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
        struct va_format vaf;
        va_list args;
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
                return;

        es->s_last_error_ino = cpu_to_le32(ino);
        es->s_last_error_block = cpu_to_le64(block);
        __save_error_info(sb, function, line);

        if (ext4_error_ratelimit(sb)) {
                va_start(args, fmt);
                vaf.fmt = fmt;
                vaf.va = &args;
                printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
                       sb->s_id, function, line, grp);
                if (ino)
                        printk(KERN_CONT "inode %lu: ", ino);
                if (block)
                        printk(KERN_CONT "block %llu:",
                               (unsigned long long) block);
                printk(KERN_CONT "%pV\n", &vaf);
                va_end(args);
        }

        if (test_opt(sb, ERRORS_CONT)) {
                ext4_commit_super(sb, 0);
                return;
        }

        ext4_unlock_group(sb, grp);
        ext4_handle_error(sb);
        /*
         * We only get here in the ERRORS_RO case; relocking the group
         * may be dangerous, but nothing bad will happen since the
         * filesystem will have already been marked read/only and the
         * journal has been aborted.  We return 1 as a hint to callers
         * who might want to use the return value from
         * ext4_grp_locked_error() to distinguish between the
         * ERRORS_CONT and ERRORS_RO case, and perhaps return more
         * aggressively from the ext4 function in question, with a
         * more appropriate error code.
         */
        ext4_lock_group(sb, grp);
        return;
}

void ext4_update_dynamic_rev(struct super_block *sb)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;

        if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
                return;

        ext4_warning(sb,
                     "updating to rev %d because of new feature flag, "
                     "running e2fsck is recommended",
                     EXT4_DYNAMIC_REV);

        es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
        es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
        es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
        /* leave es->s_feature_*compat flags alone */
        /* es->s_uuid will be set by e2fsck if empty */

        /*
         * The rest of the superblock fields should be zero, and if not it
         * means they are likely already in use, so leave them alone.  We
         * can leave it up to e2fsck to clean up any inconsistencies there.
         */
}

/*
 * Open the external journal device
 */
static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
{
        struct block_device *bdev;
        char b[BDEVNAME_SIZE];

        bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
        if (IS_ERR(bdev))
                goto fail;
        return bdev;

fail:
        ext4_msg(sb, KERN_ERR, "failed to open journal device %s: %ld",
                 __bdevname(dev, b), PTR_ERR(bdev));
        return NULL;
}

/*
 * Release the journal device
 */
static void ext4_blkdev_put(struct block_device *bdev)
{
        blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
{
        struct block_device *bdev;
        bdev = sbi->journal_bdev;
        if (bdev) {
                ext4_blkdev_put(bdev);
                sbi->journal_bdev = NULL;
        }
}

static inline struct inode *orphan_list_entry(struct list_head *l)
{
        return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
}

static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
{
        struct list_head *l;

        ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
                 le32_to_cpu(sbi->s_es->s_last_orphan));

        printk(KERN_ERR "sb_info orphan list:\n");
        list_for_each(l, &sbi->s_orphan) {
                struct inode *inode = orphan_list_entry(l);
                printk(KERN_ERR " "
                       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
                       inode->i_sb->s_id, inode->i_ino, inode,
                       inode->i_mode, inode->i_nlink,
                       NEXT_ORPHAN(inode));
        }
}

#ifdef CONFIG_QUOTA
static int ext4_quota_off(struct super_block *sb, int type);

static inline void ext4_quota_off_umount(struct super_block *sb)
{
        int type;

        /* Use our quota_off function to clear inode flags etc. */
        for (type = 0; type < EXT4_MAXQUOTAS; type++)
                ext4_quota_off(sb, type);
}
#else
static inline void ext4_quota_off_umount(struct super_block *sb)
{
}
#endif

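/*
 * Unmount-time teardown: stop the lazyinit request, turn quotas off,
 * drain the reserved-conversion workqueue, shut down the journal and
 * then release every in-memory structure set up at mount time.
 */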
static void ext4_put_super(struct super_block *sb)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_super_block *es = sbi->s_es;
        int aborted = 0;
        int i, err;

        ext4_unregister_li_request(sb);
        ext4_quota_off_umount(sb);

        flush_workqueue(sbi->rsv_conversion_wq);
        destroy_workqueue(sbi->rsv_conversion_wq);

        if (sbi->s_journal) {
                aborted = is_journal_aborted(sbi->s_journal);
                err = jbd2_journal_destroy(sbi->s_journal);
                sbi->s_journal = NULL;
                if ((err < 0) && !aborted)
                        ext4_abort(sb, "Couldn't clean up the journal");
        }

        ext4_unregister_sysfs(sb);
        ext4_es_unregister_shrinker(sbi);
        del_timer_sync(&sbi->s_err_report);
        ext4_release_system_zone(sb);
        ext4_mb_release(sb);
        ext4_ext_release(sb);

        if (!(sb->s_flags & MS_RDONLY) && !aborted) {
                ext4_clear_feature_journal_needs_recovery(sb);
                es->s_state = cpu_to_le16(sbi->s_mount_state);
        }
        if (!(sb->s_flags & MS_RDONLY))
                ext4_commit_super(sb, 1);

        for (i = 0; i < sbi->s_gdb_count; i++)
                brelse(sbi->s_group_desc[i]);
        kvfree(sbi->s_group_desc);
        kvfree(sbi->s_flex_groups);
        percpu_counter_destroy(&sbi->s_freeclusters_counter);
        percpu_counter_destroy(&sbi->s_freeinodes_counter);
        percpu_counter_destroy(&sbi->s_dirs_counter);
        percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
        percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
#ifdef CONFIG_QUOTA
        for (i = 0; i < EXT4_MAXQUOTAS; i++)
                kfree(sbi->s_qf_names[i]);
#endif

        /* Debugging code just in case the in-memory inode orphan list
         * isn't empty.  The on-disk one can be non-empty if we've
         * detected an error and taken the fs readonly, but the
         * in-memory list had better be clean by this point. */
        if (!list_empty(&sbi->s_orphan))
                dump_orphan_list(sb, sbi);
        J_ASSERT(list_empty(&sbi->s_orphan));

        sync_blockdev(sb->s_bdev);
        invalidate_bdev(sb->s_bdev);
        if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
                /*
                 * Invalidate the journal device's buffers.  We don't want them
                 * floating about in memory - the physical journal device may
                 * be hotswapped, and it breaks the `ro-after' testing code.
                 */
                sync_blockdev(sbi->journal_bdev);
                invalidate_bdev(sbi->journal_bdev);
                ext4_blkdev_remove(sbi);
        }
        if (sbi->s_ea_inode_cache) {
                ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
                sbi->s_ea_inode_cache = NULL;
        }
        if (sbi->s_ea_block_cache) {
                ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
                sbi->s_ea_block_cache = NULL;
        }
        if (sbi->s_mmp_tsk)
                kthread_stop(sbi->s_mmp_tsk);
        brelse(sbi->s_sbh);
        sb->s_fs_info = NULL;
        /*
         * Now that we are completely done shutting down the
         * superblock, we need to actually destroy the kobject.
         */
        kobject_put(&sbi->s_kobj);
        wait_for_completion(&sbi->s_kobj_unregister);
        if (sbi->s_chksum_driver)
                crypto_free_shash(sbi->s_chksum_driver);
        kfree(sbi->s_blockgroup_lock);
        kfree(sbi);
}

static struct kmem_cache *ext4_inode_cachep;

/*
 * Called inside transaction, so use GFP_NOFS
 */
static struct inode *ext4_alloc_inode(struct super_block *sb)
{
        struct ext4_inode_info *ei;

        ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
        if (!ei)
                return NULL;

        ei->vfs_inode.i_version = 1;
        spin_lock_init(&ei->i_raw_lock);
        INIT_LIST_HEAD(&ei->i_prealloc_list);
        spin_lock_init(&ei->i_prealloc_lock);
        ext4_es_init_tree(&ei->i_es_tree);
        rwlock_init(&ei->i_es_lock);
        INIT_LIST_HEAD(&ei->i_es_list);
        ei->i_es_all_nr = 0;
        ei->i_es_shk_nr = 0;
        ei->i_es_shrink_lblk = 0;
        ei->i_reserved_data_blocks = 0;
        ei->i_reserved_meta_blocks = 0;
        ei->i_allocated_meta_blocks = 0;
        ei->i_da_metadata_calc_len = 0;
        ei->i_da_metadata_calc_last_lblock = 0;
        spin_lock_init(&(ei->i_block_reservation_lock));
#ifdef CONFIG_QUOTA
        ei->i_reserved_quota = 0;
        memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
#endif
        ei->jinode = NULL;
        INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
        spin_lock_init(&ei->i_completed_io_lock);
        ei->i_sync_tid = 0;
        ei->i_datasync_tid = 0;
        atomic_set(&ei->i_unwritten, 0);
        INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
        return &ei->vfs_inode;
}

static int ext4_drop_inode(struct inode *inode)
{
        int drop = generic_drop_inode(inode);

        trace_ext4_drop_inode(inode, drop);
        return drop;
}

static void ext4_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}

static void ext4_destroy_inode(struct inode *inode)
{
        if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
                ext4_msg(inode->i_sb, KERN_ERR,
                         "Inode %lu (%p): orphan list check failed!",
                         inode->i_ino, EXT4_I(inode));
                print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
                               EXT4_I(inode), sizeof(struct ext4_inode_info),
                               true);
                dump_stack();
        }
        call_rcu(&inode->i_rcu, ext4_i_callback);
}

static void init_once(void *foo)
{
        struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;

        INIT_LIST_HEAD(&ei->i_orphan);
        init_rwsem(&ei->xattr_sem);
        init_rwsem(&ei->i_data_sem);
        init_rwsem(&ei->i_mmap_sem);
        inode_init_once(&ei->vfs_inode);
}

static int __init init_inodecache(void)
{
        ext4_inode_cachep = kmem_cache_create("ext4_inode_cache",
                                              sizeof(struct ext4_inode_info),
                                              0, (SLAB_RECLAIM_ACCOUNT|
                                                  SLAB_MEM_SPREAD|SLAB_ACCOUNT),
                                              init_once);
        if (ext4_inode_cachep == NULL)
                return -ENOMEM;
        return 0;
}

static void destroy_inodecache(void)
{
        /*
         * Make sure all delayed rcu free inodes are flushed before we
         * destroy cache.
         */
        rcu_barrier();
        kmem_cache_destroy(ext4_inode_cachep);
}

void ext4_clear_inode(struct inode *inode)
{
        invalidate_inode_buffers(inode);
        clear_inode(inode);
        dquot_drop(inode);
        ext4_discard_preallocations(inode);
        ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
        if (EXT4_I(inode)->jinode) {
                jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
                                               EXT4_I(inode)->jinode);
                jbd2_free_inode(EXT4_I(inode)->jinode);
                EXT4_I(inode)->jinode = NULL;
        }
#ifdef CONFIG_EXT4_FS_ENCRYPTION
        fscrypt_put_encryption_info(inode, NULL);
#endif
}

static struct inode *ext4_nfs_get_inode(struct super_block *sb,
                                        u64 ino, u32 generation)
{
        struct inode *inode;

        if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
                return ERR_PTR(-ESTALE);
        if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
                return ERR_PTR(-ESTALE);

        /* iget isn't really right if the inode is currently unallocated!!
         *
         * ext4_read_inode will return a bad_inode if the inode had been
         * deleted, so we should be safe.
         *
         * Currently we don't know the generation for parent directory, so
         * a generation of 0 means "accept any"
         */
        inode = ext4_iget_normal(sb, ino);
        if (IS_ERR(inode))
                return ERR_CAST(inode);
        if (generation && inode->i_generation != generation) {
                iput(inode);
                return ERR_PTR(-ESTALE);
        }

        return inode;
}

static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
                                        int fh_len, int fh_type)
{
        return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
                                    ext4_nfs_get_inode);
}

static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
                                        int fh_len, int fh_type)
{
        return generic_fh_to_parent(sb, fid, fh_len, fh_type,
                                    ext4_nfs_get_inode);
}

/*
 * Try to release metadata pages (indirect blocks, directories) which are
 * mapped via the block device.  Since these pages could have journal heads
 * which would prevent try_to_free_buffers() from freeing them, we must use
 * jbd2 layer's try_to_free_buffers() function to release them.
 */
static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
                                 gfp_t wait)
{
        journal_t *journal = EXT4_SB(sb)->s_journal;

        WARN_ON(PageChecked(page));
        if (!page_has_buffers(page))
                return 0;
        if (journal)
                return jbd2_journal_try_to_free_buffers(journal, page,
                                                        wait & ~__GFP_DIRECT_RECLAIM);
        return try_to_free_buffers(page);
}

#ifdef CONFIG_EXT4_FS_ENCRYPTION
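/*
 * fscrypt hooks: the per-inode encryption context is stored as an
 * xattr in the EXT4_XATTR_INDEX_ENCRYPTION namespace.
 */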
static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
{
        return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
                              EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
}

static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
                            void *fs_data)
{
        handle_t *handle = fs_data;
        int res, res2, credits, retries = 0;

        res = ext4_convert_inline_data(inode);
        if (res)
                return res;

        /*
         * If a journal handle was specified, then the encryption context is
         * being set on a new inode via inheritance and is part of a larger
         * transaction to create the inode.  Otherwise the encryption context is
         * being set on an existing inode in its own transaction.  Only in the
         * latter case should the "retry on ENOSPC" logic be used.
         */

        if (handle) {
                res = ext4_xattr_set_handle(handle, inode,
                                            EXT4_XATTR_INDEX_ENCRYPTION,
                                            EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
                                            ctx, len, 0);
                if (!res) {
                        ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
                        ext4_clear_inode_state(inode,
                                               EXT4_STATE_MAY_INLINE_DATA);
                        /*
                         * Update inode->i_flags - e.g. S_DAX may get disabled
                         */
                        ext4_set_inode_flags(inode);
                }
                return res;
        }

        res = dquot_initialize(inode);
        if (res)
                return res;
retry:
        res = ext4_xattr_set_credits(inode, len, &credits);
        if (res)
                return res;

        handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        res = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_ENCRYPTION,
                                    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
                                    ctx, len, 0);
        if (!res) {
                ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
                /* Update inode->i_flags - e.g. S_DAX may get disabled */
                ext4_set_inode_flags(inode);
                res = ext4_mark_inode_dirty(handle, inode);
                if (res)
                        EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
        }
        res2 = ext4_journal_stop(handle);

        if (res == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
                goto retry;
        if (!res)
                res = res2;
        return res;
}

static int ext4_dummy_context(struct inode *inode)
{
        return DUMMY_ENCRYPTION_ENABLED(EXT4_SB(inode->i_sb));
}

static unsigned ext4_max_namelen(struct inode *inode)
{
        return S_ISLNK(inode->i_mode) ? inode->i_sb->s_blocksize :
                EXT4_NAME_LEN;
}

static const struct fscrypt_operations ext4_cryptops = {
        .key_prefix             = "ext4:",
        .get_context            = ext4_get_context,
        .set_context            = ext4_set_context,
        .dummy_context          = ext4_dummy_context,
        .is_encrypted           = ext4_encrypted_inode,
        .empty_dir              = ext4_empty_dir,
        .max_namelen            = ext4_max_namelen,
};
#else
static const struct fscrypt_operations ext4_cryptops = {
        .is_encrypted           = ext4_encrypted_inode,
};
#endif

#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])

static int ext4_write_dquot(struct dquot *dquot);
static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
                         const struct path *path);
static int ext4_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
                               size_t len, loff_t off);
static ssize_t ext4_quota_write(struct super_block *sb, int type,
                                const char *data, size_t len, loff_t off);
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
                             unsigned int flags);
static int ext4_enable_quotas(struct super_block *sb);
static int ext4_get_next_id(struct super_block *sb, struct kqid *qid);

static struct dquot **ext4_get_dquots(struct inode *inode)
{
        return EXT4_I(inode)->i_dquot;
}

static const struct dquot_operations ext4_quota_operations = {
        .get_reserved_space = ext4_get_reserved_space,
        .write_dquot    = ext4_write_dquot,
        .acquire_dquot  = ext4_acquire_dquot,
        .release_dquot  = ext4_release_dquot,
        .mark_dirty     = ext4_mark_dquot_dirty,
        .write_info     = ext4_write_info,
        .alloc_dquot    = dquot_alloc,
        .destroy_dquot  = dquot_destroy,
        .get_projid     = ext4_get_projid,
        .get_next_id    = ext4_get_next_id,
};

static const struct quotactl_ops ext4_qctl_operations = {
        .quota_on       = ext4_quota_on,
        .quota_off      = ext4_quota_off,
        .quota_sync     = dquot_quota_sync,
        .get_state      = dquot_get_state,
        .set_info       = dquot_set_dqinfo,
        .get_dqblk      = dquot_get_dqblk,
        .set_dqblk      = dquot_set_dqblk,
        .get_nextdqblk  = dquot_get_next_dqblk,
};
#endif

static const struct super_operations ext4_sops = {
        .alloc_inode    = ext4_alloc_inode,
        .destroy_inode  = ext4_destroy_inode,
        .write_inode    = ext4_write_inode,
        .dirty_inode    = ext4_dirty_inode,
        .drop_inode     = ext4_drop_inode,
        .evict_inode    = ext4_evict_inode,
        .put_super      = ext4_put_super,
        .sync_fs        = ext4_sync_fs,
        .freeze_fs      = ext4_freeze,
        .unfreeze_fs    = ext4_unfreeze,
        .statfs         = ext4_statfs,
        .remount_fs     = ext4_remount,
        .show_options   = ext4_show_options,
#ifdef CONFIG_QUOTA
        .quota_read     = ext4_quota_read,
        .quota_write    = ext4_quota_write,
        .get_dquots     = ext4_get_dquots,
#endif
        .bdev_try_to_free_page = bdev_try_to_free_page,
};

static const struct export_operations ext4_export_ops = {
        .fh_to_dentry = ext4_fh_to_dentry,
        .fh_to_parent = ext4_fh_to_parent,
        .get_parent = ext4_get_parent,
};

enum {
        Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
        Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
        Opt_nouid32, Opt_debug, Opt_removed,
        Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
        Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
        Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
        Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
        Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
        Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
        Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
        Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
        Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
        Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax,
        Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
        Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize,
        Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
        Opt_inode_readahead_blks, Opt_journal_ioprio,
        Opt_dioread_nolock, Opt_dioread_lock,
        Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
        Opt_max_dir_size_kb, Opt_nojournal_checksum,
};

static const match_table_t tokens = {
        {Opt_bsd_df, "bsddf"},
        {Opt_minix_df, "minixdf"},
        {Opt_grpid, "grpid"},
        {Opt_grpid, "bsdgroups"},
        {Opt_nogrpid, "nogrpid"},
        {Opt_nogrpid, "sysvgroups"},
        {Opt_resgid, "resgid=%u"},
        {Opt_resuid, "resuid=%u"},
        {Opt_sb, "sb=%u"},
        {Opt_err_cont, "errors=continue"},
        {Opt_err_panic, "errors=panic"},
        {Opt_err_ro, "errors=remount-ro"},
        {Opt_nouid32, "nouid32"},
        {Opt_debug, "debug"},
        {Opt_removed, "oldalloc"},
        {Opt_removed, "orlov"},
        {Opt_user_xattr, "user_xattr"},
        {Opt_nouser_xattr, "nouser_xattr"},
        {Opt_acl, "acl"},
        {Opt_noacl, "noacl"},
        {Opt_noload, "norecovery"},
        {Opt_noload, "noload"},
        {Opt_removed, "nobh"},
        {Opt_removed, "bh"},
        {Opt_commit, "commit=%u"},
        {Opt_min_batch_time, "min_batch_time=%u"},
        {Opt_max_batch_time, "max_batch_time=%u"},
        {Opt_journal_dev, "journal_dev=%u"},
        {Opt_journal_path, "journal_path=%s"},
        {Opt_journal_checksum, "journal_checksum"},
        {Opt_nojournal_checksum, "nojournal_checksum"},
        {Opt_journal_async_commit, "journal_async_commit"},
        {Opt_abort, "abort"},
        {Opt_data_journal, "data=journal"},
        {Opt_data_ordered, "data=ordered"},
        {Opt_data_writeback, "data=writeback"},
        {Opt_data_err_abort, "data_err=abort"},
        {Opt_data_err_ignore, "data_err=ignore"},
        {Opt_offusrjquota, "usrjquota="},
        {Opt_usrjquota, "usrjquota=%s"},
        {Opt_offgrpjquota, "grpjquota="},
        {Opt_grpjquota, "grpjquota=%s"},
        {Opt_jqfmt_vfsold, "jqfmt=vfsold"},
        {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
        {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
        {Opt_grpquota, "grpquota"},
        {Opt_noquota, "noquota"},
        {Opt_quota, "quota"},
        {Opt_usrquota, "usrquota"},
        {Opt_prjquota, "prjquota"},
        {Opt_barrier, "barrier=%u"},
        {Opt_barrier, "barrier"},
        {Opt_nobarrier, "nobarrier"},
        {Opt_i_version, "i_version"},
        {Opt_dax, "dax"},
        {Opt_stripe, "stripe=%u"},
        {Opt_delalloc, "delalloc"},
        {Opt_lazytime, "lazytime"},
        {Opt_nolazytime, "nolazytime"},
        {Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"},
        {Opt_nodelalloc, "nodelalloc"},
        {Opt_removed, "mblk_io_submit"},
        {Opt_removed, "nomblk_io_submit"},
        {Opt_block_validity, "block_validity"},
        {Opt_noblock_validity, "noblock_validity"},
        {Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
        {Opt_journal_ioprio, "journal_ioprio=%u"},
        {Opt_auto_da_alloc, "auto_da_alloc=%u"},
        {Opt_auto_da_alloc, "auto_da_alloc"},
        {Opt_noauto_da_alloc, "noauto_da_alloc"},
        {Opt_dioread_nolock, "dioread_nolock"},
        {Opt_dioread_lock, "dioread_lock"},
        {Opt_discard, "discard"},
        {Opt_nodiscard, "nodiscard"},
        {Opt_init_itable, "init_itable=%u"},
        {Opt_init_itable, "init_itable"},
        {Opt_noinit_itable, "noinit_itable"},
        {Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
        {Opt_test_dummy_encryption, "test_dummy_encryption"},
        {Opt_removed, "check=none"},    /* mount option from ext2/3 */
        {Opt_removed, "nocheck"},       /* mount option from ext2/3 */
        {Opt_removed, "reservation"},   /* mount option from ext2/3 */
        {Opt_removed, "noreservation"}, /* mount option from ext2/3 */
        {Opt_removed, "journal=%u"},    /* mount option from ext2/3 */
        {Opt_err, NULL},
};

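/*
 * Parse and consume a leading "sb=" mount option selecting an
 * alternate (backup) superblock; returns 1, the default location,
 * when no such option is present.
 */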
static ext4_fsblk_t get_sb_block(void **data)
{
        ext4_fsblk_t sb_block;
        char *options = (char *) *data;

        if (!options || strncmp(options, "sb=", 3) != 0)
                return 1;       /* Default location */

        options += 3;
        /* TODO: use simple_strtoll with >32bit ext4 */
        sb_block = simple_strtoul(options, &options, 0);
        if (*options && *options != ',') {
                printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n",
                       (char *) *data);
                return 1;
        }
        if (*options == ',')
                options++;
        *data = (void *) options;

        return sb_block;
}

#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
static const char deprecated_msg[] =
        "Mount option \"%s\" will be removed by %s\n"
        "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";

#ifdef CONFIG_QUOTA
static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        char *qname;
        int ret = -1;

        if (sb_any_quota_loaded(sb) &&
            !sbi->s_qf_names[qtype]) {
                ext4_msg(sb, KERN_ERR,
                         "Cannot change journaled "
                         "quota options when quota turned on");
                return -1;
        }
        if (ext4_has_feature_quota(sb)) {
                ext4_msg(sb, KERN_INFO, "Journaled quota options "
                         "ignored when QUOTA feature is enabled");
                return 1;
        }
        qname = match_strdup(args);
        if (!qname) {
                ext4_msg(sb, KERN_ERR,
                         "Not enough memory for storing quotafile name");
                return -1;
        }
        if (sbi->s_qf_names[qtype]) {
                if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
                        ret = 1;
                else
                        ext4_msg(sb, KERN_ERR,
                                 "%s quota file already specified",
                                 QTYPE2NAME(qtype));
                goto errout;
        }
        if (strchr(qname, '/')) {
                ext4_msg(sb, KERN_ERR,
                         "quotafile must be on filesystem root");
                goto errout;
        }
        sbi->s_qf_names[qtype] = qname;
        set_opt(sb, QUOTA);
        return 1;
errout:
        kfree(qname);
        return ret;
}

static int clear_qf_name(struct super_block *sb, int qtype)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        if (sb_any_quota_loaded(sb) &&
            sbi->s_qf_names[qtype]) {
                ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
                         " when quota turned on");
                return -1;
        }
        kfree(sbi->s_qf_names[qtype]);
        sbi->s_qf_names[qtype] = NULL;
        return 1;
}
#endif

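/*
 * Flags describing how the entries in ext4_mount_opts[] below are
 * parsed and applied: MOPT_SET/MOPT_CLEAR toggle bits in s_mount_opt,
 * MOPT_GTE0 requires a non-negative integer argument, MOPT_STRING
 * takes a string argument, MOPT_Q/MOPT_QFMT are quota-only, and
 * MOPT_NO_EXT2/MOPT_NO_EXT3 reject the option when the filesystem is
 * mounted as ext2 or ext3.
 */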
#define MOPT_SET        0x0001
#define MOPT_CLEAR      0x0002
#define MOPT_NOSUPPORT  0x0004
#define MOPT_EXPLICIT   0x0008
#define MOPT_CLEAR_ERR  0x0010
#define MOPT_GTE0       0x0020
#ifdef CONFIG_QUOTA
#define MOPT_Q          0
#define MOPT_QFMT       0x0040
#else
#define MOPT_Q          MOPT_NOSUPPORT
#define MOPT_QFMT       MOPT_NOSUPPORT
#endif
#define MOPT_DATAJ      0x0080
#define MOPT_NO_EXT2    0x0100
#define MOPT_NO_EXT3    0x0200
#define MOPT_EXT4_ONLY  (MOPT_NO_EXT2 | MOPT_NO_EXT3)
#define MOPT_STRING     0x0400

static const struct mount_opts {
        int     token;
        int     mount_opt;
        int     flags;
} ext4_mount_opts[] = {
        {Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
        {Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
        {Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
        {Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
        {Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
        {Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
        {Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
         MOPT_EXT4_ONLY | MOPT_SET},
        {Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
         MOPT_EXT4_ONLY | MOPT_CLEAR},
        {Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
        {Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
        {Opt_delalloc, EXT4_MOUNT_DELALLOC,
         MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
        {Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
         MOPT_EXT4_ONLY | MOPT_CLEAR},
        {Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
         MOPT_EXT4_ONLY | MOPT_CLEAR},
        {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
         MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
        {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
                                    EXT4_MOUNT_JOURNAL_CHECKSUM),
         MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
        {Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
        {Opt_err_panic, EXT4_MOUNT_ERRORS_PANIC, MOPT_SET | MOPT_CLEAR_ERR},
        {Opt_err_ro, EXT4_MOUNT_ERRORS_RO, MOPT_SET | MOPT_CLEAR_ERR},
        {Opt_err_cont, EXT4_MOUNT_ERRORS_CONT, MOPT_SET | MOPT_CLEAR_ERR},
        {Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT,
         MOPT_NO_EXT2},
        {Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT,
         MOPT_NO_EXT2},
        {Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
        {Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
        {Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
        {Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
        {Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
        {Opt_commit, 0, MOPT_GTE0},
        {Opt_max_batch_time, 0, MOPT_GTE0},
        {Opt_min_batch_time, 0, MOPT_GTE0},
        {Opt_inode_readahead_blks, 0, MOPT_GTE0},
        {Opt_init_itable, 0, MOPT_GTE0},
        {Opt_dax, EXT4_MOUNT_DAX, MOPT_SET},
        {Opt_stripe, 0, MOPT_GTE0},
        {Opt_resuid, 0, MOPT_GTE0},
        {Opt_resgid, 0, MOPT_GTE0},
        {Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0},
        {Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING},
        {Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0},
        {Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
        {Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
        {Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA,
         MOPT_NO_EXT2 | MOPT_DATAJ},
        {Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
        {Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR},
#ifdef CONFIG_EXT4_FS_POSIX_ACL
        {Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
        {Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR},
#else
        {Opt_acl, 0, MOPT_NOSUPPORT},
        {Opt_noacl, 0, MOPT_NOSUPPORT},
#endif
        {Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
        {Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
        {Opt_debug_want_extra_isize, 0, MOPT_GTE0},
        {Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
        {Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
         MOPT_SET | MOPT_Q},
        {Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
         MOPT_SET | MOPT_Q},
        {Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
         MOPT_SET | MOPT_Q},
        {Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
                       EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
         MOPT_CLEAR | MOPT_Q},
        {Opt_usrjquota, 0, MOPT_Q},
        {Opt_grpjquota, 0, MOPT_Q},
        {Opt_offusrjquota, 0, MOPT_Q},
        {Opt_offgrpjquota, 0, MOPT_Q},
        {Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
        {Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
        {Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
        {Opt_max_dir_size_kb, 0, MOPT_GTE0},
        {Opt_test_dummy_encryption, 0, MOPT_GTE0},
        {Opt_err, 0, 0}
};

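/*
 * Parse a single mount option token; returns 1 on success and -1 on
 * error so that the caller can fail the mount (or remount).
 */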
static int handle_mount_opt(struct super_block *sb, char *opt, int token,
                            substring_t *args, unsigned long *journal_devnum,
                            unsigned int *journal_ioprio, int is_remount)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        const struct mount_opts *m;
        kuid_t uid;
        kgid_t gid;
        int arg = 0;

#ifdef CONFIG_QUOTA
        if (token == Opt_usrjquota)
                return set_qf_name(sb, USRQUOTA, &args[0]);
        else if (token == Opt_grpjquota)
                return set_qf_name(sb, GRPQUOTA, &args[0]);
        else if (token == Opt_offusrjquota)
                return clear_qf_name(sb, USRQUOTA);
        else if (token == Opt_offgrpjquota)
                return clear_qf_name(sb, GRPQUOTA);
#endif
        switch (token) {
        case Opt_noacl:
        case Opt_nouser_xattr:
                ext4_msg(sb, KERN_WARNING, deprecated_msg, opt, "3.5");
                break;
        case Opt_sb:
                return 1;       /* handled by get_sb_block() */
        case Opt_removed:
                ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt);
                return 1;
        case Opt_abort:
                sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
                return 1;
        case Opt_i_version:
                sb->s_flags |= MS_I_VERSION;
                return 1;
        case Opt_lazytime:
                sb->s_flags |= MS_LAZYTIME;
                return 1;
        case Opt_nolazytime:
                sb->s_flags &= ~MS_LAZYTIME;
                return 1;
        }

        for (m = ext4_mount_opts; m->token != Opt_err; m++)
                if (token == m->token)
                        break;

        if (m->token == Opt_err) {
                ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" "
                         "or missing value", opt);
                return -1;
        }

        if ((m->flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
                ext4_msg(sb, KERN_ERR,
                         "Mount option \"%s\" incompatible with ext2", opt);
                return -1;
        }
        if ((m->flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
                ext4_msg(sb, KERN_ERR,
                         "Mount option \"%s\" incompatible with ext3", opt);
                return -1;
        }

        if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg))
                return -1;
        if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
                return -1;
        if (m->flags & MOPT_EXPLICIT) {
                if (m->mount_opt & EXT4_MOUNT_DELALLOC) {
                        set_opt2(sb, EXPLICIT_DELALLOC);
                } else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) {
                        set_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM);
                } else
                        return -1;
        }
        if (m->flags & MOPT_CLEAR_ERR)
                clear_opt(sb, ERRORS_MASK);
        if (token == Opt_noquota && sb_any_quota_loaded(sb)) {
                ext4_msg(sb, KERN_ERR, "Cannot change quota "
                         "options when quota turned on");
                return -1;
        }

        if (m->flags & MOPT_NOSUPPORT) {
                ext4_msg(sb, KERN_ERR, "%s option not supported", opt);
        } else if (token == Opt_commit) {
                if (arg == 0)
                        arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
                sbi->s_commit_interval = HZ * arg;
        } else if (token == Opt_debug_want_extra_isize) {
                sbi->s_want_extra_isize = arg;
        } else if (token == Opt_max_batch_time) {
                sbi->s_max_batch_time = arg;
        } else if (token == Opt_min_batch_time) {
                sbi->s_min_batch_time = arg;
        } else if (token == Opt_inode_readahead_blks) {
                if (arg && (arg > (1 << 30) || !is_power_of_2(arg))) {
                        ext4_msg(sb, KERN_ERR,
                                 "EXT4-fs: inode_readahead_blks must be "
                                 "0 or a power of 2 smaller than 2^31");
                        return -1;
                }
                sbi->s_inode_readahead_blks = arg;
        } else if (token == Opt_init_itable) {
                set_opt(sb, INIT_INODE_TABLE);
                if (!args->from)
                        arg = EXT4_DEF_LI_WAIT_MULT;
                sbi->s_li_wait_mult = arg;
        } else if (token == Opt_max_dir_size_kb) {
                sbi->s_max_dir_size_kb = arg;
        } else if (token == Opt_stripe) {
                sbi->s_stripe = arg;
        } else if (token == Opt_resuid) {
                uid = make_kuid(current_user_ns(), arg);
                if (!uid_valid(uid)) {
                        ext4_msg(sb, KERN_ERR, "Invalid uid value %d", arg);
                        return -1;
                }
                sbi->s_resuid = uid;
        } else if (token == Opt_resgid) {
                gid = make_kgid(current_user_ns(), arg);
                if (!gid_valid(gid)) {
                        ext4_msg(sb, KERN_ERR, "Invalid gid value %d", arg);
                        return -1;
                }
                sbi->s_resgid = gid;
        } else if (token == Opt_journal_dev) {
                if (is_remount) {
                        ext4_msg(sb, KERN_ERR,
                                 "Cannot specify journal on remount");
                        return -1;
                }
                *journal_devnum = arg;
        } else if (token == Opt_journal_path) {
                char *journal_path;
                struct inode *journal_inode;
                struct path path;
                int error;

                if (is_remount) {
                        ext4_msg(sb, KERN_ERR,
                                 "Cannot specify journal on remount");
                        return -1;
                }
                journal_path = match_strdup(&args[0]);
                if (!journal_path) {
                        ext4_msg(sb, KERN_ERR, "error: could not dup "
                                 "journal device string");
                        return -1;
                }

                error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
                if (error) {
                        ext4_msg(sb, KERN_ERR, "error: could not find "
                                 "journal device path: error %d", error);
                        kfree(journal_path);
                        return -1;
                }

                journal_inode = d_inode(path.dentry);
                if (!S_ISBLK(journal_inode->i_mode)) {
                        ext4_msg(sb, KERN_ERR, "error: journal path %s "
                                 "is not a block device", journal_path);
                        path_put(&path);
                        kfree(journal_path);
                        return -1;
                }

                *journal_devnum = new_encode_dev(journal_inode->i_rdev);
                path_put(&path);
                kfree(journal_path);
        } else if (token == Opt_journal_ioprio) {
                if (arg > 7) {
                        ext4_msg(sb, KERN_ERR, "Invalid journal IO priority"
                                 " (must be 0-7)");
                        return -1;
                }
                *journal_ioprio =
                        IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
        } else if (token == Opt_test_dummy_encryption) {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
                sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION;
                ext4_msg(sb, KERN_WARNING,
                         "Test dummy encryption mode enabled");
#else
                ext4_msg(sb, KERN_WARNING,
                         "Test dummy encryption mount option ignored");
#endif
        } else if (m->flags & MOPT_DATAJ) {
                if (is_remount) {
                        if (!sbi->s_journal)
                                ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
                        else if (test_opt(sb, DATA_FLAGS) != m->mount_opt) {
                                ext4_msg(sb, KERN_ERR,
                                         "Cannot change data mode on remount");
                                return -1;
                        }
                } else {
                        clear_opt(sb, DATA_FLAGS);
                        sbi->s_mount_opt |= m->mount_opt;
                }
#ifdef CONFIG_QUOTA
        } else if (m->flags & MOPT_QFMT) {
                if (sb_any_quota_loaded(sb) &&
                    sbi->s_jquota_fmt != m->mount_opt) {
                        ext4_msg(sb, KERN_ERR, "Cannot change journaled "
                                 "quota options when quota turned on");
                        return -1;
                }
                if (ext4_has_feature_quota(sb)) {
                        ext4_msg(sb, KERN_INFO,
                                 "Quota format mount options ignored "
                                 "when QUOTA feature is enabled");
                        return 1;
                }
                sbi->s_jquota_fmt = m->mount_opt;
#endif
        } else if (token == Opt_dax) {
#ifdef CONFIG_FS_DAX
                ext4_msg(sb, KERN_WARNING,
                         "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
1854 sbi->s_mount_opt |= m->mount_opt;
1855 #else
1856 ext4_msg(sb, KERN_INFO, "dax option not supported");
1857 return -1;
1858 #endif
1859 } else if (token == Opt_data_err_abort) {
1860 sbi->s_mount_opt |= m->mount_opt;
1861 } else if (token == Opt_data_err_ignore) {
1862 sbi->s_mount_opt &= ~m->mount_opt;
1863 } else {
1864 if (!args->from)
1865 arg = 1;
1866 if (m->flags & MOPT_CLEAR)
1867 arg = !arg;
1868 else if (unlikely(!(m->flags & MOPT_SET))) {
1869 ext4_msg(sb, KERN_WARNING,
1870 "buggy handling of option %s", opt);
1871 WARN_ON(1);
1872 return -1;
1873 }
1874 if (arg != 0)
1875 sbi->s_mount_opt |= m->mount_opt;
1876 else
1877 sbi->s_mount_opt &= ~m->mount_opt;
1878 }
1879 return 1;
1880 }
1881
1882 static int parse_options(char *options, struct super_block *sb,
1883 unsigned long *journal_devnum,
1884 unsigned int *journal_ioprio,
1885 int is_remount)
1886 {
1887 struct ext4_sb_info *sbi = EXT4_SB(sb);
1888 char *p;
1889 substring_t args[MAX_OPT_ARGS];
1890 int token;
1891
1892 if (!options)
1893 return 1;
1894
1895 while ((p = strsep(&options, ",")) != NULL) {
1896 if (!*p)
1897 continue;
1898 /*
1899 * Initialize args struct so we know whether arg was
1900 * found; some options take optional arguments.
1901 */
1902 args[0].to = args[0].from = NULL;
1903 token = match_token(p, tokens, args);
1904 if (handle_mount_opt(sb, p, token, args, journal_devnum,
1905 journal_ioprio, is_remount) < 0)
1906 return 0;
1907 }
1908 #ifdef CONFIG_QUOTA
1909 /*
1910 * We do the test below only for project quotas. 'usrquota' and
1911 * 'grpquota' mount options are allowed even without quota feature
1912 * to support legacy quotas in quota files.
1913 */
1914 if (test_opt(sb, PRJQUOTA) && !ext4_has_feature_project(sb)) {
1915 ext4_msg(sb, KERN_ERR, "Project quota feature not enabled. "
1916 "Cannot enable project quota enforcement.");
1917 return 0;
1918 }
1919 if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
1920 if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
1921 clear_opt(sb, USRQUOTA);
1922
1923 if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
1924 clear_opt(sb, GRPQUOTA);
1925
1926 if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
1927 ext4_msg(sb, KERN_ERR, "old and new quota "
1928 "format mixing");
1929 return 0;
1930 }
1931
1932 if (!sbi->s_jquota_fmt) {
1933 ext4_msg(sb, KERN_ERR, "journaled quota format "
1934 "not specified");
1935 return 0;
1936 }
1937 }
1938 #endif
1939 if (test_opt(sb, DIOREAD_NOLOCK)) {
1940 int blocksize =
1941 BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
1942
1943 if (blocksize < PAGE_SIZE) {
1944 ext4_msg(sb, KERN_ERR, "can't mount with "
1945 "dioread_nolock if block size != PAGE_SIZE");
1946 return 0;
1947 }
1948 }
1949 return 1;
1950 }
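/*
 * Worked example (option values are illustrative): for
 *
 *     mount -t ext4 -o commit=30,data=ordered,journal_ioprio=3 /dev/sdX /mnt
 *
 * parse_options() receives "commit=30,data=ordered,journal_ioprio=3",
 * strsep() splits it at each ',', match_token() maps "commit=30" to
 * Opt_commit with args[0] spanning "30", and handle_mount_opt() stores
 * HZ * 30 in sbi->s_commit_interval, sets EXT4_MOUNT_ORDERED_DATA in
 * s_mount_opt, and encodes IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3) into
 * *journal_ioprio. Any handler returning a negative value makes
 * parse_options() fail the whole mount.
 */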
1951
1952 static inline void ext4_show_quota_options(struct seq_file *seq,
1953 struct super_block *sb)
1954 {
1955 #if defined(CONFIG_QUOTA)
1956 struct ext4_sb_info *sbi = EXT4_SB(sb);
1957
1958 if (sbi->s_jquota_fmt) {
1959 char *fmtname = "";
1960
1961 switch (sbi->s_jquota_fmt) {
1962 case QFMT_VFS_OLD:
1963 fmtname = "vfsold";
1964 break;
1965 case QFMT_VFS_V0:
1966 fmtname = "vfsv0";
1967 break;
1968 case QFMT_VFS_V1:
1969 fmtname = "vfsv1";
1970 break;
1971 }
1972 seq_printf(seq, ",jqfmt=%s", fmtname);
1973 }
1974
1975 if (sbi->s_qf_names[USRQUOTA])
1976 seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);
1977
1978 if (sbi->s_qf_names[GRPQUOTA])
1979 seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
1980 #endif
1981 }
1982
1983 static const char *token2str(int token)
1984 {
1985 const struct match_token *t;
1986
1987 for (t = tokens; t->token != Opt_err; t++)
1988 if (t->token == token && !strchr(t->pattern, '='))
1989 break;
1990 return t->pattern;
1991 }
1992
1993 /*
1994 * Show an option if
1995 * - it's set to a non-default value OR
1996 * - the per-sb default is different from the global default
1997 */
1998 static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
1999 int nodefs)
2000 {
2001 struct ext4_sb_info *sbi = EXT4_SB(sb);
2002 struct ext4_super_block *es = sbi->s_es;
2003 int def_errors, def_mount_opt = nodefs ? 0 : sbi->s_def_mount_opt;
2004 const struct mount_opts *m;
2005 char sep = nodefs ? '\n' : ',';
2006
2007 #define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
2008 #define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)
2009
2010 if (sbi->s_sb_block != 1)
2011 SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);
2012
2013 for (m = ext4_mount_opts; m->token != Opt_err; m++) {
2014 int want_set = m->flags & MOPT_SET;
2015 if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
2016 (m->flags & MOPT_CLEAR_ERR))
2017 continue;
2018 if (!(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
2019 continue; /* skip if same as the default */
2020 if ((want_set &&
2021 (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) ||
2022 (!want_set && (sbi->s_mount_opt & m->mount_opt)))
2023 continue; /* select Opt_noFoo vs Opt_Foo */
2024 SEQ_OPTS_PRINT("%s", token2str(m->token));
2025 }
2026
2027 if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
2028 le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID)
2029 SEQ_OPTS_PRINT("resuid=%u",
2030 from_kuid_munged(&init_user_ns, sbi->s_resuid));
2031 if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
2032 le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID)
2033 SEQ_OPTS_PRINT("resgid=%u",
2034 from_kgid_munged(&init_user_ns, sbi->s_resgid));
2035 def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
2036 if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
2037 SEQ_OPTS_PUTS("errors=remount-ro");
2038 if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
2039 SEQ_OPTS_PUTS("errors=continue");
2040 if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
2041 SEQ_OPTS_PUTS("errors=panic");
2042 if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
2043 SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
2044 if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
2045 SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
2046 if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
2047 SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
2048 if (sb->s_flags & MS_I_VERSION)
2049 SEQ_OPTS_PUTS("i_version");
2050 if (nodefs || sbi->s_stripe)
2051 SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
2052 if (EXT4_MOUNT_DATA_FLAGS & (sbi->s_mount_opt ^ def_mount_opt)) {
2053 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
2054 SEQ_OPTS_PUTS("data=journal");
2055 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
2056 SEQ_OPTS_PUTS("data=ordered");
2057 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
2058 SEQ_OPTS_PUTS("data=writeback");
2059 }
2060 if (nodefs ||
2061 sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
2062 SEQ_OPTS_PRINT("inode_readahead_blks=%u",
2063 sbi->s_inode_readahead_blks);
2064
2065 if (nodefs || (test_opt(sb, INIT_INODE_TABLE) &&
2066 (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
2067 SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
2068 if (nodefs || sbi->s_max_dir_size_kb)
2069 SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
2070 if (test_opt(sb, DATA_ERR_ABORT))
2071 SEQ_OPTS_PUTS("data_err=abort");
2072
2073 ext4_show_quota_options(seq, sb);
2074 return 0;
2075 }
2076
2077 static int ext4_show_options(struct seq_file *seq, struct dentry *root)
2078 {
2079 return _ext4_show_options(seq, root->d_sb, 0);
2080 }
2081
2082 int ext4_seq_options_show(struct seq_file *seq, void *offset)
2083 {
2084 struct super_block *sb = seq->private;
2085 int rc;
2086
2087 seq_puts(seq, (sb->s_flags & MS_RDONLY) ? "ro" : "rw");
2088 rc = _ext4_show_options(seq, sb, 1);
2089 seq_puts(seq, "\n");
2090 return rc;
2091 }
2092
2093 static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
2094 int read_only)
2095 {
2096 struct ext4_sb_info *sbi = EXT4_SB(sb);
2097 int res = 0;
2098
2099 if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
2100 ext4_msg(sb, KERN_ERR, "revision level too high, "
2101 "forcing read-only mode");
2102 res = MS_RDONLY;
2103 }
2104 if (read_only)
2105 goto done;
2106 if (!(sbi->s_mount_state & EXT4_VALID_FS))
2107 ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
2108 "running e2fsck is recommended");
2109 else if (sbi->s_mount_state & EXT4_ERROR_FS)
2110 ext4_msg(sb, KERN_WARNING,
2111 "warning: mounting fs with errors, "
2112 "running e2fsck is recommended");
2113 else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
2114 le16_to_cpu(es->s_mnt_count) >=
2115 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
2116 ext4_msg(sb, KERN_WARNING,
2117 "warning: maximal mount count reached, "
2118 "running e2fsck is recommended");
2119 else if (le32_to_cpu(es->s_checkinterval) &&
2120 (le32_to_cpu(es->s_lastcheck) +
2121 le32_to_cpu(es->s_checkinterval) <= get_seconds()))
2122 ext4_msg(sb, KERN_WARNING,
2123 "warning: checktime reached, "
2124 "running e2fsck is recommended");
2125 if (!sbi->s_journal)
2126 es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
2127 if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
2128 es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
2129 le16_add_cpu(&es->s_mnt_count, 1);
2130 es->s_mtime = cpu_to_le32(get_seconds());
2131 ext4_update_dynamic_rev(sb);
2132 if (sbi->s_journal)
2133 ext4_set_feature_journal_needs_recovery(sb);
2134
2135 ext4_commit_super(sb, 1);
2136 done:
2137 if (test_opt(sb, DEBUG))
2138 printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
2139 "bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
2140 sb->s_blocksize,
2141 sbi->s_groups_count,
2142 EXT4_BLOCKS_PER_GROUP(sb),
2143 EXT4_INODES_PER_GROUP(sb),
2144 sbi->s_mount_opt, sbi->s_mount_opt2);
2145
2146 cleancache_init_fs(sb);
2147 return res;
2148 }
2149
2150 int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
2151 {
2152 struct ext4_sb_info *sbi = EXT4_SB(sb);
2153 struct flex_groups *new_groups;
2154 int size;
2155
2156 if (!sbi->s_log_groups_per_flex)
2157 return 0;
2158
2159 size = ext4_flex_group(sbi, ngroup - 1) + 1;
2160 if (size <= sbi->s_flex_groups_allocated)
2161 return 0;
2162
2163 size = roundup_pow_of_two(size * sizeof(struct flex_groups));
2164 new_groups = kvzalloc(size, GFP_KERNEL);
2165 if (!new_groups) {
2166 ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
2167 size / (int) sizeof(struct flex_groups));
2168 return -ENOMEM;
2169 }
2170
2171 if (sbi->s_flex_groups) {
2172 memcpy(new_groups, sbi->s_flex_groups,
2173 (sbi->s_flex_groups_allocated *
2174 sizeof(struct flex_groups)));
2175 kvfree(sbi->s_flex_groups);
2176 }
2177 sbi->s_flex_groups = new_groups;
2178 sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
2179 return 0;
2180 }
2181
2182 static int ext4_fill_flex_info(struct super_block *sb)
2183 {
2184 struct ext4_sb_info *sbi = EXT4_SB(sb);
2185 struct ext4_group_desc *gdp = NULL;
2186 ext4_group_t flex_group;
2187 int i, err;
2188
2189 sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
2190 if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
2191 sbi->s_log_groups_per_flex = 0;
2192 return 1;
2193 }
2194
2195 err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
2196 if (err)
2197 goto failed;
2198
2199 for (i = 0; i < sbi->s_groups_count; i++) {
2200 gdp = ext4_get_group_desc(sb, i, NULL);
2201
2202 flex_group = ext4_flex_group(sbi, i);
2203 atomic_add(ext4_free_inodes_count(sb, gdp),
2204 &sbi->s_flex_groups[flex_group].free_inodes);
2205 atomic64_add(ext4_free_group_clusters(sb, gdp),
2206 &sbi->s_flex_groups[flex_group].free_clusters);
2207 atomic_add(ext4_used_dirs_count(sb, gdp),
2208 &sbi->s_flex_groups[flex_group].used_dirs);
2209 }
2210
2211 return 1;
2212 failed:
2213 return 0;
2214 }
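/*
 * Example: with s_log_groups_per_flex == 4, groups 0-15 map to
 * flex_groups[0], groups 16-31 to flex_groups[1], and so on
 * (ext4_flex_group() is essentially i >> s_log_groups_per_flex).
 * Aggregating the free-inode, free-cluster and used-directory counts
 * per flex group is what lets the allocators treat each flex group as
 * one large allocation unit.
 */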
2215
2216 static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
2217 struct ext4_group_desc *gdp)
2218 {
2219 int offset = offsetof(struct ext4_group_desc, bg_checksum);
2220 __u16 crc = 0;
2221 __le32 le_group = cpu_to_le32(block_group);
2222 struct ext4_sb_info *sbi = EXT4_SB(sb);
2223
2224 if (ext4_has_metadata_csum(sbi->s_sb)) {
2225 /* Use new metadata_csum algorithm */
2226 __u32 csum32;
2227 __u16 dummy_csum = 0;
2228
2229 csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
2230 sizeof(le_group));
2231 csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
2232 csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
2233 sizeof(dummy_csum));
2234 offset += sizeof(dummy_csum);
2235 if (offset < sbi->s_desc_size)
2236 csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
2237 sbi->s_desc_size - offset);
2238
2239 crc = csum32 & 0xFFFF;
2240 goto out;
2241 }
2242
2243 /* old crc16 code */
2244 if (!ext4_has_feature_gdt_csum(sb))
2245 return 0;
2246
2247 crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
2248 crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
2249 crc = crc16(crc, (__u8 *)gdp, offset);
2250 offset += sizeof(gdp->bg_checksum); /* skip checksum */
2251 /* for checksum of struct ext4_group_desc do the rest...*/
2252 if (ext4_has_feature_64bit(sb) &&
2253 offset < le16_to_cpu(sbi->s_es->s_desc_size))
2254 crc = crc16(crc, (__u8 *)gdp + offset,
2255 le16_to_cpu(sbi->s_es->s_desc_size) -
2256 offset);
2257
2258 out:
2259 return cpu_to_le16(crc);
2260 }
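/*
 * A minimal user-space sketch of the "dummy checksum" trick used by
 * the metadata_csum branch above (the toy_* names and the toy CRC are
 * illustrative only, not kernel API): the stored checksum field is
 * replaced by zero bytes in the checksum stream, so verification is
 * simply "recompute and compare".
 */
#if 0	/* illustrative sketch, not built */
struct toy_desc {
	unsigned char	head[30];	/* fields before the checksum */
	unsigned short	checksum;	/* plays the role of bg_checksum */
	unsigned char	tail[32];	/* fields after it */
};

static unsigned int toy_crc(unsigned int crc, const void *p,
			    unsigned long len)
{
	const unsigned char *b = p;

	while (len--)			/* any CRC would do here */
		crc = (crc << 5) + crc + *b++;
	return crc;
}

static unsigned short toy_desc_csum(const struct toy_desc *d)
{
	unsigned short dummy = 0;
	unsigned int crc;

	crc = toy_crc(~0u, d->head, sizeof(d->head));
	crc = toy_crc(crc, &dummy, sizeof(dummy));	/* zeroed field */
	crc = toy_crc(crc, d->tail, sizeof(d->tail));
	return crc & 0xffff;	/* fold the 32-bit value to 16 bits */
}
#endif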
2261
2262 int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
2263 struct ext4_group_desc *gdp)
2264 {
2265 if (ext4_has_group_desc_csum(sb) &&
2266 (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
2267 return 0;
2268
2269 return 1;
2270 }
2271
2272 void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
2273 struct ext4_group_desc *gdp)
2274 {
2275 if (!ext4_has_group_desc_csum(sb))
2276 return;
2277 gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
2278 }
2279
2280 /* Called at mount-time, super-block is locked */
2281 static int ext4_check_descriptors(struct super_block *sb,
2282 ext4_fsblk_t sb_block,
2283 ext4_group_t *first_not_zeroed)
2284 {
2285 struct ext4_sb_info *sbi = EXT4_SB(sb);
2286 ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
2287 ext4_fsblk_t last_block;
2288 ext4_fsblk_t block_bitmap;
2289 ext4_fsblk_t inode_bitmap;
2290 ext4_fsblk_t inode_table;
2291 int flexbg_flag = 0;
2292 ext4_group_t i, grp = sbi->s_groups_count;
2293
2294 if (ext4_has_feature_flex_bg(sb))
2295 flexbg_flag = 1;
2296
2297 ext4_debug("Checking group descriptors");
2298
2299 for (i = 0; i < sbi->s_groups_count; i++) {
2300 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
2301
2302 if (i == sbi->s_groups_count - 1 || flexbg_flag)
2303 last_block = ext4_blocks_count(sbi->s_es) - 1;
2304 else
2305 last_block = first_block +
2306 (EXT4_BLOCKS_PER_GROUP(sb) - 1);
2307
2308 if ((grp == sbi->s_groups_count) &&
2309 !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
2310 grp = i;
2311
2312 block_bitmap = ext4_block_bitmap(sb, gdp);
2313 if (block_bitmap == sb_block) {
2314 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2315 "Block bitmap for group %u overlaps "
2316 "superblock", i);
2317 }
2318 if (block_bitmap < first_block || block_bitmap > last_block) {
2319 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2320 "Block bitmap for group %u not in group "
2321 "(block %llu)!", i, block_bitmap);
2322 return 0;
2323 }
2324 inode_bitmap = ext4_inode_bitmap(sb, gdp);
2325 if (inode_bitmap == sb_block) {
2326 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2327 "Inode bitmap for group %u overlaps "
2328 "superblock", i);
2329 }
2330 if (inode_bitmap < first_block || inode_bitmap > last_block) {
2331 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2332 "Inode bitmap for group %u not in group "
2333 "(block %llu)!", i, inode_bitmap);
2334 return 0;
2335 }
2336 inode_table = ext4_inode_table(sb, gdp);
2337 if (inode_table == sb_block) {
2338 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2339 "Inode table for group %u overlaps "
2340 "superblock", i);
2341 }
2342 if (inode_table < first_block ||
2343 inode_table + sbi->s_itb_per_group - 1 > last_block) {
2344 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2345 "Inode table for group %u not in group "
2346 "(block %llu)!", i, inode_table);
2347 return 0;
2348 }
2349 ext4_lock_group(sb, i);
2350 if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
2351 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2352 "Checksum for group %u failed (%u!=%u)",
2353 i, le16_to_cpu(ext4_group_desc_csum(sb, i,
2354 gdp)), le16_to_cpu(gdp->bg_checksum));
2355 if (!(sb->s_flags & MS_RDONLY)) {
2356 ext4_unlock_group(sb, i);
2357 return 0;
2358 }
2359 }
2360 ext4_unlock_group(sb, i);
2361 if (!flexbg_flag)
2362 first_block += EXT4_BLOCKS_PER_GROUP(sb);
2363 }
2364 if (NULL != first_not_zeroed)
2365 *first_not_zeroed = grp;
2366 return 1;
2367 }
2368
2369 /* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at
2370 * the superblock) which were deleted from all directories, but held open by
2371 * a process at the time of a crash. We walk the list and try to delete these
2372 * inodes at recovery time (only with a read-write filesystem).
2373 *
2374 * In order to keep the orphan inode chain consistent during traversal (in
2375 * case of crash during recovery), we link each inode into the superblock
2376 * orphan list_head and handle it the same way as an inode deletion during
2377 * normal operation (which journals the operations for us).
2378 *
2379 * We only do an iget() and an iput() on each inode, which is very safe if we
2380 * accidentally point at an in-use or already deleted inode. The worst that
2381 * can happen in this case is that we get a "bit already cleared" message from
2382 * ext4_free_inode(). The only reason we would point at a wrong inode is if
2383 * e2fsck was run on this filesystem, and it must have already done the orphan
2384 * inode cleanup for us, so we can safely abort without any further action.
2385 */
2386 static void ext4_orphan_cleanup(struct super_block *sb,
2387 struct ext4_super_block *es)
2388 {
2389 unsigned int s_flags = sb->s_flags;
2390 int ret, nr_orphans = 0, nr_truncates = 0;
2391 #ifdef CONFIG_QUOTA
2392 int i;
2393 #endif
2394 if (!es->s_last_orphan) {
2395 jbd_debug(4, "no orphan inodes to clean up\n");
2396 return;
2397 }
2398
2399 if (bdev_read_only(sb->s_bdev)) {
2400 ext4_msg(sb, KERN_ERR, "write access "
2401 "unavailable, skipping orphan cleanup");
2402 return;
2403 }
2404
2405 /* Check if feature set would not allow a r/w mount */
2406 if (!ext4_feature_set_ok(sb, 0)) {
2407 ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
2408 "unknown ROCOMPAT features");
2409 return;
2410 }
2411
2412 if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
2413 /* don't clear list on RO mount w/ errors */
2414 if (es->s_last_orphan && !(s_flags & MS_RDONLY)) {
2415 ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
2416 "clearing orphan list.\n");
2417 es->s_last_orphan = 0;
2418 }
2419 jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
2420 return;
2421 }
2422
2423 if (s_flags & MS_RDONLY) {
2424 ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
2425 sb->s_flags &= ~MS_RDONLY;
2426 }
2427 #ifdef CONFIG_QUOTA
2428 /* Needed for iput() to work correctly and not trash data */
2429 sb->s_flags |= MS_ACTIVE;
2430 /* Turn on quotas so that they are updated correctly */
2431 for (i = 0; i < EXT4_MAXQUOTAS; i++) {
2432 if (EXT4_SB(sb)->s_qf_names[i]) {
2433 int ret = ext4_quota_on_mount(sb, i);
2434 if (ret < 0)
2435 ext4_msg(sb, KERN_ERR,
2436 "Cannot turn on journaled "
2437 "quota: error %d", ret);
2438 }
2439 }
2440 #endif
2441
2442 while (es->s_last_orphan) {
2443 struct inode *inode;
2444
2445 /*
2446 * We may have encountered an error during cleanup; if
2447 * so, skip the rest.
2448 */
2449 if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
2450 jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
2451 es->s_last_orphan = 0;
2452 break;
2453 }
2454
2455 inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
2456 if (IS_ERR(inode)) {
2457 es->s_last_orphan = 0;
2458 break;
2459 }
2460
2461 list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
2462 dquot_initialize(inode);
2463 if (inode->i_nlink) {
2464 if (test_opt(sb, DEBUG))
2465 ext4_msg(sb, KERN_DEBUG,
2466 "%s: truncating inode %lu to %lld bytes",
2467 __func__, inode->i_ino, inode->i_size);
2468 jbd_debug(2, "truncating inode %lu to %lld bytes\n",
2469 inode->i_ino, inode->i_size);
2470 inode_lock(inode);
2471 truncate_inode_pages(inode->i_mapping, inode->i_size);
2472 ret = ext4_truncate(inode);
2473 if (ret)
2474 ext4_std_error(inode->i_sb, ret);
2475 inode_unlock(inode);
2476 nr_truncates++;
2477 } else {
2478 if (test_opt(sb, DEBUG))
2479 ext4_msg(sb, KERN_DEBUG,
2480 "%s: deleting unreferenced inode %lu",
2481 __func__, inode->i_ino);
2482 jbd_debug(2, "deleting unreferenced inode %lu\n",
2483 inode->i_ino);
2484 nr_orphans++;
2485 }
2486 iput(inode); /* The delete magic happens here! */
2487 }
2488
2489 #define PLURAL(x) (x), ((x) == 1) ? "" : "s"
2490
2491 if (nr_orphans)
2492 ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
2493 PLURAL(nr_orphans));
2494 if (nr_truncates)
2495 ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
2496 PLURAL(nr_truncates));
2497 #ifdef CONFIG_QUOTA
2498 /* Turn quotas off */
2499 for (i = 0; i < EXT4_MAXQUOTAS; i++) {
2500 if (sb_dqopt(sb)->files[i])
2501 dquot_quota_off(sb, i);
2502 }
2503 #endif
2504 sb->s_flags = s_flags; /* Restore MS_RDONLY status */
2505 }
2506
2507 /*
2508 * Maximal extent format file size.
2509 * Resulting logical blkno at s_maxbytes must fit in our on-disk
2510 * extent format containers, within a sector_t, and within i_blocks
2511 * in the vfs. ext4 inode has 48 bits of i_block in fsblock units,
2512 * so that won't be a limiting factor.
2513 *
2514 * However, there is another limiting factor: we store extents as a
2515 * starting block plus a length, so the length of the extent covering the
2516 * maximum file size must fit into the on-disk format containers as
2517 * well. Because a length covering blocks 0..N is N + 1 (block 0 counts
2518 * too), we have to lower s_maxbytes by one fs block.
2519 *
2520 * Note, this does *not* consider any metadata overhead for vfs i_blocks.
2521 */
2522 static loff_t ext4_max_size(int blkbits, int has_huge_files)
2523 {
2524 loff_t res;
2525 loff_t upper_limit = MAX_LFS_FILESIZE;
2526
2527 /* small i_blocks in vfs inode? */
2528 if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
2529 /*
2530 * If CONFIG_LBDAF is not enabled, the inode's i_blocks counts the
2531 * file's 512-byte sectors in a 32-bit field
2532 * (32 == sizeof(vfs inode i_blocks) * 8)
2533 */
2534 upper_limit = (1LL << 32) - 1;
2535
2536 /* total blocks in file system block size */
2537 upper_limit >>= (blkbits - 9);
2538 upper_limit <<= blkbits;
2539 }
2540
2541 /*
2542 * 32-bit extent-start container, ee_block. We lower the maxbytes
2543 * by one fs block, so ee_len can cover the extent of maximum file
2544 * size
2545 */
2546 res = (1LL << 32) - 1;
2547 res <<= blkbits;
2548
2549 /* Sanity check against vm- & vfs- imposed limits */
2550 if (res > upper_limit)
2551 res = upper_limit;
2552
2553 return res;
2554 }
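/*
 * Worked example (4 KiB blocks, blkbits == 12): the extent limit gives
 * res = (2^32 - 1) << 12, i.e. 16 TiB minus one 4 KiB block. With
 * has_huge_files and a 64-bit blkcnt_t the MAX_LFS_FILESIZE ceiling is
 * far above that, so ~16 TiB is what s_maxbytes ends up as; without
 * huge files the 2^32-sector i_blocks limit caps it near 2 TiB instead.
 */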
2555
2556 /*
2557 * Maximal bitmap file size. There is a direct, and {,double-,triple-}indirect
2558 * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
2559 * We need to be 1 filesystem block less than the 2^48 sector limit.
2560 */
2561 static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
2562 {
2563 loff_t res = EXT4_NDIR_BLOCKS;
2564 int meta_blocks;
2565 loff_t upper_limit;
2566 /* This is calculated to be the largest file size for a dense, block
2567 * mapped file such that the file's total number of 512-byte sectors,
2568 * including data and all indirect blocks, does not exceed (2^48 - 1).
2569 *
2570 * __u32 i_blocks_lo and __u16 i_blocks_high represent the total
2571 * number of 512-byte sectors of the file.
2572 */
2573
2574 if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
2575 /*
2576 * !has_huge_files or CONFIG_LBDAF not enabled implies that
2577 * the inode i_blocks field counts the file's 512-byte sectors and
2578 * is limited to 2^32 - 1 (32 == sizeof(vfs inode i_blocks) * 8)
2579 */
2580 upper_limit = (1LL << 32) - 1;
2581
2582 /* total blocks in file system block size */
2583 upper_limit >>= (bits - 9);
2584
2585 } else {
2586 /*
2587 * We use 48 bit ext4_inode i_blocks
2588 * With EXT4_HUGE_FILE_FL set the i_blocks
2589 * represent total number of blocks in
2590 * file system block size
2591 */
2592 upper_limit = (1LL << 48) - 1;
2593
2594 }
2595
2596 /* indirect blocks */
2597 meta_blocks = 1;
2598 /* double indirect blocks */
2599 meta_blocks += 1 + (1LL << (bits-2));
2600 /* triple indirect blocks */
2601 meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));
2602
2603 upper_limit -= meta_blocks;
2604 upper_limit <<= bits;
2605
2606 res += 1LL << (bits-2);
2607 res += 1LL << (2*(bits-2));
2608 res += 1LL << (3*(bits-2));
2609 res <<= bits;
2610 if (res > upper_limit)
2611 res = upper_limit;
2612
2613 if (res > MAX_LFS_FILESIZE)
2614 res = MAX_LFS_FILESIZE;
2615
2616 return res;
2617 }
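/*
 * Worked example (4 KiB blocks, bits == 12, so an indirect block holds
 * 2^(bits-2) == 1024 block numbers):
 *
 *   res = (12 + 2^10 + 2^20 + 2^30) blocks << 12  ~=  4 TiB
 *
 * With huge files the 2^48 i_blocks upper_limit is far larger, so the
 * triple-indirect map is the binding limit; without huge files the
 * 2^32-sector i_blocks limit caps block-mapped files near 2 TiB.
 */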
2618
2619 static ext4_fsblk_t descriptor_loc(struct super_block *sb,
2620 ext4_fsblk_t logical_sb_block, int nr)
2621 {
2622 struct ext4_sb_info *sbi = EXT4_SB(sb);
2623 ext4_group_t bg, first_meta_bg;
2624 int has_super = 0;
2625
2626 first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
2627
2628 if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
2629 return logical_sb_block + nr + 1;
2630 bg = sbi->s_desc_per_block * nr;
2631 if (ext4_bg_has_super(sb, bg))
2632 has_super = 1;
2633
2634 /*
2635 * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
2636 * block 2, not 1. If s_first_data_block == 0 (bigalloc is enabled
2637 * on modern mke2fs or blksize > 1k on older mke2fs) then we must
2638 * compensate.
2639 */
2640 if (sb->s_blocksize == 1024 && nr == 0 &&
2641 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) == 0)
2642 has_super++;
2643
2644 return (has_super + ext4_group_first_block_no(sb, bg));
2645 }
2646
2647 /**
2648 * ext4_get_stripe_size: Get the stripe size.
2649 * @sbi: In memory super block info
2650 *
2651 * If a stripe size was specified via mount option, use that value
2652 * unless it is greater than the blocks per group; in that case fall
2653 * back to the superblock values (stripe width, then stride).
2654 * If those are also greater than the blocks per group, return 0.
2655 * The allocator needs the stripe size to be less than the blocks per group.
2656 *
2657 */
2658 static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
2659 {
2660 unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
2661 unsigned long stripe_width =
2662 le32_to_cpu(sbi->s_es->s_raid_stripe_width);
2663 int ret;
2664
2665 if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
2666 ret = sbi->s_stripe;
2667 else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
2668 ret = stripe_width;
2669 else if (stride && stride <= sbi->s_blocks_per_group)
2670 ret = stride;
2671 else
2672 ret = 0;
2673
2674 /*
2675 * A stripe size of 1 makes no sense, so
2676 * we set it to 0 to turn off the stripe handling code.
2677 */
2678 if (ret <= 1)
2679 ret = 0;
2680
2681 return ret;
2682 }
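/*
 * Example (RAID values are illustrative): if mkfs recorded
 * s_raid_stride == 16 and s_raid_stripe_width == 48, mounting with
 * -o stripe=64 uses 64 (as long as it does not exceed
 * s_blocks_per_group); otherwise stripe_width (48) wins over
 * stride (16). A result of 0 or 1 disables stripe-aligned allocation.
 */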
2683
2684 /*
2685 * Check whether this filesystem can be mounted based on
2686 * the features present and the RDONLY/RDWR mount requested.
2687 * Returns 1 if this filesystem can be mounted as requested,
2688 * 0 if it cannot be.
2689 */
2690 static int ext4_feature_set_ok(struct super_block *sb, int readonly)
2691 {
2692 if (ext4_has_unknown_ext4_incompat_features(sb)) {
2693 ext4_msg(sb, KERN_ERR,
2694 "Couldn't mount because of "
2695 "unsupported optional features (%x)",
2696 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
2697 ~EXT4_FEATURE_INCOMPAT_SUPP));
2698 return 0;
2699 }
2700
2701 if (readonly)
2702 return 1;
2703
2704 if (ext4_has_feature_readonly(sb)) {
2705 ext4_msg(sb, KERN_INFO, "filesystem is read-only");
2706 sb->s_flags |= MS_RDONLY;
2707 return 1;
2708 }
2709
2710 /* Check that feature set is OK for a read-write mount */
2711 if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
2712 ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
2713 "unsupported optional features (%x)",
2714 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
2715 ~EXT4_FEATURE_RO_COMPAT_SUPP));
2716 return 0;
2717 }
2718 /*
2719 * A file system with huge files enabled can only be mounted
2720 * read-write on 32-bit systems if the kernel is built with CONFIG_LBDAF
2721 */
2722 if (ext4_has_feature_huge_file(sb)) {
2723 if (sizeof(blkcnt_t) < sizeof(u64)) {
2724 ext4_msg(sb, KERN_ERR, "Filesystem with huge files "
2725 "cannot be mounted RDWR without "
2726 "CONFIG_LBDAF");
2727 return 0;
2728 }
2729 }
2730 if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
2731 ext4_msg(sb, KERN_ERR,
2732 "Can't support bigalloc feature without "
2733 "extents feature\n");
2734 return 0;
2735 }
2736
2737 #ifndef CONFIG_QUOTA
2738 if (ext4_has_feature_quota(sb) && !readonly) {
2739 ext4_msg(sb, KERN_ERR,
2740 "Filesystem with quota feature cannot be mounted RDWR "
2741 "without CONFIG_QUOTA");
2742 return 0;
2743 }
2744 if (ext4_has_feature_project(sb) && !readonly) {
2745 ext4_msg(sb, KERN_ERR,
2746 "Filesystem with project quota feature cannot be mounted RDWR "
2747 "without CONFIG_QUOTA");
2748 return 0;
2749 }
2750 #endif /* CONFIG_QUOTA */
2751 return 1;
2752 }
2753
2754 /*
2755 * This function is called once a day if we have errors logged
2756 * on the file system
2757 */
2758 static void print_daily_error_info(unsigned long arg)
2759 {
2760 struct super_block *sb = (struct super_block *) arg;
2761 struct ext4_sb_info *sbi;
2762 struct ext4_super_block *es;
2763
2764 sbi = EXT4_SB(sb);
2765 es = sbi->s_es;
2766
2767 if (es->s_error_count)
2768 /* fsck newer than v1.41.13 is needed to clean this condition. */
2769 ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
2770 le32_to_cpu(es->s_error_count));
2771 if (es->s_first_error_time) {
2772 printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d",
2773 sb->s_id, le32_to_cpu(es->s_first_error_time),
2774 (int) sizeof(es->s_first_error_func),
2775 es->s_first_error_func,
2776 le32_to_cpu(es->s_first_error_line));
2777 if (es->s_first_error_ino)
2778 printk(KERN_CONT ": inode %u",
2779 le32_to_cpu(es->s_first_error_ino));
2780 if (es->s_first_error_block)
2781 printk(KERN_CONT ": block %llu", (unsigned long long)
2782 le64_to_cpu(es->s_first_error_block));
2783 printk(KERN_CONT "\n");
2784 }
2785 if (es->s_last_error_time) {
2786 printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
2787 sb->s_id, le32_to_cpu(es->s_last_error_time),
2788 (int) sizeof(es->s_last_error_func),
2789 es->s_last_error_func,
2790 le32_to_cpu(es->s_last_error_line));
2791 if (es->s_last_error_ino)
2792 printk(KERN_CONT ": inode %u",
2793 le32_to_cpu(es->s_last_error_ino));
2794 if (es->s_last_error_block)
2795 printk(KERN_CONT ": block %llu", (unsigned long long)
2796 le64_to_cpu(es->s_last_error_block));
2797 printk(KERN_CONT "\n");
2798 }
2799 mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */
2800 }
2801
2802 /* Find next suitable group and run ext4_init_inode_table */
2803 static int ext4_run_li_request(struct ext4_li_request *elr)
2804 {
2805 struct ext4_group_desc *gdp = NULL;
2806 ext4_group_t group, ngroups;
2807 struct super_block *sb;
2808 unsigned long timeout = 0;
2809 int ret = 0;
2810
2811 sb = elr->lr_super;
2812 ngroups = EXT4_SB(sb)->s_groups_count;
2813
2814 for (group = elr->lr_next_group; group < ngroups; group++) {
2815 gdp = ext4_get_group_desc(sb, group, NULL);
2816 if (!gdp) {
2817 ret = 1;
2818 break;
2819 }
2820
2821 if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
2822 break;
2823 }
2824
2825 if (group >= ngroups)
2826 ret = 1;
2827
2828 if (!ret) {
2829 timeout = jiffies;
2830 ret = ext4_init_inode_table(sb, group,
2831 elr->lr_timeout ? 0 : 1);
2832 if (elr->lr_timeout == 0) {
2833 timeout = (jiffies - timeout) *
2834 elr->lr_sbi->s_li_wait_mult;
2835 elr->lr_timeout = timeout;
2836 }
2837 elr->lr_next_sched = jiffies + elr->lr_timeout;
2838 elr->lr_next_group = group + 1;
2839 }
2840 return ret;
2841 }
2842
2843 /*
2844 * Remove lr_request from the request list and free the
2845 * request structure. Should be called with li_list_mtx held
2846 */
2847 static void ext4_remove_li_request(struct ext4_li_request *elr)
2848 {
2849 struct ext4_sb_info *sbi;
2850
2851 if (!elr)
2852 return;
2853
2854 sbi = elr->lr_sbi;
2855
2856 list_del(&elr->lr_request);
2857 sbi->s_li_request = NULL;
2858 kfree(elr);
2859 }
2860
2861 static void ext4_unregister_li_request(struct super_block *sb)
2862 {
2863 mutex_lock(&ext4_li_mtx);
2864 if (!ext4_li_info) {
2865 mutex_unlock(&ext4_li_mtx);
2866 return;
2867 }
2868
2869 mutex_lock(&ext4_li_info->li_list_mtx);
2870 ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
2871 mutex_unlock(&ext4_li_info->li_list_mtx);
2872 mutex_unlock(&ext4_li_mtx);
2873 }
2874
2875 static struct task_struct *ext4_lazyinit_task;
2876
2877 /*
2878 * This is the function where the ext4lazyinit thread lives. It walks
2879 * through the request list searching for the next scheduled filesystem.
2880 * When such a fs is found, it runs the lazy initialization request
2881 * (ext4_run_li_request) and keeps track of the time spent in this
2882 * function. Based on that time we compute the next schedule time of
2883 * the request. When the walk through the list is complete, it computes
2884 * the next wakeup time and puts itself to sleep.
2885 */
2886 static int ext4_lazyinit_thread(void *arg)
2887 {
2888 struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
2889 struct list_head *pos, *n;
2890 struct ext4_li_request *elr;
2891 unsigned long next_wakeup, cur;
2892
2893 BUG_ON(NULL == eli);
2894
2895 cont_thread:
2896 while (true) {
2897 next_wakeup = MAX_JIFFY_OFFSET;
2898
2899 mutex_lock(&eli->li_list_mtx);
2900 if (list_empty(&eli->li_request_list)) {
2901 mutex_unlock(&eli->li_list_mtx);
2902 goto exit_thread;
2903 }
2904 list_for_each_safe(pos, n, &eli->li_request_list) {
2905 int err = 0;
2906 int progress = 0;
2907 elr = list_entry(pos, struct ext4_li_request,
2908 lr_request);
2909
2910 if (time_before(jiffies, elr->lr_next_sched)) {
2911 if (time_before(elr->lr_next_sched, next_wakeup))
2912 next_wakeup = elr->lr_next_sched;
2913 continue;
2914 }
2915 if (down_read_trylock(&elr->lr_super->s_umount)) {
2916 if (sb_start_write_trylock(elr->lr_super)) {
2917 progress = 1;
2918 /*
2919 * We hold sb->s_umount, so sb cannot
2920 * be removed from the list; it is
2921 * now safe to drop li_list_mtx
2922 */
2923 mutex_unlock(&eli->li_list_mtx);
2924 err = ext4_run_li_request(elr);
2925 sb_end_write(elr->lr_super);
2926 mutex_lock(&eli->li_list_mtx);
2927 n = pos->next;
2928 }
2929 up_read((&elr->lr_super->s_umount));
2930 }
2931 /* error, remove the lazy_init job */
2932 if (err) {
2933 ext4_remove_li_request(elr);
2934 continue;
2935 }
2936 if (!progress) {
2937 elr->lr_next_sched = jiffies +
2938 (prandom_u32()
2939 % (EXT4_DEF_LI_MAX_START_DELAY * HZ));
2940 }
2941 if (time_before(elr->lr_next_sched, next_wakeup))
2942 next_wakeup = elr->lr_next_sched;
2943 }
2944 mutex_unlock(&eli->li_list_mtx);
2945
2946 try_to_freeze();
2947
2948 cur = jiffies;
2949 if ((time_after_eq(cur, next_wakeup)) ||
2950 (MAX_JIFFY_OFFSET == next_wakeup)) {
2951 cond_resched();
2952 continue;
2953 }
2954
2955 schedule_timeout_interruptible(next_wakeup - cur);
2956
2957 if (kthread_should_stop()) {
2958 ext4_clear_request_list();
2959 goto exit_thread;
2960 }
2961 }
2962
2963 exit_thread:
2964 /*
2965 * It looks like the request list is empty, but we need
2966 * to check it under the li_list_mtx lock, to prevent any
2967 * additions into it, and of course we should lock ext4_li_mtx
2968 * to atomically free the list and ext4_li_info, because at
2969 * this point another ext4 filesystem could be registering
2970 * new one.
2971 */
2972 mutex_lock(&ext4_li_mtx);
2973 mutex_lock(&eli->li_list_mtx);
2974 if (!list_empty(&eli->li_request_list)) {
2975 mutex_unlock(&eli->li_list_mtx);
2976 mutex_unlock(&ext4_li_mtx);
2977 goto cont_thread;
2978 }
2979 mutex_unlock(&eli->li_list_mtx);
2980 kfree(ext4_li_info);
2981 ext4_li_info = NULL;
2982 mutex_unlock(&ext4_li_mtx);
2983
2984 return 0;
2985 }
2986
2987 static void ext4_clear_request_list(void)
2988 {
2989 struct list_head *pos, *n;
2990 struct ext4_li_request *elr;
2991
2992 mutex_lock(&ext4_li_info->li_list_mtx);
2993 list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
2994 elr = list_entry(pos, struct ext4_li_request,
2995 lr_request);
2996 ext4_remove_li_request(elr);
2997 }
2998 mutex_unlock(&ext4_li_info->li_list_mtx);
2999 }
3000
3001 static int ext4_run_lazyinit_thread(void)
3002 {
3003 ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
3004 ext4_li_info, "ext4lazyinit");
3005 if (IS_ERR(ext4_lazyinit_task)) {
3006 int err = PTR_ERR(ext4_lazyinit_task);
3007 ext4_clear_request_list();
3008 kfree(ext4_li_info);
3009 ext4_li_info = NULL;
3010 printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
3011 "initialization thread\n",
3012 err);
3013 return err;
3014 }
3015 ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
3016 return 0;
3017 }
3018
3019 /*
3020 * Check whether it makes sense to run the itable init thread or not.
3021 * If there is at least one uninitialized inode table, return the
3022 * corresponding group number; otherwise the loop goes through all
3023 * groups and returns the total number of groups.
3024 */
3025 static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
3026 {
3027 ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
3028 struct ext4_group_desc *gdp = NULL;
3029
3030 for (group = 0; group < ngroups; group++) {
3031 gdp = ext4_get_group_desc(sb, group, NULL);
3032 if (!gdp)
3033 continue;
3034
3035 if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
3036 break;
3037 }
3038
3039 return group;
3040 }
3041
3042 static int ext4_li_info_new(void)
3043 {
3044 struct ext4_lazy_init *eli = NULL;
3045
3046 eli = kzalloc(sizeof(*eli), GFP_KERNEL);
3047 if (!eli)
3048 return -ENOMEM;
3049
3050 INIT_LIST_HEAD(&eli->li_request_list);
3051 mutex_init(&eli->li_list_mtx);
3052
3053 eli->li_state |= EXT4_LAZYINIT_QUIT;
3054
3055 ext4_li_info = eli;
3056
3057 return 0;
3058 }
3059
3060 static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
3061 ext4_group_t start)
3062 {
3063 struct ext4_sb_info *sbi = EXT4_SB(sb);
3064 struct ext4_li_request *elr;
3065
3066 elr = kzalloc(sizeof(*elr), GFP_KERNEL);
3067 if (!elr)
3068 return NULL;
3069
3070 elr->lr_super = sb;
3071 elr->lr_sbi = sbi;
3072 elr->lr_next_group = start;
3073
3074 /*
3075 * Randomize first schedule time of the request to
3076 * spread the inode table initialization requests
3077 * better.
3078 */
3079 elr->lr_next_sched = jiffies + (prandom_u32() %
3080 (EXT4_DEF_LI_MAX_START_DELAY * HZ));
3081 return elr;
3082 }
3083
3084 int ext4_register_li_request(struct super_block *sb,
3085 ext4_group_t first_not_zeroed)
3086 {
3087 struct ext4_sb_info *sbi = EXT4_SB(sb);
3088 struct ext4_li_request *elr = NULL;
3089 ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
3090 int ret = 0;
3091
3092 mutex_lock(&ext4_li_mtx);
3093 if (sbi->s_li_request != NULL) {
3094 /*
3095 * Reset timeout so it can be computed again, because
3096 * s_li_wait_mult might have changed.
3097 */
3098 sbi->s_li_request->lr_timeout = 0;
3099 goto out;
3100 }
3101
3102 if (first_not_zeroed == ngroups ||
3103 (sb->s_flags & MS_RDONLY) ||
3104 !test_opt(sb, INIT_INODE_TABLE))
3105 goto out;
3106
3107 elr = ext4_li_request_new(sb, first_not_zeroed);
3108 if (!elr) {
3109 ret = -ENOMEM;
3110 goto out;
3111 }
3112
3113 if (NULL == ext4_li_info) {
3114 ret = ext4_li_info_new();
3115 if (ret)
3116 goto out;
3117 }
3118
3119 mutex_lock(&ext4_li_info->li_list_mtx);
3120 list_add(&elr->lr_request, &ext4_li_info->li_request_list);
3121 mutex_unlock(&ext4_li_info->li_list_mtx);
3122
3123 sbi->s_li_request = elr;
3124 /*
3125 * set elr to NULL here since it has been inserted into
3126 * the request_list and the removal and free of it is
3127 * handled by ext4_clear_request_list from now on.
3128 */
3129 elr = NULL;
3130
3131 if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
3132 ret = ext4_run_lazyinit_thread();
3133 if (ret)
3134 goto out;
3135 }
3136 out:
3137 mutex_unlock(&ext4_li_mtx);
3138 if (ret)
3139 kfree(elr);
3140 return ret;
3141 }
3142
3143 /*
3144 * We do not need to lock anything since this is called on
3145 * module unload.
3146 */
3147 static void ext4_destroy_lazyinit_thread(void)
3148 {
3149 /*
3150 * If the thread exited earlier,
3151 * there's nothing to be done.
3152 */
3153 if (!ext4_li_info || !ext4_lazyinit_task)
3154 return;
3155
3156 kthread_stop(ext4_lazyinit_task);
3157 }
3158
3159 static int set_journal_csum_feature_set(struct super_block *sb)
3160 {
3161 int ret = 1;
3162 int compat, incompat;
3163 struct ext4_sb_info *sbi = EXT4_SB(sb);
3164
3165 if (ext4_has_metadata_csum(sb)) {
3166 /* journal checksum v3 */
3167 compat = 0;
3168 incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
3169 } else {
3170 /* journal checksum v1 */
3171 compat = JBD2_FEATURE_COMPAT_CHECKSUM;
3172 incompat = 0;
3173 }
3174
3175 jbd2_journal_clear_features(sbi->s_journal,
3176 JBD2_FEATURE_COMPAT_CHECKSUM, 0,
3177 JBD2_FEATURE_INCOMPAT_CSUM_V3 |
3178 JBD2_FEATURE_INCOMPAT_CSUM_V2);
3179 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
3180 ret = jbd2_journal_set_features(sbi->s_journal,
3181 compat, 0,
3182 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
3183 incompat);
3184 } else if (test_opt(sb, JOURNAL_CHECKSUM)) {
3185 ret = jbd2_journal_set_features(sbi->s_journal,
3186 compat, 0,
3187 incompat);
3188 jbd2_journal_clear_features(sbi->s_journal, 0, 0,
3189 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
3190 } else {
3191 jbd2_journal_clear_features(sbi->s_journal, 0, 0,
3192 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
3193 }
3194
3195 return ret;
3196 }
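/*
 * Example: with metadata_csum the journal is switched to v3 checksums
 * (JBD2_FEATURE_INCOMPAT_CSUM_V3, per-block checksums in the
 * descriptor tags); otherwise the older whole-transaction
 * JBD2_FEATURE_COMPAT_CHECKSUM (v1) is used. Only the
 * journal_async_commit mount option keeps
 * JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT set.
 */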
3197
3198 /*
3199 * Note: calculating the overhead so we can be compatible with
3200 * historical BSD practice is quite difficult in the face of
3201 * clusters/bigalloc. This is because multiple metadata blocks from
3202 * different block group can end up in the same allocation cluster.
3203 * Calculating the exact overhead in the face of clustered allocation
3204 * requires either O(all block bitmaps) in memory or O(number of block
3205 * groups**2) in time. We will still calculate the superblock for
3206 * older file systems --- and if we come across with a bigalloc file
3207 * system with zero in s_overhead_clusters the estimate will be close to
3208 * correct especially for very large cluster sizes --- but for newer
3209 * file systems, it's better to calculate this figure once at mkfs
3210 * time, and store it in the superblock. If the superblock value is
3211 * present (even for non-bigalloc file systems), we will use it.
3212 */
3213 static int count_overhead(struct super_block *sb, ext4_group_t grp,
3214 char *buf)
3215 {
3216 struct ext4_sb_info *sbi = EXT4_SB(sb);
3217 struct ext4_group_desc *gdp;
3218 ext4_fsblk_t first_block, last_block, b;
3219 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
3220 int s, j, count = 0;
3221
3222 if (!ext4_has_feature_bigalloc(sb))
3223 return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
3224 sbi->s_itb_per_group + 2);
3225
3226 first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
3227 (grp * EXT4_BLOCKS_PER_GROUP(sb));
3228 last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
3229 for (i = 0; i < ngroups; i++) {
3230 gdp = ext4_get_group_desc(sb, i, NULL);
3231 b = ext4_block_bitmap(sb, gdp);
3232 if (b >= first_block && b <= last_block) {
3233 ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
3234 count++;
3235 }
3236 b = ext4_inode_bitmap(sb, gdp);
3237 if (b >= first_block && b <= last_block) {
3238 ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
3239 count++;
3240 }
3241 b = ext4_inode_table(sb, gdp);
3242 if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
3243 for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
3244 int c = EXT4_B2C(sbi, b - first_block);
3245 ext4_set_bit(c, buf);
3246 count++;
3247 }
3248 if (i != grp)
3249 continue;
3250 s = 0;
3251 if (ext4_bg_has_super(sb, grp)) {
3252 ext4_set_bit(s++, buf);
3253 count++;
3254 }
3255 j = ext4_bg_num_gdb(sb, grp);
3256 if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
3257 ext4_error(sb, "Invalid number of block group "
3258 "descriptor blocks: %d", j);
3259 j = EXT4_BLOCKS_PER_GROUP(sb) - s;
3260 }
3261 count += j;
3262 for (; j > 0; j--)
3263 ext4_set_bit(EXT4_B2C(sbi, s++), buf);
3264 }
3265 if (!count)
3266 return 0;
3267 return EXT4_CLUSTERS_PER_GROUP(sb) -
3268 ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
3269 }
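/*
 * Worked example: on a non-bigalloc filesystem the fast path above
 * gives, for a group carrying a superblock backup,
 *
 *   1 (super) + ext4_bg_num_gdb() GDT blocks
 *     + s_itb_per_group inode-table blocks + 2 bitmaps
 *
 * The bigalloc path instead marks, in the scratch bitmap @buf, every
 * metadata block of the whole filesystem that falls inside group @grp
 * and counts used clusters, since one cluster can hold metadata
 * belonging to several different groups.
 */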
3270
3271 /*
3272 * Compute the overhead and stash it in sbi->s_overhead
3273 */
3274 int ext4_calculate_overhead(struct super_block *sb)
3275 {
3276 struct ext4_sb_info *sbi = EXT4_SB(sb);
3277 struct ext4_super_block *es = sbi->s_es;
3278 struct inode *j_inode;
3279 unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
3280 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
3281 ext4_fsblk_t overhead = 0;
3282 char *buf = (char *) get_zeroed_page(GFP_NOFS);
3283
3284 if (!buf)
3285 return -ENOMEM;
3286
3287 /*
3288 * Compute the overhead (FS structures). This is constant
3289 * for a given filesystem unless the number of block groups
3290 * changes so we cache the previous value until it does.
3291 */
3292
3293 /*
3294 * All of the blocks before first_data_block are overhead
3295 */
3296 overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
3297
3298 /*
3299 * Add the overhead found in each block group
3300 */
3301 for (i = 0; i < ngroups; i++) {
3302 int blks;
3303
3304 blks = count_overhead(sb, i, buf);
3305 overhead += blks;
3306 if (blks)
3307 memset(buf, 0, PAGE_SIZE);
3308 cond_resched();
3309 }
3310
3311 /*
3312 * Add the internal journal blocks whether the journal has been
3313 * loaded or not
3314 */
3315 if (sbi->s_journal && !sbi->journal_bdev)
3316 overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
3317 else if (ext4_has_feature_journal(sb) && !sbi->s_journal) {
3318 j_inode = ext4_get_journal_inode(sb, j_inum);
3319 if (j_inode) {
3320 j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
3321 overhead += EXT4_NUM_B2C(sbi, j_blocks);
3322 iput(j_inode);
3323 } else {
3324 ext4_msg(sb, KERN_ERR, "can't get journal size");
3325 }
3326 }
3327 sbi->s_overhead = overhead;
3328 smp_wmb();
3329 free_page((unsigned long) buf);
3330 return 0;
3331 }
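/*
 * Example: the cached figure is first_data_block clusters plus the
 * per-group overhead computed by count_overhead() plus the internal
 * journal (EXT4_NUM_B2C() of its blocks). Newer mkfs stores the same
 * number in s_overhead_clusters, in which case this walk is not
 * needed at mount time.
 */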
3332
3333 static void ext4_set_resv_clusters(struct super_block *sb)
3334 {
3335 ext4_fsblk_t resv_clusters;
3336 struct ext4_sb_info *sbi = EXT4_SB(sb);
3337
3338 /*
3339 * There's no need to reserve anything when we aren't using extents.
3340 * The space estimates are exact, there are no unwritten extents,
3341 * hole punching doesn't need new metadata... This is needed especially
3342 * to keep ext2/3 backward compatibility.
3343 */
3344 if (!ext4_has_feature_extents(sb))
3345 return;
3346 /*
3347 * By default we reserve 2% or 4096 clusters, whichever is smaller.
3348 * This should cover the situations where we cannot afford to run
3349 * out of space, such as punching a hole or converting
3350 * unwritten extents in the delalloc path. In most cases such an
3351 * allocation requires 1 or 2 blocks; higher numbers are
3352 * very rare.
3353 */
3354 resv_clusters = (ext4_blocks_count(sbi->s_es) >>
3355 sbi->s_cluster_bits);
3356
3357 do_div(resv_clusters, 50);
3358 resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
3359
3360 atomic64_set(&sbi->s_resv_clusters, resv_clusters);
3361 }
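/*
 * Worked example: a 1 TiB filesystem with 4 KiB clusters has 2^28
 * clusters; 2% of that is ~5.4 million, so the min_t() clamp keeps the
 * reservation at 4096 clusters (16 MiB). Only filesystems smaller than
 * about 800 MiB end up reserving the full 2%.
 */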
3362
3363 static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3364 {
3365 char *orig_data = kstrdup(data, GFP_KERNEL);
3366 struct buffer_head *bh;
3367 struct ext4_super_block *es = NULL;
3368 struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
3369 ext4_fsblk_t block;
3370 ext4_fsblk_t sb_block = get_sb_block(&data);
3371 ext4_fsblk_t logical_sb_block;
3372 unsigned long offset = 0;
3373 unsigned long journal_devnum = 0;
3374 unsigned long def_mount_opts;
3375 struct inode *root;
3376 const char *descr;
3377 int ret = -ENOMEM;
3378 int blocksize, clustersize;
3379 unsigned int db_count;
3380 unsigned int i;
3381 int needs_recovery, has_huge_files, has_bigalloc;
3382 __u64 blocks_count;
3383 int err = 0;
3384 unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
3385 ext4_group_t first_not_zeroed;
3386
3387 if ((data && !orig_data) || !sbi)
3388 goto out_free_base;
3389
3390 sbi->s_blockgroup_lock =
3391 kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
3392 if (!sbi->s_blockgroup_lock)
3393 goto out_free_base;
3394
3395 sb->s_fs_info = sbi;
3396 sbi->s_sb = sb;
3397 sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
3398 sbi->s_sb_block = sb_block;
3399 if (sb->s_bdev->bd_part)
3400 sbi->s_sectors_written_start =
3401 part_stat_read(sb->s_bdev->bd_part, sectors[1]);
3402
3403 /* Cleanup superblock name */
3404 strreplace(sb->s_id, '/', '!');
3405
3406 /* -EINVAL is default */
3407 ret = -EINVAL;
3408 blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
3409 if (!blocksize) {
3410 ext4_msg(sb, KERN_ERR, "unable to set blocksize");
3411 goto out_fail;
3412 }
3413
3414 /*
3415 * The ext4 superblock will not be buffer aligned for other than 1kB
3416 * block sizes. We need to calculate the offset from buffer start.
3417 */
3418 if (blocksize != EXT4_MIN_BLOCK_SIZE) {
3419 logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
3420 offset = do_div(logical_sb_block, blocksize);
3421 } else {
3422 logical_sb_block = sb_block;
3423 }
3424
3425 if (!(bh = sb_bread_unmovable(sb, logical_sb_block))) {
3426 ext4_msg(sb, KERN_ERR, "unable to read superblock");
3427 goto out_fail;
3428 }
3429 /*
3430 * Note: s_es must be initialized as soon as possible because
3431 * some ext4 macro-instructions depend on its value
3432 */
3433 es = (struct ext4_super_block *) (bh->b_data + offset);
3434 sbi->s_es = es;
3435 sb->s_magic = le16_to_cpu(es->s_magic);
3436 if (sb->s_magic != EXT4_SUPER_MAGIC)
3437 goto cantfind_ext4;
3438 sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);
3439
3440 /* Warn if metadata_csum and gdt_csum are both set. */
3441 if (ext4_has_feature_metadata_csum(sb) &&
3442 ext4_has_feature_gdt_csum(sb))
3443 ext4_warning(sb, "metadata_csum and uninit_bg are "
3444 "redundant flags; please run fsck.");
3445
3446 /* Check for a known checksum algorithm */
3447 if (!ext4_verify_csum_type(sb, es)) {
3448 ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
3449 "unknown checksum algorithm.");
3450 silent = 1;
3451 goto cantfind_ext4;
3452 }
3453
3454 /* Load the checksum driver */
3455 if (ext4_has_feature_metadata_csum(sb) ||
3456 ext4_has_feature_ea_inode(sb)) {
3457 sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
3458 if (IS_ERR(sbi->s_chksum_driver)) {
3459 ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
3460 ret = PTR_ERR(sbi->s_chksum_driver);
3461 sbi->s_chksum_driver = NULL;
3462 goto failed_mount;
3463 }
3464 }
3465
3466 /* Check superblock checksum */
3467 if (!ext4_superblock_csum_verify(sb, es)) {
3468 ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
3469 "invalid superblock checksum. Run e2fsck?");
3470 silent = 1;
3471 ret = -EFSBADCRC;
3472 goto cantfind_ext4;
3473 }
3474
3475 /* Precompute checksum seed for all metadata */
3476 if (ext4_has_feature_csum_seed(sb))
3477 sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
3478 else if (ext4_has_metadata_csum(sb) || ext4_has_feature_ea_inode(sb))
3479 sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
3480 sizeof(es->s_uuid));
3481
3482 /* Set defaults before we parse the mount options */
3483 def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
3484 set_opt(sb, INIT_INODE_TABLE);
3485 if (def_mount_opts & EXT4_DEFM_DEBUG)
3486 set_opt(sb, DEBUG);
3487 if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
3488 set_opt(sb, GRPID);
3489 if (def_mount_opts & EXT4_DEFM_UID16)
3490 set_opt(sb, NO_UID32);
3491 /* xattr user namespace & acls are now defaulted on */
3492 set_opt(sb, XATTR_USER);
3493 #ifdef CONFIG_EXT4_FS_POSIX_ACL
3494 set_opt(sb, POSIX_ACL);
3495 #endif
3496 /* don't forget to enable journal_csum when metadata_csum is enabled. */
3497 if (ext4_has_metadata_csum(sb))
3498 set_opt(sb, JOURNAL_CHECKSUM);
3499
3500 if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
3501 set_opt(sb, JOURNAL_DATA);
3502 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
3503 set_opt(sb, ORDERED_DATA);
3504 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
3505 set_opt(sb, WRITEBACK_DATA);
3506
3507 if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
3508 set_opt(sb, ERRORS_PANIC);
3509 else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
3510 set_opt(sb, ERRORS_CONT);
3511 else
3512 set_opt(sb, ERRORS_RO);
3513 /* block_validity enabled by default; disable with noblock_validity */
3514 set_opt(sb, BLOCK_VALIDITY);
3515 if (def_mount_opts & EXT4_DEFM_DISCARD)
3516 set_opt(sb, DISCARD);
3517
3518 sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
3519 sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
3520 sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
3521 sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
3522 sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
3523
3524 if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
3525 set_opt(sb, BARRIER);
3526
3527 /*
3528 * enable delayed allocation by default
3529 * Use -o nodelalloc to turn it off
3530 */
3531 if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
3532 ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
3533 set_opt(sb, DELALLOC);
3534
3535 /*
3536 * set the default s_li_wait_mult for lazyinit, in case no mount
3537 * option is specified.
3538 */
3539 sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
3540
3541 if (sbi->s_es->s_mount_opts[0]) {
3542 char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
3543 sizeof(sbi->s_es->s_mount_opts),
3544 GFP_KERNEL);
3545 if (!s_mount_opts)
3546 goto failed_mount;
3547 if (!parse_options(s_mount_opts, sb, &journal_devnum,
3548 &journal_ioprio, 0)) {
3549 ext4_msg(sb, KERN_WARNING,
3550 "failed to parse options in superblock: %s",
3551 s_mount_opts);
3552 }
3553 kfree(s_mount_opts);
3554 }
3555 sbi->s_def_mount_opt = sbi->s_mount_opt;
3556 if (!parse_options((char *) data, sb, &journal_devnum,
3557 &journal_ioprio, 0))
3558 goto failed_mount;
3559
3560 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
3561 printk_once(KERN_WARNING "EXT4-fs: Warning: mounting "
3562 "with data=journal disables delayed "
3563 "allocation and O_DIRECT support!\n");
3564 if (test_opt2(sb, EXPLICIT_DELALLOC)) {
3565 ext4_msg(sb, KERN_ERR, "can't mount with "
3566 "both data=journal and delalloc");
3567 goto failed_mount;
3568 }
3569 if (test_opt(sb, DIOREAD_NOLOCK)) {
3570 ext4_msg(sb, KERN_ERR, "can't mount with "
3571 "both data=journal and dioread_nolock");
3572 goto failed_mount;
3573 }
3574 if (test_opt(sb, DAX)) {
3575 ext4_msg(sb, KERN_ERR, "can't mount with "
3576 "both data=journal and dax");
3577 goto failed_mount;
3578 }
3579 if (ext4_has_feature_encrypt(sb)) {
3580 ext4_msg(sb, KERN_WARNING,
3581 "encrypted files will use data=ordered "
3582 "instead of data journaling mode");
3583 }
3584 if (test_opt(sb, DELALLOC))
3585 clear_opt(sb, DELALLOC);
3586 } else {
3587 sb->s_iflags |= SB_I_CGROUPWB;
3588 }
3589
3590 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
3591 (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
3592
3593 if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
3594 (ext4_has_compat_features(sb) ||
3595 ext4_has_ro_compat_features(sb) ||
3596 ext4_has_incompat_features(sb)))
3597 ext4_msg(sb, KERN_WARNING,
3598 "feature flags set on rev 0 fs, "
3599 "running e2fsck is recommended");
3600
3601 if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
3602 set_opt2(sb, HURD_COMPAT);
3603 if (ext4_has_feature_64bit(sb)) {
3604 ext4_msg(sb, KERN_ERR,
3605 "The Hurd can't support 64-bit file systems");
3606 goto failed_mount;
3607 }
3608
3609 /*
3610 * ea_inode feature uses l_i_version field which is not
3611 * available in HURD_COMPAT mode.
3612 */
3613 if (ext4_has_feature_ea_inode(sb)) {
3614 ext4_msg(sb, KERN_ERR,
3615 "ea_inode feature is not supported for Hurd");
3616 goto failed_mount;
3617 }
3618 }
3619
3620 if (IS_EXT2_SB(sb)) {
3621 if (ext2_feature_set_ok(sb))
3622 ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
3623 "using the ext4 subsystem");
3624 else {
3625 ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
3626 "to feature incompatibilities");
3627 goto failed_mount;
3628 }
3629 }
3630
3631 if (IS_EXT3_SB(sb)) {
3632 if (ext3_feature_set_ok(sb))
3633 ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
3634 "using the ext4 subsystem");
3635 else {
3636 ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
3637 "to feature incompatibilities");
3638 goto failed_mount;
3639 }
3640 }
3641
3642 /*
3643 * Check feature flags regardless of the revision level, since we
3644 * previously didn't change the revision level when setting the flags,
3645 * so there is a chance incompat flags are set on a rev 0 filesystem.
3646 */
3647 if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY)))
3648 goto failed_mount;
3649
3650 blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
3651 if (blocksize < EXT4_MIN_BLOCK_SIZE ||
3652 blocksize > EXT4_MAX_BLOCK_SIZE) {
3653 ext4_msg(sb, KERN_ERR,
3654 "Unsupported filesystem blocksize %d (%d log_block_size)",
3655 blocksize, le32_to_cpu(es->s_log_block_size));
3656 goto failed_mount;
3657 }
3658 if (le32_to_cpu(es->s_log_block_size) >
3659 (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
3660 ext4_msg(sb, KERN_ERR,
3661 "Invalid log block size: %u",
3662 le32_to_cpu(es->s_log_block_size));
3663 goto failed_mount;
3664 }
3665
3666 if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
3667 ext4_msg(sb, KERN_ERR,
3668 "Number of reserved GDT blocks insanely large: %d",
3669 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
3670 goto failed_mount;
3671 }
3672
3673 if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
3674 err = bdev_dax_supported(sb, blocksize);
3675 if (err)
3676 goto failed_mount;
3677 }
3678
3679 if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
3680 ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
3681 es->s_encryption_level);
3682 goto failed_mount;
3683 }
3684
3685 if (sb->s_blocksize != blocksize) {
3686 /* Validate the filesystem blocksize */
3687 if (!sb_set_blocksize(sb, blocksize)) {
3688 ext4_msg(sb, KERN_ERR, "bad block size %d",
3689 blocksize);
3690 goto failed_mount;
3691 }
3692
3693 brelse(bh);
3694 logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
3695 offset = do_div(logical_sb_block, blocksize);
3696 bh = sb_bread_unmovable(sb, logical_sb_block);
3697 if (!bh) {
3698 ext4_msg(sb, KERN_ERR,
3699 "Can't read superblock on 2nd try");
3700 goto failed_mount;
3701 }
3702 es = (struct ext4_super_block *)(bh->b_data + offset);
3703 sbi->s_es = es;
3704 if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
3705 ext4_msg(sb, KERN_ERR,
3706 "Magic mismatch, very weird!");
3707 goto failed_mount;
3708 }
3709 }
3710
3711 has_huge_files = ext4_has_feature_huge_file(sb);
3712 sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
3713 has_huge_files);
3714 sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);
3715
3716 if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
3717 sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
3718 sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
3719 } else {
3720 sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
3721 sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
3722 if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
3723 (!is_power_of_2(sbi->s_inode_size)) ||
3724 (sbi->s_inode_size > blocksize)) {
3725 ext4_msg(sb, KERN_ERR,
3726 "unsupported inode size: %d",
3727 sbi->s_inode_size);
3728 goto failed_mount;
3729 }
3730 if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
3731 sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2);
3732 }
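/*
 * Note: EXT4_EPOCH_BITS is 2, so the expression above evaluates to 1,
 * giving nanosecond timestamp granularity on filesystems with large
 * inodes; 128-byte inodes keep the default 1-second granularity.
 */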
3733
3734 sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
3735 if (ext4_has_feature_64bit(sb)) {
3736 if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
3737 sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
3738 !is_power_of_2(sbi->s_desc_size)) {
3739 ext4_msg(sb, KERN_ERR,
3740 "unsupported descriptor size %lu",
3741 sbi->s_desc_size);
3742 goto failed_mount;
3743 }
3744 } else
3745 sbi->s_desc_size = EXT4_MIN_DESC_SIZE;
3746
3747 sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
3748 sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
3749
3750 sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
3751 if (sbi->s_inodes_per_block == 0)
3752 goto cantfind_ext4;
3753 if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
3754 sbi->s_inodes_per_group > blocksize * 8) {
3755 ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu",
3756 sbi->s_inodes_per_group);
3757 goto failed_mount;
3758 }
3759 sbi->s_itb_per_group = sbi->s_inodes_per_group /
3760 sbi->s_inodes_per_block;
3761 sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
3762 sbi->s_sbh = bh;
3763 sbi->s_mount_state = le16_to_cpu(es->s_state);
3764 sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
3765 sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));
3766
3767 for (i = 0; i < 4; i++)
3768 sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
3769 sbi->s_def_hash_version = es->s_def_hash_version;
3770 if (ext4_has_feature_dir_index(sb)) {
3771 i = le32_to_cpu(es->s_flags);
3772 if (i & EXT2_FLAGS_UNSIGNED_HASH)
3773 sbi->s_hash_unsigned = 3;
3774 else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
3775 #ifdef __CHAR_UNSIGNED__
3776 if (!(sb->s_flags & MS_RDONLY))
3777 es->s_flags |=
3778 cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
3779 sbi->s_hash_unsigned = 3;
3780 #else
3781 if (!(sb->s_flags & MS_RDONLY))
3782 es->s_flags |=
3783 cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
3784 #endif
3785 }
3786 }
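/*
 * Background: the legacy dx hash folded filename bytes in as plain
 * "char", so its value depended on whether char is signed on the CPU
 * that created the directory. The EXT2_FLAGS_{UN,}SIGNED_HASH flags
 * stamped above record which variant this filesystem uses, keeping
 * lookups consistent across architectures.
 */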
3787
3788 /* Handle clustersize */
3789 clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
3790 has_bigalloc = ext4_has_feature_bigalloc(sb);
3791 if (has_bigalloc) {
3792 if (clustersize < blocksize) {
3793 ext4_msg(sb, KERN_ERR,
3794 "cluster size (%d) smaller than "
3795 "block size (%d)", clustersize, blocksize);
3796 goto failed_mount;
3797 }
3798 if (le32_to_cpu(es->s_log_cluster_size) >
3799 (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
3800 ext4_msg(sb, KERN_ERR,
3801 "Invalid log cluster size: %u",
3802 le32_to_cpu(es->s_log_cluster_size));
3803 goto failed_mount;
3804 }
3805 sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
3806 le32_to_cpu(es->s_log_block_size);
3807 sbi->s_clusters_per_group =
3808 le32_to_cpu(es->s_clusters_per_group);
3809 if (sbi->s_clusters_per_group > blocksize * 8) {
3810 ext4_msg(sb, KERN_ERR,
3811 "#clusters per group too big: %lu",
3812 sbi->s_clusters_per_group);
3813 goto failed_mount;
3814 }
3815 if (sbi->s_blocks_per_group !=
3816 (sbi->s_clusters_per_group * (clustersize / blocksize))) {
3817 ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and "
3818 "clusters per group (%lu) inconsistent",
3819 sbi->s_blocks_per_group,
3820 sbi->s_clusters_per_group);
3821 goto failed_mount;
3822 }
3823 } else {
3824 if (clustersize != blocksize) {
3825 ext4_warning(sb, "fragment/cluster size (%d) != "
3826 "block size (%d)", clustersize,
3827 blocksize);
3828 clustersize = blocksize;
3829 }
3830 if (sbi->s_blocks_per_group > blocksize * 8) {
3831 ext4_msg(sb, KERN_ERR,
3832 "#blocks per group too big: %lu",
3833 sbi->s_blocks_per_group);
3834 goto failed_mount;
3835 }
3836 sbi->s_clusters_per_group = sbi->s_blocks_per_group;
3837 sbi->s_cluster_bits = 0;
3838 }
3839 sbi->s_cluster_ratio = clustersize / blocksize;
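/*
 * E.g. a bigalloc filesystem with 4kB blocks and 64kB clusters ends up
 * with s_cluster_bits == 4 and s_cluster_ratio == 16; without bigalloc
 * these collapse to 0 and 1 respectively.
 */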
3840
3841 /* Do we have the standard group size of clustersize * 8 blocks? */
3842 if (sbi->s_blocks_per_group == clustersize << 3)
3843 set_opt2(sb, STD_GROUP_SIZE);
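/*
 * One bitmap block of blocksize bytes tracks blocksize * 8 clusters,
 * and each cluster holds clustersize / blocksize blocks, so a
 * standard-sized group spans clustersize * 8 blocks -- hence the
 * clustersize << 3 comparison above.
 */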
3844
3845 /*
3846 * Test whether we have more sectors than will fit in sector_t,
3847 * and whether the max offset is addressable by the page cache.
3848 */
3849 err = generic_check_addressable(sb->s_blocksize_bits,
3850 ext4_blocks_count(es));
3851 if (err) {
3852 ext4_msg(sb, KERN_ERR, "filesystem"
3853 " too large to mount safely on this system");
3854 if (sizeof(sector_t) < 8)
3855 ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled");
3856 goto failed_mount;
3857 }
3858
3859 if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
3860 goto cantfind_ext4;
3861
3862 /* check blocks count against device size */
3863 blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
3864 if (blocks_count && ext4_blocks_count(es) > blocks_count) {
3865 ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
3866 "exceeds size of device (%llu blocks)",
3867 ext4_blocks_count(es), blocks_count);
3868 goto failed_mount;
3869 }
3870
3871 /*
3872 * It makes no sense for the first data block to be beyond the end
3873 * of the filesystem.
3874 */
3875 if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
3876 ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
3877 "block %u is beyond end of filesystem (%llu)",
3878 le32_to_cpu(es->s_first_data_block),
3879 ext4_blocks_count(es));
3880 goto failed_mount;
3881 }
3882 blocks_count = (ext4_blocks_count(es) -
3883 le32_to_cpu(es->s_first_data_block) +
3884 EXT4_BLOCKS_PER_GROUP(sb) - 1);
3885 do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
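/*
 * Adding EXT4_BLOCKS_PER_GROUP(sb) - 1 before the division rounds up,
 * so a trailing partial group is still counted; this is effectively
 * DIV_ROUND_UP(blocks - first_data_block, blocks_per_group).
 */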
3886 if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
3887 ext4_msg(sb, KERN_WARNING, "groups count too large: %u "
3888 "(block count %llu, first data block %u, "
3889 "blocks per group %lu)", sbi->s_groups_count,
3890 ext4_blocks_count(es),
3891 le32_to_cpu(es->s_first_data_block),
3892 EXT4_BLOCKS_PER_GROUP(sb));
3893 goto failed_mount;
3894 }
3895 sbi->s_groups_count = blocks_count;
3896 sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
3897 (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
3898 db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
3899 EXT4_DESC_PER_BLOCK(sb);
3900 if (ext4_has_feature_meta_bg(sb)) {
3901 if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
3902 ext4_msg(sb, KERN_WARNING,
3903 "first meta block group too large: %u "
3904 "(group descriptor block count %u)",
3905 le32_to_cpu(es->s_first_meta_bg), db_count);
3906 goto failed_mount;
3907 }
3908 }
3909 sbi->s_group_desc = kvmalloc(db_count *
3910 sizeof(struct buffer_head *),
3911 GFP_KERNEL);
3912 if (sbi->s_group_desc == NULL) {
3913 ext4_msg(sb, KERN_ERR, "not enough memory");
3914 ret = -ENOMEM;
3915 goto failed_mount;
3916 }
3917
3918 bgl_lock_init(sbi->s_blockgroup_lock);
3919
3920 /* Pre-read the descriptors into the buffer cache */
3921 for (i = 0; i < db_count; i++) {
3922 block = descriptor_loc(sb, logical_sb_block, i);
3923 sb_breadahead(sb, block);
3924 }
3925
3926 for (i = 0; i < db_count; i++) {
3927 block = descriptor_loc(sb, logical_sb_block, i);
3928 sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
3929 if (!sbi->s_group_desc[i]) {
3930 ext4_msg(sb, KERN_ERR,
3931 "can't read group descriptor %d", i);
3932 db_count = i;
3933 goto failed_mount2;
3934 }
3935 }
3936 if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
3937 ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
3938 ret = -EFSCORRUPTED;
3939 goto failed_mount2;
3940 }
3941
3942 sbi->s_gdb_count = db_count;
3943 get_random_bytes(&sbi->s_next_generation, sizeof(u32));
3944 spin_lock_init(&sbi->s_next_gen_lock);
3945
3946 setup_timer(&sbi->s_err_report, print_daily_error_info,
3947 (unsigned long) sb);
3948
3949 /* Register extent status tree shrinker */
3950 if (ext4_es_register_shrinker(sbi))
3951 goto failed_mount3;
3952
3953 sbi->s_stripe = ext4_get_stripe_size(sbi);
3954 sbi->s_extent_max_zeroout_kb = 32;
3955
3956 /*
3957 * set up enough so that it can read an inode
3958 */
3959 sb->s_op = &ext4_sops;
3960 sb->s_export_op = &ext4_export_ops;
3961 sb->s_xattr = ext4_xattr_handlers;
3962 sb->s_cop = &ext4_cryptops;
3963 #ifdef CONFIG_QUOTA
3964 sb->dq_op = &ext4_quota_operations;
3965 if (ext4_has_feature_quota(sb))
3966 sb->s_qcop = &dquot_quotactl_sysfile_ops;
3967 else
3968 sb->s_qcop = &ext4_qctl_operations;
3969 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
3970 #endif
3971 memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
3972
3973 INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
3974 mutex_init(&sbi->s_orphan_lock);
3975
3976 sb->s_root = NULL;
3977
3978 needs_recovery = (es->s_last_orphan != 0 ||
3979 ext4_has_feature_journal_needs_recovery(sb));
3980
3981 if (ext4_has_feature_mmp(sb) && !(sb->s_flags & MS_RDONLY))
3982 if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
3983 goto failed_mount3a;
3984
3985 /*
3986 * The first inode we look at is the journal inode. Don't try
3987 * root first: it may be modified in the journal!
3988 */
3989 if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
3990 err = ext4_load_journal(sb, es, journal_devnum);
3991 if (err)
3992 goto failed_mount3a;
3993 } else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) &&
3994 ext4_has_feature_journal_needs_recovery(sb)) {
3995 ext4_msg(sb, KERN_ERR, "required journal recovery "
3996 "suppressed and not mounted read-only");
3997 goto failed_mount_wq;
3998 } else {
3999 /* Nojournal mode, all journal mount options are illegal */
4000 if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
4001 ext4_msg(sb, KERN_ERR, "can't mount with "
4002 "journal_checksum, fs mounted w/o journal");
4003 goto failed_mount_wq;
4004 }
4005 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
4006 ext4_msg(sb, KERN_ERR, "can't mount with "
4007 "journal_async_commit, fs mounted w/o journal");
4008 goto failed_mount_wq;
4009 }
4010 if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
4011 ext4_msg(sb, KERN_ERR, "can't mount with "
4012 "commit=%lu, fs mounted w/o journal",
4013 sbi->s_commit_interval / HZ);
4014 goto failed_mount_wq;
4015 }
4016 if (EXT4_MOUNT_DATA_FLAGS &
4017 (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
4018 ext4_msg(sb, KERN_ERR, "can't mount with "
4019 "data=, fs mounted w/o journal");
4020 goto failed_mount_wq;
4021 }
4022 sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
4023 clear_opt(sb, JOURNAL_CHECKSUM);
4024 clear_opt(sb, DATA_FLAGS);
4025 sbi->s_journal = NULL;
4026 needs_recovery = 0;
4027 goto no_journal;
4028 }
4029
4030 if (ext4_has_feature_64bit(sb) &&
4031 !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
4032 JBD2_FEATURE_INCOMPAT_64BIT)) {
4033 ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
4034 goto failed_mount_wq;
4035 }
4036
4037 if (!set_journal_csum_feature_set(sb)) {
4038 ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
4039 "feature set");
4040 goto failed_mount_wq;
4041 }
4042
4043 /* We have now updated the journal if required, so we can
4044 * validate the data journaling mode. */
4045 switch (test_opt(sb, DATA_FLAGS)) {
4046 case 0:
4047 /* No mode set, assume a default based on the journal
4048 * capabilities: ORDERED_DATA if the journal can
4049 * cope, else JOURNAL_DATA
4050 */
4051 if (jbd2_journal_check_available_features
4052 (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE))
4053 set_opt(sb, ORDERED_DATA);
4054 else
4055 set_opt(sb, JOURNAL_DATA);
4056 break;
4057
4058 case EXT4_MOUNT_ORDERED_DATA:
4059 case EXT4_MOUNT_WRITEBACK_DATA:
4060 if (!jbd2_journal_check_available_features
4061 (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
4062 ext4_msg(sb, KERN_ERR, "Journal does not support "
4063 "requested data journaling mode");
4064 goto failed_mount_wq;
4065 }
4066 default:
4067 break;
4068 }
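/*
 * Rationale: without JBD2_FEATURE_INCOMPAT_REVOKE the journal cannot
 * record revoke entries, so replay might scribble stale metadata over
 * blocks that were freed and reused for data. Ordered and writeback
 * modes write data outside the journal and are therefore refused,
 * forcing full data journaling instead.
 */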
4069
4070 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
4071 test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
4072 ext4_msg(sb, KERN_ERR, "can't mount with "
4073 "journal_async_commit in data=ordered mode");
4074 goto failed_mount_wq;
4075 }
4076
4077 set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
4078
4079 sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
4080
4081 no_journal:
4082 sbi->s_ea_block_cache = ext4_xattr_create_cache();
4083 if (!sbi->s_ea_block_cache) {
4084 ext4_msg(sb, KERN_ERR, "Failed to create ea_block_cache");
4085 goto failed_mount_wq;
4086 }
4087
4088 if (ext4_has_feature_ea_inode(sb)) {
4089 sbi->s_ea_inode_cache = ext4_xattr_create_cache();
4090 if (!sbi->s_ea_inode_cache) {
4091 ext4_msg(sb, KERN_ERR,
4092 "Failed to create ea_inode_cache");
4093 goto failed_mount_wq;
4094 }
4095 }
4096
4097 if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
4098 (blocksize != PAGE_SIZE)) {
4099 ext4_msg(sb, KERN_ERR,
4100 "Unsupported blocksize for fs encryption");
4101 goto failed_mount_wq;
4102 }
4103
4104 if (DUMMY_ENCRYPTION_ENABLED(sbi) && !(sb->s_flags & MS_RDONLY) &&
4105 !ext4_has_feature_encrypt(sb)) {
4106 ext4_set_feature_encrypt(sb);
4107 ext4_commit_super(sb, 1);
4108 }
4109
4110 /*
4111 * Get the # of file system overhead blocks from the
4112 * superblock if present.
4113 */
4114 if (es->s_overhead_clusters)
4115 sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
4116 else {
4117 err = ext4_calculate_overhead(sb);
4118 if (err)
4119 goto failed_mount_wq;
4120 }
4121
4122 /*
4123 * The maximum number of concurrent works can be high and
4124 * concurrency isn't really necessary. Limit it to 1.
4125 */
4126 EXT4_SB(sb)->rsv_conversion_wq =
4127 alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
4128 if (!EXT4_SB(sb)->rsv_conversion_wq) {
4129 printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
4130 ret = -ENOMEM;
4131 goto failed_mount4;
4132 }
4133
4134 /*
4135 * The jbd2_journal_load will have done any necessary log recovery,
4136 * so we can safely mount the rest of the filesystem now.
4137 */
4138
4139 root = ext4_iget(sb, EXT4_ROOT_INO);
4140 if (IS_ERR(root)) {
4141 ext4_msg(sb, KERN_ERR, "get root inode failed");
4142 ret = PTR_ERR(root);
4143 root = NULL;
4144 goto failed_mount4;
4145 }
4146 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
4147 ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
4148 iput(root);
4149 goto failed_mount4;
4150 }
4151 sb->s_root = d_make_root(root);
4152 if (!sb->s_root) {
4153 ext4_msg(sb, KERN_ERR, "get root dentry failed");
4154 ret = -ENOMEM;
4155 goto failed_mount4;
4156 }
4157
4158 if (ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY))
4159 sb->s_flags |= MS_RDONLY;
4160
4161 /* determine the minimum size of new large inodes, if present */
4162 if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
4163 sbi->s_want_extra_isize == 0) {
4164 sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
4165 EXT4_GOOD_OLD_INODE_SIZE;
4166 if (ext4_has_feature_extra_isize(sb)) {
4167 if (sbi->s_want_extra_isize <
4168 le16_to_cpu(es->s_want_extra_isize))
4169 sbi->s_want_extra_isize =
4170 le16_to_cpu(es->s_want_extra_isize);
4171 if (sbi->s_want_extra_isize <
4172 le16_to_cpu(es->s_min_extra_isize))
4173 sbi->s_want_extra_isize =
4174 le16_to_cpu(es->s_min_extra_isize);
4175 }
4176 }
4177 /* Check if enough inode space is available */
4178 if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
4179 sbi->s_inode_size) {
4180 sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
4181 EXT4_GOOD_OLD_INODE_SIZE;
4182 ext4_msg(sb, KERN_INFO, "required extra inode space not "
4183 "available");
4184 }
4185
4186 ext4_set_resv_clusters(sb);
4187
4188 err = ext4_setup_system_zone(sb);
4189 if (err) {
4190 ext4_msg(sb, KERN_ERR, "failed to initialize system "
4191 "zone (%d)", err);
4192 goto failed_mount4a;
4193 }
4194
4195 ext4_ext_init(sb);
4196 err = ext4_mb_init(sb);
4197 if (err) {
4198 ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
4199 err);
4200 goto failed_mount5;
4201 }
4202
4203 block = ext4_count_free_clusters(sb);
4204 ext4_free_blocks_count_set(sbi->s_es,
4205 EXT4_C2B(sbi, block));
4206 err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
4207 GFP_KERNEL);
4208 if (!err) {
4209 unsigned long freei = ext4_count_free_inodes(sb);
4210 sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
4211 err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
4212 GFP_KERNEL);
4213 }
4214 if (!err)
4215 err = percpu_counter_init(&sbi->s_dirs_counter,
4216 ext4_count_dirs(sb), GFP_KERNEL);
4217 if (!err)
4218 err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
4219 GFP_KERNEL);
4220 if (!err)
4221 err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem);
4222
4223 if (err) {
4224 ext4_msg(sb, KERN_ERR, "insufficient memory");
4225 goto failed_mount6;
4226 }
4227
4228 if (ext4_has_feature_flex_bg(sb))
4229 if (!ext4_fill_flex_info(sb)) {
4230 ext4_msg(sb, KERN_ERR,
4231 "unable to initialize "
4232 "flex_bg meta info!");
4233 goto failed_mount6;
4234 }
4235
4236 err = ext4_register_li_request(sb, first_not_zeroed);
4237 if (err)
4238 goto failed_mount6;
4239
4240 err = ext4_register_sysfs(sb);
4241 if (err)
4242 goto failed_mount7;
4243
4244 #ifdef CONFIG_QUOTA
4245 /* Enable quota usage during mount. */
4246 if (ext4_has_feature_quota(sb) && !(sb->s_flags & MS_RDONLY)) {
4247 err = ext4_enable_quotas(sb);
4248 if (err)
4249 goto failed_mount8;
4250 }
4251 #endif /* CONFIG_QUOTA */
4252
4253 EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
4254 ext4_orphan_cleanup(sb, es);
4255 EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
4256 if (needs_recovery) {
4257 ext4_msg(sb, KERN_INFO, "recovery complete");
4258 ext4_mark_recovery_complete(sb, es);
4259 }
4260 if (EXT4_SB(sb)->s_journal) {
4261 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
4262 descr = " journalled data mode";
4263 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
4264 descr = " ordered data mode";
4265 else
4266 descr = " writeback data mode";
4267 } else
4268 descr = "out journal";
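/*
 * descr is spliced into the "mounted filesystem with%s" message below,
 * so "out journal" deliberately prints as "without journal".
 */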
4269
4270 if (test_opt(sb, DISCARD)) {
4271 struct request_queue *q = bdev_get_queue(sb->s_bdev);
4272 if (!blk_queue_discard(q))
4273 ext4_msg(sb, KERN_WARNING,
4274 "mounting with \"discard\" option, but "
4275 "the device does not support discard");
4276 }
4277
4278 if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
4279 ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
4280 "Opts: %.*s%s%s", descr,
4281 (int) sizeof(sbi->s_es->s_mount_opts),
4282 sbi->s_es->s_mount_opts,
4283 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
4284
4285 if (es->s_error_count)
4286 mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
4287
4288 /* Enable message ratelimiting. Default is 10 messages per 5 secs. */
4289 ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
4290 ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
4291 ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);
4292
4293 kfree(orig_data);
4294 return 0;
4295
4296 cantfind_ext4:
4297 if (!silent)
4298 ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
4299 goto failed_mount;
4300
4301 #ifdef CONFIG_QUOTA
4302 failed_mount8:
4303 ext4_unregister_sysfs(sb);
4304 #endif
4305 failed_mount7:
4306 ext4_unregister_li_request(sb);
4307 failed_mount6:
4308 ext4_mb_release(sb);
4309 if (sbi->s_flex_groups)
4310 kvfree(sbi->s_flex_groups);
4311 percpu_counter_destroy(&sbi->s_freeclusters_counter);
4312 percpu_counter_destroy(&sbi->s_freeinodes_counter);
4313 percpu_counter_destroy(&sbi->s_dirs_counter);
4314 percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
4315 failed_mount5:
4316 ext4_ext_release(sb);
4317 ext4_release_system_zone(sb);
4318 failed_mount4a:
4319 dput(sb->s_root);
4320 sb->s_root = NULL;
4321 failed_mount4:
4322 ext4_msg(sb, KERN_ERR, "mount failed");
4323 if (EXT4_SB(sb)->rsv_conversion_wq)
4324 destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
4325 failed_mount_wq:
4326 if (sbi->s_ea_inode_cache) {
4327 ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
4328 sbi->s_ea_inode_cache = NULL;
4329 }
4330 if (sbi->s_ea_block_cache) {
4331 ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
4332 sbi->s_ea_block_cache = NULL;
4333 }
4334 if (sbi->s_journal) {
4335 jbd2_journal_destroy(sbi->s_journal);
4336 sbi->s_journal = NULL;
4337 }
4338 failed_mount3a:
4339 ext4_es_unregister_shrinker(sbi);
4340 failed_mount3:
4341 del_timer_sync(&sbi->s_err_report);
4342 if (sbi->s_mmp_tsk)
4343 kthread_stop(sbi->s_mmp_tsk);
4344 failed_mount2:
4345 for (i = 0; i < db_count; i++)
4346 brelse(sbi->s_group_desc[i]);
4347 kvfree(sbi->s_group_desc);
4348 failed_mount:
4349 if (sbi->s_chksum_driver)
4350 crypto_free_shash(sbi->s_chksum_driver);
4351 #ifdef CONFIG_QUOTA
4352 for (i = 0; i < EXT4_MAXQUOTAS; i++)
4353 kfree(sbi->s_qf_names[i]);
4354 #endif
4355 ext4_blkdev_remove(sbi);
4356 brelse(bh);
4357 out_fail:
4358 sb->s_fs_info = NULL;
4359 kfree(sbi->s_blockgroup_lock);
4360 out_free_base:
4361 kfree(sbi);
4362 kfree(orig_data);
4363 return err ? err : ret;
4364 }
4365
4366 /*
4367 * Setup any per-fs journal parameters now. We'll do this both on
4368 * initial mount, once the journal has been initialised but before we've
4369 * done any recovery; and again on any subsequent remount.
4370 */
4371 static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
4372 {
4373 struct ext4_sb_info *sbi = EXT4_SB(sb);
4374
4375 journal->j_commit_interval = sbi->s_commit_interval;
4376 journal->j_min_batch_time = sbi->s_min_batch_time;
4377 journal->j_max_batch_time = sbi->s_max_batch_time;
4378
4379 write_lock(&journal->j_state_lock);
4380 if (test_opt(sb, BARRIER))
4381 journal->j_flags |= JBD2_BARRIER;
4382 else
4383 journal->j_flags &= ~JBD2_BARRIER;
4384 if (test_opt(sb, DATA_ERR_ABORT))
4385 journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
4386 else
4387 journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
4388 write_unlock(&journal->j_state_lock);
4389 }
4390
4391 static struct inode *ext4_get_journal_inode(struct super_block *sb,
4392 unsigned int journal_inum)
4393 {
4394 struct inode *journal_inode;
4395
4396 /*
4397 * Test for the existence of a valid inode on disk. Bad things
4398 * happen if we iget() an unused inode, as the subsequent iput()
4399 * will try to delete it.
4400 */
4401 journal_inode = ext4_iget(sb, journal_inum);
4402 if (IS_ERR(journal_inode)) {
4403 ext4_msg(sb, KERN_ERR, "no journal found");
4404 return NULL;
4405 }
4406 if (!journal_inode->i_nlink) {
4407 make_bad_inode(journal_inode);
4408 iput(journal_inode);
4409 ext4_msg(sb, KERN_ERR, "journal inode is deleted");
4410 return NULL;
4411 }
4412
4413 jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
4414 journal_inode, journal_inode->i_size);
4415 if (!S_ISREG(journal_inode->i_mode)) {
4416 ext4_msg(sb, KERN_ERR, "invalid journal inode");
4417 iput(journal_inode);
4418 return NULL;
4419 }
4420 return journal_inode;
4421 }
4422
4423 static journal_t *ext4_get_journal(struct super_block *sb,
4424 unsigned int journal_inum)
4425 {
4426 struct inode *journal_inode;
4427 journal_t *journal;
4428
4429 BUG_ON(!ext4_has_feature_journal(sb));
4430
4431 journal_inode = ext4_get_journal_inode(sb, journal_inum);
4432 if (!journal_inode)
4433 return NULL;
4434
4435 journal = jbd2_journal_init_inode(journal_inode);
4436 if (!journal) {
4437 ext4_msg(sb, KERN_ERR, "Could not load journal inode");
4438 iput(journal_inode);
4439 return NULL;
4440 }
4441 journal->j_private = sb;
4442 ext4_init_journal_params(sb, journal);
4443 return journal;
4444 }
4445
4446 static journal_t *ext4_get_dev_journal(struct super_block *sb,
4447 dev_t j_dev)
4448 {
4449 struct buffer_head *bh;
4450 journal_t *journal;
4451 ext4_fsblk_t start;
4452 ext4_fsblk_t len;
4453 int hblock, blocksize;
4454 ext4_fsblk_t sb_block;
4455 unsigned long offset;
4456 struct ext4_super_block *es;
4457 struct block_device *bdev;
4458
4459 BUG_ON(!ext4_has_feature_journal(sb));
4460
4461 bdev = ext4_blkdev_get(j_dev, sb);
4462 if (bdev == NULL)
4463 return NULL;
4464
4465 blocksize = sb->s_blocksize;
4466 hblock = bdev_logical_block_size(bdev);
4467 if (blocksize < hblock) {
4468 ext4_msg(sb, KERN_ERR,
4469 "blocksize too small for journal device");
4470 goto out_bdev;
4471 }
4472
4473 sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
4474 offset = EXT4_MIN_BLOCK_SIZE % blocksize;
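/*
 * The external journal's ext4 superblock also lives at byte offset
 * 1024; e.g. with a 4kB blocksize that is device block 0, offset 1024.
 */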
4475 set_blocksize(bdev, blocksize);
4476 if (!(bh = __bread(bdev, sb_block, blocksize))) {
4477 ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
4478 "external journal");
4479 goto out_bdev;
4480 }
4481
4482 es = (struct ext4_super_block *) (bh->b_data + offset);
4483 if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
4484 !(le32_to_cpu(es->s_feature_incompat) &
4485 EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
4486 ext4_msg(sb, KERN_ERR, "external journal has "
4487 "bad superblock");
4488 brelse(bh);
4489 goto out_bdev;
4490 }
4491
4492 if ((le32_to_cpu(es->s_feature_ro_compat) &
4493 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
4494 es->s_checksum != ext4_superblock_csum(sb, es)) {
4495 ext4_msg(sb, KERN_ERR, "external journal has "
4496 "corrupt superblock");
4497 brelse(bh);
4498 goto out_bdev;
4499 }
4500
4501 if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
4502 ext4_msg(sb, KERN_ERR, "journal UUID does not match");
4503 brelse(bh);
4504 goto out_bdev;
4505 }
4506
4507 len = ext4_blocks_count(es);
4508 start = sb_block + 1;
4509 brelse(bh); /* we're done with the superblock */
4510
4511 journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
4512 start, len, blocksize);
4513 if (!journal) {
4514 ext4_msg(sb, KERN_ERR, "failed to create device journal");
4515 goto out_bdev;
4516 }
4517 journal->j_private = sb;
4518 ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
4519 wait_on_buffer(journal->j_sb_buffer);
4520 if (!buffer_uptodate(journal->j_sb_buffer)) {
4521 ext4_msg(sb, KERN_ERR, "I/O error on journal device");
4522 goto out_journal;
4523 }
4524 if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
4525 ext4_msg(sb, KERN_ERR, "External journal has more than one "
4526 "user (unsupported) - %d",
4527 be32_to_cpu(journal->j_superblock->s_nr_users));
4528 goto out_journal;
4529 }
4530 EXT4_SB(sb)->journal_bdev = bdev;
4531 ext4_init_journal_params(sb, journal);
4532 return journal;
4533
4534 out_journal:
4535 jbd2_journal_destroy(journal);
4536 out_bdev:
4537 ext4_blkdev_put(bdev);
4538 return NULL;
4539 }
4540
4541 static int ext4_load_journal(struct super_block *sb,
4542 struct ext4_super_block *es,
4543 unsigned long journal_devnum)
4544 {
4545 journal_t *journal;
4546 unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
4547 dev_t journal_dev;
4548 int err = 0;
4549 int really_read_only;
4550
4551 BUG_ON(!ext4_has_feature_journal(sb));
4552
4553 if (journal_devnum &&
4554 journal_devnum != le32_to_cpu(es->s_journal_dev)) {
4555 ext4_msg(sb, KERN_INFO, "external journal device major/minor "
4556 "numbers have changed");
4557 journal_dev = new_decode_dev(journal_devnum);
4558 } else
4559 journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
4560
4561 really_read_only = bdev_read_only(sb->s_bdev);
4562
4563 /*
4564 * Are we loading a blank journal or performing recovery after a
4565 * crash? For recovery, we need to check in advance whether we
4566 * can get read-write access to the device.
4567 */
4568 if (ext4_has_feature_journal_needs_recovery(sb)) {
4569 if (sb->s_flags & MS_RDONLY) {
4570 ext4_msg(sb, KERN_INFO, "INFO: recovery "
4571 "required on readonly filesystem");
4572 if (really_read_only) {
4573 ext4_msg(sb, KERN_ERR, "write access "
4574 "unavailable, cannot proceed");
4575 return -EROFS;
4576 }
4577 ext4_msg(sb, KERN_INFO, "write access will "
4578 "be enabled during recovery");
4579 }
4580 }
4581
4582 if (journal_inum && journal_dev) {
4583 ext4_msg(sb, KERN_ERR, "filesystem has both journal "
4584 "and inode journals!");
4585 return -EINVAL;
4586 }
4587
4588 if (journal_inum) {
4589 if (!(journal = ext4_get_journal(sb, journal_inum)))
4590 return -EINVAL;
4591 } else {
4592 if (!(journal = ext4_get_dev_journal(sb, journal_dev)))
4593 return -EINVAL;
4594 }
4595
4596 if (!(journal->j_flags & JBD2_BARRIER))
4597 ext4_msg(sb, KERN_INFO, "barriers disabled");
4598
4599 if (!ext4_has_feature_journal_needs_recovery(sb))
4600 err = jbd2_journal_wipe(journal, !really_read_only);
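/*
 * Journal replay may rewrite the on-disk superblock with an older
 * copy, which would clobber any error information recorded in the
 * EXT4_S_ERR region since then; stash that region across
 * jbd2_journal_load() and restore it afterwards.
 */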
4601 if (!err) {
4602 char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
4603 if (save)
4604 memcpy(save, ((char *) es) +
4605 EXT4_S_ERR_START, EXT4_S_ERR_LEN);
4606 err = jbd2_journal_load(journal);
4607 if (save)
4608 memcpy(((char *) es) + EXT4_S_ERR_START,
4609 save, EXT4_S_ERR_LEN);
4610 kfree(save);
4611 }
4612
4613 if (err) {
4614 ext4_msg(sb, KERN_ERR, "error loading journal");
4615 jbd2_journal_destroy(journal);
4616 return err;
4617 }
4618
4619 EXT4_SB(sb)->s_journal = journal;
4620 ext4_clear_journal_err(sb, es);
4621
4622 if (!really_read_only && journal_devnum &&
4623 journal_devnum != le32_to_cpu(es->s_journal_dev)) {
4624 es->s_journal_dev = cpu_to_le32(journal_devnum);
4625
4626 /* Make sure we flush the recovery flag to disk. */
4627 ext4_commit_super(sb, 1);
4628 }
4629
4630 return 0;
4631 }
4632
4633 static int ext4_commit_super(struct super_block *sb, int sync)
4634 {
4635 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
4636 struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
4637 int error = 0;
4638
4639 if (!sbh || block_device_ejected(sb))
4640 return error;
4641 /*
4642 * If the file system is mounted read-only, don't update the
4643 * superblock write time. This avoids updating the superblock
4644 * write time when we are mounting the root file system
4645 * read/only but we need to replay the journal; at that point,
4646 * for people who are east of GMT and who make their clock
4647 * tick in localtime for Windows bug-for-bug compatibility,
4648 * the clock is set in the future, and this will cause e2fsck
4649 * to complain and force a full file system check.
4650 */
4651 if (!(sb->s_flags & MS_RDONLY))
4652 es->s_wtime = cpu_to_le32(get_seconds());
4653 if (sb->s_bdev->bd_part)
4654 es->s_kbytes_written =
4655 cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
4656 ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
4657 EXT4_SB(sb)->s_sectors_written_start) >> 1));
4658 else
4659 es->s_kbytes_written =
4660 cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
4661 if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
4662 ext4_free_blocks_count_set(es,
4663 EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
4664 &EXT4_SB(sb)->s_freeclusters_counter)));
4665 if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
4666 es->s_free_inodes_count =
4667 cpu_to_le32(percpu_counter_sum_positive(
4668 &EXT4_SB(sb)->s_freeinodes_counter));
4669 BUFFER_TRACE(sbh, "marking dirty");
4670 ext4_superblock_csum_set(sb);
4671 if (sync)
4672 lock_buffer(sbh);
4673 if (buffer_write_io_error(sbh)) {
4674 /*
4675 * Oh, dear. A previous attempt to write the
4676 * superblock failed. This could happen because the
4677 * USB device was yanked out. Or it could happen to
4678 * be a transient write error and maybe the block will
4679 * be remapped. Nothing we can do but to retry the
4680 * write and hope for the best.
4681 */
4682 ext4_msg(sb, KERN_ERR, "previous I/O error to "
4683 "superblock detected");
4684 clear_buffer_write_io_error(sbh);
4685 set_buffer_uptodate(sbh);
4686 }
4687 mark_buffer_dirty(sbh);
4688 if (sync) {
4689 unlock_buffer(sbh);
4690 error = __sync_dirty_buffer(sbh,
4691 REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
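/*
 * REQ_FUA forces the write through any volatile device cache, so with
 * barriers enabled the superblock is durable on media once this
 * completes.
 */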
4692 if (error)
4693 return error;
4694
4695 error = buffer_write_io_error(sbh);
4696 if (error) {
4697 ext4_msg(sb, KERN_ERR, "I/O error while writing "
4698 "superblock");
4699 clear_buffer_write_io_error(sbh);
4700 set_buffer_uptodate(sbh);
4701 }
4702 }
4703 return error;
4704 }
4705
4706 /*
4707 * Have we just finished recovery? If so, and if we are mounting (or
4708 * remounting) the filesystem readonly, then we will end up with a
4709 * consistent fs on disk. Record that fact.
4710 */
4711 static void ext4_mark_recovery_complete(struct super_block *sb,
4712 struct ext4_super_block *es)
4713 {
4714 journal_t *journal = EXT4_SB(sb)->s_journal;
4715
4716 if (!ext4_has_feature_journal(sb)) {
4717 BUG_ON(journal != NULL);
4718 return;
4719 }
4720 jbd2_journal_lock_updates(journal);
4721 if (jbd2_journal_flush(journal) < 0)
4722 goto out;
4723
4724 if (ext4_has_feature_journal_needs_recovery(sb) &&
4725 sb->s_flags & MS_RDONLY) {
4726 ext4_clear_feature_journal_needs_recovery(sb);
4727 ext4_commit_super(sb, 1);
4728 }
4729
4730 out:
4731 jbd2_journal_unlock_updates(journal);
4732 }
4733
4734 /*
4735 * If we are mounting (or read-write remounting) a filesystem whose journal
4736 * has recorded an error from a previous lifetime, move that error to the
4737 * main filesystem now.
4738 */
4739 static void ext4_clear_journal_err(struct super_block *sb,
4740 struct ext4_super_block *es)
4741 {
4742 journal_t *journal;
4743 int j_errno;
4744 const char *errstr;
4745
4746 BUG_ON(!ext4_has_feature_journal(sb));
4747
4748 journal = EXT4_SB(sb)->s_journal;
4749
4750 /*
4751 * Now check for any error status which may have been recorded in the
4752 * journal by a prior ext4_error() or ext4_abort()
4753 */
4754
4755 j_errno = jbd2_journal_errno(journal);
4756 if (j_errno) {
4757 char nbuf[16];
4758
4759 errstr = ext4_decode_error(sb, j_errno, nbuf);
4760 ext4_warning(sb, "Filesystem error recorded "
4761 "from previous mount: %s", errstr);
4762 ext4_warning(sb, "Marking fs in need of filesystem check.");
4763
4764 EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
4765 es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
4766 ext4_commit_super(sb, 1);
4767
4768 jbd2_journal_clear_err(journal);
4769 jbd2_journal_update_sb_errno(journal);
4770 }
4771 }
4772
4773 /*
4774 * Force the running and committing transactions to commit,
4775 * and wait on the commit.
4776 */
4777 int ext4_force_commit(struct super_block *sb)
4778 {
4779 journal_t *journal;
4780
4781 if (sb->s_flags & MS_RDONLY)
4782 return 0;
4783
4784 journal = EXT4_SB(sb)->s_journal;
4785 return ext4_journal_force_commit(journal);
4786 }
4787
4788 static int ext4_sync_fs(struct super_block *sb, int wait)
4789 {
4790 int ret = 0;
4791 tid_t target;
4792 bool needs_barrier = false;
4793 struct ext4_sb_info *sbi = EXT4_SB(sb);
4794
4795 if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
4796 return 0;
4797
4798 trace_ext4_sync_fs(sb, wait);
4799 flush_workqueue(sbi->rsv_conversion_wq);
4800 /*
4801 * Writeback quota in non-journalled quota case - journalled quota has
4802 * no dirty dquots
4803 */
4804 dquot_writeback_dquots(sb, -1);
4805 /*
4806 * Data writeback is possible w/o a journal transaction, so a barrier
4807 * must be sent at the end of the function. But we can skip it if the
4808 * transaction commit will do it for us.
4809 */
4810 if (sbi->s_journal) {
4811 target = jbd2_get_latest_transaction(sbi->s_journal);
4812 if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
4813 !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
4814 needs_barrier = true;
4815
4816 if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
4817 if (wait)
4818 ret = jbd2_log_wait_commit(sbi->s_journal,
4819 target);
4820 }
4821 } else if (wait && test_opt(sb, BARRIER))
4822 needs_barrier = true;
4823 if (needs_barrier) {
4824 int err;
4825 err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
4826 if (!ret)
4827 ret = err;
4828 }
4829
4830 return ret;
4831 }
4832
4833 /*
4834 * LVM calls this function before a (read-only) snapshot is created. This
4835 * gives us a chance to flush the journal completely and mark the fs clean.
4836 *
4837 * Note that this function alone cannot bring the filesystem to a clean
4838 * state; it relies on the upper layer to stop all data & metadata
4839 * modifications.
4840 */
4841 static int ext4_freeze(struct super_block *sb)
4842 {
4843 int error = 0;
4844 journal_t *journal;
4845
4846 if (sb->s_flags & MS_RDONLY)
4847 return 0;
4848
4849 journal = EXT4_SB(sb)->s_journal;
4850
4851 if (journal) {
4852 /* Now we set up the journal barrier. */
4853 jbd2_journal_lock_updates(journal);
4854
4855 /*
4856 * Don't clear the needs_recovery flag if we failed to
4857 * flush the journal.
4858 */
4859 error = jbd2_journal_flush(journal);
4860 if (error < 0)
4861 goto out;
4862
4863 /* Journal blocked and flushed, clear needs_recovery flag. */
4864 ext4_clear_feature_journal_needs_recovery(sb);
4865 }
4866
4867 error = ext4_commit_super(sb, 1);
4868 out:
4869 if (journal)
4870 /* we rely on upper layer to stop further updates */
4871 jbd2_journal_unlock_updates(journal);
4872 return error;
4873 }
4874
4875 /*
4876 * Called by LVM after the snapshot is done. We need to reset the RECOVER
4877 * flag here, even though the filesystem is not technically dirty yet.
4878 */
4879 static int ext4_unfreeze(struct super_block *sb)
4880 {
4881 if ((sb->s_flags & MS_RDONLY) || ext4_forced_shutdown(EXT4_SB(sb)))
4882 return 0;
4883
4884 if (EXT4_SB(sb)->s_journal) {
4885 /* Reset the needs_recovery flag before the fs is unlocked. */
4886 ext4_set_feature_journal_needs_recovery(sb);
4887 }
4888
4889 ext4_commit_super(sb, 1);
4890 return 0;
4891 }
4892
4893 /*
4894 * Structure to save mount options for ext4_remount's benefit
4895 */
4896 struct ext4_mount_options {
4897 unsigned long s_mount_opt;
4898 unsigned long s_mount_opt2;
4899 kuid_t s_resuid;
4900 kgid_t s_resgid;
4901 unsigned long s_commit_interval;
4902 u32 s_min_batch_time, s_max_batch_time;
4903 #ifdef CONFIG_QUOTA
4904 int s_jquota_fmt;
4905 char *s_qf_names[EXT4_MAXQUOTAS];
4906 #endif
4907 };
4908
4909 static int ext4_remount(struct super_block *sb, int *flags, char *data)
4910 {
4911 struct ext4_super_block *es;
4912 struct ext4_sb_info *sbi = EXT4_SB(sb);
4913 unsigned long old_sb_flags;
4914 struct ext4_mount_options old_opts;
4915 int enable_quota = 0;
4916 ext4_group_t g;
4917 unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
4918 int err = 0;
4919 #ifdef CONFIG_QUOTA
4920 int i, j;
4921 #endif
4922 char *orig_data = kstrdup(data, GFP_KERNEL);
4923
4924 /* Store the original options */
4925 old_sb_flags = sb->s_flags;
4926 old_opts.s_mount_opt = sbi->s_mount_opt;
4927 old_opts.s_mount_opt2 = sbi->s_mount_opt2;
4928 old_opts.s_resuid = sbi->s_resuid;
4929 old_opts.s_resgid = sbi->s_resgid;
4930 old_opts.s_commit_interval = sbi->s_commit_interval;
4931 old_opts.s_min_batch_time = sbi->s_min_batch_time;
4932 old_opts.s_max_batch_time = sbi->s_max_batch_time;
4933 #ifdef CONFIG_QUOTA
4934 old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
4935 for (i = 0; i < EXT4_MAXQUOTAS; i++)
4936 if (sbi->s_qf_names[i]) {
4937 old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
4938 GFP_KERNEL);
4939 if (!old_opts.s_qf_names[i]) {
4940 for (j = 0; j < i; j++)
4941 kfree(old_opts.s_qf_names[j]);
4942 kfree(orig_data);
4943 return -ENOMEM;
4944 }
4945 } else
4946 old_opts.s_qf_names[i] = NULL;
4947 #endif
4948 if (sbi->s_journal && sbi->s_journal->j_task->io_context)
4949 journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
4950
4951 if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
4952 err = -EINVAL;
4953 goto restore_opts;
4954 }
4955
4956 if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
4957 test_opt(sb, JOURNAL_CHECKSUM)) {
4958 ext4_msg(sb, KERN_ERR, "changing journal_checksum "
4959 "during remount not supported; ignoring");
4960 sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
4961 }
4962
4963 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
4964 if (test_opt2(sb, EXPLICIT_DELALLOC)) {
4965 ext4_msg(sb, KERN_ERR, "can't mount with "
4966 "both data=journal and delalloc");
4967 err = -EINVAL;
4968 goto restore_opts;
4969 }
4970 if (test_opt(sb, DIOREAD_NOLOCK)) {
4971 ext4_msg(sb, KERN_ERR, "can't mount with "
4972 "both data=journal and dioread_nolock");
4973 err = -EINVAL;
4974 goto restore_opts;
4975 }
4976 if (test_opt(sb, DAX)) {
4977 ext4_msg(sb, KERN_ERR, "can't mount with "
4978 "both data=journal and dax");
4979 err = -EINVAL;
4980 goto restore_opts;
4981 }
4982 } else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
4983 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
4984 ext4_msg(sb, KERN_ERR, "can't mount with "
4985 "journal_async_commit in data=ordered mode");
4986 err = -EINVAL;
4987 goto restore_opts;
4988 }
4989 }
4990
4991 if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) {
4992 ext4_msg(sb, KERN_WARNING, "warning: refusing change of "
4993 "dax flag with busy inodes while remounting");
4994 sbi->s_mount_opt ^= EXT4_MOUNT_DAX;
4995 }
4996
4997 if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
4998 ext4_abort(sb, "Abort forced by user");
4999
5000 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
5001 (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
5002
5003 es = sbi->s_es;
5004
5005 if (sbi->s_journal) {
5006 ext4_init_journal_params(sb, sbi->s_journal);
5007 set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
5008 }
5009
5010 if (*flags & MS_LAZYTIME)
5011 sb->s_flags |= MS_LAZYTIME;
5012
5013 if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
5014 if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
5015 err = -EROFS;
5016 goto restore_opts;
5017 }
5018
5019 if (*flags & MS_RDONLY) {
5020 err = sync_filesystem(sb);
5021 if (err < 0)
5022 goto restore_opts;
5023 err = dquot_suspend(sb, -1);
5024 if (err < 0)
5025 goto restore_opts;
5026
5027 /*
5028 * First of all, the unconditional stuff we have to do
5029 * to disable replay of the journal when we next remount
5030 */
5031 sb->s_flags |= MS_RDONLY;
5032
5033 /*
5034 * OK, test if we are remounting a valid rw partition
5035 * readonly, and if so set the rdonly flag and then
5036 * mark the partition as valid again.
5037 */
5038 if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
5039 (sbi->s_mount_state & EXT4_VALID_FS))
5040 es->s_state = cpu_to_le16(sbi->s_mount_state);
5041
5042 if (sbi->s_journal)
5043 ext4_mark_recovery_complete(sb, es);
5044 } else {
5045 /* Make sure we can mount this feature set readwrite */
5046 if (ext4_has_feature_readonly(sb) ||
5047 !ext4_feature_set_ok(sb, 0)) {
5048 err = -EROFS;
5049 goto restore_opts;
5050 }
5051 /*
5052 * Make sure the group descriptor checksums
5053 * are sane. If they aren't, refuse to remount r/w.
5054 */
5055 for (g = 0; g < sbi->s_groups_count; g++) {
5056 struct ext4_group_desc *gdp =
5057 ext4_get_group_desc(sb, g, NULL);
5058
5059 if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
5060 ext4_msg(sb, KERN_ERR,
5061 "ext4_remount: Checksum for group %u failed (%u!=%u)",
5062 g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)),
5063 le16_to_cpu(gdp->bg_checksum));
5064 err = -EFSBADCRC;
5065 goto restore_opts;
5066 }
5067 }
5068
5069 /*
5070 * If we have an unprocessed orphan list hanging
5071 * around from a previously readonly bdev mount,
5072 * require a full umount/remount for now.
5073 */
5074 if (es->s_last_orphan) {
5075 ext4_msg(sb, KERN_WARNING, "Couldn't "
5076 "remount RDWR because of unprocessed "
5077 "orphan inode list. Please "
5078 "umount/remount instead");
5079 err = -EINVAL;
5080 goto restore_opts;
5081 }
5082
5083 /*
5084 * Mounting a RDONLY partition read-write, so reread
5085 * and store the current valid flag. (It may have
5086 * been changed by e2fsck since we originally mounted
5087 * the partition.)
5088 */
5089 if (sbi->s_journal)
5090 ext4_clear_journal_err(sb, es);
5091 sbi->s_mount_state = le16_to_cpu(es->s_state);
5092 if (!ext4_setup_super(sb, es, 0))
5093 sb->s_flags &= ~MS_RDONLY;
5094 if (ext4_has_feature_mmp(sb))
5095 if (ext4_multi_mount_protect(sb,
5096 le64_to_cpu(es->s_mmp_block))) {
5097 err = -EROFS;
5098 goto restore_opts;
5099 }
5100 enable_quota = 1;
5101 }
5102 }
5103
5104 /*
5105 * Reinitialize lazy itable initialization thread based on
5106 * current settings
5107 */
5108 if ((sb->s_flags & MS_RDONLY) || !test_opt(sb, INIT_INODE_TABLE))
5109 ext4_unregister_li_request(sb);
5110 else {
5111 ext4_group_t first_not_zeroed;
5112 first_not_zeroed = ext4_has_uninit_itable(sb);
5113 ext4_register_li_request(sb, first_not_zeroed);
5114 }
5115
5116 ext4_setup_system_zone(sb);
5117 if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY))
5118 ext4_commit_super(sb, 1);
5119
5120 #ifdef CONFIG_QUOTA
5121 /* Release old quota file names */
5122 for (i = 0; i < EXT4_MAXQUOTAS; i++)
5123 kfree(old_opts.s_qf_names[i]);
5124 if (enable_quota) {
5125 if (sb_any_quota_suspended(sb))
5126 dquot_resume(sb, -1);
5127 else if (ext4_has_feature_quota(sb)) {
5128 err = ext4_enable_quotas(sb);
5129 if (err)
5130 goto restore_opts;
5131 }
5132 }
5133 #endif
5134
5135 *flags = (*flags & ~MS_LAZYTIME) | (sb->s_flags & MS_LAZYTIME);
5136 ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
5137 kfree(orig_data);
5138 return 0;
5139
5140 restore_opts:
5141 sb->s_flags = old_sb_flags;
5142 sbi->s_mount_opt = old_opts.s_mount_opt;
5143 sbi->s_mount_opt2 = old_opts.s_mount_opt2;
5144 sbi->s_resuid = old_opts.s_resuid;
5145 sbi->s_resgid = old_opts.s_resgid;
5146 sbi->s_commit_interval = old_opts.s_commit_interval;
5147 sbi->s_min_batch_time = old_opts.s_min_batch_time;
5148 sbi->s_max_batch_time = old_opts.s_max_batch_time;
5149 #ifdef CONFIG_QUOTA
5150 sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
5151 for (i = 0; i < EXT4_MAXQUOTAS; i++) {
5152 kfree(sbi->s_qf_names[i]);
5153 sbi->s_qf_names[i] = old_opts.s_qf_names[i];
5154 }
5155 #endif
5156 kfree(orig_data);
5157 return err;
5158 }
5159
5160 #ifdef CONFIG_QUOTA
5161 static int ext4_statfs_project(struct super_block *sb,
5162 kprojid_t projid, struct kstatfs *buf)
5163 {
5164 struct kqid qid;
5165 struct dquot *dquot;
5166 u64 limit;
5167 u64 curblock;
5168
5169 qid = make_kqid_projid(projid);
5170 dquot = dqget(sb, qid);
5171 if (IS_ERR(dquot))
5172 return PTR_ERR(dquot);
5173 spin_lock(&dq_data_lock);
5174
5175 limit = (dquot->dq_dqb.dqb_bsoftlimit ?
5176 dquot->dq_dqb.dqb_bsoftlimit :
5177 dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
5178 if (limit && buf->f_blocks > limit) {
5179 curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
5180 buf->f_blocks = limit;
5181 buf->f_bfree = buf->f_bavail =
5182 (buf->f_blocks > curblock) ?
5183 (buf->f_blocks - curblock) : 0;
5184 }
5185
5186 limit = dquot->dq_dqb.dqb_isoftlimit ?
5187 dquot->dq_dqb.dqb_isoftlimit :
5188 dquot->dq_dqb.dqb_ihardlimit;
5189 if (limit && buf->f_files > limit) {
5190 buf->f_files = limit;
5191 buf->f_ffree =
5192 (buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
5193 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
5194 }
5195
5196 spin_unlock(&dq_data_lock);
5197 dqput(dquot);
5198 return 0;
5199 }
5200 #endif
5201
5202 static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
5203 {
5204 struct super_block *sb = dentry->d_sb;
5205 struct ext4_sb_info *sbi = EXT4_SB(sb);
5206 struct ext4_super_block *es = sbi->s_es;
5207 ext4_fsblk_t overhead = 0, resv_blocks;
5208 u64 fsid;
5209 s64 bfree;
5210 resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));
5211
5212 if (!test_opt(sb, MINIX_DF))
5213 overhead = sbi->s_overhead;
5214
5215 buf->f_type = EXT4_SUPER_MAGIC;
5216 buf->f_bsize = sb->s_blocksize;
5217 buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
5218 bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
5219 percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
5220 /* prevent underflow in case little free space is available */
5221 buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
5222 buf->f_bavail = buf->f_bfree -
5223 (ext4_r_blocks_count(es) + resv_blocks);
5224 if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks))
5225 buf->f_bavail = 0;
5226 buf->f_files = le32_to_cpu(es->s_inodes_count);
5227 buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
5228 buf->f_namelen = EXT4_NAME_LEN;
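/*
 * Fold the 128-bit filesystem UUID into the 64-bit f_fsid userspace
 * expects: xor the two halves, then split into two 32-bit words.
 */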
5229 fsid = le64_to_cpup((void *)es->s_uuid) ^
5230 le64_to_cpup((void *)es->s_uuid + sizeof(u64));
5231 buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
5232 buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
5233
5234 #ifdef CONFIG_QUOTA
5235 if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) &&
5236 sb_has_quota_limits_enabled(sb, PRJQUOTA))
5237 ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf);
5238 #endif
5239 return 0;
5240 }
5241
5242 /* Helper function for writing quotas on sync - we need to start transaction
5243 * before the quota file is locked for write. Otherwise there are possible deadlocks:
5244 * Process 1 Process 2
5245 * ext4_create() quota_sync()
5246 * jbd2_journal_start() write_dquot()
5247 * dquot_initialize() down(dqio_mutex)
5248 * down(dqio_mutex) jbd2_journal_start()
5249 *
5250 */
5251
5252 #ifdef CONFIG_QUOTA
5253
5254 static inline struct inode *dquot_to_inode(struct dquot *dquot)
5255 {
5256 return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
5257 }
5258
5259 static int ext4_write_dquot(struct dquot *dquot)
5260 {
5261 int ret, err;
5262 handle_t *handle;
5263 struct inode *inode;
5264
5265 inode = dquot_to_inode(dquot);
5266 handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
5267 EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
5268 if (IS_ERR(handle))
5269 return PTR_ERR(handle);
5270 ret = dquot_commit(dquot);
5271 err = ext4_journal_stop(handle);
5272 if (!ret)
5273 ret = err;
5274 return ret;
5275 }
5276
5277 static int ext4_acquire_dquot(struct dquot *dquot)
5278 {
5279 int ret, err;
5280 handle_t *handle;
5281
5282 handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
5283 EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
5284 if (IS_ERR(handle))
5285 return PTR_ERR(handle);
5286 ret = dquot_acquire(dquot);
5287 err = ext4_journal_stop(handle);
5288 if (!ret)
5289 ret = err;
5290 return ret;
5291 }
5292
5293 static int ext4_release_dquot(struct dquot *dquot)
5294 {
5295 int ret, err;
5296 handle_t *handle;
5297
5298 handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
5299 EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
5300 if (IS_ERR(handle)) {
5301 /* Release dquot anyway to avoid endless cycle in dqput() */
5302 dquot_release(dquot);
5303 return PTR_ERR(handle);
5304 }
5305 ret = dquot_release(dquot);
5306 err = ext4_journal_stop(handle);
5307 if (!ret)
5308 ret = err;
5309 return ret;
5310 }
5311
5312 static int ext4_mark_dquot_dirty(struct dquot *dquot)
5313 {
5314 struct super_block *sb = dquot->dq_sb;
5315 struct ext4_sb_info *sbi = EXT4_SB(sb);
5316
5317 /* Are we journaling quotas? */
5318 if (ext4_has_feature_quota(sb) ||
5319 sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
5320 dquot_mark_dquot_dirty(dquot);
5321 return ext4_write_dquot(dquot);
5322 } else {
5323 return dquot_mark_dquot_dirty(dquot);
5324 }
5325 }
5326
5327 static int ext4_write_info(struct super_block *sb, int type)
5328 {
5329 int ret, err;
5330 handle_t *handle;
5331
5332 /* Data block + inode block */
5333 handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2);
5334 if (IS_ERR(handle))
5335 return PTR_ERR(handle);
5336 ret = dquot_commit_info(sb, type);
5337 err = ext4_journal_stop(handle);
5338 if (!ret)
5339 ret = err;
5340 return ret;
5341 }
5342
5343 /*
5344  * Turn on quotas during mount time - we need to find
5345  * the quota file and enable quotas on it.
5346 */
5347 static int ext4_quota_on_mount(struct super_block *sb, int type)
5348 {
5349 return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type],
5350 EXT4_SB(sb)->s_jquota_fmt, type);
5351 }
5352
5353 static void lockdep_set_quota_inode(struct inode *inode, int subclass)
5354 {
5355 struct ext4_inode_info *ei = EXT4_I(inode);
5356
5357 /* The first argument of lockdep_set_subclass has to be
5358 * *exactly* the same as the argument to init_rwsem() --- in
5359 * this case, in init_once() --- or lockdep gets unhappy
5360 * because the name of the lock is set using the
5361 * stringification of the argument to init_rwsem().
5362 */
5363 (void) ei; /* shut up clang warning if !CONFIG_LOCKDEP */
5364 lockdep_set_subclass(&ei->i_data_sem, subclass);
5365 }
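/*
 * The distinct subclass matters because writing quota data can take the
 * quota inode's i_data_sem while another inode's i_data_sem is already
 * held (block allocation charging quota goes through ext4_quota_write()
 * below).  Tagging the quota inode I_DATA_SEM_QUOTA lets lockdep tell
 * the two acquisitions apart instead of reporting recursive locking:
 *
 *	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);	(quota on)
 *	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);	(quota off)
 */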
5366
5367 /*
5368 * Standard function to be called on quota_on
5369 */
5370 static int ext4_quota_on(struct super_block *sb, int type, int format_id,
5371 const struct path *path)
5372 {
5373 int err;
5374
5375 if (!test_opt(sb, QUOTA))
5376 return -EINVAL;
5377
5378 /* Quotafile not on the same filesystem? */
5379 if (path->dentry->d_sb != sb)
5380 return -EXDEV;
5381 /* Journaling quota? */
5382 if (EXT4_SB(sb)->s_qf_names[type]) {
5383 /* Quotafile not in fs root? */
5384 if (path->dentry->d_parent != sb->s_root)
5385 ext4_msg(sb, KERN_WARNING,
5386 "Quota file not on filesystem root. "
5387 "Journaled quota will not work");
5388 }
5389
5390 /*
5391 	 * When we journal data on the quota file, we have to flush the journal
5392 	 * so that all updates are visible when we later bypass the pagecache...
5393 */
5394 if (EXT4_SB(sb)->s_journal &&
5395 ext4_should_journal_data(d_inode(path->dentry))) {
5396 /*
5397 * We don't need to lock updates but journal_flush() could
5398 * otherwise be livelocked...
5399 */
5400 jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
5401 err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
5402 jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
5403 if (err)
5404 return err;
5405 }
5406
5407 lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
5408 err = dquot_quota_on(sb, type, format_id, path);
5409 if (err) {
5410 lockdep_set_quota_inode(path->dentry->d_inode,
5411 I_DATA_SEM_NORMAL);
5412 } else {
5413 struct inode *inode = d_inode(path->dentry);
5414 handle_t *handle;
5415
5416 /*
5417 * Set inode flags to prevent userspace from messing with quota
5418 * files. If this fails, we return success anyway since quotas
5419 * are already enabled and this is not a hard failure.
5420 */
5421 inode_lock(inode);
5422 handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
5423 if (IS_ERR(handle))
5424 goto unlock_inode;
5425 EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL;
5426 inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
5427 S_NOATIME | S_IMMUTABLE);
5428 ext4_mark_inode_dirty(handle, inode);
5429 ext4_journal_stop(handle);
5430 unlock_inode:
5431 inode_unlock(inode);
5432 }
5433 return err;
5434 }
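/*
 * This entry point is reached from userspace through quotactl(2) with
 * Q_QUOTAON, typically via quotaon(8).  An illustrative userland call
 * (device and quota-file paths are made up; header availability for
 * QFMT_VFS_V1 may vary):
 *
 *	#include <sys/quota.h>
 *
 *	quotactl(QCMD(Q_QUOTAON, USRQUOTA), "/dev/sda1", QFMT_VFS_V1,
 *		 (caddr_t)"/mnt/aquota.user");
 */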
5435
5436 static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
5437 unsigned int flags)
5438 {
5439 int err;
5440 struct inode *qf_inode;
5441 unsigned long qf_inums[EXT4_MAXQUOTAS] = {
5442 le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
5443 le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
5444 le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
5445 };
5446
5447 BUG_ON(!ext4_has_feature_quota(sb));
5448
5449 if (!qf_inums[type])
5450 return -EPERM;
5451
5452 qf_inode = ext4_iget(sb, qf_inums[type]);
5453 if (IS_ERR(qf_inode)) {
5454 ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
5455 return PTR_ERR(qf_inode);
5456 }
5457
5458 /* Don't account quota for quota files to avoid recursion */
5459 qf_inode->i_flags |= S_NOQUOTA;
5460 lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
5461 	err = dquot_enable(qf_inode, type, format_id, flags);
5462 	/* Reset the lockdep class before dropping our reference - iput()
5463 	 * may free qf_inode */
5464 	if (err)
5465 		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
5466 	iput(qf_inode);
5465
5466 return err;
5467 }
5468
5469 /* Enable usage tracking for all quota types. */
5470 static int ext4_enable_quotas(struct super_block *sb)
5471 {
5472 int type, err = 0;
5473 unsigned long qf_inums[EXT4_MAXQUOTAS] = {
5474 le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
5475 le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
5476 le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
5477 };
5478 bool quota_mopt[EXT4_MAXQUOTAS] = {
5479 test_opt(sb, USRQUOTA),
5480 test_opt(sb, GRPQUOTA),
5481 test_opt(sb, PRJQUOTA),
5482 };
5483
5484 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
5485 for (type = 0; type < EXT4_MAXQUOTAS; type++) {
5486 if (qf_inums[type]) {
5487 err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
5488 DQUOT_USAGE_ENABLED |
5489 (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
5490 if (err) {
5491 ext4_warning(sb,
5492 "Failed to enable quota tracking "
5493 "(type=%d, err=%d). Please run "
5494 "e2fsck to fix.", type, err);
5495 return err;
5496 }
5497 }
5498 }
5499 return 0;
5500 }
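/*
 * With the "quota" feature the quota files are hidden system inodes
 * whose numbers are recorded in the superblock (s_usr/grp/prj_quota_inum),
 * so usage tracking is switched on here at mount time without any
 * visible quota file.  The feature is normally set offline, e.g. with
 * "tune2fs -O quota <dev>" (shown for orientation only).
 */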
5501
5502 static int ext4_quota_off(struct super_block *sb, int type)
5503 {
5504 struct inode *inode = sb_dqopt(sb)->files[type];
5505 handle_t *handle;
5506 int err;
5507
5508 /* Force all delayed allocation blocks to be allocated.
5509 * Caller already holds s_umount sem */
5510 if (test_opt(sb, DELALLOC))
5511 sync_filesystem(sb);
5512
5513 if (!inode || !igrab(inode))
5514 goto out;
5515
5516 err = dquot_quota_off(sb, type);
5517 if (err || ext4_has_feature_quota(sb))
5518 goto out_put;
5519
5520 inode_lock(inode);
5521 /*
5522 	 * Update the quota file's modification time now that userspace can
5523 	 * start looking at it. If we fail, we return success anyway since
5524 * this is not a hard failure and quotas are already disabled.
5525 */
5526 handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
5527 if (IS_ERR(handle))
5528 goto out_unlock;
5529 EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
5530 inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
5531 inode->i_mtime = inode->i_ctime = current_time(inode);
5532 ext4_mark_inode_dirty(handle, inode);
5533 ext4_journal_stop(handle);
5534 out_unlock:
5535 inode_unlock(inode);
5536 out_put:
5537 lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
5538 iput(inode);
5539 return err;
5540 out:
5541 return dquot_quota_off(sb, type);
5542 }
5543
5544 /* Read data from quotafile - bypass the pagecache because we cannot afford
5545  * acquiring its locks... As quota files are never truncated and the quota code
5546  * itself serializes the operations (and no one else should touch the files)
5547  * we don't have to be afraid of races */
5548 static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
5549 size_t len, loff_t off)
5550 {
5551 struct inode *inode = sb_dqopt(sb)->files[type];
5552 ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
5553 int offset = off & (sb->s_blocksize - 1);
5554 int tocopy;
5555 size_t toread;
5556 struct buffer_head *bh;
5557 loff_t i_size = i_size_read(inode);
5558
5559 if (off > i_size)
5560 return 0;
5561 if (off+len > i_size)
5562 len = i_size-off;
5563 toread = len;
5564 while (toread > 0) {
5565 tocopy = sb->s_blocksize - offset < toread ?
5566 sb->s_blocksize - offset : toread;
5567 bh = ext4_bread(NULL, inode, blk, 0);
5568 if (IS_ERR(bh))
5569 return PTR_ERR(bh);
5570 if (!bh) /* A hole? */
5571 memset(data, 0, tocopy);
5572 else
5573 memcpy(data, bh->b_data+offset, tocopy);
5574 brelse(bh);
5575 offset = 0;
5576 toread -= tocopy;
5577 data += tocopy;
5578 blk++;
5579 }
5580 return len;
5581 }
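/*
 * The blk/offset split above is power-of-two arithmetic.  Worked
 * example with a 4096-byte block size (EXT4_BLOCK_SIZE_BITS == 12,
 * numbers illustrative): off = 5000 gives blk = 5000 >> 12 = 1 and
 * offset = 5000 & 4095 = 904.  A 200-byte read fits in that block
 * (tocopy = 200); a 4000-byte read copies 3192 bytes from block 1,
 * then the remaining 808 from block 2 with offset reset to 0.
 */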
5582
5583 /* Write to quotafile (we know the transaction is already started and has
5584 * enough credits) */
5585 static ssize_t ext4_quota_write(struct super_block *sb, int type,
5586 const char *data, size_t len, loff_t off)
5587 {
5588 struct inode *inode = sb_dqopt(sb)->files[type];
5589 ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
5590 int err, offset = off & (sb->s_blocksize - 1);
5591 int retries = 0;
5592 struct buffer_head *bh;
5593 handle_t *handle = journal_current_handle();
5594
5595 if (EXT4_SB(sb)->s_journal && !handle) {
5596 ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
5597 " cancelled because transaction is not started",
5598 (unsigned long long)off, (unsigned long long)len);
5599 return -EIO;
5600 }
5601 /*
5602 	 * Since we account for only one data block in the transaction credits,
5603 	 * a write must not cross a block boundary.
5604 */
5605 if (sb->s_blocksize - offset < len) {
5606 ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
5607 " cancelled because not block aligned",
5608 (unsigned long long)off, (unsigned long long)len);
5609 return -EIO;
5610 }
5611
5612 do {
5613 bh = ext4_bread(handle, inode, blk,
5614 EXT4_GET_BLOCKS_CREATE |
5615 EXT4_GET_BLOCKS_METADATA_NOFAIL);
5616 } while (IS_ERR(bh) && (PTR_ERR(bh) == -ENOSPC) &&
5617 ext4_should_retry_alloc(inode->i_sb, &retries));
5618 if (IS_ERR(bh))
5619 return PTR_ERR(bh);
5620 if (!bh)
5621 goto out;
5622 BUFFER_TRACE(bh, "get write access");
5623 err = ext4_journal_get_write_access(handle, bh);
5624 if (err) {
5625 brelse(bh);
5626 return err;
5627 }
5628 lock_buffer(bh);
5629 memcpy(bh->b_data+offset, data, len);
5630 flush_dcache_page(bh->b_page);
5631 unlock_buffer(bh);
5632 err = ext4_handle_dirty_metadata(handle, NULL, bh);
5633 brelse(bh);
5634 out:
5635 if (inode->i_size < off + len) {
5636 i_size_write(inode, off + len);
5637 EXT4_I(inode)->i_disksize = inode->i_size;
5638 ext4_mark_inode_dirty(handle, inode);
5639 }
5640 return len;
5641 }
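/*
 * A worked example of the single-block constraint above, with a
 * 4096-byte block size (numbers illustrative): off = 4090, len = 16
 * gives offset = 4090 and s_blocksize - offset = 6 < 16, so the write
 * is rejected with -EIO.  The quota formats keep their records inside
 * aligned blocks, so well-formed callers never hit this.
 */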
5642
5643 static int ext4_get_next_id(struct super_block *sb, struct kqid *qid)
5644 {
5645 const struct quota_format_ops *ops;
5646
5647 if (!sb_has_quota_loaded(sb, qid->type))
5648 return -ESRCH;
5649 ops = sb_dqopt(sb)->ops[qid->type];
5650 if (!ops || !ops->get_next_id)
5651 return -ENOSYS;
5652 return dquot_get_next_id(sb, qid);
5653 }
5654 #endif
5655
5656 static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
5657 const char *dev_name, void *data)
5658 {
5659 return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super);
5660 }
5661
5662 #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
5663 static inline void register_as_ext2(void)
5664 {
5665 int err = register_filesystem(&ext2_fs_type);
5666 if (err)
5667 printk(KERN_WARNING
5668 "EXT4-fs: Unable to register as ext2 (%d)\n", err);
5669 }
5670
5671 static inline void unregister_as_ext2(void)
5672 {
5673 unregister_filesystem(&ext2_fs_type);
5674 }
5675
5676 static inline int ext2_feature_set_ok(struct super_block *sb)
5677 {
5678 if (ext4_has_unknown_ext2_incompat_features(sb))
5679 return 0;
5680 if (sb->s_flags & MS_RDONLY)
5681 return 1;
5682 if (ext4_has_unknown_ext2_ro_compat_features(sb))
5683 return 0;
5684 return 1;
5685 }
5686 #else
5687 static inline void register_as_ext2(void) { }
5688 static inline void unregister_as_ext2(void) { }
5689 static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
5690 #endif
5691
5692 static inline void register_as_ext3(void)
5693 {
5694 int err = register_filesystem(&ext3_fs_type);
5695 if (err)
5696 printk(KERN_WARNING
5697 "EXT4-fs: Unable to register as ext3 (%d)\n", err);
5698 }
5699
5700 static inline void unregister_as_ext3(void)
5701 {
5702 unregister_filesystem(&ext3_fs_type);
5703 }
5704
5705 static inline int ext3_feature_set_ok(struct super_block *sb)
5706 {
5707 if (ext4_has_unknown_ext3_incompat_features(sb))
5708 return 0;
5709 if (!ext4_has_feature_journal(sb))
5710 return 0;
5711 if (sb->s_flags & MS_RDONLY)
5712 return 1;
5713 if (ext4_has_unknown_ext3_ro_compat_features(sb))
5714 return 0;
5715 return 1;
5716 }
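/*
 * These checks follow the classic ext feature-flag contract: unknown
 * INCOMPAT features block any mount, unknown RO_COMPAT features block
 * only read-write mounts, and COMPAT features never block.  So a
 * filesystem with e.g. huge_file set (RO_COMPAT, unknown to ext3) can
 * still be mounted through the ext3 alias read-only but not read-write;
 * an ext3 mount additionally requires a journal.
 */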
5717
5718 static struct file_system_type ext4_fs_type = {
5719 .owner = THIS_MODULE,
5720 .name = "ext4",
5721 .mount = ext4_mount,
5722 .kill_sb = kill_block_super,
5723 .fs_flags = FS_REQUIRES_DEV,
5724 };
5725 MODULE_ALIAS_FS("ext4");
5726
5727 /* Shared across all ext4 file systems */
5728 wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
5729
5730 static int __init ext4_init_fs(void)
5731 {
5732 int i, err;
5733
5734 ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
5735 ext4_li_info = NULL;
5736 mutex_init(&ext4_li_mtx);
5737
5738 /* Build-time check for flags consistency */
5739 ext4_check_flag_values();
5740
5741 for (i = 0; i < EXT4_WQ_HASH_SZ; i++)
5742 init_waitqueue_head(&ext4__ioend_wq[i]);
5743
5744 err = ext4_init_es();
5745 if (err)
5746 return err;
5747
5748 err = ext4_init_pageio();
5749 if (err)
5750 goto out5;
5751
5752 err = ext4_init_system_zone();
5753 if (err)
5754 goto out4;
5755
5756 err = ext4_init_sysfs();
5757 if (err)
5758 goto out3;
5759
5760 err = ext4_init_mballoc();
5761 if (err)
5762 goto out2;
5763 err = init_inodecache();
5764 if (err)
5765 goto out1;
5766 register_as_ext3();
5767 register_as_ext2();
5768 err = register_filesystem(&ext4_fs_type);
5769 if (err)
5770 goto out;
5771
5772 return 0;
5773 out:
5774 unregister_as_ext2();
5775 unregister_as_ext3();
5776 destroy_inodecache();
5777 out1:
5778 ext4_exit_mballoc();
5779 out2:
5780 ext4_exit_sysfs();
5781 out3:
5782 ext4_exit_system_zone();
5783 out4:
5784 ext4_exit_pageio();
5785 out5:
5786 ext4_exit_es();
5787
5788 return err;
5789 }
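/*
 * The error ladder above follows the usual kernel unwind idiom: each
 * failing init step jumps to a label that tears down only what was set
 * up before it, in reverse order.  Schematically (names hypothetical):
 *
 *	err = init_a();
 *	if (err)
 *		return err;
 *	err = init_b();
 *	if (err)
 *		goto undo_a;
 *	err = init_c();
 *	if (err)
 *		goto undo_b;
 *	return 0;
 *	undo_b:
 *		exit_b();
 *	undo_a:
 *		exit_a();
 *	return err;
 *
 * Note that the labels here count down (out5 ... out), so "out" undoes
 * the most state and "out5" the least.
 */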
5790
5791 static void __exit ext4_exit_fs(void)
5792 {
5793 ext4_destroy_lazyinit_thread();
5794 unregister_as_ext2();
5795 unregister_as_ext3();
5796 unregister_filesystem(&ext4_fs_type);
5797 destroy_inodecache();
5798 ext4_exit_mballoc();
5799 ext4_exit_sysfs();
5800 ext4_exit_system_zone();
5801 ext4_exit_pageio();
5802 ext4_exit_es();
5803 }
5804
5805 MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
5806 MODULE_DESCRIPTION("Fourth Extended Filesystem");
5807 MODULE_LICENSE("GPL");
5808 module_init(ext4_init_fs)
5809 module_exit(ext4_exit_fs)