1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * linux/fs/ext4/resize.c
4 *
5 * Support for resizing an ext4 filesystem while it is mounted.
6 *
7 * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
8 *
9 * This could probably be made into a module, because it is not often in use.
10 */
11
12
13 #define EXT4FS_DEBUG
14
15 #include <linux/errno.h>
16 #include <linux/slab.h>
17
18 #include "ext4_jbd2.h"
19
20 int ext4_resize_begin(struct super_block *sb)
21 {
22 struct ext4_sb_info *sbi = EXT4_SB(sb);
23 int ret = 0;
24
25 if (!ns_capable(sb->s_user_ns, CAP_SYS_RESOURCE))
26 return -EPERM;
27
28 /*
29 * If we are not using the primary superblock/GDT copy don't resize,
30 * because the user tools have no way of handling this. Probably a
31  * bad time to do it anyway.
32 */
33 if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
34 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
35 ext4_warning(sb, "won't resize using backup superblock at %llu",
36 (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
37 return -EPERM;
38 }
39
40 /*
41 * We are not allowed to do online-resizing on a filesystem mounted
42  * with errors, because it can easily destroy the filesystem.
43 */
44 if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
45 ext4_warning(sb, "There are errors in the filesystem, "
46 "so online resizing is not allowed");
47 return -EPERM;
48 }
49
50 if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING,
51 &EXT4_SB(sb)->s_ext4_flags))
52 ret = -EBUSY;
53
54 return ret;
55 }
56
57 void ext4_resize_end(struct super_block *sb)
58 {
59 clear_bit_unlock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags);
60 smp_mb__after_atomic();
61 }
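/*
 * Illustrative sketch only (not taken from this file): callers such as the
 * resize ioctl handlers are expected to bracket a resize operation with
 * ext4_resize_begin()/ext4_resize_end(), roughly as below.  The helper name
 * example_do_resize() is hypothetical.
 */
#if 0
static int example_do_resize(struct super_block *sb, ext4_fsblk_t n_blocks_count)
{
	int err = ext4_resize_begin(sb);	/* -EBUSY if a resize is already running */

	if (err)
		return err;
	err = ext4_resize_fs(sb, n_blocks_count);	/* or ext4_group_add()/ext4_group_extend() */
	ext4_resize_end(sb);
	return err;
}
#endif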
62
63 static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb,
64 ext4_group_t group) {
65 return (group >> EXT4_DESC_PER_BLOCK_BITS(sb)) <<
66 EXT4_DESC_PER_BLOCK_BITS(sb);
67 }
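/*
 * Worked example (illustrative): a meta block group spans EXT4_DESC_PER_BLOCK()
 * groups, and this helper rounds down to its first group.  If
 * EXT4_DESC_PER_BLOCK(sb) were 64, groups 0-63 would map to first group 0 and
 * groups 64-127 would map to first group 64.
 */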
68
69 static ext4_fsblk_t ext4_meta_bg_first_block_no(struct super_block *sb,
70 ext4_group_t group) {
71 group = ext4_meta_bg_first_group(sb, group);
72 return ext4_group_first_block_no(sb, group);
73 }
74
75 static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb,
76 ext4_group_t group) {
77 ext4_grpblk_t overhead;
78 overhead = ext4_bg_num_gdb(sb, group);
79 if (ext4_bg_has_super(sb, group))
80 overhead += 1 +
81 le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
82 return overhead;
83 }
84
85 #define outside(b, first, last) ((b) < (first) || (b) >= (last))
86 #define inside(b, first, last) ((b) >= (first) && (b) < (last))
87
88 static int verify_group_input(struct super_block *sb,
89 struct ext4_new_group_data *input)
90 {
91 struct ext4_sb_info *sbi = EXT4_SB(sb);
92 struct ext4_super_block *es = sbi->s_es;
93 ext4_fsblk_t start = ext4_blocks_count(es);
94 ext4_fsblk_t end = start + input->blocks_count;
95 ext4_group_t group = input->group;
96 ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
97 unsigned overhead;
98 ext4_fsblk_t metaend;
99 struct buffer_head *bh = NULL;
100 ext4_grpblk_t free_blocks_count, offset;
101 int err = -EINVAL;
102
103 if (group != sbi->s_groups_count) {
104 ext4_warning(sb, "Cannot add at group %u (only %u groups)",
105 input->group, sbi->s_groups_count);
106 return -EINVAL;
107 }
108
109 overhead = ext4_group_overhead_blocks(sb, group);
110 metaend = start + overhead;
111 input->free_clusters_count = free_blocks_count =
112 input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
113
114 if (test_opt(sb, DEBUG))
115 printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks "
116 "(%d free, %u reserved)\n",
117 ext4_bg_has_super(sb, input->group) ? "normal" :
118 "no-super", input->group, input->blocks_count,
119 free_blocks_count, input->reserved_blocks);
120
121 ext4_get_group_no_and_offset(sb, start, NULL, &offset);
122 if (offset != 0)
123 ext4_warning(sb, "Last group not full");
124 else if (input->reserved_blocks > input->blocks_count / 5)
125 ext4_warning(sb, "Reserved blocks too high (%u)",
126 input->reserved_blocks);
127 else if (free_blocks_count < 0)
128 ext4_warning(sb, "Bad blocks count %u",
129 input->blocks_count);
130 else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
131 err = PTR_ERR(bh);
132 bh = NULL;
133 ext4_warning(sb, "Cannot read last block (%llu)",
134 end - 1);
135 } else if (outside(input->block_bitmap, start, end))
136 ext4_warning(sb, "Block bitmap not in group (block %llu)",
137 (unsigned long long)input->block_bitmap);
138 else if (outside(input->inode_bitmap, start, end))
139 ext4_warning(sb, "Inode bitmap not in group (block %llu)",
140 (unsigned long long)input->inode_bitmap);
141 else if (outside(input->inode_table, start, end) ||
142 outside(itend - 1, start, end))
143 ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)",
144 (unsigned long long)input->inode_table, itend - 1);
145 else if (input->inode_bitmap == input->block_bitmap)
146 ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)",
147 (unsigned long long)input->block_bitmap);
148 else if (inside(input->block_bitmap, input->inode_table, itend))
149 ext4_warning(sb, "Block bitmap (%llu) in inode table "
150 "(%llu-%llu)",
151 (unsigned long long)input->block_bitmap,
152 (unsigned long long)input->inode_table, itend - 1);
153 else if (inside(input->inode_bitmap, input->inode_table, itend))
154 ext4_warning(sb, "Inode bitmap (%llu) in inode table "
155 "(%llu-%llu)",
156 (unsigned long long)input->inode_bitmap,
157 (unsigned long long)input->inode_table, itend - 1);
158 else if (inside(input->block_bitmap, start, metaend))
159 ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)",
160 (unsigned long long)input->block_bitmap,
161 start, metaend - 1);
162 else if (inside(input->inode_bitmap, start, metaend))
163 ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)",
164 (unsigned long long)input->inode_bitmap,
165 start, metaend - 1);
166 else if (inside(input->inode_table, start, metaend) ||
167 inside(itend - 1, start, metaend))
168 ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table "
169 "(%llu-%llu)",
170 (unsigned long long)input->inode_table,
171 itend - 1, start, metaend - 1);
172 else
173 err = 0;
174 brelse(bh);
175
176 return err;
177 }
178
179 /*
180  * ext4_new_flex_group_data is used by the 64bit-resize interface to add one
181  * flex group at a time.
182 */
183 struct ext4_new_flex_group_data {
184 struct ext4_new_group_data *groups; /* new_group_data for groups
185 in the flex group */
186 __u16 *bg_flags; /* block group flags of groups
187 in @groups */
188 ext4_group_t count; /* number of groups in @groups
189 */
190 };
191
192 /*
193  * alloc_flex_gd() allocates an ext4_new_flex_group_data with room for
194  * @flexbg_size groups.
195  *
196  * Returns NULL on failure, otherwise the address of the allocated structure.
197 */
198 static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
199 {
200 struct ext4_new_flex_group_data *flex_gd;
201
202 flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS);
203 if (flex_gd == NULL)
204 goto out3;
205
206 if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
207 goto out2;
208 flex_gd->count = flexbg_size;
209
210 flex_gd->groups = kmalloc(sizeof(struct ext4_new_group_data) *
211 flexbg_size, GFP_NOFS);
212 if (flex_gd->groups == NULL)
213 goto out2;
214
215 flex_gd->bg_flags = kmalloc(flexbg_size * sizeof(__u16), GFP_NOFS);
216 if (flex_gd->bg_flags == NULL)
217 goto out1;
218
219 return flex_gd;
220
221 out1:
222 kfree(flex_gd->groups);
223 out2:
224 kfree(flex_gd);
225 out3:
226 return NULL;
227 }
228
229 static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
230 {
231 kfree(flex_gd->bg_flags);
232 kfree(flex_gd->groups);
233 kfree(flex_gd);
234 }
235
236 /*
237 * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps
238 * and inode tables for a flex group.
239 *
240 * This function is used by 64bit-resize. Note that this function allocates
241 * group tables from the 1st group of groups contained by @flexgd, which may
242  * be only part of a flex group.
243 *
244 * @sb: super block of fs to which the groups belongs
245 *
246 * Returns 0 on a successful allocation of the metadata blocks in the
247 * block group.
248 */
249 static int ext4_alloc_group_tables(struct super_block *sb,
250 struct ext4_new_flex_group_data *flex_gd,
251 int flexbg_size)
252 {
253 struct ext4_new_group_data *group_data = flex_gd->groups;
254 ext4_fsblk_t start_blk;
255 ext4_fsblk_t last_blk;
256 ext4_group_t src_group;
257 ext4_group_t bb_index = 0;
258 ext4_group_t ib_index = 0;
259 ext4_group_t it_index = 0;
260 ext4_group_t group;
261 ext4_group_t last_group;
262 unsigned overhead;
263 __u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
264 int i;
265
266 BUG_ON(flex_gd->count == 0 || group_data == NULL);
267
268 src_group = group_data[0].group;
269 last_group = src_group + flex_gd->count - 1;
270
271 BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) !=
272 (last_group & ~(flexbg_size - 1))));
273 next_group:
274 group = group_data[0].group;
275 if (src_group >= group_data[0].group + flex_gd->count)
276 return -ENOSPC;
277 start_blk = ext4_group_first_block_no(sb, src_group);
278 last_blk = start_blk + group_data[src_group - group].blocks_count;
279
280 overhead = ext4_group_overhead_blocks(sb, src_group);
281
282 start_blk += overhead;
283
284 /* We collect contiguous blocks as much as possible. */
285 src_group++;
286 for (; src_group <= last_group; src_group++) {
287 overhead = ext4_group_overhead_blocks(sb, src_group);
288 if (overhead == 0)
289 last_blk += group_data[src_group - group].blocks_count;
290 else
291 break;
292 }
293
294 /* Allocate block bitmaps */
295 for (; bb_index < flex_gd->count; bb_index++) {
296 if (start_blk >= last_blk)
297 goto next_group;
298 group_data[bb_index].block_bitmap = start_blk++;
299 group = ext4_get_group_number(sb, start_blk - 1);
300 group -= group_data[0].group;
301 group_data[group].mdata_blocks++;
302 flex_gd->bg_flags[group] &= uninit_mask;
303 }
304
305 /* Allocate inode bitmaps */
306 for (; ib_index < flex_gd->count; ib_index++) {
307 if (start_blk >= last_blk)
308 goto next_group;
309 group_data[ib_index].inode_bitmap = start_blk++;
310 group = ext4_get_group_number(sb, start_blk - 1);
311 group -= group_data[0].group;
312 group_data[group].mdata_blocks++;
313 flex_gd->bg_flags[group] &= uninit_mask;
314 }
315
316 /* Allocate inode tables */
317 for (; it_index < flex_gd->count; it_index++) {
318 unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
319 ext4_fsblk_t next_group_start;
320
321 if (start_blk + itb > last_blk)
322 goto next_group;
323 group_data[it_index].inode_table = start_blk;
324 group = ext4_get_group_number(sb, start_blk);
325 next_group_start = ext4_group_first_block_no(sb, group + 1);
326 group -= group_data[0].group;
327
328 if (start_blk + itb > next_group_start) {
329 flex_gd->bg_flags[group + 1] &= uninit_mask;
330 overhead = start_blk + itb - next_group_start;
331 group_data[group + 1].mdata_blocks += overhead;
332 itb -= overhead;
333 }
334
335 group_data[group].mdata_blocks += itb;
336 flex_gd->bg_flags[group] &= uninit_mask;
337 start_blk += EXT4_SB(sb)->s_itb_per_group;
338 }
339
340 /* Update free clusters count to exclude metadata blocks */
341 for (i = 0; i < flex_gd->count; i++) {
342 group_data[i].free_clusters_count -=
343 EXT4_NUM_B2C(EXT4_SB(sb),
344 group_data[i].mdata_blocks);
345 }
346
347 if (test_opt(sb, DEBUG)) {
348 int i;
349 group = group_data[0].group;
350
351 printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
352 "%d groups, flexbg size is %d:\n", flex_gd->count,
353 flexbg_size);
354
355 for (i = 0; i < flex_gd->count; i++) {
356 ext4_debug(
357 "adding %s group %u: %u blocks (%d free, %d mdata blocks)\n",
358 ext4_bg_has_super(sb, group + i) ? "normal" :
359 "no-super", group + i,
360 group_data[i].blocks_count,
361 group_data[i].free_clusters_count,
362 group_data[i].mdata_blocks);
363 }
364 }
365 return 0;
366 }
367
368 static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
369 ext4_fsblk_t blk)
370 {
371 struct buffer_head *bh;
372 int err;
373
374 bh = sb_getblk(sb, blk);
375 if (unlikely(!bh))
376 return ERR_PTR(-ENOMEM);
377 BUFFER_TRACE(bh, "get_write_access");
378 if ((err = ext4_journal_get_write_access(handle, bh))) {
379 brelse(bh);
380 bh = ERR_PTR(err);
381 } else {
382 memset(bh->b_data, 0, sb->s_blocksize);
383 set_buffer_uptodate(bh);
384 }
385
386 return bh;
387 }
388
389 /*
390 * If we have fewer than thresh credits, extend by EXT4_MAX_TRANS_DATA.
391 * If that fails, restart the transaction & regain write access for the
392 * buffer head which is used for block_bitmap modifications.
393 */
394 static int extend_or_restart_transaction(handle_t *handle, int thresh)
395 {
396 int err;
397
398 if (ext4_handle_has_enough_credits(handle, thresh))
399 return 0;
400
401 err = ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA);
402 if (err < 0)
403 return err;
404 if (err) {
405 err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA);
406 if (err)
407 return err;
408 }
409
410 return 0;
411 }
412
413 /*
414  * set_flexbg_block_bitmap() marks clusters [@first_cluster, @last_cluster] used.
415  *
416  * Helper function for setup_new_flex_group_blocks().
417 *
418 * @sb: super block
419 * @handle: journal handle
420 * @flex_gd: flex group data
421 */
422 static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
423 struct ext4_new_flex_group_data *flex_gd,
424 ext4_fsblk_t first_cluster, ext4_fsblk_t last_cluster)
425 {
426 struct ext4_sb_info *sbi = EXT4_SB(sb);
427 ext4_group_t count = last_cluster - first_cluster + 1;
428 ext4_group_t count2;
429
430 ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster,
431 last_cluster);
432 for (count2 = count; count > 0;
433 count -= count2, first_cluster += count2) {
434 ext4_fsblk_t start;
435 struct buffer_head *bh;
436 ext4_group_t group;
437 int err;
438
439 group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster));
440 start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group));
441 group -= flex_gd->groups[0].group;
442
443 count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start);
444 if (count2 > count)
445 count2 = count;
446
447 if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
448 BUG_ON(flex_gd->count > 1);
449 continue;
450 }
451
452 err = extend_or_restart_transaction(handle, 1);
453 if (err)
454 return err;
455
456 bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
457 if (unlikely(!bh))
458 return -ENOMEM;
459
460 BUFFER_TRACE(bh, "get_write_access");
461 err = ext4_journal_get_write_access(handle, bh);
462 if (err) {
463 brelse(bh);
464 return err;
465 }
466 ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
467 first_cluster, first_cluster - start, count2);
468 ext4_set_bits(bh->b_data, first_cluster - start, count2);
469
470 err = ext4_handle_dirty_metadata(handle, NULL, bh);
471 brelse(bh);
472 if (unlikely(err))
473 return err;
474 }
475
476 return 0;
477 }
478
479 /*
480 * Set up the block and inode bitmaps, and the inode table for the new groups.
481 * This doesn't need to be part of the main transaction, since we are only
482 * changing blocks outside the actual filesystem. We still do journaling to
483 * ensure the recovery is correct in case of a failure just after resize.
484 * If any part of this fails, we simply abort the resize.
485 *
486  * setup_new_flex_group_blocks handles a flex group as follows:
487  * 1. copy super block and GDT, and initialize group tables if necessary.
488  *    In this step, we only set bits in block bitmaps for blocks taken by the
489  *    super block and GDT.
490 * 2. allocate group tables in block bitmaps, that is, set bits in block
491 * bitmap for blocks taken by group tables.
492 */
493 static int setup_new_flex_group_blocks(struct super_block *sb,
494 struct ext4_new_flex_group_data *flex_gd)
495 {
496 int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group};
497 ext4_fsblk_t start;
498 ext4_fsblk_t block;
499 struct ext4_sb_info *sbi = EXT4_SB(sb);
500 struct ext4_super_block *es = sbi->s_es;
501 struct ext4_new_group_data *group_data = flex_gd->groups;
502 __u16 *bg_flags = flex_gd->bg_flags;
503 handle_t *handle;
504 ext4_group_t group, count;
505 struct buffer_head *bh = NULL;
506 int reserved_gdb, i, j, err = 0, err2;
507 int meta_bg;
508
509 BUG_ON(!flex_gd->count || !group_data ||
510 group_data[0].group != sbi->s_groups_count);
511
512 reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
513 meta_bg = ext4_has_feature_meta_bg(sb);
514
515 /* This transaction may be extended/restarted along the way */
516 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
517 if (IS_ERR(handle))
518 return PTR_ERR(handle);
519
520 group = group_data[0].group;
521 for (i = 0; i < flex_gd->count; i++, group++) {
522 unsigned long gdblocks;
523 ext4_grpblk_t overhead;
524
525 gdblocks = ext4_bg_num_gdb(sb, group);
526 start = ext4_group_first_block_no(sb, group);
527
528 if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
529 goto handle_itb;
530
531 if (meta_bg == 1) {
532 ext4_group_t first_group;
533 first_group = ext4_meta_bg_first_group(sb, group);
534 if (first_group != group + 1 &&
535 first_group != group + EXT4_DESC_PER_BLOCK(sb) - 1)
536 goto handle_itb;
537 }
538
539 block = start + ext4_bg_has_super(sb, group);
540 /* Copy all of the GDT blocks into the backup in this group */
541 for (j = 0; j < gdblocks; j++, block++) {
542 struct buffer_head *gdb;
543
544 ext4_debug("update backup group %#04llx\n", block);
545 err = extend_or_restart_transaction(handle, 1);
546 if (err)
547 goto out;
548
549 gdb = sb_getblk(sb, block);
550 if (unlikely(!gdb)) {
551 err = -ENOMEM;
552 goto out;
553 }
554
555 BUFFER_TRACE(gdb, "get_write_access");
556 err = ext4_journal_get_write_access(handle, gdb);
557 if (err) {
558 brelse(gdb);
559 goto out;
560 }
561 memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data,
562 gdb->b_size);
563 set_buffer_uptodate(gdb);
564
565 err = ext4_handle_dirty_metadata(handle, NULL, gdb);
566 if (unlikely(err)) {
567 brelse(gdb);
568 goto out;
569 }
570 brelse(gdb);
571 }
572
573 /* Zero out all of the reserved backup group descriptor
574 * table blocks
575 */
576 if (ext4_bg_has_super(sb, group)) {
577 err = sb_issue_zeroout(sb, gdblocks + start + 1,
578 reserved_gdb, GFP_NOFS);
579 if (err)
580 goto out;
581 }
582
583 handle_itb:
584                 /* Initialize group tables of the group @group */
585 if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
586 goto handle_bb;
587
588 /* Zero out all of the inode table blocks */
589 block = group_data[i].inode_table;
590 ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
591 block, sbi->s_itb_per_group);
592 err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group,
593 GFP_NOFS);
594 if (err)
595 goto out;
596
597 handle_bb:
598 if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT)
599 goto handle_ib;
600
601 /* Initialize block bitmap of the @group */
602 block = group_data[i].block_bitmap;
603 err = extend_or_restart_transaction(handle, 1);
604 if (err)
605 goto out;
606
607 bh = bclean(handle, sb, block);
608 if (IS_ERR(bh)) {
609 err = PTR_ERR(bh);
610 goto out;
611 }
612 overhead = ext4_group_overhead_blocks(sb, group);
613 if (overhead != 0) {
614 ext4_debug("mark backup superblock %#04llx (+0)\n",
615 start);
616 ext4_set_bits(bh->b_data, 0,
617 EXT4_NUM_B2C(sbi, overhead));
618 }
619 ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
620 sb->s_blocksize * 8, bh->b_data);
621 err = ext4_handle_dirty_metadata(handle, NULL, bh);
622 brelse(bh);
623 if (err)
624 goto out;
625
626 handle_ib:
627 if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
628 continue;
629
630 /* Initialize inode bitmap of the @group */
631 block = group_data[i].inode_bitmap;
632 err = extend_or_restart_transaction(handle, 1);
633 if (err)
634 goto out;
635 /* Mark unused entries in inode bitmap used */
636 bh = bclean(handle, sb, block);
637 if (IS_ERR(bh)) {
638 err = PTR_ERR(bh);
639 goto out;
640 }
641
642 ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
643 sb->s_blocksize * 8, bh->b_data);
644 err = ext4_handle_dirty_metadata(handle, NULL, bh);
645 brelse(bh);
646 if (err)
647 goto out;
648 }
649
650 /* Mark group tables in block bitmap */
651 for (j = 0; j < GROUP_TABLE_COUNT; j++) {
652 count = group_table_count[j];
653 start = (&group_data[0].block_bitmap)[j];
654 block = start;
655 for (i = 1; i < flex_gd->count; i++) {
656 block += group_table_count[j];
657 if (block == (&group_data[i].block_bitmap)[j]) {
658 count += group_table_count[j];
659 continue;
660 }
661 err = set_flexbg_block_bitmap(sb, handle,
662 flex_gd,
663 EXT4_B2C(sbi, start),
664 EXT4_B2C(sbi,
665 start + count
666 - 1));
667 if (err)
668 goto out;
669 count = group_table_count[j];
670 start = (&group_data[i].block_bitmap)[j];
671 block = start;
672 }
673
674 if (count) {
675 err = set_flexbg_block_bitmap(sb, handle,
676 flex_gd,
677 EXT4_B2C(sbi, start),
678 EXT4_B2C(sbi,
679 start + count
680 - 1));
681 if (err)
682 goto out;
683 }
684 }
685
686 out:
687 err2 = ext4_journal_stop(handle);
688 if (err2 && !err)
689 err = err2;
690
691 return err;
692 }
693
694 /*
695 * Iterate through the groups which hold BACKUP superblock/GDT copies in an
696 * ext4 filesystem. The counters should be initialized to 1, 5, and 7 before
697 * calling this for the first time. In a sparse filesystem it will be the
698 * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
699 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
700 */
701 static unsigned ext4_list_backups(struct super_block *sb, unsigned *three,
702 unsigned *five, unsigned *seven)
703 {
704 unsigned *min = three;
705 int mult = 3;
706 unsigned ret;
707
708 if (!ext4_has_feature_sparse_super(sb)) {
709 ret = *min;
710 *min += 1;
711 return ret;
712 }
713
714 if (*five < *min) {
715 min = five;
716 mult = 5;
717 }
718 if (*seven < *min) {
719 min = seven;
720 mult = 7;
721 }
722
723 ret = *min;
724 *min *= mult;
725
726 return ret;
727 }
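/*
 * Illustrative stand-alone sketch (not part of the kernel source): the same
 * 3/5/7 walk as ext4_list_backups(), printing the sparse backup groups below
 * 100.  The helper name list_sparse_backups() is hypothetical.
 */
#if 0
#include <stdio.h>

static unsigned list_sparse_backups(unsigned *three, unsigned *five, unsigned *seven)
{
	unsigned *min = three;
	int mult = 3;
	unsigned ret;

	if (*five < *min) {
		min = five;
		mult = 5;
	}
	if (*seven < *min) {
		min = seven;
		mult = 7;
	}
	ret = *min;
	*min *= mult;
	return ret;
}

int main(void)
{
	unsigned three = 1, five = 5, seven = 7, grp;

	/* prints: 1 3 5 7 9 25 27 49 81 */
	while ((grp = list_sparse_backups(&three, &five, &seven)) < 100)
		printf("%u ", grp);
	printf("\n");
	return 0;
}
#endif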
728
729 /*
730 * Check that all of the backup GDT blocks are held in the primary GDT block.
731 * It is assumed that they are stored in group order. Returns the number of
732  * groups in the current filesystem that have backups, or a negative error code.
733 */
734 static int verify_reserved_gdb(struct super_block *sb,
735 ext4_group_t end,
736 struct buffer_head *primary)
737 {
738 const ext4_fsblk_t blk = primary->b_blocknr;
739 unsigned three = 1;
740 unsigned five = 5;
741 unsigned seven = 7;
742 unsigned grp;
743 __le32 *p = (__le32 *)primary->b_data;
744 int gdbackups = 0;
745
746 while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
747 if (le32_to_cpu(*p++) !=
748 grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){
749 ext4_warning(sb, "reserved GDT %llu"
750 " missing grp %d (%llu)",
751 blk, grp,
752 grp *
753 (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) +
754 blk);
755 return -EINVAL;
756 }
757 if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb))
758 return -EFBIG;
759 }
760
761 return gdbackups;
762 }
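/*
 * Worked example (illustrative): with 32768 blocks per group and a primary
 * reserved GDT block at block 2, the backup copy in group 3 must be recorded
 * as 3 * 32768 + 2 = 98306 in the primary block's leaf entries, which is
 * exactly what the check above verifies.
 */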
763
764 /*
765 * Called when we need to bring a reserved group descriptor table block into
766 * use from the resize inode. The primary copy of the new GDT block currently
767 * is an indirect block (under the double indirect block in the resize inode).
768 * The new backup GDT blocks will be stored as leaf blocks in this indirect
769 * block, in group order. Even though we know all the block numbers we need,
770 * we check to ensure that the resize inode has actually reserved these blocks.
771 *
772 * Don't need to update the block bitmaps because the blocks are still in use.
773 *
774 * We get all of the error cases out of the way, so that we are sure to not
775 * fail once we start modifying the data on disk, because JBD has no rollback.
776 */
777 static int add_new_gdb(handle_t *handle, struct inode *inode,
778 ext4_group_t group)
779 {
780 struct super_block *sb = inode->i_sb;
781 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
782 unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
783 ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
784 struct buffer_head **o_group_desc, **n_group_desc = NULL;
785 struct buffer_head *dind = NULL;
786 struct buffer_head *gdb_bh = NULL;
787 int gdbackups;
788 struct ext4_iloc iloc = { .bh = NULL };
789 __le32 *data;
790 int err;
791
792 if (test_opt(sb, DEBUG))
793 printk(KERN_DEBUG
794 "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
795 gdb_num);
796
797 gdb_bh = ext4_sb_bread(sb, gdblock, 0);
798 if (IS_ERR(gdb_bh))
799 return PTR_ERR(gdb_bh);
800
801 gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
802 if (gdbackups < 0) {
803 err = gdbackups;
804 goto errout;
805 }
806
807 data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
808 dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
809 if (IS_ERR(dind)) {
810 err = PTR_ERR(dind);
811 dind = NULL;
812 goto errout;
813 }
814
815 data = (__le32 *)dind->b_data;
816 if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
817 ext4_warning(sb, "new group %u GDT block %llu not reserved",
818 group, gdblock);
819 err = -EINVAL;
820 goto errout;
821 }
822
823 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
824 err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
825 if (unlikely(err))
826 goto errout;
827
828 BUFFER_TRACE(gdb_bh, "get_write_access");
829 err = ext4_journal_get_write_access(handle, gdb_bh);
830 if (unlikely(err))
831 goto errout;
832
833 BUFFER_TRACE(dind, "get_write_access");
834 err = ext4_journal_get_write_access(handle, dind);
835 if (unlikely(err))
836 ext4_std_error(sb, err);
837
838 /* ext4_reserve_inode_write() gets a reference on the iloc */
839 err = ext4_reserve_inode_write(handle, inode, &iloc);
840 if (unlikely(err))
841 goto errout;
842
843 n_group_desc = ext4_kvmalloc((gdb_num + 1) *
844 sizeof(struct buffer_head *),
845 GFP_NOFS);
846 if (!n_group_desc) {
847 err = -ENOMEM;
848 ext4_warning(sb, "not enough memory for %lu groups",
849 gdb_num + 1);
850 goto errout;
851 }
852
853 /*
854 * Finally, we have all of the possible failures behind us...
855 *
856 * Remove new GDT block from inode double-indirect block and clear out
857 * the new GDT block for use (which also "frees" the backup GDT blocks
858 * from the reserved inode). We don't need to change the bitmaps for
859 * these blocks, because they are marked as in-use from being in the
860 * reserved inode, and will become GDT blocks (primary and backup).
861 */
862 data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
863 err = ext4_handle_dirty_metadata(handle, NULL, dind);
864 if (unlikely(err)) {
865 ext4_std_error(sb, err);
866 goto errout;
867 }
868 inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
869 (9 - EXT4_SB(sb)->s_cluster_bits);
870 ext4_mark_iloc_dirty(handle, inode, &iloc);
871 memset(gdb_bh->b_data, 0, sb->s_blocksize);
872 err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
873 if (unlikely(err)) {
874 ext4_std_error(sb, err);
875 goto errout;
876 }
877 brelse(dind);
878
879 o_group_desc = EXT4_SB(sb)->s_group_desc;
880 memcpy(n_group_desc, o_group_desc,
881 EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
882 n_group_desc[gdb_num] = gdb_bh;
883 EXT4_SB(sb)->s_group_desc = n_group_desc;
884 EXT4_SB(sb)->s_gdb_count++;
885 kvfree(o_group_desc);
886
887 le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
888 err = ext4_handle_dirty_super(handle, sb);
889 if (err)
890 ext4_std_error(sb, err);
891 return err;
892 errout:
893 kvfree(n_group_desc);
894 brelse(iloc.bh);
895 brelse(dind);
896 brelse(gdb_bh);
897
898 ext4_debug("leaving with error %d\n", err);
899 return err;
900 }
901
902 /*
903 * add_new_gdb_meta_bg is the sister of add_new_gdb.
904 */
905 static int add_new_gdb_meta_bg(struct super_block *sb,
906 handle_t *handle, ext4_group_t group) {
907 ext4_fsblk_t gdblock;
908 struct buffer_head *gdb_bh;
909 struct buffer_head **o_group_desc, **n_group_desc;
910 unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
911 int err;
912
913 gdblock = ext4_meta_bg_first_block_no(sb, group) +
914 ext4_bg_has_super(sb, group);
915 gdb_bh = ext4_sb_bread(sb, gdblock, 0);
916 if (IS_ERR(gdb_bh))
917 return PTR_ERR(gdb_bh);
918 n_group_desc = ext4_kvmalloc((gdb_num + 1) *
919 sizeof(struct buffer_head *),
920 GFP_NOFS);
921 if (!n_group_desc) {
922 brelse(gdb_bh);
923 err = -ENOMEM;
924 ext4_warning(sb, "not enough memory for %lu groups",
925 gdb_num + 1);
926 return err;
927 }
928
929 o_group_desc = EXT4_SB(sb)->s_group_desc;
930 memcpy(n_group_desc, o_group_desc,
931 EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
932 n_group_desc[gdb_num] = gdb_bh;
933 EXT4_SB(sb)->s_group_desc = n_group_desc;
934 EXT4_SB(sb)->s_gdb_count++;
935 kvfree(o_group_desc);
936 BUFFER_TRACE(gdb_bh, "get_write_access");
937 err = ext4_journal_get_write_access(handle, gdb_bh);
938 return err;
939 }
940
941 /*
942 * Called when we are adding a new group which has a backup copy of each of
943 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
944 * We need to add these reserved backup GDT blocks to the resize inode, so
945 * that they are kept for future resizing and not allocated to files.
946 *
947 * Each reserved backup GDT block will go into a different indirect block.
948 * The indirect blocks are actually the primary reserved GDT blocks,
949 * so we know in advance what their block numbers are. We only get the
950 * double-indirect block to verify it is pointing to the primary reserved
951 * GDT blocks so we don't overwrite a data block by accident. The reserved
952 * backup GDT blocks are stored in their reserved primary GDT block.
953 */
954 static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
955 ext4_group_t group)
956 {
957 struct super_block *sb = inode->i_sb;
958 int reserved_gdb =le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
959 int cluster_bits = EXT4_SB(sb)->s_cluster_bits;
960 struct buffer_head **primary;
961 struct buffer_head *dind;
962 struct ext4_iloc iloc;
963 ext4_fsblk_t blk;
964 __le32 *data, *end;
965 int gdbackups = 0;
966 int res, i;
967 int err;
968
969 primary = kmalloc(reserved_gdb * sizeof(*primary), GFP_NOFS);
970 if (!primary)
971 return -ENOMEM;
972
973 data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
974 dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
975 if (IS_ERR(dind)) {
976 err = PTR_ERR(dind);
977 dind = NULL;
978 goto exit_free;
979 }
980
981 blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count;
982 data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count %
983 EXT4_ADDR_PER_BLOCK(sb));
984 end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb);
985
986 /* Get each reserved primary GDT block and verify it holds backups */
987 for (res = 0; res < reserved_gdb; res++, blk++) {
988 if (le32_to_cpu(*data) != blk) {
989 ext4_warning(sb, "reserved block %llu"
990 " not at offset %ld",
991 blk,
992 (long)(data - (__le32 *)dind->b_data));
993 err = -EINVAL;
994 goto exit_bh;
995 }
996 primary[res] = ext4_sb_bread(sb, blk, 0);
997 if (IS_ERR(primary[res])) {
998 err = PTR_ERR(primary[res]);
999 primary[res] = NULL;
1000 goto exit_bh;
1001 }
1002 gdbackups = verify_reserved_gdb(sb, group, primary[res]);
1003 if (gdbackups < 0) {
1004 brelse(primary[res]);
1005 err = gdbackups;
1006 goto exit_bh;
1007 }
1008 if (++data >= end)
1009 data = (__le32 *)dind->b_data;
1010 }
1011
1012 for (i = 0; i < reserved_gdb; i++) {
1013 BUFFER_TRACE(primary[i], "get_write_access");
1014 if ((err = ext4_journal_get_write_access(handle, primary[i])))
1015 goto exit_bh;
1016 }
1017
1018 if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
1019 goto exit_bh;
1020
1021 /*
1022 * Finally we can add each of the reserved backup GDT blocks from
1023 * the new group to its reserved primary GDT block.
1024 */
1025 blk = group * EXT4_BLOCKS_PER_GROUP(sb);
1026 for (i = 0; i < reserved_gdb; i++) {
1027 int err2;
1028 data = (__le32 *)primary[i]->b_data;
1029 /* printk("reserving backup %lu[%u] = %lu\n",
1030 primary[i]->b_blocknr, gdbackups,
1031 blk + primary[i]->b_blocknr); */
1032 data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
1033 err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]);
1034 if (!err)
1035 err = err2;
1036 }
1037
1038 inode->i_blocks += reserved_gdb * sb->s_blocksize >> (9 - cluster_bits);
1039 ext4_mark_iloc_dirty(handle, inode, &iloc);
1040
1041 exit_bh:
1042 while (--res >= 0)
1043 brelse(primary[res]);
1044 brelse(dind);
1045
1046 exit_free:
1047 kfree(primary);
1048
1049 return err;
1050 }
1051
1052 /*
1053 * Update the backup copies of the ext4 metadata. These don't need to be part
1054 * of the main resize transaction, because e2fsck will re-write them if there
1055 * is a problem (basically only OOM will cause a problem). However, we
1056 * _should_ update the backups if possible, in case the primary gets trashed
1057 * for some reason and we need to run e2fsck from a backup superblock. The
1058 * important part is that the new block and inode counts are in the backup
1059 * superblocks, and the location of the new group metadata in the GDT backups.
1060 *
1061  * We do not need to take the s_resize_lock for this, because these
1062 * blocks are not otherwise touched by the filesystem code when it is
1063 * mounted. We don't need to worry about last changing from
1064 * sbi->s_groups_count, because the worst that can happen is that we
1065 * do not copy the full number of backups at this time. The resize
1066 * which changed s_groups_count will backup again.
1067 */
1068 static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
1069 int size, int meta_bg)
1070 {
1071 struct ext4_sb_info *sbi = EXT4_SB(sb);
1072 ext4_group_t last;
1073 const int bpg = EXT4_BLOCKS_PER_GROUP(sb);
1074 unsigned three = 1;
1075 unsigned five = 5;
1076 unsigned seven = 7;
1077 ext4_group_t group = 0;
1078 int rest = sb->s_blocksize - size;
1079 handle_t *handle;
1080 int err = 0, err2;
1081
1082 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
1083 if (IS_ERR(handle)) {
1084 group = 1;
1085 err = PTR_ERR(handle);
1086 goto exit_err;
1087 }
1088
1089 if (meta_bg == 0) {
1090 group = ext4_list_backups(sb, &three, &five, &seven);
1091 last = sbi->s_groups_count;
1092 } else {
1093 group = ext4_get_group_number(sb, blk_off) + 1;
1094 last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2);
1095 }
1096
1097 while (group < sbi->s_groups_count) {
1098 struct buffer_head *bh;
1099 ext4_fsblk_t backup_block;
1100
1101 /* Out of journal space, and can't get more - abort - so sad */
1102 if (ext4_handle_valid(handle) &&
1103 handle->h_buffer_credits == 0 &&
1104 ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA) &&
1105 (err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA)))
1106 break;
1107
1108 if (meta_bg == 0)
1109 backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
1110 else
1111 backup_block = (ext4_group_first_block_no(sb, group) +
1112 ext4_bg_has_super(sb, group));
1113
1114 bh = sb_getblk(sb, backup_block);
1115 if (unlikely(!bh)) {
1116 err = -ENOMEM;
1117 break;
1118 }
1119 ext4_debug("update metadata backup %llu(+%llu)\n",
1120 backup_block, backup_block -
1121 ext4_group_first_block_no(sb, group));
1122 BUFFER_TRACE(bh, "get_write_access");
1123 if ((err = ext4_journal_get_write_access(handle, bh))) {
1124 brelse(bh);
1125 break;
1126 }
1127 lock_buffer(bh);
1128 memcpy(bh->b_data, data, size);
1129 if (rest)
1130 memset(bh->b_data + size, 0, rest);
1131 set_buffer_uptodate(bh);
1132 unlock_buffer(bh);
1133 err = ext4_handle_dirty_metadata(handle, NULL, bh);
1134 if (unlikely(err))
1135 ext4_std_error(sb, err);
1136 brelse(bh);
1137
1138 if (meta_bg == 0)
1139 group = ext4_list_backups(sb, &three, &five, &seven);
1140 else if (group == last)
1141 break;
1142 else
1143 group = last;
1144 }
1145 if ((err2 = ext4_journal_stop(handle)) && !err)
1146 err = err2;
1147
1148 /*
1149 * Ugh! Need to have e2fsck write the backup copies. It is too
1150 * late to revert the resize, we shouldn't fail just because of
1151 * the backup copies (they are only needed in case of corruption).
1152 *
1153 * However, if we got here we have a journal problem too, so we
1154 * can't really start a transaction to mark the superblock.
1155          * Chicken out and just set the flag in the hope that it will be written
1156 * to disk, and if not - we will simply wait until next fsck.
1157 */
1158 exit_err:
1159 if (err) {
1160 ext4_warning(sb, "can't update backup for group %u (err %d), "
1161 "forcing fsck on next reboot", group, err);
1162 sbi->s_mount_state &= ~EXT4_VALID_FS;
1163 sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
1164 mark_buffer_dirty(sbi->s_sbh);
1165 }
1166 }
1167
1168 /*
1169  * ext4_add_new_descs() adds @count group descriptors for the groups
1170 * starting at @group
1171 *
1172 * @handle: journal handle
1173 * @sb: super block
1174 * @group: the group no. of the first group desc to be added
1175 * @resize_inode: the resize inode
1176 * @count: number of group descriptors to be added
1177 */
1178 static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
1179 ext4_group_t group, struct inode *resize_inode,
1180 ext4_group_t count)
1181 {
1182 struct ext4_sb_info *sbi = EXT4_SB(sb);
1183 struct ext4_super_block *es = sbi->s_es;
1184 struct buffer_head *gdb_bh;
1185 int i, gdb_off, gdb_num, err = 0;
1186 int meta_bg;
1187
1188 meta_bg = ext4_has_feature_meta_bg(sb);
1189 for (i = 0; i < count; i++, group++) {
1190 int reserved_gdb = ext4_bg_has_super(sb, group) ?
1191 le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
1192
1193 gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
1194 gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
1195
1196 /*
1197 * We will only either add reserved group blocks to a backup group
1198 * or remove reserved blocks for the first group in a new group block.
1199                  * Doing both would mean more complex code, and sane people don't
1200 * use non-sparse filesystems anymore. This is already checked above.
1201 */
1202 if (gdb_off) {
1203 gdb_bh = sbi->s_group_desc[gdb_num];
1204 BUFFER_TRACE(gdb_bh, "get_write_access");
1205 err = ext4_journal_get_write_access(handle, gdb_bh);
1206
1207 if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group))
1208 err = reserve_backup_gdb(handle, resize_inode, group);
1209 } else if (meta_bg != 0) {
1210 err = add_new_gdb_meta_bg(sb, handle, group);
1211 } else {
1212 err = add_new_gdb(handle, resize_inode, group);
1213 }
1214 if (err)
1215 break;
1216 }
1217 return err;
1218 }
1219
1220 static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
1221 {
1222 struct buffer_head *bh = sb_getblk(sb, block);
1223 if (unlikely(!bh))
1224 return NULL;
1225 if (!bh_uptodate_or_lock(bh)) {
1226 if (bh_submit_read(bh) < 0) {
1227 brelse(bh);
1228 return NULL;
1229 }
1230 }
1231
1232 return bh;
1233 }
1234
1235 static int ext4_set_bitmap_checksums(struct super_block *sb,
1236 ext4_group_t group,
1237 struct ext4_group_desc *gdp,
1238 struct ext4_new_group_data *group_data)
1239 {
1240 struct buffer_head *bh;
1241
1242 if (!ext4_has_metadata_csum(sb))
1243 return 0;
1244
1245 bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
1246 if (!bh)
1247 return -EIO;
1248 ext4_inode_bitmap_csum_set(sb, group, gdp, bh,
1249 EXT4_INODES_PER_GROUP(sb) / 8);
1250 brelse(bh);
1251
1252 bh = ext4_get_bitmap(sb, group_data->block_bitmap);
1253 if (!bh)
1254 return -EIO;
1255 ext4_block_bitmap_csum_set(sb, group, gdp, bh);
1256 brelse(bh);
1257
1258 return 0;
1259 }
1260
1261 /*
1262  * ext4_setup_new_descs() will set up the group descriptors of a flex bg
1263 */
1264 static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
1265 struct ext4_new_flex_group_data *flex_gd)
1266 {
1267 struct ext4_new_group_data *group_data = flex_gd->groups;
1268 struct ext4_group_desc *gdp;
1269 struct ext4_sb_info *sbi = EXT4_SB(sb);
1270 struct buffer_head *gdb_bh;
1271 ext4_group_t group;
1272 __u16 *bg_flags = flex_gd->bg_flags;
1273 int i, gdb_off, gdb_num, err = 0;
1274
1275
1276 for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) {
1277 group = group_data->group;
1278
1279 gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
1280 gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
1281
1282 /*
1283                  * get_write_access() has been called on gdb_bh by ext4_add_new_descs().
1284 */
1285 gdb_bh = sbi->s_group_desc[gdb_num];
1286 /* Update group descriptor block for new group */
1287 gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
1288 gdb_off * EXT4_DESC_SIZE(sb));
1289
1290 memset(gdp, 0, EXT4_DESC_SIZE(sb));
1291 ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
1292 ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
1293 err = ext4_set_bitmap_checksums(sb, group, gdp, group_data);
1294 if (err) {
1295 ext4_std_error(sb, err);
1296 break;
1297 }
1298
1299 ext4_inode_table_set(sb, gdp, group_data->inode_table);
1300 ext4_free_group_clusters_set(sb, gdp,
1301 group_data->free_clusters_count);
1302 ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
1303 if (ext4_has_group_desc_csum(sb))
1304 ext4_itable_unused_set(sb, gdp,
1305 EXT4_INODES_PER_GROUP(sb));
1306 gdp->bg_flags = cpu_to_le16(*bg_flags);
1307 ext4_group_desc_csum_set(sb, group, gdp);
1308
1309 err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
1310 if (unlikely(err)) {
1311 ext4_std_error(sb, err);
1312 break;
1313 }
1314
1315 /*
1316 * We can allocate memory for mb_alloc based on the new group
1317 * descriptor
1318 */
1319 err = ext4_mb_add_groupinfo(sb, group, gdp);
1320 if (err)
1321 break;
1322 }
1323 return err;
1324 }
1325
1326 /*
1327 * ext4_update_super() updates the super block so that the newly added
1328 * groups can be seen by the filesystem.
1329 *
1330 * @sb: super block
1331 * @flex_gd: new added groups
1332 */
1333 static void ext4_update_super(struct super_block *sb,
1334 struct ext4_new_flex_group_data *flex_gd)
1335 {
1336 ext4_fsblk_t blocks_count = 0;
1337 ext4_fsblk_t free_blocks = 0;
1338 ext4_fsblk_t reserved_blocks = 0;
1339 struct ext4_new_group_data *group_data = flex_gd->groups;
1340 struct ext4_sb_info *sbi = EXT4_SB(sb);
1341 struct ext4_super_block *es = sbi->s_es;
1342 int i;
1343
1344 BUG_ON(flex_gd->count == 0 || group_data == NULL);
1345 /*
1346 * Make the new blocks and inodes valid next. We do this before
1347 * increasing the group count so that once the group is enabled,
1348 * all of its blocks and inodes are already valid.
1349 *
1350 * We always allocate group-by-group, then block-by-block or
1351 * inode-by-inode within a group, so enabling these
1352 * blocks/inodes before the group is live won't actually let us
1353 * allocate the new space yet.
1354 */
1355 for (i = 0; i < flex_gd->count; i++) {
1356 blocks_count += group_data[i].blocks_count;
1357 free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count);
1358 }
1359
1360 reserved_blocks = ext4_r_blocks_count(es) * 100;
1361 reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es));
1362 reserved_blocks *= blocks_count;
1363 do_div(reserved_blocks, 100);
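	/*
	 * Worked example (illustrative): if the existing filesystem reserves
	 * 5% (so ext4_r_blocks_count * 100 / ext4_blocks_count == 5) and the
	 * flex group adds blocks_count == 32768 blocks, the steps above give
	 * reserved_blocks = 5 * 32768 / 100 = 1638, i.e. the same percentage
	 * is reserved out of the newly added blocks.
	 */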
1364
1365 ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count);
1366 ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks);
1367 le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) *
1368 flex_gd->count);
1369 le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) *
1370 flex_gd->count);
1371
1372 ext4_debug("free blocks count %llu", ext4_free_blocks_count(es));
1373 /*
1374 * We need to protect s_groups_count against other CPUs seeing
1375 * inconsistent state in the superblock.
1376 *
1377 * The precise rules we use are:
1378 *
1379 * * Writers must perform a smp_wmb() after updating all
1380 * dependent data and before modifying the groups count
1381 *
1382 * * Readers must perform an smp_rmb() after reading the groups
1383 * count and before reading any dependent data.
1384 *
1385 * NB. These rules can be relaxed when checking the group count
1386 * while freeing data, as we can only allocate from a block
1387 * group after serialising against the group count, and we can
1388 * only then free after serialising in turn against that
1389 * allocation.
1390 */
1391 smp_wmb();
1392
1393 /* Update the global fs size fields */
1394 sbi->s_groups_count += flex_gd->count;
1395 sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
1396 (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
1397
1398 /* Update the reserved block counts only once the new group is
1399 * active. */
1400 ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
1401 reserved_blocks);
1402
1403 /* Update the free space counts */
1404 percpu_counter_add(&sbi->s_freeclusters_counter,
1405 EXT4_NUM_B2C(sbi, free_blocks));
1406 percpu_counter_add(&sbi->s_freeinodes_counter,
1407 EXT4_INODES_PER_GROUP(sb) * flex_gd->count);
1408
1409 ext4_debug("free blocks count %llu",
1410 percpu_counter_read(&sbi->s_freeclusters_counter));
1411 if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
1412 ext4_group_t flex_group;
1413 flex_group = ext4_flex_group(sbi, group_data[0].group);
1414 atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
1415 &sbi->s_flex_groups[flex_group].free_clusters);
1416 atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
1417 &sbi->s_flex_groups[flex_group].free_inodes);
1418 }
1419
1420 /*
1421 * Update the fs overhead information
1422 */
1423 ext4_calculate_overhead(sb);
1424
1425 if (test_opt(sb, DEBUG))
1426 printk(KERN_DEBUG "EXT4-fs: added group %u:"
1427 "%llu blocks(%llu free %llu reserved)\n", flex_gd->count,
1428 blocks_count, free_blocks, reserved_blocks);
1429 }
1430
1431 /* Add a flex group to an fs. Ensure we handle all possible error conditions
1432 * _before_ we start modifying the filesystem, because we cannot abort the
1433 * transaction and not have it write the data to disk.
1434 */
1435 static int ext4_flex_group_add(struct super_block *sb,
1436 struct inode *resize_inode,
1437 struct ext4_new_flex_group_data *flex_gd)
1438 {
1439 struct ext4_sb_info *sbi = EXT4_SB(sb);
1440 struct ext4_super_block *es = sbi->s_es;
1441 ext4_fsblk_t o_blocks_count;
1442 ext4_grpblk_t last;
1443 ext4_group_t group;
1444 handle_t *handle;
1445 unsigned reserved_gdb;
1446 int err = 0, err2 = 0, credit;
1447
1448 BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags);
1449
1450 reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
1451 o_blocks_count = ext4_blocks_count(es);
1452 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1453 BUG_ON(last);
1454
1455 err = setup_new_flex_group_blocks(sb, flex_gd);
1456 if (err)
1457 goto exit;
1458 /*
1459 * We will always be modifying at least the superblock and GDT
1460 * blocks. If we are adding a group past the last current GDT block,
1461 * we will also modify the inode and the dindirect block. If we
1462 * are adding a group with superblock/GDT backups we will also
1463 * modify each of the reserved GDT dindirect blocks.
1464 */
1465 credit = 3; /* sb, resize inode, resize inode dindirect */
1466 /* GDT blocks */
1467 credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb));
1468 credit += reserved_gdb; /* Reserved GDT dindirect blocks */
1469 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
1470 if (IS_ERR(handle)) {
1471 err = PTR_ERR(handle);
1472 goto exit;
1473 }
1474
1475 BUFFER_TRACE(sbi->s_sbh, "get_write_access");
1476 err = ext4_journal_get_write_access(handle, sbi->s_sbh);
1477 if (err)
1478 goto exit_journal;
1479
1480 group = flex_gd->groups[0].group;
1481 BUG_ON(group != EXT4_SB(sb)->s_groups_count);
1482 err = ext4_add_new_descs(handle, sb, group,
1483 resize_inode, flex_gd->count);
1484 if (err)
1485 goto exit_journal;
1486
1487 err = ext4_setup_new_descs(handle, sb, flex_gd);
1488 if (err)
1489 goto exit_journal;
1490
1491 ext4_update_super(sb, flex_gd);
1492
1493 err = ext4_handle_dirty_super(handle, sb);
1494
1495 exit_journal:
1496 err2 = ext4_journal_stop(handle);
1497 if (!err)
1498 err = err2;
1499
1500 if (!err) {
1501 int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
1502 int gdb_num_end = ((group + flex_gd->count - 1) /
1503 EXT4_DESC_PER_BLOCK(sb));
1504 int meta_bg = ext4_has_feature_meta_bg(sb);
1505 sector_t old_gdb = 0;
1506
1507 update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
1508 sizeof(struct ext4_super_block), 0);
1509 for (; gdb_num <= gdb_num_end; gdb_num++) {
1510 struct buffer_head *gdb_bh;
1511
1512 gdb_bh = sbi->s_group_desc[gdb_num];
1513 if (old_gdb == gdb_bh->b_blocknr)
1514 continue;
1515 update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
1516 gdb_bh->b_size, meta_bg);
1517 old_gdb = gdb_bh->b_blocknr;
1518 }
1519 }
1520 exit:
1521 return err;
1522 }
1523
1524 static int ext4_setup_next_flex_gd(struct super_block *sb,
1525 struct ext4_new_flex_group_data *flex_gd,
1526 ext4_fsblk_t n_blocks_count,
1527 unsigned long flexbg_size)
1528 {
1529 struct ext4_sb_info *sbi = EXT4_SB(sb);
1530 struct ext4_super_block *es = sbi->s_es;
1531 struct ext4_new_group_data *group_data = flex_gd->groups;
1532 ext4_fsblk_t o_blocks_count;
1533 ext4_group_t n_group;
1534 ext4_group_t group;
1535 ext4_group_t last_group;
1536 ext4_grpblk_t last;
1537 ext4_grpblk_t clusters_per_group;
1538 unsigned long i;
1539
1540 clusters_per_group = EXT4_CLUSTERS_PER_GROUP(sb);
1541
1542 o_blocks_count = ext4_blocks_count(es);
1543
1544 if (o_blocks_count == n_blocks_count)
1545 return 0;
1546
1547 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1548 BUG_ON(last);
1549 ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);
1550
1551 last_group = group | (flexbg_size - 1);
1552 if (last_group > n_group)
1553 last_group = n_group;
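	/*
	 * Worked example (illustrative): with flexbg_size == 16 and the first
	 * new group == 35, last_group == (35 | 15) == 47, i.e. the rest of
	 * that flex group, capped at n_group if the resize ends earlier.
	 */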
1554
1555 flex_gd->count = last_group - group + 1;
1556
1557 for (i = 0; i < flex_gd->count; i++) {
1558 int overhead;
1559
1560 group_data[i].group = group + i;
1561 group_data[i].blocks_count = EXT4_BLOCKS_PER_GROUP(sb);
1562 overhead = ext4_group_overhead_blocks(sb, group + i);
1563 group_data[i].mdata_blocks = overhead;
1564 group_data[i].free_clusters_count = EXT4_CLUSTERS_PER_GROUP(sb);
1565 if (ext4_has_group_desc_csum(sb)) {
1566 flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
1567 EXT4_BG_INODE_UNINIT;
1568 if (!test_opt(sb, INIT_INODE_TABLE))
1569 flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED;
1570 } else
1571 flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
1572 }
1573
1574 if (last_group == n_group && ext4_has_group_desc_csum(sb))
1575 /* We need to initialize block bitmap of last group. */
1576 flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;
1577
1578 if ((last_group == n_group) && (last != clusters_per_group - 1)) {
1579 group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1);
1580 group_data[i - 1].free_clusters_count -= clusters_per_group -
1581 last - 1;
1582 }
1583
1584 return 1;
1585 }
1586
1587 /* Add group descriptor data to an existing or new group descriptor block.
1588 * Ensure we handle all possible error conditions _before_ we start modifying
1589 * the filesystem, because we cannot abort the transaction and not have it
1590 * write the data to disk.
1591 *
1592 * If we are on a GDT block boundary, we need to get the reserved GDT block.
1593 * Otherwise, we may need to add backup GDT blocks for a sparse group.
1594 *
1595 * We only need to hold the superblock lock while we are actually adding
1596 * in the new group's counts to the superblock. Prior to that we have
1597 * not really "added" the group at all. We re-check that we are still
1598 * adding in the last group in case things have changed since verifying.
1599 */
1600 int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
1601 {
1602 struct ext4_new_flex_group_data flex_gd;
1603 struct ext4_sb_info *sbi = EXT4_SB(sb);
1604 struct ext4_super_block *es = sbi->s_es;
1605 int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
1606 le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
1607 struct inode *inode = NULL;
1608 int gdb_off;
1609 int err;
1610 __u16 bg_flags = 0;
1611
1612 gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);
1613
1614 if (gdb_off == 0 && !ext4_has_feature_sparse_super(sb)) {
1615 ext4_warning(sb, "Can't resize non-sparse filesystem further");
1616 return -EPERM;
1617 }
1618
1619 if (ext4_blocks_count(es) + input->blocks_count <
1620 ext4_blocks_count(es)) {
1621 ext4_warning(sb, "blocks_count overflow");
1622 return -EINVAL;
1623 }
1624
1625 if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
1626 le32_to_cpu(es->s_inodes_count)) {
1627 ext4_warning(sb, "inodes_count overflow");
1628 return -EINVAL;
1629 }
1630
1631 if (reserved_gdb || gdb_off == 0) {
1632 if (!ext4_has_feature_resize_inode(sb) ||
1633 !le16_to_cpu(es->s_reserved_gdt_blocks)) {
1634 ext4_warning(sb,
1635 "No reserved GDT blocks, can't resize");
1636 return -EPERM;
1637 }
1638 inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
1639 if (IS_ERR(inode)) {
1640 ext4_warning(sb, "Error opening resize inode");
1641 return PTR_ERR(inode);
1642 }
1643 }
1644
1645
1646 err = verify_group_input(sb, input);
1647 if (err)
1648 goto out;
1649
1650 err = ext4_alloc_flex_bg_array(sb, input->group + 1);
1651 if (err)
1652 goto out;
1653
1654 err = ext4_mb_alloc_groupinfo(sb, input->group + 1);
1655 if (err)
1656 goto out;
1657
1658 flex_gd.count = 1;
1659 flex_gd.groups = input;
1660 flex_gd.bg_flags = &bg_flags;
1661 err = ext4_flex_group_add(sb, inode, &flex_gd);
1662 out:
1663 iput(inode);
1664 return err;
1665 } /* ext4_group_add */
1666
1667 /*
1668  * Extend a group without checking, assuming that checking has already been done.
1669 */
1670 static int ext4_group_extend_no_check(struct super_block *sb,
1671 ext4_fsblk_t o_blocks_count, ext4_grpblk_t add)
1672 {
1673 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
1674 handle_t *handle;
1675 int err = 0, err2;
1676
1677 /* We will update the superblock, one block bitmap, and
1678 * one group descriptor via ext4_group_add_blocks().
1679 */
1680 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3);
1681 if (IS_ERR(handle)) {
1682 err = PTR_ERR(handle);
1683 ext4_warning(sb, "error %d on journal start", err);
1684 return err;
1685 }
1686
1687 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
1688 err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
1689 if (err) {
1690 ext4_warning(sb, "error %d on journal write access", err);
1691 goto errout;
1692 }
1693
1694 ext4_blocks_count_set(es, o_blocks_count + add);
1695 ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add);
1696 ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
1697 o_blocks_count + add);
1698 /* We add the blocks to the bitmap and set the group need init bit */
1699 err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
1700 if (err)
1701 goto errout;
1702 ext4_handle_dirty_super(handle, sb);
1703 ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
1704 o_blocks_count + add);
1705 errout:
1706 err2 = ext4_journal_stop(handle);
1707 if (err2 && !err)
1708 err = err2;
1709
1710 if (!err) {
1711 if (test_opt(sb, DEBUG))
1712 printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
1713 "blocks\n", ext4_blocks_count(es));
1714 update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr,
1715 (char *)es, sizeof(struct ext4_super_block), 0);
1716 }
1717 return err;
1718 }
1719
1720 /*
1721 * Extend the filesystem to the new number of blocks specified. This entry
1722 * point is only used to extend the current filesystem to the end of the last
1723 * existing group. It can be accessed via ioctl, or by "remount,resize=<size>"
1724 * for emergencies (because it has no dependencies on reserved blocks).
1725 *
1726 * If we _really_ wanted, we could use default values to call ext4_group_add()
1727 * allow the "remount" trick to work for arbitrary resizing, assuming enough
1728 * GDT blocks are reserved to grow to the desired size.
1729 */
1730 int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
1731 ext4_fsblk_t n_blocks_count)
1732 {
1733 ext4_fsblk_t o_blocks_count;
1734 ext4_grpblk_t last;
1735 ext4_grpblk_t add;
1736 struct buffer_head *bh;
1737 int err;
1738 ext4_group_t group;
1739
1740 o_blocks_count = ext4_blocks_count(es);
1741
1742 if (test_opt(sb, DEBUG))
1743 ext4_msg(sb, KERN_DEBUG,
1744 "extending last group from %llu to %llu blocks",
1745 o_blocks_count, n_blocks_count);
1746
1747 if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
1748 return 0;
1749
1750 if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
1751 ext4_msg(sb, KERN_ERR,
1752 "filesystem too large to resize to %llu blocks safely",
1753 n_blocks_count);
1754 if (sizeof(sector_t) < 8)
1755 ext4_warning(sb, "CONFIG_LBDAF not enabled");
1756 return -EINVAL;
1757 }
1758
1759 if (n_blocks_count < o_blocks_count) {
1760 ext4_warning(sb, "can't shrink FS - resize aborted");
1761 return -EINVAL;
1762 }
1763
1764 /* Handle the remaining blocks in the last group only. */
1765 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1766
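/*
 * "last" is the offset of the old end-of-filesystem within its block group;
 * zero means the last group is already complete, and this entry point never
 * adds new groups, so there is nothing more it can do.
 */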
1767 if (last == 0) {
1768 ext4_warning(sb, "need to use ext2online to resize further");
1769 return -EPERM;
1770 }
1771
1772 add = EXT4_BLOCKS_PER_GROUP(sb) - last;
1773
1774 if (o_blocks_count + add < o_blocks_count) {
1775 ext4_warning(sb, "blocks_count overflow");
1776 return -EINVAL;
1777 }
1778
1779 if (o_blocks_count + add > n_blocks_count)
1780 add = n_blocks_count - o_blocks_count;
1781
1782 if (o_blocks_count + add < n_blocks_count)
1783 ext4_warning(sb, "will only finish group (%llu blocks, %u new)",
1784 o_blocks_count + add, add);
1785
1786 /* See if the device is actually as big as what was requested */
1787 bh = sb_bread(sb, o_blocks_count + add - 1);
1788 if (!bh) {
1789 ext4_warning(sb, "can't read last block, resize aborted");
1790 return -ENOSPC;
1791 }
1792 brelse(bh);
1793
1794 err = ext4_group_extend_no_check(sb, o_blocks_count, add);
1795 return err;
1796 } /* ext4_group_extend */
1797
1798
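/*
 * Number of group-descriptor blocks needed to describe "groups" block groups.
 * Illustrative arithmetic: with 4 KiB blocks and 64-byte descriptors,
 * EXT4_DESC_PER_BLOCK(sb) is 64, so 1000 groups need 16 descriptor blocks.
 */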
1799 static int num_desc_blocks(struct super_block *sb, ext4_group_t groups)
1800 {
1801 return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb);
1802 }
1803
1804 /*
1805 * Release the resize inode and drop the resize_inode feature if there
1806 * are no more reserved gdt blocks, and then convert the file system
1807 * to enable meta_bg
1808 */
1809 static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
1810 {
1811 handle_t *handle;
1812 struct ext4_sb_info *sbi = EXT4_SB(sb);
1813 struct ext4_super_block *es = sbi->s_es;
1814 struct ext4_inode_info *ei = EXT4_I(inode);
1815 ext4_fsblk_t nr;
1816 int i, ret, err = 0;
1817 int credits = 1;
1818
1819 ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg");
1820 if (inode) {
1821 if (es->s_reserved_gdt_blocks) {
1822 ext4_error(sb, "Unexpected non-zero "
1823 "s_reserved_gdt_blocks");
1824 return -EPERM;
1825 }
1826
1827 /* Do a quick sanity check of the resize inode */
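/*
 * The resize inode should hold exactly one allocated block, its double
 * indirect block.  i_blocks counts 512-byte units and, on bigalloc
 * filesystems, is charged per cluster, hence the expected value of
 * 1 << (i_blkbits - (9 - s_cluster_bits)).
 */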
1828 if (inode->i_blocks != 1 << (inode->i_blkbits -
1829 (9 - sbi->s_cluster_bits)))
1830 goto invalid_resize_inode;
1831 for (i = 0; i < EXT4_N_BLOCKS; i++) {
1832 if (i == EXT4_DIND_BLOCK) {
1833 if (ei->i_data[i])
1834 continue;
1835 else
1836 goto invalid_resize_inode;
1837 }
1838 if (ei->i_data[i])
1839 goto invalid_resize_inode;
1840 }
1841 credits += 3; /* block bitmap, bg descriptor, resize inode */
1842 }
1843
1844 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits);
1845 if (IS_ERR(handle))
1846 return PTR_ERR(handle);
1847
1848 BUFFER_TRACE(sbi->s_sbh, "get_write_access");
1849 err = ext4_journal_get_write_access(handle, sbi->s_sbh);
1850 if (err)
1851 goto errout;
1852
1853 ext4_clear_feature_resize_inode(sb);
1854 ext4_set_feature_meta_bg(sb);
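/*
 * Existing descriptor blocks keep their pre-meta_bg placement; only groups
 * described by descriptor blocks numbered s_first_meta_bg and above use the
 * meta_bg layout, so point it at the first descriptor block not yet in use.
 */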
1855 sbi->s_es->s_first_meta_bg =
1856 cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));
1857
1858 err = ext4_handle_dirty_super(handle, sb);
1859 if (err) {
1860 ext4_std_error(sb, err);
1861 goto errout;
1862 }
1863
1864 if (inode) {
1865 nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]);
1866 ext4_free_blocks(handle, inode, NULL, nr, 1,
1867 EXT4_FREE_BLOCKS_METADATA |
1868 EXT4_FREE_BLOCKS_FORGET);
1869 ei->i_data[EXT4_DIND_BLOCK] = 0;
1870 inode->i_blocks = 0;
1871
1872 err = ext4_mark_inode_dirty(handle, inode);
1873 if (err)
1874 ext4_std_error(sb, err);
1875 }
1876
1877 errout:
1878 ret = ext4_journal_stop(handle);
1879 if (!err)
1880 err = ret;
1881 return err;
1882
1883 invalid_resize_inode:
1884 ext4_error(sb, "corrupted/inconsistent resize inode");
1885 return -EINVAL;
1886 }
1887
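/*
 * Illustrative user-space sketch: online growth is normally driven by
 * resize2fs, which ultimately issues EXT4_IOC_RESIZE_FS on a file descriptor
 * opened on the mounted filesystem (argv[1] below is the mount point,
 * argv[2] the new size in filesystem blocks).  A minimal equivalent,
 * assuming the ioctl number below matches the definition in fs/ext4/ext4.h,
 * might look like:
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/ioctl.h>
 *	#include <linux/types.h>
 *
 *	#define EXT4_IOC_RESIZE_FS	_IOW('f', 16, __u64)
 *
 *	int main(int argc, char **argv)
 *	{
 *		__u64 blocks;
 *		int fd;
 *
 *		if (argc != 3)
 *			return 1;
 *		blocks = strtoull(argv[2], NULL, 0);
 *		fd = open(argv[1], O_RDONLY);
 *		if (fd < 0 || ioctl(fd, EXT4_IOC_RESIZE_FS, &blocks) < 0)
 *			perror("EXT4_IOC_RESIZE_FS");
 *		return 0;
 *	}
 *
 * The kernel side of that ioctl calls ext4_resize_begin(), then
 * ext4_resize_fs() below with the requested block count, and finally
 * ext4_resize_end().
 */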
1888 /*
1889 * ext4_resize_fs() resizes a fs to the new size specified by @n_blocks_count
1890 *
1891 * @sb: super block of the fs to be resized
1892 * @n_blocks_count: the number of blocks in the resized fs
1893 */
1894 int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
1895 {
1896 struct ext4_new_flex_group_data *flex_gd = NULL;
1897 struct ext4_sb_info *sbi = EXT4_SB(sb);
1898 struct ext4_super_block *es = sbi->s_es;
1899 struct buffer_head *bh;
1900 struct inode *resize_inode = NULL;
1901 ext4_grpblk_t add, offset;
1902 unsigned long n_desc_blocks;
1903 unsigned long o_desc_blocks;
1904 ext4_group_t o_group;
1905 ext4_group_t n_group;
1906 ext4_fsblk_t o_blocks_count;
1907 ext4_fsblk_t n_blocks_count_retry = 0;
1908 unsigned long last_update_time = 0;
1909 int err = 0, flexbg_size = 1 << sbi->s_log_groups_per_flex;
1910 int meta_bg;
1911
1912 /* See if the device is actually as big as what was requested */
1913 bh = sb_bread(sb, n_blocks_count - 1);
1914 if (!bh) {
1915 ext4_warning(sb, "can't read last block, resize aborted");
1916 return -ENOSPC;
1917 }
1918 brelse(bh);
1919
1920 retry:
1921 o_blocks_count = ext4_blocks_count(es);
1922
1923 ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu "
1924 "to %llu blocks", o_blocks_count, n_blocks_count);
1925
1926 if (n_blocks_count < o_blocks_count) {
1927 /* On-line shrinking not supported */
1928 ext4_warning(sb, "can't shrink FS - resize aborted");
1929 return -EINVAL;
1930 }
1931
1932 if (n_blocks_count == o_blocks_count)
1933 /* Nothing to do */
1934 return 0;
1935
1936 n_group = ext4_get_group_number(sb, n_blocks_count - 1);
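/*
 * s_inodes_count is a 32-bit on-disk field, so reject sizes whose group
 * count would push (n_group + 1) * EXT4_INODES_PER_GROUP(sb) past 2^32 - 1.
 */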
1937 if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
1938 ext4_warning(sb, "resize would cause inodes_count overflow");
1939 return -EINVAL;
1940 }
1941 ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);
1942
1943 n_desc_blocks = num_desc_blocks(sb, n_group + 1);
1944 o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count);
1945
1946 meta_bg = ext4_has_feature_meta_bg(sb);
1947
1948 if (ext4_has_feature_resize_inode(sb)) {
1949 if (meta_bg) {
1950 ext4_error(sb, "resize_inode and meta_bg enabled "
1951 "simultaneously");
1952 return -EINVAL;
1953 }
1954 if (n_desc_blocks > o_desc_blocks +
1955 le16_to_cpu(es->s_reserved_gdt_blocks)) {
1956 n_blocks_count_retry = n_blocks_count;
1957 n_desc_blocks = o_desc_blocks +
1958 le16_to_cpu(es->s_reserved_gdt_blocks);
1959 n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
1960 n_blocks_count = (ext4_fsblk_t)n_group *
1961 EXT4_BLOCKS_PER_GROUP(sb);
1962 n_group--; /* set to last group number */
1963 }
1964
1965 if (!resize_inode)
1966 resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
1967 EXT4_IGET_SPECIAL);
1968 if (IS_ERR(resize_inode)) {
1969 ext4_warning(sb, "Error opening resize inode");
1970 return PTR_ERR(resize_inode);
1971 }
1972 }
1973
1974 if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) {
1975 err = ext4_convert_meta_bg(sb, resize_inode);
1976 if (err)
1977 goto out;
1978 if (resize_inode) {
1979 iput(resize_inode);
1980 resize_inode = NULL;
1981 }
1982 if (n_blocks_count_retry) {
1983 n_blocks_count = n_blocks_count_retry;
1984 n_blocks_count_retry = 0;
1985 goto retry;
1986 }
1987 }
1988
1989 /*
1990 * Make sure the last group has enough space so that it's
1991 * guaranteed to have enough space for all metadata blocks
1992 * that it might need to hold. (We might not need to store
1993 * the inode table blocks in the last block group, but there
1994 * will be cases where this might be needed.)
1995 */
1996 if ((ext4_group_first_block_no(sb, n_group) +
1997 ext4_group_overhead_blocks(sb, n_group) + 2 +
1998 sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
1999 n_blocks_count = ext4_group_first_block_no(sb, n_group);
2000 n_group--;
2001 n_blocks_count_retry = 0;
2002 if (resize_inode) {
2003 iput(resize_inode);
2004 resize_inode = NULL;
2005 }
2006 goto retry;
2007 }
2008
2009 /* extend the last group */
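/*
 * "offset" (computed from o_blocks_count - 1 above) is a cluster offset, so
 * (offset + 1) clusters of the old last group are in use; if new groups will
 * follow, top that group up first, converting the remainder back to blocks.
 */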
2010 if (n_group == o_group)
2011 add = n_blocks_count - o_blocks_count;
2012 else
2013 add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1));
2014 if (add > 0) {
2015 err = ext4_group_extend_no_check(sb, o_blocks_count, add);
2016 if (err)
2017 goto out;
2018 }
2019
2020 if (ext4_blocks_count(es) == n_blocks_count)
2021 goto out;
2022
2023 err = ext4_alloc_flex_bg_array(sb, n_group + 1);
2024 if (err)
2025 goto out;
2026
2027 err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
2028 if (err)
2029 goto out;
2030
2031 flex_gd = alloc_flex_gd(flexbg_size);
2032 if (flex_gd == NULL) {
2033 err = -ENOMEM;
2034 goto out;
2035 }
2036
2037 /* Add flex groups. Note that a regular group is a
2038 * flex group with 1 group.
2039 */
2040 while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
2041 flexbg_size)) {
2042 if (jiffies - last_update_time > HZ * 10) {
2043 if (last_update_time)
2044 ext4_msg(sb, KERN_INFO,
2045 "resized to %llu blocks",
2046 ext4_blocks_count(es));
2047 last_update_time = jiffies;
2048 }
2049 if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
2050 break;
2051 err = ext4_flex_group_add(sb, resize_inode, flex_gd);
2052 if (unlikely(err))
2053 break;
2054 }
2055
2056 if (!err && n_blocks_count_retry) {
2057 n_blocks_count = n_blocks_count_retry;
2058 n_blocks_count_retry = 0;
2059 free_flex_gd(flex_gd);
2060 flex_gd = NULL;
2061 if (resize_inode) {
2062 iput(resize_inode);
2063 resize_inode = NULL;
2064 }
2065 goto retry;
2066 }
2067
2068 out:
2069 if (flex_gd)
2070 free_flex_gd(flex_gd);
2071 if (resize_inode != NULL)
2072 iput(resize_inode);
2073 if (!err)
	ext4_msg(sb, KERN_INFO, "resized filesystem to %llu", n_blocks_count);
2074 return err;
2075 }