/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 * Copyright (c) 2005, Bull S.A.
 * Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 * - ext4*_error() should be used in some situations
 * - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 * - smart tree reduction
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/ext4_jbd2.h>
#include <linux/jbd.h>
#include <linux/smp_lock.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/ext4_fs_extents.h>
#include <asm/uaccess.h>

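/*
 * sanity check for an extent header: verifies the magic, eh_max and
 * eh_entries fields, reporting the inode as corrupted via ext4_error()
 * if anything is inconsistent
 */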
static int ext4_ext_check_header(const char *function, struct inode *inode,
				 struct ext4_extent_header *eh)
{
	const char *error_msg = NULL;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error(inode->i_sb, function,
		   "bad header in inode #%lu: %s - magic %x, "
		   "entries %u, max %u, depth %u",
		   inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
		   le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
		   le16_to_cpu(eh->eh_depth));

	return -EIO;
}

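/*
 * make sure the handle has at least 'needed' credits left: first try
 * to extend the current transaction in place and, failing that,
 * restart it. a restart failure is propagated via ERR_PTR, which the
 * callers check with IS_ERR().
 */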
static handle_t *ext4_ext_journal_restart(handle_t *handle, int needed)
{
	int err;

	if (handle->h_buffer_credits > needed)
		return handle;
	if (!ext4_journal_extend(handle, needed))
		return handle;
	err = ext4_journal_restart(handle, needed);
	if (err)
		return ERR_PTR(err);

	return handle;
}

/*
 * could return:
 * - EROFS
 * - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
			       struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 * - EROFS
 * - ENOMEM
 * - EIO
 */
static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
			  struct ext4_ext_path *path)
{
	int err;
	if (path->p_bh) {
		/* path points to block */
		err = ext4_journal_dirty_metadata(handle, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

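/*
 * choose an allocation goal for a new block at logical 'block':
 * prefer a block right after the nearest extent found in the path,
 * then the block the index itself lives in, and finally fall back to
 * the inode's block group, coloured by the caller's pid to spread
 * allocations of concurrent writers
 */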
static int ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      unsigned long block)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned long bg_start;
	unsigned long colour;
	int depth;

	if (path) {
		struct ext4_extent *ex;
		depth = path->p_depth;

		/* try to predict block placement */
		if ((ex = path[depth].p_ext))
			return le32_to_cpu(ex->ee_start)
				+ (block - le32_to_cpu(ex->ee_block));

		/* it looks like the index is empty;
		 * try to find a starting block from the index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	bg_start = (ei->i_block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) +
		le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block);
	colour = (current->pid % 16) *
		(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour + block;
}

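/* thin wrapper around ext4_new_block() that supplies the goal above */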
static int
ext4_ext_new_block(handle_t *handle, struct inode *inode,
		   struct ext4_ext_path *path,
		   struct ext4_extent *ex, int *err)
{
	int goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_block(handle, inode, goal, err);
	return newblock;
}

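/*
 * ext4_ext_space_*() return how many entries fit into a tree node:
 * the _block variants size an on-disk block, the _root variants the
 * i_data area inside the inode. AGRESSIVE_TEST caps the counts to
 * force deep trees on small files for testing.
 */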
static inline int ext4_ext_space_block(struct inode *inode)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGRESSIVE_TEST
	if (size > 6)
		size = 6;
#endif
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGRESSIVE_TEST
	if (size > 5)
		size = 5;
#endif
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGRESSIVE_TEST
	if (size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGRESSIVE_TEST
	if (size > 4)
		size = 4;
#endif
	return size;
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug(" %d->%d", le32_to_cpu(path->p_idx->ei_block),
				  le32_to_cpu(path->p_idx->ei_leaf));
		} else if (path->p_ext) {
			ext_debug(" %d:%d:%d",
				  le32_to_cpu(path->p_ext->ee_block),
				  le16_to_cpu(path->p_ext->ee_len),
				  le32_to_cpu(path->p_ext->ee_start));
		} else
			ext_debug(" []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:%d:%d ", le32_to_cpu(ex->ee_block),
			  le16_to_cpu(ex->ee_len),
			  le32_to_cpu(ex->ee_start));
	}
	ext_debug("\n");
}
#else
#define ext4_ext_show_path(inode,path)
#define ext4_ext_show_leaf(inode,path)
#endif

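/* release the buffer heads referenced by each level of the path */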
static void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}

/*
 * binary search for closest index by given block
 */
static void
ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, int block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
	BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
	BUG_ON(le16_to_cpu(eh->eh_entries) <= 0);

	ext_debug("binsearch for %d(idx): ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_FIRST_INDEX(eh) + le16_to_cpu(eh->eh_entries) - 1;
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, l->ei_block,
			  m, m->ei_block, r, r->ei_block);
	}

	path->p_idx = l - 1;
	ext_debug(" -> %d->%d ", le32_to_cpu(path->p_idx->ei_block),
		  le32_to_cpu(path->p_idx->ei_leaf));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 &&
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk("k=%d, ix=0x%p, first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk("%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
			       <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}

/*
 * binary search for closest extent by given block
 */
static void
ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
	BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty yet:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %d: ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_FIRST_EXTENT(eh) + le16_to_cpu(eh->eh_entries) - 1;

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, l->ee_block,
			  m, m->ee_block, r, r->ee_block);
	}

	path->p_ext = l - 1;
	ext_debug(" -> %d:%d:%d ",
		  le32_to_cpu(path->p_ext->ee_block),
		  le32_to_cpu(path->p_ext->ee_start),
		  le16_to_cpu(path->p_ext->ee_len));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
			       <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}

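/* set up an empty extent tree rooted in the inode's i_data area */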
int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}

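/*
 * descend the tree from the root to the leaf covering 'block',
 * recording at each level the header, the chosen index and the buffer
 * head. a path array may be passed in for reuse; otherwise one is
 * allocated (with room for a possible depth increase) and must be
 * released by the caller via ext4_ext_drop_refs() + kfree()
 */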
struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, int block, struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	BUG_ON(eh == NULL);
	if (ext4_ext_check_header(__FUNCTION__, inode, eh))
		return ERR_PTR(-EIO);

	i = depth = ext_depth(inode);

	/* account possible depth increase */
	if (!path) {
		path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 2),
			       GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1));
	path[0].p_hdr = eh;

	/* walk through the tree */
	while (i) {
		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = le32_to_cpu(path[ppos].p_idx->ei_leaf);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_bread(inode->i_sb, path[ppos].p_block);
		if (!bh)
			goto err;

		eh = ext_block_hdr(bh);
		ppos++;
		BUG_ON(ppos > depth);
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		if (ext4_ext_check_header(__FUNCTION__, inode, eh))
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_hdr = eh;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	if (ext4_ext_check_header(__FUNCTION__, inode, eh))
		goto err;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(-EIO);
}

/*
 * insert a new index [logical;ptr] into the block at curp;
 * it checks where to insert: before curp or after curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, int ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	if ((err = ext4_ext_get_access(handle, inode, curp)))
		return err;

	BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
			len = (len - 1) * sizeof(struct ext4_extent_idx);
			len = len < 0 ? 0 : len;
			ext_debug("insert new index %d after: %d. "
				  "move %d from 0x%p to 0x%p\n",
				  logical, ptr, len,
				  (curp->p_idx + 1), (curp->p_idx + 2));
			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
		}
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		len = len * sizeof(struct ext4_extent_idx);
		len = len < 0 ? 0 : len;
		ext_debug("insert new index %d before: %d. "
			  "move %d from 0x%p to 0x%p\n",
			  logical, ptr, len,
			  curp->p_idx, (curp->p_idx + 1));
		memmove(curp->p_idx + 1, curp->p_idx, len);
		ix = curp->p_idx;
	}

	ix->ei_block = cpu_to_le32(logical);
	ix->ei_leaf = cpu_to_le32(ptr);
	curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries)+1);

	BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
	       > le16_to_cpu(curp->p_hdr->eh_max));
	BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
 * routine inserts a new subtree into the path, using the free index entry
 * at depth 'at':
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (to the right of the split
 *   point) into the newly allocated blocks
 * - initializes the subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct ext4_extent *ex;
	int i = at, k, m, a;
	unsigned long newblock, oldblock;
	__le32 border;
	int *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: for now the decision is simplest: split at the current extent */

	/* if the current leaf is to be split, then we should use
	 * the border from the split point */
	BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr));
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
			  " next leaf starts at %d\n",
			  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
			  " next leaf starts at %d\n",
			  le32_to_cpu(border));
	}

	/*
	 * if an error occurs, we break processing
	 * and turn the filesystem read-only. so, the index won't
	 * be inserted and the tree will stay in a consistent
	 * state. the next mount will repair buffers too
	 */

	/*
	 * get array to track all allocated blocks
	 * we need this to handle errors and free blocks
	 * upon them
	 */
	ablocks = kmalloc(sizeof(unsigned long) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;
	memset(ablocks, 0, sizeof(unsigned long) * depth);

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	BUG_ON(newblock == 0);
	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		goto cleanup;
	}
	lock_buffer(bh);

	if ((err = ext4_journal_get_create_access(handle, bh)))
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	ex = EXT_FIRST_EXTENT(neh);

	/* move the remainder of path[depth] to the new leaf */
	BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
	/* start copy from next extent */
	/* TODO: we could do it by single memmove */
	m = 0;
	path[depth].p_ext++;
	while (path[depth].p_ext <=
			EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%d:%d in new leaf %lu\n",
			  le32_to_cpu(path[depth].p_ext->ee_block),
			  le32_to_cpu(path[depth].p_ext->ee_start),
			  le16_to_cpu(path[depth].p_ext->ee_len),
			  newblock);
		/*memmove(ex++, path[depth].p_ext++,
				sizeof(struct ext4_extent));
		neh->eh_entries++;*/
		path[depth].p_ext++;
		m++;
	}
	if (m) {
		memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
		neh->eh_entries = cpu_to_le16(le16_to_cpu(neh->eh_entries)+m);
	}

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	if ((err = ext4_journal_dirty_metadata(handle, bh)))
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		if ((err = ext4_ext_get_access(handle, inode, path + depth)))
			goto cleanup;
		path[depth].p_hdr->eh_entries =
			cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m);
		if ((err = ext4_ext_dirty(handle, inode, path + depth)))
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	BUG_ON(k < 0);
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (!bh) {
			err = -EIO;
			goto cleanup;
		}
		lock_buffer(bh);

		if ((err = ext4_journal_get_create_access(handle, bh)))
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		fidx->ei_leaf = cpu_to_le32(oldblock);

		ext_debug("int.index at %d (block %lu): %lu -> %lu\n", i,
			  newblock, (unsigned long) le32_to_cpu(border),
			  oldblock);
		/* copy indexes */
		m = 0;
		path[i].p_idx++;

		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
			  EXT_MAX_INDEX(path[i].p_hdr));
		BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
		       EXT_LAST_INDEX(path[i].p_hdr));
		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
			ext_debug("%d: move %d:%d in new index %lu\n", i,
				  le32_to_cpu(path[i].p_idx->ei_block),
				  le32_to_cpu(path[i].p_idx->ei_leaf),
				  newblock);
			/*memmove(++fidx, path[i].p_idx++,
					sizeof(struct ext4_extent_idx));
			neh->eh_entries++;
			BUG_ON(neh->eh_entries > neh->eh_max);*/
			path[i].p_idx++;
			m++;
		}
		if (m) {
			memmove(++fidx, path[i].p_idx - m,
				sizeof(struct ext4_extent_idx) * m);
			neh->eh_entries =
				cpu_to_le16(le16_to_cpu(neh->eh_entries) + m);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		if ((err = ext4_journal_dirty_metadata(handle, bh)))
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			path[i].p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path[i].p_hdr->eh_entries)-m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	if (err)
		goto cleanup;

	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, ablocks[i], 1);
		}
	}
	kfree(ablocks);

	return err;
}

/*
 * routine implements the tree growing procedure:
 * - allocates a new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes the new top-level, creating an index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *path,
				 struct ext4_extent *newext)
{
	struct ext4_ext_path *curp = path;
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct buffer_head *bh;
	unsigned long newblock;
	int err = 0;

	newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		ext4_std_error(inode->i_sb, err);
		return err;
	}
	lock_buffer(bh);

	if ((err = ext4_journal_get_create_access(handle, bh))) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	if ((err = ext4_journal_dirty_metadata(handle, bh)))
		goto out;

	/* create index in new top-level index: num,max,pointer */
	if ((err = ext4_ext_get_access(handle, inode, curp)))
		goto out;

	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode));
	curp->p_hdr->eh_entries = cpu_to_le16(1);
	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
	/* FIXME: it works, but actually path[0] can be index */
	curp->p_idx->ei_block = EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
	curp->p_idx->ei_leaf = cpu_to_le32(newblock);

	neh = ext_inode_hdr(inode);
	fidx = EXT_FIRST_INDEX(neh);
	ext_debug("new root: num %d(%d), lblock %d, ptr %d\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(fidx->ei_block), le32_to_cpu(fidx->ei_leaf));

	neh->eh_depth = cpu_to_le16(path->p_depth + 1);
	err = ext4_ext_dirty(handle, inode, curp);
out:
	brelse(bh);

	return err;
}

/*
 * routine finds an empty index and adds a new leaf. if no free index
 * is found, then it requests in-depth growing
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
				    struct ext4_ext_path *path,
				    struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up the tree looking for a free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use an already allocated block for the index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found an index with a free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, path, newext, i);

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
					    le32_to_cpu(newext->ee_block),
					    path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, path, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
					    le32_to_cpu(newext->ee_block),
					    path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only the first grow (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}

/*
 * returns the allocated block in the subsequent extent or EXT_MAX_BLOCK.
 * NOTE: it considers the block number from an index entry as an
 * allocated block. thus, index entries have to be consistent
 * with leaves
 */
static unsigned long
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCK;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCK;
}

/*
 * returns the first allocated block from the next leaf or EXT_MAX_BLOCK
 */
static unsigned ext4_ext_next_leaf_block(struct inode *inode,
					 struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* a zero-depth tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCK;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCK;
}

/*
 * if the leaf gets modified and the modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct the tree in all cases?
 */
int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
			     struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	BUG_ON(ex == NULL);
	BUG_ON(eh == NULL);

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct the tree only if the first extent in the leaf
		 * got modified */
		return 0;
	}

	/*
	 * TODO: we need correction if the border is smaller than the current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	if ((err = ext4_ext_get_access(handle, inode, path + k)))
		return err;
	path[k].p_idx->ei_block = border;
	if ((err = ext4_ext_dirty(handle, inode, path + k)))
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		if ((err = ext4_ext_get_access(handle, inode, path + k)))
			break;
		path[k].p_idx->ei_block = border;
		if ((err = ext4_ext_dirty(handle, inode, path + k)))
			break;
	}

	return err;
}

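/*
 * two extents are mergeable when the second begins exactly where the
 * first ends, both logically and physically; e.g. (lblock 10, len 5,
 * start 100) followed by (lblock 15, len 3, start 105) can become
 * (lblock 10, len 8, start 100). under AGRESSIVE_TEST the merge is
 * refused early to keep extents short.
 */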
static int inline
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
			   struct ext4_extent *ex2)
{
	/* FIXME: 48bit support */
	if (le32_to_cpu(ex1->ee_block) + le16_to_cpu(ex1->ee_len)
			!= le32_to_cpu(ex2->ee_block))
		return 0;

#ifdef AGRESSIVE_TEST
	if (le16_to_cpu(ex1->ee_len) >= 4)
		return 0;
#endif

	if (le32_to_cpu(ex1->ee_start) + le16_to_cpu(ex1->ee_len)
			== le32_to_cpu(ex2->ee_start))
		return 1;
	return 0;
}

/*
 * this routine tries to merge the requested extent into an existing
 * extent or inserts the requested extent as a new one into the tree,
 * creating a new leaf in the no-space case
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path *path,
			   struct ext4_extent *newext)
{
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err, next;

	BUG_ON(newext->ee_len == 0);
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	BUG_ON(path[depth].p_hdr == NULL);

	/* try to insert block into found extent and return */
	if (ex && ext4_can_extents_be_merged(inode, ex, newext)) {
		ext_debug("append %d block to %d:%d (from %d)\n",
			  le16_to_cpu(newext->ee_len),
			  le32_to_cpu(ex->ee_block),
			  le16_to_cpu(ex->ee_len),
			  le32_to_cpu(ex->ee_start));
		if ((err = ext4_ext_get_access(handle, inode, path + depth)))
			return err;
		ex->ee_len = cpu_to_le16(le16_to_cpu(ex->ee_len)
					 + le16_to_cpu(newext->ee_len));
		eh = path[depth].p_hdr;
		nearex = ex;
		goto merge;
	}

repeat:
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably the next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = ext4_ext_next_leaf_block(inode, path);
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
	    && next != EXT_MAX_BLOCK) {
		ext_debug("next leaf block - %d\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isn't full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto repeat;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * there is no free space in the found leaf,
	 * so we have to add a new leaf to the tree
	 */
	err = ext4_ext_create_new_leaf(handle, inode, path, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	if ((err = ext4_ext_get_access(handle, inode, path + depth)))
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %d:%d:%d\n",
			  le32_to_cpu(newext->ee_block),
			  le32_to_cpu(newext->ee_start),
			  le16_to_cpu(newext->ee_len));
		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
	} else if (le32_to_cpu(newext->ee_block)
		   > le32_to_cpu(nearex->ee_block)) {
		/* BUG_ON(newext->ee_block == nearex->ee_block); */
		if (nearex != EXT_LAST_EXTENT(eh)) {
			len = EXT_MAX_EXTENT(eh) - nearex;
			len = (len - 1) * sizeof(struct ext4_extent);
			len = len < 0 ? 0 : len;
			ext_debug("insert %d:%d:%d after: nearest 0x%p, "
				  "move %d from 0x%p to 0x%p\n",
				  le32_to_cpu(newext->ee_block),
				  le32_to_cpu(newext->ee_start),
				  le16_to_cpu(newext->ee_len),
				  nearex, len, nearex + 1, nearex + 2);
			memmove(nearex + 2, nearex + 1, len);
		}
		path[depth].p_ext = nearex + 1;
	} else {
		BUG_ON(newext->ee_block == nearex->ee_block);
		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
		len = len < 0 ? 0 : len;
		ext_debug("insert %d:%d:%d before: nearest 0x%p, "
			  "move %d from 0x%p to 0x%p\n",
			  le32_to_cpu(newext->ee_block),
			  le32_to_cpu(newext->ee_start),
			  le16_to_cpu(newext->ee_len),
			  nearex, len, nearex + 1, nearex + 2);
		memmove(nearex + 1, nearex, len);
		path[depth].p_ext = nearex;
	}

	eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)+1);
	nearex = path[depth].p_ext;
	nearex->ee_block = newext->ee_block;
	nearex->ee_start = newext->ee_start;
	nearex->ee_len = newext->ee_len;
	/* FIXME: support for large fs */
	nearex->ee_start_hi = 0;

merge:
	/* try to merge extents to the right */
	while (nearex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, nearex, nearex + 1))
			break;
		/* merge with next extent! */
		nearex->ee_len = cpu_to_le16(le16_to_cpu(nearex->ee_len)
					     + le16_to_cpu(nearex[1].ee_len));
		if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - nearex - 1)
				* sizeof(struct ext4_extent);
			memmove(nearex + 1, nearex + 2, len);
		}
		eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
		BUG_ON(eh->eh_entries == 0);
	}

	/* try to merge extents to the left */

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + depth);

cleanup:
	if (npath) {
		ext4_ext_drop_refs(npath);
		kfree(npath);
	}
	ext4_ext_tree_changed(inode);
	ext4_ext_invalidate_cache(inode);
	return err;
}

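/*
 * iterate over the range [block, block+num) of the file, invoking
 * 'func' once per allocated extent and once per gap between extents.
 * the callback receives a struct ext4_ext_cache describing the region
 * (EXT4_EXT_CACHE_EXTENT or EXT4_EXT_CACHE_GAP) and may return
 * EXT_REPEAT to revisit the same region, EXT_BREAK to stop early, or
 * a negative error; returning 0 continues the walk.
 *
 * a minimal callback sketch (hypothetical, only to illustrate the
 * calling convention):
 *
 *	static int show_extent_cb(struct inode *inode,
 *				  struct ext4_ext_path *path,
 *				  struct ext4_ext_cache *cbex, void *cbdata)
 *	{
 *		if (cbex->ec_type == EXT4_EXT_CACHE_EXTENT)
 *			ext_debug("extent %lu:%lu -> %lu\n",
 *				  (unsigned long) cbex->ec_block,
 *				  (unsigned long) cbex->ec_len,
 *				  (unsigned long) cbex->ec_start);
 *		return 0;
 *	}
 */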
int ext4_ext_walk_space(struct inode *inode, unsigned long block,
			unsigned long num, ext_prepare_callback func,
			void *cbdata)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_ext_cache cbex;
	struct ext4_extent *ex;
	unsigned long next, start = 0, end = 0;
	unsigned long last = block + num;
	int depth, exists, err = 0;

	BUG_ON(func == NULL);
	BUG_ON(inode == NULL);

	while (block < last && block != EXT_MAX_BLOCK) {
		num = last - block;
		/* find extent for this block */
		path = ext4_ext_find_extent(inode, block, path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			path = NULL;
			break;
		}

		depth = ext_depth(inode);
		BUG_ON(path[depth].p_hdr == NULL);
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);

		exists = 0;
		if (!ex) {
			/* there is no extent yet, so try to allocate
			 * all requested space */
			start = block;
			end = block + num;
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* need to allocate space before found extent */
			start = block;
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
				end = block + num;
		} else if (block >=
			   le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len)) {
			/* need to allocate space after found extent */
			start = block;
			end = block + num;
			if (end >= next)
				end = next;
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			/*
			 * some part of requested space is covered
			 * by found extent
			 */
			start = block;
			end = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len);
			if (block + num < end)
				end = block + num;
			exists = 1;
		} else {
			BUG();
		}
		BUG_ON(end <= start);

		if (!exists) {
			cbex.ec_block = start;
			cbex.ec_len = end - start;
			cbex.ec_start = 0;
			cbex.ec_type = EXT4_EXT_CACHE_GAP;
		} else {
			cbex.ec_block = le32_to_cpu(ex->ee_block);
			cbex.ec_len = le16_to_cpu(ex->ee_len);
			cbex.ec_start = le32_to_cpu(ex->ee_start);
			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
		}

		BUG_ON(cbex.ec_len == 0);
		err = func(inode, path, &cbex, cbdata);
		ext4_ext_drop_refs(path);

		if (err < 0)
			break;
		if (err == EXT_REPEAT)
			continue;
		else if (err == EXT_BREAK) {
			err = 0;
			break;
		}

		if (ext_depth(inode) != depth) {
			/* depth was changed. we have to realloc path */
			kfree(path);
			path = NULL;
		}

		block = cbex.ec_block + cbex.ec_len;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	return err;
}

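/* remember a single extent or gap in the inode's one-entry extent cache */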
static inline void
ext4_ext_put_in_cache(struct inode *inode, __u32 block,
		      __u32 len, __u32 start, int type)
{
	struct ext4_ext_cache *cex;
	BUG_ON(len == 0);
	cex = &EXT4_I(inode)->i_cached_extent;
	cex->ec_type = type;
	cex->ec_block = block;
	cex->ec_len = len;
	cex->ec_start = start;
}

/*
 * this routine calculates the boundaries of the gap the requested block
 * fits into and caches this gap
 */
static inline void
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
			  unsigned long block)
{
	int depth = ext_depth(inode);
	unsigned long lblock, len;
	struct ext4_extent *ex;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		/* there is no extent yet, so gap is [0;-] */
		lblock = 0;
		len = EXT_MAX_BLOCK;
		ext_debug("cache gap(whole file):");
	} else if (block < le32_to_cpu(ex->ee_block)) {
		lblock = block;
		len = le32_to_cpu(ex->ee_block) - block;
		ext_debug("cache gap(before): %lu [%lu:%lu]",
			  (unsigned long) block,
			  (unsigned long) le32_to_cpu(ex->ee_block),
			  (unsigned long) le16_to_cpu(ex->ee_len));
	} else if (block >= le32_to_cpu(ex->ee_block)
		   + le16_to_cpu(ex->ee_len)) {
		lblock = le32_to_cpu(ex->ee_block)
			 + le16_to_cpu(ex->ee_len);
		len = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%lu:%lu] %lu",
			  (unsigned long) le32_to_cpu(ex->ee_block),
			  (unsigned long) le16_to_cpu(ex->ee_len),
			  (unsigned long) block);
		BUG_ON(len == lblock);
		len = len - lblock;
	} else {
		lblock = len = 0;
		BUG();
	}

	ext_debug(" -> %lu:%lu\n", (unsigned long) lblock, len);
	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
}

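/*
 * look 'block' up in the inode's single-entry extent cache; on a hit
 * the cached region is copied into *ex and the cache type
 * (EXT4_EXT_CACHE_EXTENT or EXT4_EXT_CACHE_GAP) is returned,
 * otherwise EXT4_EXT_CACHE_NO
 */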
static inline int
ext4_ext_in_cache(struct inode *inode, unsigned long block,
		  struct ext4_extent *ex)
{
	struct ext4_ext_cache *cex;

	cex = &EXT4_I(inode)->i_cached_extent;

	/* does the cache hold valid data? */
	if (cex->ec_type == EXT4_EXT_CACHE_NO)
		return EXT4_EXT_CACHE_NO;

	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
	       cex->ec_type != EXT4_EXT_CACHE_EXTENT);
	if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
		ex->ee_block = cpu_to_le32(cex->ec_block);
		ex->ee_start = cpu_to_le32(cex->ec_start);
		ex->ee_len = cpu_to_le16(cex->ec_len);
		ext_debug("%lu cached by %lu:%lu:%lu\n",
			  (unsigned long) block,
			  (unsigned long) cex->ec_block,
			  (unsigned long) cex->ec_len,
			  (unsigned long) cex->ec_start);
		return cex->ec_type;
	}

	/* not in cache */
	return EXT4_EXT_CACHE_NO;
}

/*
 * routine removes the index from the index block.
 * it's used in the truncate case only, thus all requests are for
 * the last index in the block only
 */
int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
		    struct ext4_ext_path *path)
{
	struct buffer_head *bh;
	int err;
	unsigned long leaf;

	/* free index block */
	path--;
	leaf = le32_to_cpu(path->p_idx->ei_leaf);
	BUG_ON(path->p_hdr->eh_entries == 0);
	if ((err = ext4_ext_get_access(handle, inode, path)))
		return err;
	path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1);
	if ((err = ext4_ext_dirty(handle, inode, path)))
		return err;
	ext_debug("index is empty, remove it, free block %lu\n", leaf);
	bh = sb_find_get_block(inode->i_sb, leaf);
	ext4_forget(handle, 1, inode, bh, leaf);
	ext4_free_blocks(handle, inode, leaf, 1);
	return err;
}

/*
 * This routine returns the maximum number of credits the extent tree
 * can consume. It should be OK for low-performance paths like ->writepage().
 * To allow many writing processes to fit into a single transaction,
 * the caller should calculate credits under truncate_mutex and
 * pass the actual path.
 */
int inline ext4_ext_calc_credits_for_insert(struct inode *inode,
					    struct ext4_ext_path *path)
{
	int depth, needed;

	if (path) {
		/* probably there is space in leaf? */
		depth = ext_depth(inode);
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
		    < le16_to_cpu(path[depth].p_hdr->eh_max))
			return 1;
	}

	/*
	 * given a 32bit logical block (4294967296 blocks), the max. tree
	 * can be 4 levels in depth -- 4 * 340^4 == 53453440000.
	 * let's also add one more level for imbalance.
	 */
	depth = 5;

	/* allocation of new data block(s) */
	needed = 2;

	/*
	 * tree can be full, so it'd need to grow in depth:
	 * allocation + old root + new root
	 */
	needed += 2 + 1 + 1;

	/*
	 * Index split can happen, we'd need:
	 *    allocate intermediate indexes (bitmap + group)
	 *  + change two blocks at each level, but root (already included)
	 */
	needed += (depth * 2) + (depth * 2);

	/* any allocation modifies superblock */
	needed += 1;

	return needed;
}

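/*
 * free the on-disk blocks backing the [from, to] part of extent 'ex';
 * only tail removal is implemented, anything else just logs a
 * "strange request" message
 */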
static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
			      struct ext4_extent *ex,
			      unsigned long from, unsigned long to)
{
	struct buffer_head *bh;
	int i;

#ifdef EXTENTS_STATS
	{
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		unsigned short ee_len = le16_to_cpu(ex->ee_len);
		spin_lock(&sbi->s_ext_stats_lock);
		sbi->s_ext_blocks += ee_len;
		sbi->s_ext_extents++;
		if (ee_len < sbi->s_ext_min)
			sbi->s_ext_min = ee_len;
		if (ee_len > sbi->s_ext_max)
			sbi->s_ext_max = ee_len;
		if (ext_depth(inode) > sbi->s_depth_max)
			sbi->s_depth_max = ext_depth(inode);
		spin_unlock(&sbi->s_ext_stats_lock);
	}
#endif
	if (from >= le32_to_cpu(ex->ee_block)
	    && to == le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
		/* tail removal */
		unsigned long num, start;
		num = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - from;
		start = le32_to_cpu(ex->ee_start) + le16_to_cpu(ex->ee_len) - num;
		ext_debug("free last %lu blocks starting %lu\n", num, start);
		for (i = 0; i < num; i++) {
			bh = sb_find_get_block(inode->i_sb, start + i);
			ext4_forget(handle, 0, inode, bh, start + i);
		}
		ext4_free_blocks(handle, inode, start, num);
	} else if (from == le32_to_cpu(ex->ee_block)
		   && to <= le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
		printk("strange request: removal %lu-%lu from %u:%u\n",
		       from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
	} else {
		printk("strange request: removal(2) %lu-%lu from %u:%u\n",
		       from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
	}
	return 0;
}

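/*
 * remove from the leaf all extents (or their tails) lying at or beyond
 * logical block 'start', walking from the last extent backwards;
 * restarts the journal handle with enough credits for each step and
 * removes the leaf from the index above once it becomes empty
 */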
static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
		 struct ext4_ext_path *path, unsigned long start)
{
	int err = 0, correct_index = 0;
	int depth = ext_depth(inode), credits;
	struct ext4_extent_header *eh;
	unsigned a, b, block, num;
	unsigned long ex_ee_block;
	unsigned short ex_ee_len;
	struct ext4_extent *ex;

	ext_debug("truncate since %lu in leaf\n", start);
	if (!path[depth].p_hdr)
		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
	eh = path[depth].p_hdr;
	BUG_ON(eh == NULL);
	BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
	BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);

	/* find where to start removing */
	ex = EXT_LAST_EXTENT(eh);

	ex_ee_block = le32_to_cpu(ex->ee_block);
	ex_ee_len = le16_to_cpu(ex->ee_len);

	while (ex >= EXT_FIRST_EXTENT(eh) &&
	       ex_ee_block + ex_ee_len > start) {
		ext_debug("remove ext %lu:%u\n", ex_ee_block, ex_ee_len);
		path[depth].p_ext = ex;

		a = ex_ee_block > start ? ex_ee_block : start;
		b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
			ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;

		ext_debug("  border %u:%u\n", a, b);

		if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
			block = 0;
			num = 0;
			BUG();
		} else if (a != ex_ee_block) {
			/* remove tail of the extent */
			block = ex_ee_block;
			num = a - block;
		} else if (b != ex_ee_block + ex_ee_len - 1) {
			/* remove head of the extent */
			block = a;
			num = b - a;
			/* there is no "make a hole" API yet */
			BUG();
		} else {
			/* remove whole extent: excellent! */
			block = ex_ee_block;
			num = 0;
			BUG_ON(a != ex_ee_block);
			BUG_ON(b != ex_ee_block + ex_ee_len - 1);
		}

		/* at present, extent can't cross block group */
		/* leaf + bitmap + group desc + sb + inode */
		credits = 5;
		if (ex == EXT_FIRST_EXTENT(eh)) {
			correct_index = 1;
			credits += (ext_depth(inode)) + 1;
		}
#ifdef CONFIG_QUOTA
		credits += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
#endif

		handle = ext4_ext_journal_restart(handle, credits);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto out;
		}

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		err = ext4_remove_blocks(handle, inode, ex, a, b);
		if (err)
			goto out;

		if (num == 0) {
			/* this extent is removed entirely; mark the slot unused */
			ex->ee_start = 0;
			eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
		}

		ex->ee_block = cpu_to_le32(block);
		ex->ee_len = cpu_to_le16(num);

		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		ext_debug("new extent: %u:%u:%u\n", block, num,
			  le32_to_cpu(ex->ee_start));
		ex--;
		ex_ee_block = le32_to_cpu(ex->ee_block);
		ex_ee_len = le16_to_cpu(ex->ee_len);
	}

	if (correct_index && eh->eh_entries)
		err = ext4_ext_correct_indexes(handle, inode, path);

	/* if this leaf is free, then we should
	 * remove it from the index block above */
	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
		err = ext4_ext_rm_idx(handle, inode, path + depth);

out:
	return err;
}

1654 | /* | |
1655 | * returns 1 if current index have to be freed (even partial) | |
1656 | */ | |
1657 | static int inline | |
1658 | ext4_ext_more_to_rm(struct ext4_ext_path *path) | |
1659 | { | |
1660 | BUG_ON(path->p_idx == NULL); | |
1661 | ||
1662 | if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) | |
1663 | return 0; | |
1664 | ||
1665 | /* | |
1666 | * if truncate on deeper level happened it it wasn't partial | |
1667 | * so we have to consider current index for truncation | |
1668 | */ | |
1669 | if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) | |
1670 | return 0; | |
1671 | return 1; | |
1672 | } | |
1673 | ||
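/*
 * ext4_ext_remove_space() frees all blocks from @start to the end of
 * the file.  It walks the extent tree from the rightmost entries,
 * removing whole leaves and then any index entries that become empty
 * on the way back up.
 */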
int ext4_ext_remove_space(struct inode *inode, unsigned long start)
{
	struct super_block *sb = inode->i_sb;
	int depth = ext_depth(inode);
	struct ext4_ext_path *path;
	handle_t *handle;
	int i = 0, err = 0;

	ext_debug("truncate since %lu\n", start);

	/* the first extent we are going to free is probably
	 * the last one in its block */
	handle = ext4_journal_start(inode, depth + 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	ext4_ext_invalidate_cache(inode);

	/*
	 * we start scanning from the right side, freeing all the blocks
	 * after i_size and walking down the tree
	 */
	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL);
	if (path == NULL) {
		ext4_journal_stop(handle);
		return -ENOMEM;
	}
	path[0].p_hdr = ext_inode_hdr(inode);
	if (ext4_ext_check_header(__FUNCTION__, inode, path[0].p_hdr)) {
		err = -EIO;
		goto out;
	}
	path[0].p_depth = depth;

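	/*
	 * The loop below walks the tree iteratively: i is the current
	 * level (0 = root, depth = leaf), p_idx points at the index
	 * entry being processed on each level, and p_block remembers
	 * how many entries a level had when we descended, so that
	 * ext4_ext_more_to_rm() can tell whether the level shrank.
	 */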
	while (i >= 0 && err == 0) {
		if (i == depth) {
			/* this is a leaf block */
			err = ext4_ext_rm_leaf(handle, inode, path, start);
			/* the root level has p_bh == NULL; brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}

		/* this is an index block */
		if (!path[i].p_hdr) {
			ext_debug("initialize header\n");
			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
			if (ext4_ext_check_header(__FUNCTION__, inode,
						  path[i].p_hdr)) {
				err = -EIO;
				goto out;
			}
		}

		BUG_ON(le16_to_cpu(path[i].p_hdr->eh_entries)
			   > le16_to_cpu(path[i].p_hdr->eh_max));
		BUG_ON(path[i].p_hdr->eh_magic != EXT4_EXT_MAGIC);

		if (!path[i].p_idx) {
			/* this level hasn't been touched yet */
			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
			ext_debug("init index ptr: hdr 0x%p, num %d\n",
				  path[i].p_hdr,
				  le16_to_cpu(path[i].p_hdr->eh_entries));
		} else {
			/* we were already here; move to the next index */
			path[i].p_idx--;
		}

		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
				i, EXT_FIRST_INDEX(path[i].p_hdr),
				path[i].p_idx);
		if (ext4_ext_more_to_rm(path + i)) {
			/* go to the next level */
			ext_debug("move to level %d (block %d)\n",
				  i + 1, le32_to_cpu(path[i].p_idx->ei_leaf));
			memset(path + i + 1, 0, sizeof(*path));
			path[i+1].p_bh =
				sb_bread(sb, le32_to_cpu(path[i].p_idx->ei_leaf));
			if (!path[i+1].p_bh) {
				/* should we reset i_size? */
				err = -EIO;
				break;
			}

			/* save the actual number of indexes so we can tell
			 * at the next iteration whether it changed */
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
			i++;
		} else {
			/* we finished processing this index, go up */
			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
				/* the index is empty, remove it;
				 * the handle was already prepared by
				 * ext4_ext_rm_leaf() */
				err = ext4_ext_rm_idx(handle, inode, path + i);
			}
			/* the root level has p_bh == NULL; brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			ext_debug("return to level %d\n", i);
		}
	}

	/* TODO: flexible tree reduction should go here */
	if (path->p_hdr->eh_entries == 0) {
		/*
		 * truncation to zero freed the whole tree,
		 * so we need to correct eh_depth
		 */
		err = ext4_ext_get_access(handle, inode, path);
		if (err == 0) {
			ext_inode_hdr(inode)->eh_depth = 0;
			ext_inode_hdr(inode)->eh_max =
				cpu_to_le16(ext4_ext_space_root(inode));
			err = ext4_ext_dirty(handle, inode, path);
		}
	}
out:
	ext4_ext_tree_changed(inode);
	ext4_ext_drop_refs(path);
	kfree(path);
	ext4_journal_stop(handle);

	return err;
}

/*
 * called at mount time
 */
void ext4_ext_init(struct super_block *sb)
{
	/*
	 * possible initialization would be here
	 */

	if (test_opt(sb, EXTENTS)) {
		printk(KERN_INFO "EXT4-fs: file extents enabled");
#ifdef AGRESSIVE_TEST
		printk(", aggressive tests");
#endif
#ifdef CHECK_BINSEARCH
		printk(", check binsearch");
#endif
#ifdef EXTENTS_STATS
		printk(", stats");
#endif
		printk("\n");
#ifdef EXTENTS_STATS
		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
		EXT4_SB(sb)->s_ext_min = 1 << 30;
		EXT4_SB(sb)->s_ext_max = 0;
#endif
	}
}

/*
 * called at umount time
 */
void ext4_ext_release(struct super_block *sb)
{
	if (!test_opt(sb, EXTENTS))
		return;

#ifdef EXTENTS_STATS
	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
			sbi->s_ext_blocks, sbi->s_ext_extents,
			sbi->s_ext_blocks / sbi->s_ext_extents);
		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
	}
#endif
}

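/*
 * ext4_ext_get_blocks() maps up to @max_blocks logical blocks starting
 * at @iblock onto disk blocks, allocating new ones when @create is set.
 * It returns the number of blocks mapped on success, 0 when the block
 * lies in a hole and @create is zero, or a negative errno.  The
 * truncate_mutex serializes it against truncation.
 */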
int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, sector_t iblock,
			unsigned long max_blocks, struct buffer_head *bh_result,
			int create, int extend_disksize)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent newex, *ex;
	int goal, newblock, err = 0, depth;
	unsigned long allocated = 0;

	__clear_bit(BH_New, &bh_result->b_state);
	ext_debug("blocks %d/%lu requested for inode %u\n", (int) iblock,
			max_blocks, (unsigned) inode->i_ino);
	mutex_lock(&EXT4_I(inode)->truncate_mutex);

	/* check in cache */
	if ((goal = ext4_ext_in_cache(inode, iblock, &newex))) {
		if (goal == EXT4_EXT_CACHE_GAP) {
			if (!create) {
				/* the block isn't allocated yet and
				 * the user doesn't want to allocate it */
				goto out2;
			}
			/* we should allocate the requested block */
		} else if (goal == EXT4_EXT_CACHE_EXTENT) {
			/* the block is already allocated */
			newblock = iblock
				   - le32_to_cpu(newex.ee_block)
				   + le32_to_cpu(newex.ee_start);
			/* number of remaining blocks in the extent */
			allocated = le16_to_cpu(newex.ee_len) -
					(iblock - le32_to_cpu(newex.ee_block));
			goto out;
		} else {
			BUG();
		}
	}

	/* find the extent for this block */
	path = ext4_ext_find_extent(inode, iblock, NULL);
	if (IS_ERR(path)) {
		err = PTR_ERR(path);
		path = NULL;
		goto out2;
	}

	depth = ext_depth(inode);

	/*
	 * a consistent leaf must not be empty;
	 * this situation is possible, though, _during_ tree modification,
	 * which is why the assert can't be put in ext4_ext_find_extent()
	 */
	BUG_ON(path[depth].p_ext == NULL && depth != 0);

	if ((ex = path[depth].p_ext)) {
		unsigned long ee_block = le32_to_cpu(ex->ee_block);
		unsigned long ee_start = le32_to_cpu(ex->ee_start);
		unsigned short ee_len = le16_to_cpu(ex->ee_len);
		/* if the found extent covers the block, simply return it */
		if (iblock >= ee_block && iblock < ee_block + ee_len) {
			newblock = iblock - ee_block + ee_start;
			/* number of remaining blocks in the extent */
			allocated = ee_len - (iblock - ee_block);
			ext_debug("%d fit into %lu:%d -> %d\n", (int) iblock,
					ee_block, ee_len, newblock);
			ext4_ext_put_in_cache(inode, ee_block, ee_len,
						ee_start, EXT4_EXT_CACHE_EXTENT);
			goto out;
		}
	}

	/*
	 * the requested block isn't allocated yet;
	 * we can't create the block if the create flag is zero
	 */
	if (!create) {
		/* put the just-found gap into the cache to speed up
		 * subsequent requests */
		ext4_ext_put_gap_in_cache(inode, path, iblock);
		goto out2;
	}
	/*
	 * Okay, we need to do block allocation.  Lazily initialize the block
	 * allocation info here if necessary.
	 */
	if (S_ISREG(inode->i_mode) && (!EXT4_I(inode)->i_block_alloc_info))
		ext4_init_block_alloc_info(inode);

	/* allocate a new block */
	goal = ext4_ext_find_goal(inode, path, iblock);
	allocated = max_blocks;
	newblock = ext4_new_blocks(handle, inode, goal, &allocated, &err);
	if (!newblock)
		goto out2;
	ext_debug("allocate new block: goal %d, found %d/%lu\n",
			goal, newblock, allocated);

	/* try to insert the new extent into the found leaf and return */
	newex.ee_block = cpu_to_le32(iblock);
	newex.ee_start = cpu_to_le32(newblock);
	newex.ee_len = cpu_to_le16(allocated);
	err = ext4_ext_insert_extent(handle, inode, path, &newex);
	if (err)
		goto out2;

	if (extend_disksize && inode->i_size > EXT4_I(inode)->i_disksize)
		EXT4_I(inode)->i_disksize = inode->i_size;

	/* the previous routine could have used the block we allocated */
	newblock = le32_to_cpu(newex.ee_start);
	__set_bit(BH_New, &bh_result->b_state);

	ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
				EXT4_EXT_CACHE_EXTENT);
out:
	if (allocated > max_blocks)
		allocated = max_blocks;
	ext4_ext_show_leaf(inode, path);
	__set_bit(BH_Mapped, &bh_result->b_state);
	bh_result->b_bdev = inode->i_sb->s_bdev;
	bh_result->b_blocknr = newblock;
out2:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	mutex_unlock(&EXT4_I(inode)->truncate_mutex);

	return err ? err : allocated;
}

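/*
 * ext4_ext_truncate() frees everything past i_size: it zeroes the
 * partial final page (if one is passed in), adds the inode to the
 * orphan list for crash recovery, and then removes all extents from
 * the first block beyond the new size.
 */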
void ext4_ext_truncate(struct inode *inode, struct page *page)
{
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	unsigned long last_block;
	handle_t *handle;
	int err = 0;

	/*
	 * the first extent we are going to free is probably
	 * the last one in its block
	 */
	err = ext4_writepage_trans_blocks(inode) + 3;
	handle = ext4_journal_start(inode, err);
	if (IS_ERR(handle)) {
		if (page) {
			clear_highpage(page);
			flush_dcache_page(page);
			unlock_page(page);
			page_cache_release(page);
		}
		return;
	}

	if (page)
		ext4_block_truncate_page(handle, page, mapping, inode->i_size);

	mutex_lock(&EXT4_I(inode)->truncate_mutex);
	ext4_ext_invalidate_cache(inode);

	/*
	 * TODO: optimization is possible here;
	 * we may not need any scanning at all,
	 * because page truncation is enough
	 */
	if (ext4_orphan_add(handle, inode))
		goto out_stop;

	/* we have to know where to truncate from in case of a crash */
	EXT4_I(inode)->i_disksize = inode->i_size;
	ext4_mark_inode_dirty(handle, inode);

	last_block = (inode->i_size + sb->s_blocksize - 1)
			>> EXT4_BLOCK_SIZE_BITS(sb);
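	/*
	 * Illustrative arithmetic: with 4K blocks, i_size == 10000
	 * gives last_block == (10000 + 4095) >> 12 == 3, i.e. the
	 * first block that lies entirely beyond the new size.
	 */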
	err = ext4_ext_remove_space(inode, last_block);

	/* In a multi-transaction truncate, we only make the final
	 * transaction synchronous. */
	if (IS_SYNC(inode))
		handle->h_sync = 1;

out_stop:
	/*
	 * If this was a simple ftruncate() and the file will remain alive,
	 * then we need to clean up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	mutex_unlock(&EXT4_I(inode)->truncate_mutex);
	ext4_journal_stop(handle);
}

/*
 * this routine calculates the max number of blocks we could modify
 * while allocating a new block for an inode
 */
int ext4_ext_writepage_trans_blocks(struct inode *inode, int num)
{
	int needed;

	needed = ext4_ext_calc_credits_for_insert(inode, NULL);

	/* the caller wants to allocate num blocks; each per-insert
	 * estimate includes the sb block, which is counted only once */
	needed = needed * num - (num - 1);
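	/*
	 * Illustrative only: with num == 3 this yields
	 * 3 * needed - 2, i.e. the shared block is charged once
	 * rather than three times.
	 */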

#ifdef CONFIG_QUOTA
	needed += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
#endif

	return needed;
}

EXPORT_SYMBOL(ext4_mark_inode_dirty);
EXPORT_SYMBOL(ext4_ext_invalidate_cache);
EXPORT_SYMBOL(ext4_ext_insert_extent);
EXPORT_SYMBOL(ext4_ext_walk_space);
EXPORT_SYMBOL(ext4_ext_find_goal);
EXPORT_SYMBOL(ext4_ext_calc_credits_for_insert);