/*
 * linux/fs/ext4/indirect.c
 *
 * from
 *
 * linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * Goal-directed block allocation by Stephen Tweedie
 *	(sct@redhat.com), 1993, 1998
 */

#include <linux/aio.h>
#include "ext4_jbd2.h"
#include "truncate.h"
#include "ext4_extents.h"	/* Needed for EXT_MAX_BLOCKS */

#include <trace/events/ext4.h>

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

/**
 *	ext4_block_to_path - parse the block number into array of offsets
 *	@inode: inode in question (we are only interested in its superblock)
 *	@i_block: block number to be parsed
 *	@offsets: array to store the offsets in
 *	@boundary: set this non-zero if the referred-to block is likely to be
 *	       followed (on disk) by an indirect block.
 *
 *	To store the locations of a file's data ext4 uses a data structure
 *	common for UNIX filesystems - a tree of pointers anchored in the
 *	inode, with data blocks at leaves and indirect blocks in intermediate
 *	nodes. This function translates the block number into a path in that
 *	tree - the return value is the path length and @offsets[n] is the
 *	offset of the pointer to the (n+1)th node in the nth one. If @i_block
 *	is out of range (negative or too large) a warning is printed and zero
 *	is returned.
 *
 *	Note: the function doesn't find node addresses, so no IO is needed.
 *	All we need to know is the capacity of indirect blocks (taken from
 *	inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}

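/*
 * Worked example (illustrative only, assuming a 4KB block size, so
 * ptrs = 1024 and ptrs_bits = 10): i_block = 100000 falls into the
 * double-indirect range, since 100000 - 12 - 1024 = 98964 and
 * 98964 < 2^20. The resulting path is
 *
 *	offsets[0] = EXT4_DIND_BLOCK (13)
 *	offsets[1] = 98964 >> 10   = 96
 *	offsets[2] = 98964 & 1023  = 660
 *
 * and the function returns a depth of 3. Sanity check:
 * 12 + 1024 + 96 * 1024 + 660 = 100000.
 */
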
/**
 *	ext4_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	The function fills the array of triples <key, p, bh> and returns
 *	%NULL if everything went OK, or the pointer to the last filled triple
 *	(the incomplete one) otherwise. Upon return, chain[i].key contains
 *	the number of the (i+1)-th block in the chain (as it is stored in
 *	memory, i.e. little-endian 32-bit), chain[i].p contains the address
 *	of that number (it points into struct inode for i==0 and into
 *	bh->b_data for i>0), and chain[i].bh points to the buffer_head of
 *	the i-th indirect block for i>0 and is NULL for i==0. In other
 *	words, it holds the block numbers of the chain, the addresses they
 *	were taken from (and where we can verify that the chain did not
 *	change) and the buffer_heads hosting these numbers.
 *
 *	The function stops when it stumbles upon a zero pointer (absent
 *	block): the pointer to the last triple is returned, *@err == 0;
 *	or when it gets an IO error reading an indirect block
 *	(ditto, *@err == -EIO);
 *	or when it reads all @depth-1 indirect blocks successfully and finds
 *	the whole chain, all the way to the data (returns %NULL, *@err == 0).
 *
 *	Needs to be called with
 *	down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;
	int ret = -EIO;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_getblk(sb, le32_to_cpu(p->key));
		if (unlikely(!bh)) {
			ret = -ENOMEM;
			goto failure;
		}

		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto failure;
			}
			/* validate block references */
			if (ext4_check_indirect_blockref(inode, bh)) {
				put_bh(bh);
				goto failure;
			}
		}

		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = ret;
no_block:
	return p;
}

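/*
 * For the depth-3 path {13, 96, 660} from the example above, a fully
 * mapped lookup would leave (illustrative only; dind_bh and ind_bh are
 * hypothetical names for the two indirect buffer_heads):
 *
 *	chain[0] = { .p = &EXT4_I(inode)->i_data[13],        .bh = NULL }
 *	chain[1] = { .p = (__le32 *)dind_bh->b_data + 96,    .bh = dind_bh }
 *	chain[2] = { .p = (__le32 *)ind_bh->b_data + 660,    .bh = ind_bh }
 *
 * with each .key holding *p, and the function returns NULL. If, say,
 * the pointer at chain[1].p were zero (a hole), &chain[1] would be
 * returned with *err == 0.
 */
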
/**
 *	ext4_find_near - find a place for allocation with sufficient locality
 *	@inode: owner
 *	@ind: descriptor of indirect block.
 *
 *	This function returns the preferred place for block allocation.
 *	It is used when the heuristic for sequential allocation fails.
 *	Rules are:
 *	  + if there is a block to the left of our position - allocate near it.
 *	  + if pointer will live in indirect block - allocate near that block.
 *	  + if pointer will live in inode - allocate in the same
 *	    cylinder group.
 *
 *	In the latter case we colour the starting block by the caller's PID to
 *	prevent it from clashing with concurrent allocations for a different
 *	inode in the same block group. The PID is used here so that
 *	functionally related files will be close-by on-disk.
 *
 *	Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put
	 * it into the same cylinder group then.
	 */
	return ext4_inode_to_goal_block(inode);
}

/**
 *	ext4_find_goal - find a preferred place for allocation.
 *	@inode: owner
 *	@block: block we want
 *	@partial: pointer to the last triple within a chain
 *
 *	Normally this function finds the preferred place for block allocation
 *	and returns it.
 *	Because this is only used for non-extent files, we limit the block nr
 *	to 32 bits.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	ext4_fsblk_t goal;

	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	goal = ext4_find_near(inode, partial);
	goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
	return goal;
}

/**
 *	ext4_blks_to_allocate - look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *
 *	@branch: chain of indirect blocks
 *	@k: number of blocks needed for indirect blocks
 *	@blks: number of data blocks to be mapped.
 *	@blocks_to_boundary: the offset in the indirect block
 *
 *	return the total number of blocks to be allocated, including the
 *	direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case: the [td]indirect block(s) on the path have not been
	 * allocated yet, so it's clear that the data blocks on that path
	 * have not been allocated either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
	       le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}

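/*
 * Illustration (not from the source): with blks = 16 and
 * blocks_to_boundary = 7, a branch that still needs indirect blocks
 * (k > 0) yields count = 8 - the allocation stops at the indirect-block
 * boundary. With k == 0 the leaf indirect block already exists, so we
 * instead scan its pointers and count the contiguous zero slots,
 * again capped by @blks and the boundary.
 */
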
/**
 *	ext4_alloc_branch - allocate and set up a chain of blocks.
 *	@handle: handle for this transaction
 *	@inode: owner
 *	@indirect_blks: number of allocated indirect blocks
 *	@blks: number of allocated direct blocks
 *	@goal: preferred place for allocation
 *	@offsets: offsets (in the blocks) to store the pointers to next.
 *	@branch: place to store the chain in.
 *
 *	This function allocates blocks, zeroes out all but the last one,
 *	links them into chain and (if we are synchronous) writes them to disk.
 *	In other words, it prepares a branch that can be spliced onto the
 *	inode. It stores the information about that chain in the branch[], in
 *	the same format as ext4_get_branch() would do. We are calling it after
 *	we had read the existing part of chain and partial points to the last
 *	triple of that (one with zero ->key). Upon the exit we have the same
 *	picture as after the successful ext4_get_block(), except that in one
 *	place chain is disconnected - *branch->p is still zero (we did not
 *	set the last link), but branch->key contains the number that should
 *	be placed into *branch->p to fill that gap.
 *
 *	If allocation fails we free all blocks we've allocated (and forget
 *	their buffer_heads) and return the error value from the failed
 *	ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 *	as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, int indirect_blks,
			     int *blks, ext4_fsblk_t goal,
			     ext4_lblk_t *offsets, Indirect *branch)
{
	struct ext4_allocation_request ar;
	struct buffer_head *bh;
	ext4_fsblk_t b, new_blocks[4];
	__le32 *p;
	int i, j, err, len = 1;

	/*
	 * Set up for the direct block allocation
	 */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.len = *blks;
	ar.logical = iblock;
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;

	for (i = 0; i <= indirect_blks; i++) {
		if (i == indirect_blks) {
			ar.goal = goal;
			new_blocks[i] = ext4_mb_new_blocks(handle, &ar, &err);
		} else
			goal = new_blocks[i] = ext4_new_meta_blocks(handle,
					inode, goal, 0, NULL, &err);
		if (err) {
			i--;
			goto failed;
		}
		branch[i].key = cpu_to_le32(new_blocks[i]);
		if (i == 0)
			continue;

		bh = branch[i].bh = sb_getblk(inode->i_sb, new_blocks[i-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			unlock_buffer(bh);
			goto failed;
		}

		memset(bh->b_data, 0, bh->b_size);
		p = branch[i].p = (__le32 *) bh->b_data + offsets[i];
		b = new_blocks[i];

		if (i == indirect_blks)
			len = ar.len;
		for (j = 0; j < len; j++)
			*p++ = cpu_to_le32(b++);

		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto failed;
	}
	*blks = ar.len;
	return 0;
failed:
	for (; i >= 0; i--) {
		if (i != indirect_blks && branch[i].bh)
			ext4_forget(handle, 1, inode, branch[i].bh,
				    branch[i].bh->b_blocknr);
		ext4_free_blocks(handle, inode, NULL, new_blocks[i],
				 (i == indirect_blks) ? ar.len : 1, 0);
	}
	return err;
}

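/*
 * Illustration (assuming indirect_blks == 2, i.e. a fresh double-indirect
 * path): iterations i == 0 and i == 1 allocate the two metadata blocks,
 * and i == 2 allocates the ar.len data blocks in one go. For i >= 1 the
 * key(s) chosen at step i are written at offsets[i] inside the block
 * allocated at step i-1, so each new block points at its freshly
 * allocated child; branch[0].key is left for ext4_splice_branch() to
 * connect to the existing tree.
 */
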
/**
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @handle: handle for this transaction
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *	ext4_alloc_branch)
 * @where: location of missing link
 * @num:   number of indirect blocks we are adding
 * @blks:  number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
			      ext4_lblk_t block, Indirect *where, int num,
			      int blks)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the just-allocated
	 * direct blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */
	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode. Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size. But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 */
		ext4_mark_inode_dirty(handle, inode);
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		/*
		 * branch[i].bh is newly allocated, so there is no
		 * need to revoke the block, which is why we don't
		 * need to set EXT4_FREE_BLOCKS_METADATA.
		 */
		ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
				 EXT4_FREE_BLOCKS_FORGET);
	}
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
			 blks, 0);

	return err;
}

/*
 * The ext4_ind_map_blocks() function handles non-extent inodes
 * (i.e., using the traditional indirect/double-indirect i_blocks
 * scheme) for ext4_map_blocks().
 *
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * The ext4_ind_map_blocks() function should be called with
 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
 * blocks.
 */
int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			int flags)
{
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext4_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	int count = 0;
	ext4_fsblk_t first_block = 0;

	trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
	J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
	J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
	depth = ext4_block_to_path(inode, map->m_lblk, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* map more blocks */
		while (count < map->m_len && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
		EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
				 "non-extent mapped inodes with bigalloc");
		return -ENOSPC;
	}

	goal = ext4_find_goal(inode, map->m_lblk, partial);

	/* the number of blocks we need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext4_blks_to_allocate(partial, indirect_blks,
				      map->m_len, blocks_to_boundary);
	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
				&count, goal,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned. Can we handle this somehow? We
	 * may need to return -EAGAIN upwards in the worst case. --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, inode, map->m_lblk,
					 partial, indirect_blks, count);
	if (err)
		goto cleanup;

	map->m_flags |= EXT4_MAP_NEW;

	ext4_update_inode_fsync_trans(handle, inode, 1);
got_it:
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = le32_to_cpu(chain[depth-1].key);
	map->m_len = count;
	if (count > blocks_to_boundary)
		map->m_flags |= EXT4_MAP_BOUNDARY;
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
out:
	trace_ext4_ind_map_blocks_exit(inode, map, err);
	return err;
}

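/*
 * A minimal sketch of a read-side caller (illustrative only; this is
 * roughly what ext4_map_blocks() does for a non-extent inode when no
 * allocation is requested):
 *
 *	struct ext4_map_blocks map = {
 *		.m_lblk = 100000,
 *		.m_len = 8,
 *	};
 *	int ret;
 *
 *	down_read(&EXT4_I(inode)->i_data_sem);
 *	ret = ext4_ind_map_blocks(NULL, inode, &map, 0);
 *	up_read(&EXT4_I(inode)->i_data_sem);
 *
 * On success ret is the number of mapped blocks, map.m_pblk holds the
 * first physical block, and EXT4_MAP_MAPPED is set in map.m_flags.
 */
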
/*
 * O_DIRECT for ext3 (or indirect map) based files
 *
 * If the O_DIRECT write will extend the file then add this inode to the
 * orphan list. So recovery will truncate it back to the original size
 * if the machine crashes during the write.
 *
 * If the O_DIRECT write is instantiating holes inside i_size and the machine
 * crashes then stale disk data _may_ be exposed inside the file. But current
 * VFS code falls back into buffered path in that case so we are safe.
 */
ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
			   const struct iovec *iov, loff_t offset,
			   unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ext4_inode_info *ei = EXT4_I(inode);
	handle_t *handle;
	ssize_t ret;
	int orphan = 0;
	size_t count = iov_length(iov, nr_segs);
	int retries = 0;

	if (rw == WRITE) {
		loff_t final_size = offset + count;

		if (final_size > inode->i_size) {
			/* Credits for sb + inode write */
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
			if (IS_ERR(handle)) {
				ret = PTR_ERR(handle);
				goto out;
			}
			ret = ext4_orphan_add(handle, inode);
			if (ret) {
				ext4_journal_stop(handle);
				goto out;
			}
			orphan = 1;
			ei->i_disksize = inode->i_size;
			ext4_journal_stop(handle);
		}
	}

retry:
	if (rw == READ && ext4_should_dioread_nolock(inode)) {
		/*
		 * Nolock dioread optimization may be dynamically disabled
		 * via ext4_inode_block_unlocked_dio(). Check inode's state
		 * while holding extra i_dio_count ref.
		 */
		atomic_inc(&inode->i_dio_count);
		smp_mb();
		if (unlikely(ext4_test_inode_state(inode,
						   EXT4_STATE_DIOREAD_LOCK))) {
			inode_dio_done(inode);
			goto locked;
		}
		ret = __blockdev_direct_IO(rw, iocb, inode,
					   inode->i_sb->s_bdev, iov,
					   offset, nr_segs,
					   ext4_get_block, NULL, NULL, 0);
		inode_dio_done(inode);
	} else {
locked:
		ret = blockdev_direct_IO(rw, iocb, inode, iov,
					 offset, nr_segs, ext4_get_block);

		if (unlikely((rw & WRITE) && ret < 0)) {
			loff_t isize = i_size_read(inode);
			loff_t end = offset + iov_length(iov, nr_segs);

			if (end > isize)
				ext4_truncate_failed_write(inode);
		}
	}
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;

	if (orphan) {
		int err;

		/* Credits for sb + inode write */
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			/* This is really bad luck. We've written the data
			 * but cannot extend i_size. Bail out and pretend
			 * the write failed... */
			ret = PTR_ERR(handle);
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);

			goto out;
		}
		if (inode->i_nlink)
			ext4_orphan_del(handle, inode);
		if (ret > 0) {
			loff_t end = offset + ret;
			if (end > inode->i_size) {
				ei->i_disksize = end;
				i_size_write(inode, end);
				/*
				 * We're going to return a positive `ret'
				 * here due to non-zero-length I/O, so there's
				 * no way of reporting error returns from
				 * ext4_mark_inode_dirty() to userspace. So
				 * ignore it.
				 */
				ext4_mark_inode_dirty(handle, inode);
			}
		}
		err = ext4_journal_stop(handle);
		if (ret == 0)
			ret = err;
	}
out:
	return ret;
}

/*
 * Calculate the number of metadata blocks needed to reserve
 * to allocate a new block at @lblock for a non-extent based file
 */
int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
	int blk_bits;

	if (lblock < EXT4_NDIR_BLOCKS)
		return 0;

	lblock -= EXT4_NDIR_BLOCKS;

	if (ei->i_da_metadata_calc_len &&
	    (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
		ei->i_da_metadata_calc_len++;
		return 0;
	}
	ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
	ei->i_da_metadata_calc_len = 1;
	blk_bits = order_base_2(lblock);
	return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
}

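/*
 * Example (illustrative, 4KB blocks so EXT4_ADDR_PER_BLOCK_BITS == 10):
 * reserving for lblock = 5012 gives lblock - 12 = 5000 after removing
 * the direct blocks; order_base_2(5000) = 13, so we reserve
 * 13 / 10 + 1 = 2 metadata blocks - one indirect and one double-indirect.
 * Subsequent blocks that fall under the same indirect block hit the
 * cached i_da_metadata_calc_last_lblock and reserve nothing extra.
 */
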
/*
 * Calculate the number of indirect blocks touched by mapping @nrblocks
 * logically contiguous blocks
 */
int ext4_ind_trans_blocks(struct inode *inode, int nrblocks)
{
	/*
	 * With N contiguous data blocks, we need at most
	 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
	 * 2 dindirect blocks, and 1 tindirect block
	 */
	return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
}

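/*
 * For instance (4KB blocks, 1024 pointers per block): mapping 1024
 * contiguous blocks touches at most DIV_ROUND_UP(1024, 1024) + 4 = 5
 * indirect-tree blocks. The DIV_ROUND_UP term counts the leaf indirect
 * blocks, and the +4 covers the worst-case extra indirect block plus
 * the two dindirect and one tindirect blocks from the comment above.
 */
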
/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * Try to extend this transaction for the purposes of truncation. If the
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 *
 * Returns 0 if we managed to create more room. If we can't create more
 * room, and the transaction must be restarted we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (!ext4_handle_valid(handle))
		return 0;
	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	if (!ext4_journal_extend(handle, ext4_blocks_for_truncate(inode)))
		return 0;
	return 1;
}

/*
 * Probably it should be a library function... search for first non-zero word
 * or memcmp with zero_page, whatever is better for particular architecture.
 * Linus?
 */
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

/**
 *	ext4_find_shared - find the indirect blocks for partial truncation.
 *	@inode:	  inode in question
 *	@depth:	  depth of the affected branch
 *	@offsets: offsets of pointers in that branch (see ext4_block_to_path)
 *	@chain:	  place to store the pointers to partial indirect blocks
 *	@top:	  place to store the (detached) top of the branch
 *
 *	This is a helper function used by ext4_truncate().
 *
 *	When we do truncate() we may have to clean the ends of several
 *	indirect blocks but leave the blocks themselves alive. A block is
 *	partially truncated if some data below the new i_size is referred
 *	to from it (and it is on the path to the first completely truncated
 *	data block, indeed). We have to free the top of that path along
 *	with everything to the right of the path. Since no allocation
 *	past the truncation point is possible until ext4_truncate()
 *	finishes, we may safely do the latter, but the top of the branch may
 *	require special attention - pageout below the truncation point
 *	might try to populate it.
 *
 *	We atomically detach the top of the branch from the tree, store the
 *	block number of its root in *@top, pointers to buffer_heads of
 *	partially truncated blocks - in @chain[].bh and pointers to
 *	their last elements that should not be removed - in
 *	@chain[].p. The return value is the pointer to the last filled
 *	element of @chain.
 *
 *	The work left to the caller is the actual freeing of the subtrees:
 *	    a) free the subtree starting from *@top
 *	    b) free the subtrees whose roots are stored in
 *		(@chain[i].p+1 .. end of @chain[i].bh->b_data)
 *	    c) free the subtrees growing from the inode past the @chain[0].
 *		(no partially truncated stuff there).
 */
static Indirect *ext4_find_shared(struct inode *inode, int depth,
				  ext4_lblk_t offsets[4], Indirect chain[4],
				  __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext4_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext4.  Must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}
	/* Writer: end */

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

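/*
 * Continuing the running example (illustrative only): truncating so that
 * the first block to free has the path {13, 96, 660}, with live data
 * below it, makes this function return &chain[2] with *@top == 0 and
 * chain[2].p decremented to slot 659. The caller then frees slots
 * 660..1023 of the leaf indirect block, slots 97..1023 of the
 * double-indirect block, and the whole triple-indirect subtree, while
 * everything at or below slot 659 survives.
 */
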
/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 *
 * Return 0 on success, 1 on invalid block range
 * and < 0 on fatal error.
 */
static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
			     struct buffer_head *bh,
			     ext4_fsblk_t block_to_free,
			     unsigned long count, __le32 *first,
			     __le32 *last)
{
	__le32 *p;
	int	flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
	int	err;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		flags |= EXT4_FREE_BLOCKS_METADATA;

	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
				   count)) {
		EXT4_ERROR_INODE(inode, "attempt to clear invalid "
				 "blocks %llu len %lu",
				 (unsigned long long) block_to_free, count);
		return 1;
	}

	if (try_to_extend_transaction(handle, inode)) {
		if (bh) {
			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
			err = ext4_handle_dirty_metadata(handle, inode, bh);
			if (unlikely(err))
				goto out_err;
		}
		err = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(err))
			goto out_err;
		err = ext4_truncate_restart_trans(handle, inode,
					ext4_blocks_for_truncate(inode));
		if (unlikely(err))
			goto out_err;
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			err = ext4_journal_get_write_access(handle, bh);
			if (unlikely(err))
				goto out_err;
		}
	}

	for (p = first; p < last; p++)
		*p = 0;

	ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
	return 0;
out_err:
	ext4_std_error(inode->i_sb, err);
	return err;
}

/**
 * ext4_free_data - free a list of data blocks
 * @handle:	handle for this transaction
 * @inode:	inode we are dealing with
 * @this_bh:	indirect buffer_head which contains *@first and *@last
 * @first:	array of block numbers
 * @last:	points immediately past the end of array
 *
 * We are freeing all blocks referred from that array (numbers are stored as
 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
 *
 * We accumulate contiguous runs of blocks to free.  Conveniently, if these
 * blocks are contiguous then releasing them at one time will only affect one
 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
 * actually use a lot of journal space.
 *
 * @this_bh will be %NULL if @first and @last point into the inode's direct
 * block pointers.
 */
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
	unsigned long count = 0;	   /* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;	   /* Pointer into inode/ind
					      corresponding to
					      block_to_free */
	ext4_fsblk_t nr;		   /* Current block # */
	__le32 *p;			   /* Pointer into inode/ind
					      for current block */
	int err = 0;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				err = ext4_clear_blocks(handle, inode, this_bh,
						        block_to_free, count,
						        block_to_free_p, p);
				if (err)
					break;
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	if (!err && count > 0)
		err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
					count, block_to_free_p, p);
	if (err < 0)
		/* fatal error */
		return;

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at this
		 * point. However, if the data is corrupted and an indirect
		 * block pointed to itself, it would have been detached when
		 * the block was cleared. Check for this instead of OOPSing.
		 */
		if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
			ext4_handle_dirty_metadata(handle, inode, this_bh);
		else
			EXT4_ERROR_INODE(inode,
					 "circular indirect block detected at "
					 "block %llu",
				(unsigned long long) this_bh->b_blocknr);
	}
}

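/*
 * For example (illustrative): an array of pointers {100, 101, 102, 0,
 * 200} is released as two runs - ext4_clear_blocks(..., 100, 3, ...)
 * and ext4_clear_blocks(..., 200, 1, ...) - so two calls (and at most
 * two bitmap updates) hit the journal instead of four.
 */
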
/**
 *	ext4_free_branches - free an array of branches
 *	@handle: JBD handle for this transaction
 *	@inode:	inode we are dealing with
 *	@parent_bh: the buffer_head which contains *@first and *@last
 *	@first:	array of block numbers
 *	@last:	pointer immediately past the end of array
 *	@depth:	depth of the branches to free
 *
 *	We are freeing all blocks referred from these branches (numbers are
 *	stored as little-endian 32-bit) and updating @inode->i_blocks
 *	appropriately.
 */
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	if (ext4_handle_is_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						   nr, 1)) {
				EXT4_ERROR_INODE(inode,
						 "invalid indirect mapped "
						 "block %lu (level %d)",
						 (unsigned long) nr, depth);
				break;
			}

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				EXT4_ERROR_INODE_BLOCK(inode, nr,
						       "Read failure");
				continue;
			}

			/* This zaps the entire block.  Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					   (__le32 *) bh->b_data,
					   (__le32 *) bh->b_data + addr_per_block,
					   depth);
			brelse(bh);

			/*
			 * Everything below this pointer has been
			 * released.  Now let this top-of-subtree go.
			 *
			 * We want the freeing of this indirect block to be
			 * atomic in the journal with the updating of the
			 * bitmap block which owns it.  So make some room in
			 * the journal.
			 *
			 * We zero the parent pointer *after* freeing its
			 * pointee in the bitmaps, so if extend_transaction()
			 * for some reason fails to put the bitmap changes and
			 * the release into the same transaction, recovery
			 * will merely complain about releasing a free block,
			 * rather than leaking blocks.
			 */
			if (ext4_handle_is_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext4_mark_inode_dirty(handle, inode);
				ext4_truncate_restart_trans(handle, inode,
					    ext4_blocks_for_truncate(inode));
			}

			/*
			 * The forget flag here is critical because if
			 * we are journaling (and not doing data
			 * journaling), we have to make sure a revoke
			 * record is written to prevent the journal
			 * replay from overwriting the (former)
			 * indirect block if it gets reallocated as a
			 * data block.  This must happen in the same
			 * transaction where the data blocks are
			 * actually freed.
			 */
			ext4_free_blocks(handle, inode, NULL, nr, 1,
					 EXT4_FREE_BLOCKS_METADATA|
					 EXT4_FREE_BLOCKS_FORGET);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
								   parent_bh)) {
					*p = 0;
					BUFFER_TRACE(parent_bh,
						"call ext4_handle_dirty_metadata");
					ext4_handle_dirty_metadata(handle,
								   inode,
								   parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}

void ext4_ind_truncate(handle_t *handle, struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n = 0;
	ext4_lblk_t last_block, max_block;
	unsigned blocksize = inode->i_sb->s_blocksize;

	last_block = (inode->i_size + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);

	if (last_block != max_block) {
		n = ext4_block_to_path(inode, last_block, offsets, NULL);
		if (n == 0)
			return;
	}

	ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to propagate
	 * the new, shorter inode size (held for now in i_size) into the
	 * on-disk inode. We do this via i_disksize, which is the value which
	 * ext4 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;

	if (last_block == max_block) {
		/*
		 * It is unnecessary to free any data blocks if last_block is
		 * equal to the indirect block limit.
		 */
		return;
	} else if (n == 1) {		/* direct blocks */
		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT4_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop.  No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext4_free_branches(handle, inode, partial->bh,
					   partial->p,
					   partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32 *)partial->bh->b_data + addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
	case EXT4_IND_BLOCK:
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
	case EXT4_DIND_BLOCK:
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
	case EXT4_TIND_BLOCK:
		;
	}
}

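/*
 * Note on the switch above: the cases deliberately fall through. If
 * truncation starts in the direct blocks (the default label) the whole
 * indirect, double-indirect and triple-indirect subtrees are freed in
 * turn; starting at EXT4_IND_BLOCK skips the first of those, and so on.
 */
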
static int free_hole_blocks(handle_t *handle, struct inode *inode,
			    struct buffer_head *parent_bh, __le32 *i_data,
			    int level, ext4_lblk_t first,
			    ext4_lblk_t count, int max)
{
	struct buffer_head *bh = NULL;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ret = 0;
	int i, inc;
	ext4_lblk_t offset;
	__le32 blk;

	inc = 1 << ((EXT4_BLOCK_SIZE_BITS(inode->i_sb) - 2) * level);
	for (i = 0, offset = 0; i < max; i++, i_data++, offset += inc) {
		if (offset >= count + first)
			break;
		if (*i_data == 0 || (offset + inc) <= first)
			continue;
		blk = *i_data;
		if (level > 0) {
			ext4_lblk_t first2;
			bh = sb_bread(inode->i_sb, le32_to_cpu(blk));
			if (!bh) {
				EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk),
						       "Read failure");
				return -EIO;
			}
			first2 = (first > offset) ? first - offset : 0;
			ret = free_hole_blocks(handle, inode, bh,
					       (__le32 *)bh->b_data, level - 1,
					       first2, count - offset,
					       inode->i_sb->s_blocksize >> 2);
			if (ret) {
				brelse(bh);
				goto err;
			}
		}
		if (level == 0 ||
		    (bh && all_zeroes((__le32 *)bh->b_data,
				      (__le32 *)bh->b_data + addr_per_block))) {
			ext4_free_data(handle, inode, parent_bh, &blk, &blk+1);
			*i_data = 0;
		}
		brelse(bh);
		bh = NULL;
	}

err:
	return ret;
}

int ext4_free_hole_blocks(handle_t *handle, struct inode *inode,
			  ext4_lblk_t first, ext4_lblk_t stop)
{
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int level, ret = 0;
	int num = EXT4_NDIR_BLOCKS;
	ext4_lblk_t count, max = EXT4_NDIR_BLOCKS;
	__le32 *i_data = EXT4_I(inode)->i_data;

	count = stop - first;
	for (level = 0; level < 4; level++, max *= addr_per_block) {
		if (first < max) {
			ret = free_hole_blocks(handle, inode, NULL, i_data,
					       level, first, count, num);
			if (ret)
				goto err;
			if (count > max - first)
				count -= max - first;
			else
				break;
			first = 0;
		} else {
			first -= max;
		}
		i_data += num;
		if (level == 0) {
			num = 1;
			max = 1;
		}
	}

err:
	return ret;
}
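
/*
 * Walk-through (illustrative, 4KB blocks): punching a hole over logical
 * blocks [10, 2000) first clears direct slots 10 and 11 (level 0,
 * max = 12), then recurses through i_data[EXT4_IND_BLOCK] for blocks
 * 12..1035 (level 1, max = 1024) and into i_data[EXT4_DIND_BLOCK] for
 * the remainder (level 2); indirect blocks that end up entirely zero
 * are freed and their parent pointers cleared by free_hole_blocks().
 */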