/*
 * linux/fs/hfsplus/extents.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of Extents both in catalog and extents overflow trees
 */
10 | ||
11 | #include <linux/errno.h> | |
12 | #include <linux/fs.h> | |
13 | #include <linux/pagemap.h> | |
1da177e4 LT |
14 | |
15 | #include "hfsplus_fs.h" | |
16 | #include "hfsplus_raw.h" | |
17 | ||
18 | /* Compare two extents keys, returns 0 on same, pos/neg for difference */ | |
19 | int hfsplus_ext_cmp_key(hfsplus_btree_key *k1, hfsplus_btree_key *k2) | |
20 | { | |
21 | __be32 k1id, k2id; | |
22 | __be32 k1s, k2s; | |
23 | ||
24 | k1id = k1->ext.cnid; | |
25 | k2id = k2->ext.cnid; | |
26 | if (k1id != k2id) | |
27 | return be32_to_cpu(k1id) < be32_to_cpu(k2id) ? -1 : 1; | |
28 | ||
29 | if (k1->ext.fork_type != k2->ext.fork_type) | |
30 | return k1->ext.fork_type < k2->ext.fork_type ? -1 : 1; | |
31 | ||
32 | k1s = k1->ext.start_block; | |
33 | k2s = k2->ext.start_block; | |
34 | if (k1s == k2s) | |
35 | return 0; | |
36 | return be32_to_cpu(k1s) < be32_to_cpu(k2s) ? -1 : 1; | |
37 | } | |
38 | ||
39 | static void hfsplus_ext_build_key(hfsplus_btree_key *key, u32 cnid, | |
40 | u32 block, u8 type) | |
41 | { | |
42 | key->key_len = cpu_to_be16(HFSPLUS_EXT_KEYLEN - 2); | |
43 | key->ext.cnid = cpu_to_be32(cnid); | |
44 | key->ext.start_block = cpu_to_be32(block); | |
45 | key->ext.fork_type = type; | |
46 | key->ext.pad = 0; | |
47 | } | |
48 | ||
49 | static u32 hfsplus_ext_find_block(struct hfsplus_extent *ext, u32 off) | |
50 | { | |
51 | int i; | |
52 | u32 count; | |
53 | ||
54 | for (i = 0; i < 8; ext++, i++) { | |
55 | count = be32_to_cpu(ext->block_count); | |
56 | if (off < count) | |
57 | return be32_to_cpu(ext->start_block) + off; | |
58 | off -= count; | |
59 | } | |
60 | /* panic? */ | |
61 | return 0; | |
62 | } | |
63 | ||
64 | static int hfsplus_ext_block_count(struct hfsplus_extent *ext) | |
65 | { | |
66 | int i; | |
67 | u32 count = 0; | |
68 | ||
69 | for (i = 0; i < 8; ext++, i++) | |
70 | count += be32_to_cpu(ext->block_count); | |
71 | return count; | |
72 | } | |
73 | ||
74 | static u32 hfsplus_ext_lastblock(struct hfsplus_extent *ext) | |
75 | { | |
76 | int i; | |
77 | ||
78 | ext += 7; | |
79 | for (i = 0; i < 7; ext--, i++) | |
80 | if (ext->block_count) | |
81 | break; | |
82 | return be32_to_cpu(ext->start_block) + be32_to_cpu(ext->block_count); | |
83 | } | |
84 | ||
/*
 * Write the inode's cached extent record back to the extents tree.
 * Caller must supply an initialized search descriptor @fd for the tree.
 * Errors from the btree lookup are silently swallowed (the dirty flag
 * is then left set).
 */
static void __hfsplus_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
{
	int res;

	hfsplus_ext_build_key(fd->search_key, inode->i_ino, HFSPLUS_I(inode).cached_start,
			      HFSPLUS_IS_RSRC(inode) ? HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);
	res = hfs_brec_find(fd);
	if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_NEW) {
		/* A brand-new record must not already exist in the tree. */
		if (res != -ENOENT)
			return;
		hfs_brec_insert(fd, HFSPLUS_I(inode).cached_extents, sizeof(hfsplus_extent_rec));
		HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
	} else {
		/* Updating an existing record: the lookup must succeed. */
		if (res)
			return;
		hfs_bnode_write(fd->bnode, HFSPLUS_I(inode).cached_extents, fd->entryoffset, fd->entrylength);
		HFSPLUS_I(inode).flags &= ~HFSPLUS_FLG_EXT_DIRTY;
	}
}
104 | ||
105 | void hfsplus_ext_write_extent(struct inode *inode) | |
106 | { | |
107 | if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_DIRTY) { | |
108 | struct hfs_find_data fd; | |
109 | ||
110 | hfs_find_init(HFSPLUS_SB(inode->i_sb).ext_tree, &fd); | |
111 | __hfsplus_ext_write_extent(inode, &fd); | |
112 | hfs_find_exit(&fd); | |
113 | } | |
114 | } | |
115 | ||
/*
 * Look up the extent record covering @block of fork (@cnid, @type) and
 * copy it into @extent.  Returns 0 on success, -ENOENT when no matching
 * record exists, or -EIO for a malformed entry.
 */
static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
					    struct hfsplus_extent *extent,
					    u32 cnid, u32 block, u8 type)
{
	int res;

	hfsplus_ext_build_key(fd->search_key, cnid, block, type);
	/* Clear the found-key cnid so a failed search can't match stale data. */
	fd->key->ext.cnid = 0;
	res = hfs_brec_find(fd);
	if (res && res != -ENOENT)
		return res;
	/*
	 * hfs_brec_find may land on the preceding record; make sure it still
	 * belongs to the same file and the same fork.
	 */
	if (fd->key->ext.cnid != fd->search_key->ext.cnid ||
	    fd->key->ext.fork_type != fd->search_key->ext.fork_type)
		return -ENOENT;
	if (fd->entrylength != sizeof(hfsplus_extent_rec))
		return -EIO;
	hfs_bnode_read(fd->bnode, extent, fd->entryoffset, sizeof(hfsplus_extent_rec));
	return 0;
}
135 | ||
/*
 * Load the extent record covering @block into the inode's cache,
 * writing back the currently cached record first if it is dirty.
 * On failure the cache is left empty and marked clean.
 */
static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd, struct inode *inode, u32 block)
{
	int res;

	if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_DIRTY)
		__hfsplus_ext_write_extent(inode, fd);

	res = __hfsplus_ext_read_extent(fd, HFSPLUS_I(inode).cached_extents, inode->i_ino,
					block, HFSPLUS_IS_RSRC(inode) ? HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);
	if (!res) {
		/* Cache window: [cached_start, cached_start + cached_blocks) */
		HFSPLUS_I(inode).cached_start = be32_to_cpu(fd->key->ext.start_block);
		HFSPLUS_I(inode).cached_blocks = hfsplus_ext_block_count(HFSPLUS_I(inode).cached_extents);
	} else {
		HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).cached_blocks = 0;
		HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
	}
	return res;
}
154 | ||
155 | static int hfsplus_ext_read_extent(struct inode *inode, u32 block) | |
156 | { | |
157 | struct hfs_find_data fd; | |
158 | int res; | |
159 | ||
160 | if (block >= HFSPLUS_I(inode).cached_start && | |
161 | block < HFSPLUS_I(inode).cached_start + HFSPLUS_I(inode).cached_blocks) | |
162 | return 0; | |
163 | ||
164 | hfs_find_init(HFSPLUS_SB(inode->i_sb).ext_tree, &fd); | |
165 | res = __hfsplus_ext_cache_extent(&fd, inode, block); | |
166 | hfs_find_exit(&fd); | |
167 | return res; | |
168 | } | |
169 | ||
170 | /* Get a block at iblock for inode, possibly allocating if create */ | |
171 | int hfsplus_get_block(struct inode *inode, sector_t iblock, | |
172 | struct buffer_head *bh_result, int create) | |
173 | { | |
174 | struct super_block *sb; | |
175 | int res = -EIO; | |
176 | u32 ablock, dblock, mask; | |
177 | int shift; | |
178 | ||
179 | sb = inode->i_sb; | |
180 | ||
181 | /* Convert inode block to disk allocation block */ | |
182 | shift = HFSPLUS_SB(sb).alloc_blksz_shift - sb->s_blocksize_bits; | |
183 | ablock = iblock >> HFSPLUS_SB(sb).fs_shift; | |
184 | ||
185 | if (iblock >= HFSPLUS_I(inode).fs_blocks) { | |
186 | if (iblock > HFSPLUS_I(inode).fs_blocks || !create) | |
187 | return -EIO; | |
188 | if (ablock >= HFSPLUS_I(inode).alloc_blocks) { | |
189 | res = hfsplus_file_extend(inode); | |
190 | if (res) | |
191 | return res; | |
192 | } | |
193 | } else | |
194 | create = 0; | |
195 | ||
196 | if (ablock < HFSPLUS_I(inode).first_blocks) { | |
197 | dblock = hfsplus_ext_find_block(HFSPLUS_I(inode).first_extents, ablock); | |
198 | goto done; | |
199 | } | |
200 | ||
201 | down(&HFSPLUS_I(inode).extents_lock); | |
202 | res = hfsplus_ext_read_extent(inode, ablock); | |
203 | if (!res) { | |
204 | dblock = hfsplus_ext_find_block(HFSPLUS_I(inode).cached_extents, ablock - | |
205 | HFSPLUS_I(inode).cached_start); | |
206 | } else { | |
207 | up(&HFSPLUS_I(inode).extents_lock); | |
208 | return -EIO; | |
209 | } | |
210 | up(&HFSPLUS_I(inode).extents_lock); | |
211 | ||
212 | done: | |
213 | dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n", inode->i_ino, (long long)iblock, dblock); | |
214 | mask = (1 << HFSPLUS_SB(sb).fs_shift) - 1; | |
215 | map_bh(bh_result, sb, (dblock << HFSPLUS_SB(sb).fs_shift) + HFSPLUS_SB(sb).blockoffset + (iblock & mask)); | |
216 | if (create) { | |
217 | set_buffer_new(bh_result); | |
218 | HFSPLUS_I(inode).phys_size += sb->s_blocksize; | |
219 | HFSPLUS_I(inode).fs_blocks++; | |
220 | inode_add_bytes(inode, sb->s_blocksize); | |
221 | mark_inode_dirty(inode); | |
222 | } | |
223 | return 0; | |
224 | } | |
225 | ||
226 | static void hfsplus_dump_extent(struct hfsplus_extent *extent) | |
227 | { | |
228 | int i; | |
229 | ||
230 | dprint(DBG_EXTENT, " "); | |
231 | for (i = 0; i < 8; i++) | |
232 | dprint(DBG_EXTENT, " %u:%u", be32_to_cpu(extent[i].start_block), | |
233 | be32_to_cpu(extent[i].block_count)); | |
234 | dprint(DBG_EXTENT, "\n"); | |
235 | } | |
236 | ||
/*
 * Append @block_count blocks starting at @alloc_block to extent record
 * @extent.  @offset is the record-relative block offset of the append
 * point, which must be exactly the current end of the record.
 * Returns 0 on success, -ENOSPC when all eight slots are in use, or
 * -EIO if @offset does not land on the record's end.
 */
static int hfsplus_add_extent(struct hfsplus_extent *extent, u32 offset,
			      u32 alloc_block, u32 block_count)
{
	u32 count, start;
	int i;

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count) {
			/* Append point is the end of this slot's run. */
			start = be32_to_cpu(extent->start_block);
			if (alloc_block != start + count) {
				/* Not contiguous: open a new slot, if one is left. */
				if (++i >= 8)
					return -ENOSPC;
				extent++;
				extent->start_block = cpu_to_be32(alloc_block);
			} else
				/* Contiguous: just grow the current slot. */
				block_count += count;
			extent->block_count = cpu_to_be32(block_count);
			return 0;
		} else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
}
264 | ||
/*
 * Free @block_nr allocation blocks from the tail of extent record
 * @extent.  @offset is the record-relative offset of the end of the
 * used area; slots are released walking backwards from there.
 * Returns 0 on success or -EIO if @offset does not match a slot end.
 */
static int hfsplus_free_extents(struct super_block *sb,
				struct hfsplus_extent *extent,
				u32 offset, u32 block_nr)
{
	u32 count, start;
	int i;

	hfsplus_dump_extent(extent);
	/* Find the slot whose run ends exactly at @offset. */
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count)
			goto found;
		else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
found:
	/* Walk backwards, releasing blocks until @block_nr is used up. */
	for (;;) {
		start = be32_to_cpu(extent->start_block);
		if (count <= block_nr) {
			/* Free the whole slot and clear it. */
			hfsplus_block_free(sb, start, count);
			extent->block_count = 0;
			extent->start_block = 0;
			block_nr -= count;
		} else {
			/* Free only the tail of this slot. */
			count -= block_nr;
			hfsplus_block_free(sb, start + count, block_nr);
			extent->block_count = cpu_to_be32(count);
			block_nr = 0;
		}
		if (!block_nr || !i)
			return 0;
		i--;
		extent--;
		count = be32_to_cpu(extent->block_count);
	}
}
304 | ||
/*
 * Free every allocation block owned by fork @fork of file @cnid:
 * first the blocks described by the extents embedded in the catalog
 * record, then any overflow records in the extents tree, which are
 * removed from the tree as they are processed.
 */
int hfsplus_free_fork(struct super_block *sb, u32 cnid, struct hfsplus_fork_raw *fork, int type)
{
	struct hfs_find_data fd;
	hfsplus_extent_rec ext_entry;
	u32 total_blocks, blocks, start;
	int res, i;

	total_blocks = be32_to_cpu(fork->total_blocks);
	if (!total_blocks)
		return 0;

	/* Blocks covered by the first (in-catalog) extent record. */
	blocks = 0;
	for (i = 0; i < 8; i++)
		blocks += be32_to_cpu(fork->extents[i].block_count);

	res = hfsplus_free_extents(sb, fork->extents, blocks, blocks);
	if (res)
		return res;
	if (total_blocks == blocks)
		return 0;

	/* Walk the overflow records from the end of the fork backwards. */
	hfs_find_init(HFSPLUS_SB(sb).ext_tree, &fd);
	do {
		res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid,
						total_blocks, type);
		if (res)
			break;
		start = be32_to_cpu(fd.key->ext.start_block);
		hfsplus_free_extents(sb, ext_entry,
				     total_blocks - start,
				     total_blocks);
		hfs_brec_remove(&fd);
		total_blocks = start;
	} while (total_blocks > blocks);
	hfs_find_exit(&fd);

	return res;
}
343 | ||
/*
 * Allocate more blocks for @inode, preferring to grow the current last
 * extent.  On success alloc_blocks is bumped and the inode marked dirty.
 * Returns 0 or a negative error (-ENOSPC when the volume is full or the
 * allocation bitmap file would need extending, which is unimplemented).
 */
int hfsplus_file_extend(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	u32 start, len, goal;
	int res;

	/* Bail out if the allocation bitmap file cannot describe more blocks. */
	if (HFSPLUS_SB(sb).alloc_file->i_size * 8 < HFSPLUS_SB(sb).total_blocks - HFSPLUS_SB(sb).free_blocks + 8) {
		// extend alloc file - not implemented, so fail the allocation
		printk("extend alloc file! (%Lu,%u,%u)\n", HFSPLUS_SB(sb).alloc_file->i_size * 8,
			HFSPLUS_SB(sb).total_blocks, HFSPLUS_SB(sb).free_blocks);
		return -ENOSPC;
		//BUG();
	}

	down(&HFSPLUS_I(inode).extents_lock);
	/* Aim the allocator just past the fork's current last block. */
	if (HFSPLUS_I(inode).alloc_blocks == HFSPLUS_I(inode).first_blocks)
		goal = hfsplus_ext_lastblock(HFSPLUS_I(inode).first_extents);
	else {
		res = hfsplus_ext_read_extent(inode, HFSPLUS_I(inode).alloc_blocks);
		if (res)
			goto out;
		goal = hfsplus_ext_lastblock(HFSPLUS_I(inode).cached_extents);
	}

	len = HFSPLUS_I(inode).clump_blocks;
	start = hfsplus_block_allocate(sb, HFSPLUS_SB(sb).total_blocks, goal, &len);
	if (start >= HFSPLUS_SB(sb).total_blocks) {
		/* Nothing free past the goal; retry from the volume start. */
		start = hfsplus_block_allocate(sb, goal, 0, &len);
		if (start >= goal) {
			res = -ENOSPC;
			goto out;
		}
	}

	dprint(DBG_EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);
	if (HFSPLUS_I(inode).alloc_blocks <= HFSPLUS_I(inode).first_blocks) {
		if (!HFSPLUS_I(inode).first_blocks) {
			dprint(DBG_EXTENT, "first extents\n");
			/* no extents yet */
			HFSPLUS_I(inode).first_extents[0].start_block = cpu_to_be32(start);
			HFSPLUS_I(inode).first_extents[0].block_count = cpu_to_be32(len);
			res = 0;
		} else {
			/* try to append to extents in inode */
			res = hfsplus_add_extent(HFSPLUS_I(inode).first_extents,
						 HFSPLUS_I(inode).alloc_blocks,
						 start, len);
			if (res == -ENOSPC)
				goto insert_extent;
		}
		if (!res) {
			hfsplus_dump_extent(HFSPLUS_I(inode).first_extents);
			HFSPLUS_I(inode).first_blocks += len;
		}
	} else {
		/* Append to the cached overflow record. */
		res = hfsplus_add_extent(HFSPLUS_I(inode).cached_extents,
					 HFSPLUS_I(inode).alloc_blocks -
					 HFSPLUS_I(inode).cached_start,
					 start, len);
		if (!res) {
			hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents);
			HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY;
			HFSPLUS_I(inode).cached_blocks += len;
		} else if (res == -ENOSPC)
			goto insert_extent;
	}
out:
	up(&HFSPLUS_I(inode).extents_lock);
	if (!res) {
		HFSPLUS_I(inode).alloc_blocks += len;
		mark_inode_dirty(inode);
	}
	return res;

insert_extent:
	/*
	 * The current record is full: flush it, then start a fresh cached
	 * record containing only the new allocation.
	 */
	dprint(DBG_EXTENT, "insert new extent\n");
	hfsplus_ext_write_extent(inode);

	memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
	HFSPLUS_I(inode).cached_extents[0].start_block = cpu_to_be32(start);
	HFSPLUS_I(inode).cached_extents[0].block_count = cpu_to_be32(len);
	hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents);
	HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW;
	HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).alloc_blocks;
	HFSPLUS_I(inode).cached_blocks = len;

	res = 0;
	goto out;
}
433 | ||
/*
 * Bring the fork's on-disk allocation in line with inode->i_size:
 * grow by forcing block allocation through the page cache, or shrink
 * by freeing extent records from the end of the fork backwards.
 */
void hfsplus_file_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct hfs_find_data fd;
	u32 alloc_cnt, blk_cnt, start;
	int res;

	dprint(DBG_INODE, "truncate: %lu, %Lu -> %Lu\n", inode->i_ino,
	       (long long)HFSPLUS_I(inode).phys_size, inode->i_size);
	if (inode->i_size > HFSPLUS_I(inode).phys_size) {
		/*
		 * Growing: run prepare_write/commit_write on the page at the
		 * new EOF so the backing blocks get allocated.
		 */
		struct address_space *mapping = inode->i_mapping;
		struct page *page;
		u32 size = inode->i_size - 1;
		int res;

		page = grab_cache_page(mapping, size >> PAGE_CACHE_SHIFT);
		if (!page)
			return;
		size &= PAGE_CACHE_SIZE - 1;
		size++;
		res = mapping->a_ops->prepare_write(NULL, page, size, size);
		if (!res)
			res = mapping->a_ops->commit_write(NULL, page, size, size);
		if (res)
			/* allocation failed: fall back to the old size */
			inode->i_size = HFSPLUS_I(inode).phys_size;
		unlock_page(page);
		page_cache_release(page);
		mark_inode_dirty(inode);
		return;
	} else if (inode->i_size == HFSPLUS_I(inode).phys_size)
		return;

	/* Shrinking: round the new size up to whole allocation blocks. */
	blk_cnt = (inode->i_size + HFSPLUS_SB(sb).alloc_blksz - 1) >> HFSPLUS_SB(sb).alloc_blksz_shift;
	alloc_cnt = HFSPLUS_I(inode).alloc_blocks;
	if (blk_cnt == alloc_cnt)
		goto out;

	down(&HFSPLUS_I(inode).extents_lock);
	hfs_find_init(HFSPLUS_SB(sb).ext_tree, &fd);
	/* Free records from the fork's end until blk_cnt blocks remain. */
	while (1) {
		if (alloc_cnt == HFSPLUS_I(inode).first_blocks) {
			hfsplus_free_extents(sb, HFSPLUS_I(inode).first_extents,
					     alloc_cnt, alloc_cnt - blk_cnt);
			hfsplus_dump_extent(HFSPLUS_I(inode).first_extents);
			HFSPLUS_I(inode).first_blocks = blk_cnt;
			break;
		}
		res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
		if (res)
			break;
		start = HFSPLUS_I(inode).cached_start;
		hfsplus_free_extents(sb, HFSPLUS_I(inode).cached_extents,
				     alloc_cnt - start, alloc_cnt - blk_cnt);
		hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents);
		if (blk_cnt > start) {
			/* Record still partly in use: keep it, mark dirty. */
			HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY;
			break;
		}
		/* Record fully freed: drop it from the tree and continue. */
		alloc_cnt = start;
		HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).cached_blocks = 0;
		HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
		hfs_brec_remove(&fd);
	}
	hfs_find_exit(&fd);
	up(&HFSPLUS_I(inode).extents_lock);

	HFSPLUS_I(inode).alloc_blocks = blk_cnt;
out:
	HFSPLUS_I(inode).phys_size = inode->i_size;
	HFSPLUS_I(inode).fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, HFSPLUS_I(inode).fs_blocks << sb->s_blocksize_bits);
	mark_inode_dirty(inode);
}