1 | /* | |
2 | * Copyright (C) 2007 Oracle. All rights reserved. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or | |
5 | * modify it under the terms of the GNU General Public | |
6 | * License v2 as published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, | |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
11 | * General Public License for more details. | |
12 | * | |
13 | * You should have received a copy of the GNU General Public | |
14 | * License along with this program; if not, write to the | |
15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | |
16 | * Boston, MA 02111-1307, USA. | |
17 | */ | |
18 | ||
19 | #include <linux/kernel.h> | |
20 | #include <linux/bio.h> | |
21 | #include <linux/buffer_head.h> | |
22 | #include <linux/file.h> | |
23 | #include <linux/fs.h> | |
24 | #include <linux/pagemap.h> | |
25 | #include <linux/highmem.h> | |
26 | #include <linux/time.h> | |
27 | #include <linux/init.h> | |
28 | #include <linux/string.h> | |
29 | #include <linux/backing-dev.h> | |
30 | #include <linux/mpage.h> | |
31 | #include <linux/swap.h> | |
32 | #include <linux/writeback.h> | |
33 | #include <linux/statfs.h> | |
34 | #include <linux/compat.h> | |
35 | #include <linux/bit_spinlock.h> | |
36 | #include <linux/xattr.h> | |
37 | #include <linux/posix_acl.h> | |
38 | #include <linux/falloc.h> | |
39 | #include <linux/slab.h> | |
40 | #include <linux/ratelimit.h> | |
41 | #include <linux/mount.h> | |
42 | #include "compat.h" | |
43 | #include "ctree.h" | |
44 | #include "disk-io.h" | |
45 | #include "transaction.h" | |
46 | #include "btrfs_inode.h" | |
47 | #include "ioctl.h" | |
48 | #include "print-tree.h" | |
49 | #include "ordered-data.h" | |
50 | #include "xattr.h" | |
51 | #include "tree-log.h" | |
52 | #include "volumes.h" | |
53 | #include "compression.h" | |
54 | #include "locking.h" | |
55 | #include "free-space-cache.h" | |
56 | #include "inode-map.h" | |
57 | ||
58 | struct btrfs_iget_args { | |
59 | u64 ino; | |
60 | struct btrfs_root *root; | |
61 | }; | |
62 | ||
63 | static const struct inode_operations btrfs_dir_inode_operations; | |
64 | static const struct inode_operations btrfs_symlink_inode_operations; | |
65 | static const struct inode_operations btrfs_dir_ro_inode_operations; | |
66 | static const struct inode_operations btrfs_special_inode_operations; | |
67 | static const struct inode_operations btrfs_file_inode_operations; | |
68 | static const struct address_space_operations btrfs_aops; | |
69 | static const struct address_space_operations btrfs_symlink_aops; | |
70 | static const struct file_operations btrfs_dir_file_operations; | |
71 | static struct extent_io_ops btrfs_extent_io_ops; | |
72 | ||
73 | static struct kmem_cache *btrfs_inode_cachep; | |
74 | static struct kmem_cache *btrfs_delalloc_work_cachep; | |
75 | struct kmem_cache *btrfs_trans_handle_cachep; | |
76 | struct kmem_cache *btrfs_transaction_cachep; | |
77 | struct kmem_cache *btrfs_path_cachep; | |
78 | struct kmem_cache *btrfs_free_space_cachep; | |
79 | ||
80 | #define S_SHIFT 12 | |
81 | static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = { | |
82 | [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE, | |
83 | [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR, | |
84 | [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV, | |
85 | [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV, | |
86 | [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO, | |
87 | [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK, | |
88 | [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK, | |
89 | }; | |
90 | ||
91 | static int btrfs_setsize(struct inode *inode, loff_t newsize); | |
92 | static int btrfs_truncate(struct inode *inode); | |
93 | static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent); | |
94 | static noinline int cow_file_range(struct inode *inode, | |
95 | struct page *locked_page, | |
96 | u64 start, u64 end, int *page_started, | |
97 | unsigned long *nr_written, int unlock); | |
98 | static struct extent_map *create_pinned_em(struct inode *inode, u64 start, | |
99 | u64 len, u64 orig_start, | |
100 | u64 block_start, u64 block_len, | |
101 | u64 orig_block_len, int type); | |
102 | ||
103 | static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, | |
104 | struct inode *inode, struct inode *dir, | |
105 | const struct qstr *qstr) | |
106 | { | |
107 | int err; | |
108 | ||
109 | err = btrfs_init_acl(trans, inode, dir); | |
110 | if (!err) | |
111 | err = btrfs_xattr_security_init(trans, inode, dir, qstr); | |
112 | return err; | |
113 | } | |
114 | ||
115 | /* | |
116 | * this does all the hard work for inserting an inline extent into | |
117 | * the btree. The caller should have done a btrfs_drop_extents so that | |
118 | * no overlapping inline items exist in the btree | |
119 | */ | |
120 | static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, | |
121 | struct btrfs_root *root, struct inode *inode, | |
122 | u64 start, size_t size, size_t compressed_size, | |
123 | int compress_type, | |
124 | struct page **compressed_pages) | |
125 | { | |
126 | struct btrfs_key key; | |
127 | struct btrfs_path *path; | |
128 | struct extent_buffer *leaf; | |
129 | struct page *page = NULL; | |
130 | char *kaddr; | |
131 | unsigned long ptr; | |
132 | struct btrfs_file_extent_item *ei; | |
133 | int err = 0; | |
134 | int ret; | |
135 | size_t cur_size = size; | |
136 | size_t datasize; | |
137 | unsigned long offset; | |
138 | ||
139 | if (compressed_size && compressed_pages) | |
140 | cur_size = compressed_size; | |
141 | ||
142 | path = btrfs_alloc_path(); | |
143 | if (!path) | |
144 | return -ENOMEM; | |
145 | ||
146 | path->leave_spinning = 1; | |
147 | ||
148 | key.objectid = btrfs_ino(inode); | |
149 | key.offset = start; | |
150 | btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); | |
151 | datasize = btrfs_file_extent_calc_inline_size(cur_size); | |
152 | ||
153 | inode_add_bytes(inode, size); | |
154 | ret = btrfs_insert_empty_item(trans, root, path, &key, | |
155 | datasize); | |
156 | if (ret) { | |
157 | err = ret; | |
158 | goto fail; | |
159 | } | |
160 | leaf = path->nodes[0]; | |
161 | ei = btrfs_item_ptr(leaf, path->slots[0], | |
162 | struct btrfs_file_extent_item); | |
163 | btrfs_set_file_extent_generation(leaf, ei, trans->transid); | |
164 | btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE); | |
165 | btrfs_set_file_extent_encryption(leaf, ei, 0); | |
166 | btrfs_set_file_extent_other_encoding(leaf, ei, 0); | |
167 | btrfs_set_file_extent_ram_bytes(leaf, ei, size); | |
168 | ptr = btrfs_file_extent_inline_start(ei); | |
169 | ||
170 | if (compress_type != BTRFS_COMPRESS_NONE) { | |
171 | struct page *cpage; | |
172 | int i = 0; | |
173 | while (compressed_size > 0) { | |
174 | cpage = compressed_pages[i]; | |
175 | cur_size = min_t(unsigned long, compressed_size, | |
176 | PAGE_CACHE_SIZE); | |
177 | ||
178 | kaddr = kmap_atomic(cpage); | |
179 | write_extent_buffer(leaf, kaddr, ptr, cur_size); | |
180 | kunmap_atomic(kaddr); | |
181 | ||
182 | i++; | |
183 | ptr += cur_size; | |
184 | compressed_size -= cur_size; | |
185 | } | |
186 | btrfs_set_file_extent_compression(leaf, ei, | |
187 | compress_type); | |
188 | } else { | |
189 | page = find_get_page(inode->i_mapping, | |
190 | start >> PAGE_CACHE_SHIFT); | |
191 | btrfs_set_file_extent_compression(leaf, ei, 0); | |
192 | kaddr = kmap_atomic(page); | |
193 | offset = start & (PAGE_CACHE_SIZE - 1); | |
194 | write_extent_buffer(leaf, kaddr + offset, ptr, size); | |
195 | kunmap_atomic(kaddr); | |
196 | page_cache_release(page); | |
197 | } | |
198 | btrfs_mark_buffer_dirty(leaf); | |
199 | btrfs_free_path(path); | |
200 | ||
201 | /* | |
202 | * we're an inline extent, so nobody can | |
203 | * extend the file past i_size without locking | |
204 | * a page we already have locked. | |
205 | * | |
206 | * We must do any isize and inode updates | |
207 | * before we unlock the pages. Otherwise we | |
208 | * could end up racing with unlink. | |
209 | */ | |
210 | BTRFS_I(inode)->disk_i_size = inode->i_size; | |
211 | ret = btrfs_update_inode(trans, root, inode); | |
212 | ||
213 | return ret; | |
214 | fail: | |
215 | btrfs_free_path(path); | |
216 | return err; | |
217 | } | |
218 | ||
219 | ||
220 | /* | |
221 | * conditionally insert an inline extent into the file. This | |
222 | * does the checks required to make sure the data is small enough | |
223 | * to fit as an inline extent. | |
224 | */ | |
225 | static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans, | |
226 | struct btrfs_root *root, | |
227 | struct inode *inode, u64 start, u64 end, | |
228 | size_t compressed_size, int compress_type, | |
229 | struct page **compressed_pages) | |
230 | { | |
231 | u64 isize = i_size_read(inode); | |
232 | u64 actual_end = min(end + 1, isize); | |
233 | u64 inline_len = actual_end - start; | |
234 | u64 aligned_end = (end + root->sectorsize - 1) & | |
235 | ~((u64)root->sectorsize - 1); | |
236 | u64 data_len = inline_len; | |
237 | int ret; | |
238 | ||
239 | if (compressed_size) | |
240 | data_len = compressed_size; | |
241 | ||
242 | if (start > 0 || | |
243 | actual_end >= PAGE_CACHE_SIZE || | |
244 | data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) || | |
245 | (!compressed_size && | |
246 | (actual_end & (root->sectorsize - 1)) == 0) || | |
247 | end + 1 < isize || | |
248 | data_len > root->fs_info->max_inline) { | |
249 | return 1; | |
250 | } | |
251 | ||
252 | ret = btrfs_drop_extents(trans, root, inode, start, aligned_end, 1); | |
253 | if (ret) | |
254 | return ret; | |
255 | ||
256 | if (isize > actual_end) | |
257 | inline_len = min_t(u64, isize, actual_end); | |
258 | ret = insert_inline_extent(trans, root, inode, start, | |
259 | inline_len, compressed_size, | |
260 | compress_type, compressed_pages); | |
261 | if (ret && ret != -ENOSPC) { | |
262 | btrfs_abort_transaction(trans, root, ret); | |
263 | return ret; | |
264 | } else if (ret == -ENOSPC) { | |
265 | return 1; | |
266 | } | |
267 | ||
268 | btrfs_delalloc_release_metadata(inode, end + 1 - start); | |
269 | btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0); | |
270 | return 0; | |
271 | } | |
272 | ||
273 | struct async_extent { | |
274 | u64 start; | |
275 | u64 ram_size; | |
276 | u64 compressed_size; | |
277 | struct page **pages; | |
278 | unsigned long nr_pages; | |
279 | int compress_type; | |
280 | struct list_head list; | |
281 | }; | |
282 | ||
283 | struct async_cow { | |
284 | struct inode *inode; | |
285 | struct btrfs_root *root; | |
286 | struct page *locked_page; | |
287 | u64 start; | |
288 | u64 end; | |
289 | struct list_head extents; | |
290 | struct btrfs_work work; | |
291 | }; | |
292 | ||
293 | static noinline int add_async_extent(struct async_cow *cow, | |
294 | u64 start, u64 ram_size, | |
295 | u64 compressed_size, | |
296 | struct page **pages, | |
297 | unsigned long nr_pages, | |
298 | int compress_type) | |
299 | { | |
300 | struct async_extent *async_extent; | |
301 | ||
302 | async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS); | |
303 | BUG_ON(!async_extent); /* -ENOMEM */ | |
304 | async_extent->start = start; | |
305 | async_extent->ram_size = ram_size; | |
306 | async_extent->compressed_size = compressed_size; | |
307 | async_extent->pages = pages; | |
308 | async_extent->nr_pages = nr_pages; | |
309 | async_extent->compress_type = compress_type; | |
310 | list_add_tail(&async_extent->list, &cow->extents); | |
311 | return 0; | |
312 | } | |
313 | ||
314 | /* | |
315 | * we create compressed extents in two phases. The first | |
316 | * phase compresses a range of pages that have already been | |
317 | * locked (both pages and state bits are locked). | |
318 | * | |
319 | * This is done inside an ordered work queue, and the compression | |
320 | * is spread across many cpus. The actual IO submission is step | |
321 | * two, and the ordered work queue takes care of making sure that | |
322 | * happens in the same order things were put onto the queue by | |
323 | * writepages and friends. | |
324 | * | |
325 | * If this code finds it can't get good compression, it puts an | |
326 | * entry onto the work queue to write the uncompressed bytes. This | |
327 | * makes sure that both compressed inodes and uncompressed inodes | |
328 | * are written in the same order that the flusher thread sent them | |
329 | * down. | |
330 | */ | |
331 | static noinline int compress_file_range(struct inode *inode, | |
332 | struct page *locked_page, | |
333 | u64 start, u64 end, | |
334 | struct async_cow *async_cow, | |
335 | int *num_added) | |
336 | { | |
337 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
338 | struct btrfs_trans_handle *trans; | |
339 | u64 num_bytes; | |
340 | u64 blocksize = root->sectorsize; | |
341 | u64 actual_end; | |
342 | u64 isize = i_size_read(inode); | |
343 | int ret = 0; | |
344 | struct page **pages = NULL; | |
345 | unsigned long nr_pages; | |
346 | unsigned long nr_pages_ret = 0; | |
347 | unsigned long total_compressed = 0; | |
348 | unsigned long total_in = 0; | |
349 | unsigned long max_compressed = 128 * 1024; | |
350 | unsigned long max_uncompressed = 128 * 1024; | |
351 | int i; | |
352 | int will_compress; | |
353 | int compress_type = root->fs_info->compress_type; | |
354 | ||
355 | /* if this is a small write inside eof, kick off a defrag */ | |
356 | if ((end - start + 1) < 16 * 1024 && | |
357 | (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) | |
358 | btrfs_add_inode_defrag(NULL, inode); | |
359 | ||
360 | actual_end = min_t(u64, isize, end + 1); | |
361 | again: | |
362 | will_compress = 0; | |
363 | nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1; | |
364 | nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE); | |
365 | ||
366 | /* | |
367 | * we don't want to send crud past the end of i_size through | |
368 | * compression, that's just a waste of CPU time. So, if the | |
369 | * end of the file is before the start of our current | |
370 | * requested range of bytes, we bail out to the uncompressed | |
371 | * cleanup code that can deal with all of this. | |
372 | * | |
373 | * It isn't really the fastest way to fix things, but this is a | |
374 | * very uncommon corner. | |
375 | */ | |
376 | if (actual_end <= start) | |
377 | goto cleanup_and_bail_uncompressed; | |
378 | ||
379 | total_compressed = actual_end - start; | |
380 | ||
381 | /* we want to make sure that amount of ram required to uncompress | |
382 | * an extent is reasonable, so we limit the total size in ram | |
383 | * of a compressed extent to 128k. This is a crucial number | |
384 | * because it also controls how easily we can spread reads across | |
385 | * cpus for decompression. | |
386 | * | |
387 | * We also want to make sure the amount of IO required to do | |
388 | * a random read is reasonably small, so we limit the size of | |
389 | * a compressed extent to 128k. | |
390 | */ | |
391 | total_compressed = min(total_compressed, max_uncompressed); | |
392 | num_bytes = (end - start + blocksize) & ~(blocksize - 1); | |
393 | num_bytes = max(blocksize, num_bytes); | |
394 | total_in = 0; | |
395 | ret = 0; | |
396 | ||
397 | /* | |
398 | * we do compression for mount -o compress and when the | |
399 | * inode has not been flagged as nocompress. This flag can | |
400 | * change at any time if we discover bad compression ratios. | |
401 | */ | |
402 | if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) && | |
403 | (btrfs_test_opt(root, COMPRESS) || | |
404 | (BTRFS_I(inode)->force_compress) || | |
405 | (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) { | |
406 | WARN_ON(pages); | |
407 | pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); | |
408 | if (!pages) { | |
409 | /* just bail out to the uncompressed code */ | |
410 | goto cont; | |
411 | } | |
412 | ||
413 | if (BTRFS_I(inode)->force_compress) | |
414 | compress_type = BTRFS_I(inode)->force_compress; | |
415 | ||
416 | ret = btrfs_compress_pages(compress_type, | |
417 | inode->i_mapping, start, | |
418 | total_compressed, pages, | |
419 | nr_pages, &nr_pages_ret, | |
420 | &total_in, | |
421 | &total_compressed, | |
422 | max_compressed); | |
423 | ||
424 | if (!ret) { | |
425 | unsigned long offset = total_compressed & | |
426 | (PAGE_CACHE_SIZE - 1); | |
427 | struct page *page = pages[nr_pages_ret - 1]; | |
428 | char *kaddr; | |
429 | ||
430 | /* zero the tail end of the last page, we might be | |
431 | * sending it down to disk | |
432 | */ | |
433 | if (offset) { | |
434 | kaddr = kmap_atomic(page); | |
435 | memset(kaddr + offset, 0, | |
436 | PAGE_CACHE_SIZE - offset); | |
437 | kunmap_atomic(kaddr); | |
438 | } | |
439 | will_compress = 1; | |
440 | } | |
441 | } | |
442 | cont: | |
443 | if (start == 0) { | |
444 | trans = btrfs_join_transaction(root); | |
445 | if (IS_ERR(trans)) { | |
446 | ret = PTR_ERR(trans); | |
447 | trans = NULL; | |
448 | goto cleanup_and_out; | |
449 | } | |
450 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; | |
451 | ||
452 | /* let's try to make an inline extent */ | |
453 | if (ret || total_in < (actual_end - start)) { | |
454 | /* we didn't compress the entire range, try | |
455 | * to make an uncompressed inline extent. | |
456 | */ | |
457 | ret = cow_file_range_inline(trans, root, inode, | |
458 | start, end, 0, 0, NULL); | |
459 | } else { | |
460 | /* try making a compressed inline extent */ | |
461 | ret = cow_file_range_inline(trans, root, inode, | |
462 | start, end, | |
463 | total_compressed, | |
464 | compress_type, pages); | |
465 | } | |
466 | if (ret <= 0) { | |
467 | /* | |
468 | * inline extent creation worked or returned error, | |
469 | * we don't need to create any more async work items. | |
470 | * Unlock and free up our temp pages. | |
471 | */ | |
472 | extent_clear_unlock_delalloc(inode, | |
473 | &BTRFS_I(inode)->io_tree, | |
474 | start, end, NULL, | |
475 | EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY | | |
476 | EXTENT_CLEAR_DELALLOC | | |
477 | EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK); | |
478 | ||
479 | btrfs_end_transaction(trans, root); | |
480 | goto free_pages_out; | |
481 | } | |
482 | btrfs_end_transaction(trans, root); | |
483 | } | |
484 | ||
485 | if (will_compress) { | |
486 | /* | |
487 | * we aren't doing an inline extent, so round the compressed size | |
488 | * up to a block size boundary so the allocator does sane | |
489 | * things | |
490 | */ | |
491 | total_compressed = (total_compressed + blocksize - 1) & | |
492 | ~(blocksize - 1); | |
493 | ||
494 | /* | |
495 | * one last check to make sure the compression is really a | |
496 | * win, compare the page count read with the blocks on disk | |
497 | */ | |
498 | total_in = (total_in + PAGE_CACHE_SIZE - 1) & | |
499 | ~(PAGE_CACHE_SIZE - 1); | |
500 | if (total_compressed >= total_in) { | |
501 | will_compress = 0; | |
502 | } else { | |
503 | num_bytes = total_in; | |
504 | } | |
505 | } | |
506 | if (!will_compress && pages) { | |
507 | /* | |
508 | * the compression code ran but failed to make things smaller, | |
509 | * free any pages it allocated and our page pointer array | |
510 | */ | |
511 | for (i = 0; i < nr_pages_ret; i++) { | |
512 | WARN_ON(pages[i]->mapping); | |
513 | page_cache_release(pages[i]); | |
514 | } | |
515 | kfree(pages); | |
516 | pages = NULL; | |
517 | total_compressed = 0; | |
518 | nr_pages_ret = 0; | |
519 | ||
520 | /* flag the file so we don't compress in the future */ | |
521 | if (!btrfs_test_opt(root, FORCE_COMPRESS) && | |
522 | !(BTRFS_I(inode)->force_compress)) { | |
523 | BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS; | |
524 | } | |
525 | } | |
526 | if (will_compress) { | |
527 | *num_added += 1; | |
528 | ||
529 | /* the async work queues will take care of doing actual | |
530 | * allocation on disk for these compressed pages, | |
531 | * and will submit them to the elevator. | |
532 | */ | |
533 | add_async_extent(async_cow, start, num_bytes, | |
534 | total_compressed, pages, nr_pages_ret, | |
535 | compress_type); | |
536 | ||
537 | if (start + num_bytes < end) { | |
538 | start += num_bytes; | |
539 | pages = NULL; | |
540 | cond_resched(); | |
541 | goto again; | |
542 | } | |
543 | } else { | |
544 | cleanup_and_bail_uncompressed: | |
545 | /* | |
546 | * No compression, but we still need to write the pages in | |
547 | * the file we've been given so far. Redirty the locked | |
548 | * page if it corresponds to our extent and set things up | |
549 | * for the async work queue to run cow_file_range to do | |
550 | * the normal delalloc dance | |
551 | */ | |
552 | if (page_offset(locked_page) >= start && | |
553 | page_offset(locked_page) <= end) { | |
554 | __set_page_dirty_nobuffers(locked_page); | |
555 | /* unlocked later on in the async handlers */ | |
556 | } | |
557 | add_async_extent(async_cow, start, end - start + 1, | |
558 | 0, NULL, 0, BTRFS_COMPRESS_NONE); | |
559 | *num_added += 1; | |
560 | } | |
561 | ||
562 | out: | |
563 | return ret; | |
564 | ||
565 | free_pages_out: | |
566 | for (i = 0; i < nr_pages_ret; i++) { | |
567 | WARN_ON(pages[i]->mapping); | |
568 | page_cache_release(pages[i]); | |
569 | } | |
570 | kfree(pages); | |
571 | ||
572 | goto out; | |
573 | ||
574 | cleanup_and_out: | |
575 | extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, | |
576 | start, end, NULL, | |
577 | EXTENT_CLEAR_UNLOCK_PAGE | | |
578 | EXTENT_CLEAR_DIRTY | | |
579 | EXTENT_CLEAR_DELALLOC | | |
580 | EXTENT_SET_WRITEBACK | | |
581 | EXTENT_END_WRITEBACK); | |
582 | if (!trans || IS_ERR(trans)) | |
583 | btrfs_error(root->fs_info, ret, "Failed to join transaction"); | |
584 | else | |
585 | btrfs_abort_transaction(trans, root, ret); | |
586 | goto free_pages_out; | |
587 | } | |
588 | ||
589 | /* | |
590 | * phase two of compressed writeback. This is the ordered portion | |
591 | * of the code, which only gets called in the order the work was | |
592 | * queued. We walk all the async extents created by compress_file_range | |
593 | * and send them down to the disk. | |
594 | */ | |
595 | static noinline int submit_compressed_extents(struct inode *inode, | |
596 | struct async_cow *async_cow) | |
597 | { | |
598 | struct async_extent *async_extent; | |
599 | u64 alloc_hint = 0; | |
600 | struct btrfs_trans_handle *trans; | |
601 | struct btrfs_key ins; | |
602 | struct extent_map *em; | |
603 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
604 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | |
605 | struct extent_io_tree *io_tree; | |
606 | int ret = 0; | |
607 | ||
608 | if (list_empty(&async_cow->extents)) | |
609 | return 0; | |
610 | ||
611 | ||
612 | while (!list_empty(&async_cow->extents)) { | |
613 | async_extent = list_entry(async_cow->extents.next, | |
614 | struct async_extent, list); | |
615 | list_del(&async_extent->list); | |
616 | ||
617 | io_tree = &BTRFS_I(inode)->io_tree; | |
618 | ||
619 | retry: | |
620 | /* did the compression code fall back to uncompressed IO? */ | |
621 | if (!async_extent->pages) { | |
622 | int page_started = 0; | |
623 | unsigned long nr_written = 0; | |
624 | ||
625 | lock_extent(io_tree, async_extent->start, | |
626 | async_extent->start + | |
627 | async_extent->ram_size - 1); | |
628 | ||
629 | /* allocate blocks */ | |
630 | ret = cow_file_range(inode, async_cow->locked_page, | |
631 | async_extent->start, | |
632 | async_extent->start + | |
633 | async_extent->ram_size - 1, | |
634 | &page_started, &nr_written, 0); | |
635 | ||
636 | /* JDM XXX */ | |
637 | ||
638 | /* | |
639 | * if page_started, cow_file_range inserted an | |
640 | * inline extent and took care of all the unlocking | |
641 | * and IO for us. Otherwise, we need to submit | |
642 | * all those pages down to the drive. | |
643 | */ | |
644 | if (!page_started && !ret) | |
645 | extent_write_locked_range(io_tree, | |
646 | inode, async_extent->start, | |
647 | async_extent->start + | |
648 | async_extent->ram_size - 1, | |
649 | btrfs_get_extent, | |
650 | WB_SYNC_ALL); | |
651 | kfree(async_extent); | |
652 | cond_resched(); | |
653 | continue; | |
654 | } | |
655 | ||
656 | lock_extent(io_tree, async_extent->start, | |
657 | async_extent->start + async_extent->ram_size - 1); | |
658 | ||
659 | trans = btrfs_join_transaction(root); | |
660 | if (IS_ERR(trans)) { | |
661 | ret = PTR_ERR(trans); | |
662 | } else { | |
663 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; | |
664 | ret = btrfs_reserve_extent(trans, root, | |
665 | async_extent->compressed_size, | |
666 | async_extent->compressed_size, | |
667 | 0, alloc_hint, &ins, 1); | |
668 | if (ret && ret != -ENOSPC) | |
669 | btrfs_abort_transaction(trans, root, ret); | |
670 | btrfs_end_transaction(trans, root); | |
671 | } | |
672 | ||
673 | if (ret) { | |
674 | int i; | |
675 | for (i = 0; i < async_extent->nr_pages; i++) { | |
676 | WARN_ON(async_extent->pages[i]->mapping); | |
677 | page_cache_release(async_extent->pages[i]); | |
678 | } | |
679 | kfree(async_extent->pages); | |
680 | async_extent->nr_pages = 0; | |
681 | async_extent->pages = NULL; | |
682 | unlock_extent(io_tree, async_extent->start, | |
683 | async_extent->start + | |
684 | async_extent->ram_size - 1); | |
685 | if (ret == -ENOSPC) | |
686 | goto retry; | |
687 | goto out_free; /* JDM: Requeue? */ | |
688 | } | |
689 | ||
690 | /* | |
691 | * here we're doing allocation and writeback of the | |
692 | * compressed pages | |
693 | */ | |
694 | btrfs_drop_extent_cache(inode, async_extent->start, | |
695 | async_extent->start + | |
696 | async_extent->ram_size - 1, 0); | |
697 | ||
698 | em = alloc_extent_map(); | |
699 | BUG_ON(!em); /* -ENOMEM */ | |
700 | em->start = async_extent->start; | |
701 | em->len = async_extent->ram_size; | |
702 | em->orig_start = em->start; | |
703 | ||
704 | em->block_start = ins.objectid; | |
705 | em->block_len = ins.offset; | |
706 | em->orig_block_len = ins.offset; | |
707 | em->bdev = root->fs_info->fs_devices->latest_bdev; | |
708 | em->compress_type = async_extent->compress_type; | |
709 | set_bit(EXTENT_FLAG_PINNED, &em->flags); | |
710 | set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); | |
711 | em->generation = -1; | |
712 | ||
713 | while (1) { | |
714 | write_lock(&em_tree->lock); | |
715 | ret = add_extent_mapping(em_tree, em); | |
716 | if (!ret) | |
717 | list_move(&em->list, | |
718 | &em_tree->modified_extents); | |
719 | write_unlock(&em_tree->lock); | |
720 | if (ret != -EEXIST) { | |
721 | free_extent_map(em); | |
722 | break; | |
723 | } | |
724 | btrfs_drop_extent_cache(inode, async_extent->start, | |
725 | async_extent->start + | |
726 | async_extent->ram_size - 1, 0); | |
727 | } | |
728 | ||
729 | ret = btrfs_add_ordered_extent_compress(inode, | |
730 | async_extent->start, | |
731 | ins.objectid, | |
732 | async_extent->ram_size, | |
733 | ins.offset, | |
734 | BTRFS_ORDERED_COMPRESSED, | |
735 | async_extent->compress_type); | |
736 | BUG_ON(ret); /* -ENOMEM */ | |
737 | ||
738 | /* | |
739 | * clear dirty, set writeback and unlock the pages. | |
740 | */ | |
741 | extent_clear_unlock_delalloc(inode, | |
742 | &BTRFS_I(inode)->io_tree, | |
743 | async_extent->start, | |
744 | async_extent->start + | |
745 | async_extent->ram_size - 1, | |
746 | NULL, EXTENT_CLEAR_UNLOCK_PAGE | | |
747 | EXTENT_CLEAR_UNLOCK | | |
748 | EXTENT_CLEAR_DELALLOC | | |
749 | EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK); | |
750 | ||
751 | ret = btrfs_submit_compressed_write(inode, | |
752 | async_extent->start, | |
753 | async_extent->ram_size, | |
754 | ins.objectid, | |
755 | ins.offset, async_extent->pages, | |
756 | async_extent->nr_pages); | |
757 | ||
758 | BUG_ON(ret); /* -ENOMEM */ | |
759 | alloc_hint = ins.objectid + ins.offset; | |
760 | kfree(async_extent); | |
761 | cond_resched(); | |
762 | } | |
763 | ret = 0; | |
764 | out: | |
765 | return ret; | |
766 | out_free: | |
767 | kfree(async_extent); | |
768 | goto out; | |
769 | } | |
770 | ||
771 | static u64 get_extent_allocation_hint(struct inode *inode, u64 start, | |
772 | u64 num_bytes) | |
773 | { | |
774 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | |
775 | struct extent_map *em; | |
776 | u64 alloc_hint = 0; | |
777 | ||
778 | read_lock(&em_tree->lock); | |
779 | em = search_extent_mapping(em_tree, start, num_bytes); | |
780 | if (em) { | |
781 | /* | |
782 | * if block start isn't an actual block number then find the | |
783 | * first block in this inode and use that as a hint. If that | |
784 | * block is also bogus then just don't worry about it. | |
785 | */ | |
786 | if (em->block_start >= EXTENT_MAP_LAST_BYTE) { | |
787 | free_extent_map(em); | |
788 | em = search_extent_mapping(em_tree, 0, 0); | |
789 | if (em && em->block_start < EXTENT_MAP_LAST_BYTE) | |
790 | alloc_hint = em->block_start; | |
791 | if (em) | |
792 | free_extent_map(em); | |
793 | } else { | |
794 | alloc_hint = em->block_start; | |
795 | free_extent_map(em); | |
796 | } | |
797 | } | |
798 | read_unlock(&em_tree->lock); | |
799 | ||
800 | return alloc_hint; | |
801 | } | |
802 | ||
803 | /* | |
804 | * when extent_io.c finds a delayed allocation range in the file, | |
805 | * the callbacks end up in this code. The basic idea is to | |
806 | * allocate extents on disk for the range, and create ordered data structs | |
807 | * in ram to track those extents. | |
808 | * | |
809 | * locked_page is the page that writepage had locked already. We use | |
810 | * it to make sure we don't do extra locks or unlocks. | |
811 | * | |
812 | * *page_started is set to one if we unlock locked_page and do everything | |
813 | * required to start IO on it. It may be clean and already done with | |
814 | * IO when we return. | |
815 | */ | |
816 | static noinline int __cow_file_range(struct btrfs_trans_handle *trans, | |
817 | struct inode *inode, | |
818 | struct btrfs_root *root, | |
819 | struct page *locked_page, | |
820 | u64 start, u64 end, int *page_started, | |
821 | unsigned long *nr_written, | |
822 | int unlock) | |
823 | { | |
824 | u64 alloc_hint = 0; | |
825 | u64 num_bytes; | |
826 | unsigned long ram_size; | |
827 | u64 disk_num_bytes; | |
828 | u64 cur_alloc_size; | |
829 | u64 blocksize = root->sectorsize; | |
830 | struct btrfs_key ins; | |
831 | struct extent_map *em; | |
832 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | |
833 | int ret = 0; | |
834 | ||
835 | BUG_ON(btrfs_is_free_space_inode(inode)); | |
836 | ||
837 | num_bytes = (end - start + blocksize) & ~(blocksize - 1); | |
838 | num_bytes = max(blocksize, num_bytes); | |
839 | disk_num_bytes = num_bytes; | |
840 | ||
841 | /* if this is a small write inside eof, kick off defrag */ | |
842 | if (num_bytes < 64 * 1024 && | |
843 | (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) | |
844 | btrfs_add_inode_defrag(trans, inode); | |
845 | ||
846 | if (start == 0) { | |
847 | /* let's try to make an inline extent */ | |
848 | ret = cow_file_range_inline(trans, root, inode, | |
849 | start, end, 0, 0, NULL); | |
850 | if (ret == 0) { | |
851 | extent_clear_unlock_delalloc(inode, | |
852 | &BTRFS_I(inode)->io_tree, | |
853 | start, end, NULL, | |
854 | EXTENT_CLEAR_UNLOCK_PAGE | | |
855 | EXTENT_CLEAR_UNLOCK | | |
856 | EXTENT_CLEAR_DELALLOC | | |
857 | EXTENT_CLEAR_DIRTY | | |
858 | EXTENT_SET_WRITEBACK | | |
859 | EXTENT_END_WRITEBACK); | |
860 | ||
861 | *nr_written = *nr_written + | |
862 | (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE; | |
863 | *page_started = 1; | |
864 | goto out; | |
865 | } else if (ret < 0) { | |
866 | btrfs_abort_transaction(trans, root, ret); | |
867 | goto out_unlock; | |
868 | } | |
869 | } | |
870 | ||
871 | BUG_ON(disk_num_bytes > | |
872 | btrfs_super_total_bytes(root->fs_info->super_copy)); | |
873 | ||
874 | alloc_hint = get_extent_allocation_hint(inode, start, num_bytes); | |
875 | btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); | |
876 | ||
877 | while (disk_num_bytes > 0) { | |
878 | unsigned long op; | |
879 | ||
880 | cur_alloc_size = disk_num_bytes; | |
881 | ret = btrfs_reserve_extent(trans, root, cur_alloc_size, | |
882 | root->sectorsize, 0, alloc_hint, | |
883 | &ins, 1); | |
884 | if (ret < 0) { | |
885 | btrfs_abort_transaction(trans, root, ret); | |
886 | goto out_unlock; | |
887 | } | |
888 | ||
889 | em = alloc_extent_map(); | |
890 | BUG_ON(!em); /* -ENOMEM */ | |
891 | em->start = start; | |
892 | em->orig_start = em->start; | |
893 | ram_size = ins.offset; | |
894 | em->len = ins.offset; | |
895 | ||
896 | em->block_start = ins.objectid; | |
897 | em->block_len = ins.offset; | |
898 | em->orig_block_len = ins.offset; | |
899 | em->bdev = root->fs_info->fs_devices->latest_bdev; | |
900 | set_bit(EXTENT_FLAG_PINNED, &em->flags); | |
901 | em->generation = -1; | |
902 | ||
903 | while (1) { | |
904 | write_lock(&em_tree->lock); | |
905 | ret = add_extent_mapping(em_tree, em); | |
906 | if (!ret) | |
907 | list_move(&em->list, | |
908 | &em_tree->modified_extents); | |
909 | write_unlock(&em_tree->lock); | |
910 | if (ret != -EEXIST) { | |
911 | free_extent_map(em); | |
912 | break; | |
913 | } | |
914 | btrfs_drop_extent_cache(inode, start, | |
915 | start + ram_size - 1, 0); | |
916 | } | |
917 | ||
918 | cur_alloc_size = ins.offset; | |
919 | ret = btrfs_add_ordered_extent(inode, start, ins.objectid, | |
920 | ram_size, cur_alloc_size, 0); | |
921 | BUG_ON(ret); /* -ENOMEM */ | |
922 | ||
923 | if (root->root_key.objectid == | |
924 | BTRFS_DATA_RELOC_TREE_OBJECTID) { | |
925 | ret = btrfs_reloc_clone_csums(inode, start, | |
926 | cur_alloc_size); | |
927 | if (ret) { | |
928 | btrfs_abort_transaction(trans, root, ret); | |
929 | goto out_unlock; | |
930 | } | |
931 | } | |
932 | ||
933 | if (disk_num_bytes < cur_alloc_size) | |
934 | break; | |
935 | ||
936 | /* we're not doing compressed IO, don't unlock the first | |
937 | * page (which the caller expects to stay locked), don't | |
938 | * clear any dirty bits and don't set any writeback bits | |
939 | * | |
940 | * Do set the Private2 bit so we know this page was properly | |
941 | * setup for writepage | |
942 | */ | |
943 | op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0; | |
944 | op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC | | |
945 | EXTENT_SET_PRIVATE2; | |
946 | ||
947 | extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, | |
948 | start, start + ram_size - 1, | |
949 | locked_page, op); | |
950 | disk_num_bytes -= cur_alloc_size; | |
951 | num_bytes -= cur_alloc_size; | |
952 | alloc_hint = ins.objectid + ins.offset; | |
953 | start += cur_alloc_size; | |
954 | } | |
955 | out: | |
956 | return ret; | |
957 | ||
958 | out_unlock: | |
959 | extent_clear_unlock_delalloc(inode, | |
960 | &BTRFS_I(inode)->io_tree, | |
961 | start, end, locked_page, | |
962 | EXTENT_CLEAR_UNLOCK_PAGE | | |
963 | EXTENT_CLEAR_UNLOCK | | |
964 | EXTENT_CLEAR_DELALLOC | | |
965 | EXTENT_CLEAR_DIRTY | | |
966 | EXTENT_SET_WRITEBACK | | |
967 | EXTENT_END_WRITEBACK); | |
968 | ||
969 | goto out; | |
970 | } | |
971 | ||
972 | static noinline int cow_file_range(struct inode *inode, | |
973 | struct page *locked_page, | |
974 | u64 start, u64 end, int *page_started, | |
975 | unsigned long *nr_written, | |
976 | int unlock) | |
977 | { | |
978 | struct btrfs_trans_handle *trans; | |
979 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
980 | int ret; | |
981 | ||
982 | trans = btrfs_join_transaction(root); | |
983 | if (IS_ERR(trans)) { | |
984 | extent_clear_unlock_delalloc(inode, | |
985 | &BTRFS_I(inode)->io_tree, | |
986 | start, end, locked_page, | |
987 | EXTENT_CLEAR_UNLOCK_PAGE | | |
988 | EXTENT_CLEAR_UNLOCK | | |
989 | EXTENT_CLEAR_DELALLOC | | |
990 | EXTENT_CLEAR_DIRTY | | |
991 | EXTENT_SET_WRITEBACK | | |
992 | EXTENT_END_WRITEBACK); | |
993 | return PTR_ERR(trans); | |
994 | } | |
995 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; | |
996 | ||
997 | ret = __cow_file_range(trans, inode, root, locked_page, start, end, | |
998 | page_started, nr_written, unlock); | |
999 | ||
1000 | btrfs_end_transaction(trans, root); | |
1001 | ||
1002 | return ret; | |
1003 | } | |
1004 | ||
1005 | /* | |
1006 | * work queue callback to start compression on a file and pages | |
1007 | */ | |
1008 | static noinline void async_cow_start(struct btrfs_work *work) | |
1009 | { | |
1010 | struct async_cow *async_cow; | |
1011 | int num_added = 0; | |
1012 | async_cow = container_of(work, struct async_cow, work); | |
1013 | ||
1014 | compress_file_range(async_cow->inode, async_cow->locked_page, | |
1015 | async_cow->start, async_cow->end, async_cow, | |
1016 | &num_added); | |
1017 | if (num_added == 0) { | |
1018 | btrfs_add_delayed_iput(async_cow->inode); | |
1019 | async_cow->inode = NULL; | |
1020 | } | |
1021 | } | |
1022 | ||
1023 | /* | |
1024 | * work queue call back to submit previously compressed pages | |
1025 | */ | |
1026 | static noinline void async_cow_submit(struct btrfs_work *work) | |
1027 | { | |
1028 | struct async_cow *async_cow; | |
1029 | struct btrfs_root *root; | |
1030 | unsigned long nr_pages; | |
1031 | ||
1032 | async_cow = container_of(work, struct async_cow, work); | |
1033 | ||
1034 | root = async_cow->root; | |
1035 | nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >> | |
1036 | PAGE_CACHE_SHIFT; | |
1037 | ||
1038 | if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) < | |
1039 | 5 * 1024 * 1024 && | |
1040 | waitqueue_active(&root->fs_info->async_submit_wait)) | |
1041 | wake_up(&root->fs_info->async_submit_wait); | |
1042 | ||
1043 | if (async_cow->inode) | |
1044 | submit_compressed_extents(async_cow->inode, async_cow); | |
1045 | } | |
1046 | ||
1047 | static noinline void async_cow_free(struct btrfs_work *work) | |
1048 | { | |
1049 | struct async_cow *async_cow; | |
1050 | async_cow = container_of(work, struct async_cow, work); | |
1051 | if (async_cow->inode) | |
1052 | btrfs_add_delayed_iput(async_cow->inode); | |
1053 | kfree(async_cow); | |
1054 | } | |
1055 | ||
1056 | static int cow_file_range_async(struct inode *inode, struct page *locked_page, | |
1057 | u64 start, u64 end, int *page_started, | |
1058 | unsigned long *nr_written) | |
1059 | { | |
1060 | struct async_cow *async_cow; | |
1061 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
1062 | unsigned long nr_pages; | |
1063 | u64 cur_end; | |
1064 | int limit = 10 * 1024 * 1024; | |
1065 | ||
1066 | clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED, | |
1067 | 1, 0, NULL, GFP_NOFS); | |
1068 | while (start < end) { | |
1069 | async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); | |
1070 | BUG_ON(!async_cow); /* -ENOMEM */ | |
1071 | async_cow->inode = igrab(inode); | |
1072 | async_cow->root = root; | |
1073 | async_cow->locked_page = locked_page; | |
1074 | async_cow->start = start; | |
1075 | ||
1076 | if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) | |
1077 | cur_end = end; | |
1078 | else | |
1079 | cur_end = min(end, start + 512 * 1024 - 1); | |
1080 | ||
1081 | async_cow->end = cur_end; | |
1082 | INIT_LIST_HEAD(&async_cow->extents); | |
1083 | ||
1084 | async_cow->work.func = async_cow_start; | |
1085 | async_cow->work.ordered_func = async_cow_submit; | |
1086 | async_cow->work.ordered_free = async_cow_free; | |
1087 | async_cow->work.flags = 0; | |
1088 | ||
1089 | nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >> | |
1090 | PAGE_CACHE_SHIFT; | |
1091 | atomic_add(nr_pages, &root->fs_info->async_delalloc_pages); | |
1092 | ||
1093 | btrfs_queue_worker(&root->fs_info->delalloc_workers, | |
1094 | &async_cow->work); | |
1095 | ||
1096 | if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) { | |
1097 | wait_event(root->fs_info->async_submit_wait, | |
1098 | (atomic_read(&root->fs_info->async_delalloc_pages) < | |
1099 | limit)); | |
1100 | } | |
1101 | ||
1102 | while (atomic_read(&root->fs_info->async_submit_draining) && | |
1103 | atomic_read(&root->fs_info->async_delalloc_pages)) { | |
1104 | wait_event(root->fs_info->async_submit_wait, | |
1105 | (atomic_read(&root->fs_info->async_delalloc_pages) == | |
1106 | 0)); | |
1107 | } | |
1108 | ||
1109 | *nr_written += nr_pages; | |
1110 | start = cur_end + 1; | |
1111 | } | |
1112 | *page_started = 1; | |
1113 | return 0; | |
1114 | } | |
1115 | ||
1116 | static noinline int csum_exist_in_range(struct btrfs_root *root, | |
1117 | u64 bytenr, u64 num_bytes) | |
1118 | { | |
1119 | int ret; | |
1120 | struct btrfs_ordered_sum *sums; | |
1121 | LIST_HEAD(list); | |
1122 | ||
1123 | ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr, | |
1124 | bytenr + num_bytes - 1, &list, 0); | |
1125 | if (ret == 0 && list_empty(&list)) | |
1126 | return 0; | |
1127 | ||
1128 | while (!list_empty(&list)) { | |
1129 | sums = list_entry(list.next, struct btrfs_ordered_sum, list); | |
1130 | list_del(&sums->list); | |
1131 | kfree(sums); | |
1132 | } | |
1133 | return 1; | |
1134 | } | |
1135 | ||
1136 | /* | |
1137 | * called for the nocow writeback path. This checks for snapshots or COW copies | |
1138 | * of the extents that exist in the file, and COWs the file as required. | |
1139 | * | |
1140 | * If no cow copies or snapshots exist, we write directly to the existing | |
1141 | * blocks on disk | |
1142 | */ | |
1143 | static noinline int run_delalloc_nocow(struct inode *inode, | |
1144 | struct page *locked_page, | |
1145 | u64 start, u64 end, int *page_started, int force, | |
1146 | unsigned long *nr_written) | |
1147 | { | |
1148 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
1149 | struct btrfs_trans_handle *trans; | |
1150 | struct extent_buffer *leaf; | |
1151 | struct btrfs_path *path; | |
1152 | struct btrfs_file_extent_item *fi; | |
1153 | struct btrfs_key found_key; | |
1154 | u64 cow_start; | |
1155 | u64 cur_offset; | |
1156 | u64 extent_end; | |
1157 | u64 extent_offset; | |
1158 | u64 disk_bytenr; | |
1159 | u64 num_bytes; | |
1160 | u64 disk_num_bytes; | |
1161 | int extent_type; | |
1162 | int ret, err; | |
1163 | int type; | |
1164 | int nocow; | |
1165 | int check_prev = 1; | |
1166 | bool nolock; | |
1167 | u64 ino = btrfs_ino(inode); | |
1168 | ||
1169 | path = btrfs_alloc_path(); | |
1170 | if (!path) { | |
1171 | extent_clear_unlock_delalloc(inode, | |
1172 | &BTRFS_I(inode)->io_tree, | |
1173 | start, end, locked_page, | |
1174 | EXTENT_CLEAR_UNLOCK_PAGE | | |
1175 | EXTENT_CLEAR_UNLOCK | | |
1176 | EXTENT_CLEAR_DELALLOC | | |
1177 | EXTENT_CLEAR_DIRTY | | |
1178 | EXTENT_SET_WRITEBACK | | |
1179 | EXTENT_END_WRITEBACK); | |
1180 | return -ENOMEM; | |
1181 | } | |
1182 | ||
1183 | nolock = btrfs_is_free_space_inode(inode); | |
1184 | ||
1185 | if (nolock) | |
1186 | trans = btrfs_join_transaction_nolock(root); | |
1187 | else | |
1188 | trans = btrfs_join_transaction(root); | |
1189 | ||
1190 | if (IS_ERR(trans)) { | |
1191 | extent_clear_unlock_delalloc(inode, | |
1192 | &BTRFS_I(inode)->io_tree, | |
1193 | start, end, locked_page, | |
1194 | EXTENT_CLEAR_UNLOCK_PAGE | | |
1195 | EXTENT_CLEAR_UNLOCK | | |
1196 | EXTENT_CLEAR_DELALLOC | | |
1197 | EXTENT_CLEAR_DIRTY | | |
1198 | EXTENT_SET_WRITEBACK | | |
1199 | EXTENT_END_WRITEBACK); | |
1200 | btrfs_free_path(path); | |
1201 | return PTR_ERR(trans); | |
1202 | } | |
1203 | ||
1204 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; | |
1205 | ||
1206 | cow_start = (u64)-1; | |
1207 | cur_offset = start; | |
1208 | while (1) { | |
1209 | ret = btrfs_lookup_file_extent(trans, root, path, ino, | |
1210 | cur_offset, 0); | |
1211 | if (ret < 0) { | |
1212 | btrfs_abort_transaction(trans, root, ret); | |
1213 | goto error; | |
1214 | } | |
1215 | if (ret > 0 && path->slots[0] > 0 && check_prev) { | |
1216 | leaf = path->nodes[0]; | |
1217 | btrfs_item_key_to_cpu(leaf, &found_key, | |
1218 | path->slots[0] - 1); | |
1219 | if (found_key.objectid == ino && | |
1220 | found_key.type == BTRFS_EXTENT_DATA_KEY) | |
1221 | path->slots[0]--; | |
1222 | } | |
1223 | check_prev = 0; | |
1224 | next_slot: | |
1225 | leaf = path->nodes[0]; | |
1226 | if (path->slots[0] >= btrfs_header_nritems(leaf)) { | |
1227 | ret = btrfs_next_leaf(root, path); | |
1228 | if (ret < 0) { | |
1229 | btrfs_abort_transaction(trans, root, ret); | |
1230 | goto error; | |
1231 | } | |
1232 | if (ret > 0) | |
1233 | break; | |
1234 | leaf = path->nodes[0]; | |
1235 | } | |
1236 | ||
1237 | nocow = 0; | |
1238 | disk_bytenr = 0; | |
1239 | num_bytes = 0; | |
1240 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | |
1241 | ||
1242 | if (found_key.objectid > ino || | |
1243 | found_key.type > BTRFS_EXTENT_DATA_KEY || | |
1244 | found_key.offset > end) | |
1245 | break; | |
1246 | ||
1247 | if (found_key.offset > cur_offset) { | |
1248 | extent_end = found_key.offset; | |
1249 | extent_type = 0; | |
1250 | goto out_check; | |
1251 | } | |
1252 | ||
1253 | fi = btrfs_item_ptr(leaf, path->slots[0], | |
1254 | struct btrfs_file_extent_item); | |
1255 | extent_type = btrfs_file_extent_type(leaf, fi); | |
1256 | ||
1257 | if (extent_type == BTRFS_FILE_EXTENT_REG || | |
1258 | extent_type == BTRFS_FILE_EXTENT_PREALLOC) { | |
1259 | disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); | |
1260 | extent_offset = btrfs_file_extent_offset(leaf, fi); | |
1261 | extent_end = found_key.offset + | |
1262 | btrfs_file_extent_num_bytes(leaf, fi); | |
1263 | disk_num_bytes = | |
1264 | btrfs_file_extent_disk_num_bytes(leaf, fi); | |
1265 | if (extent_end <= start) { | |
1266 | path->slots[0]++; | |
1267 | goto next_slot; | |
1268 | } | |
1269 | if (disk_bytenr == 0) | |
1270 | goto out_check; | |
1271 | if (btrfs_file_extent_compression(leaf, fi) || | |
1272 | btrfs_file_extent_encryption(leaf, fi) || | |
1273 | btrfs_file_extent_other_encoding(leaf, fi)) | |
1274 | goto out_check; | |
1275 | if (extent_type == BTRFS_FILE_EXTENT_REG && !force) | |
1276 | goto out_check; | |
1277 | if (btrfs_extent_readonly(root, disk_bytenr)) | |
1278 | goto out_check; | |
1279 | if (btrfs_cross_ref_exist(trans, root, ino, | |
1280 | found_key.offset - | |
1281 | extent_offset, disk_bytenr)) | |
1282 | goto out_check; | |
1283 | disk_bytenr += extent_offset; | |
1284 | disk_bytenr += cur_offset - found_key.offset; | |
1285 | num_bytes = min(end + 1, extent_end) - cur_offset; | |
1286 | /* | |
1287 | * force cow if csum exists in the range. | |
1288 | * This ensures that the csums for a given extent are | |
1289 | * either valid or do not exist. | |
1290 | */ | |
1291 | if (csum_exist_in_range(root, disk_bytenr, num_bytes)) | |
1292 | goto out_check; | |
1293 | nocow = 1; | |
1294 | } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { | |
1295 | extent_end = found_key.offset + | |
1296 | btrfs_file_extent_inline_len(leaf, fi); | |
1297 | extent_end = ALIGN(extent_end, root->sectorsize); | |
1298 | } else { | |
1299 | BUG_ON(1); | |
1300 | } | |
1301 | out_check: | |
1302 | if (extent_end <= start) { | |
1303 | path->slots[0]++; | |
1304 | goto next_slot; | |
1305 | } | |
1306 | if (!nocow) { | |
1307 | if (cow_start == (u64)-1) | |
1308 | cow_start = cur_offset; | |
1309 | cur_offset = extent_end; | |
1310 | if (cur_offset > end) | |
1311 | break; | |
1312 | path->slots[0]++; | |
1313 | goto next_slot; | |
1314 | } | |
1315 | ||
1316 | btrfs_release_path(path); | |
1317 | if (cow_start != (u64)-1) { | |
1318 | ret = __cow_file_range(trans, inode, root, locked_page, | |
1319 | cow_start, found_key.offset - 1, | |
1320 | page_started, nr_written, 1); | |
1321 | if (ret) { | |
1322 | btrfs_abort_transaction(trans, root, ret); | |
1323 | goto error; | |
1324 | } | |
1325 | cow_start = (u64)-1; | |
1326 | } | |
1327 | ||
1328 | if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) { | |
1329 | struct extent_map *em; | |
1330 | struct extent_map_tree *em_tree; | |
1331 | em_tree = &BTRFS_I(inode)->extent_tree; | |
1332 | em = alloc_extent_map(); | |
1333 | BUG_ON(!em); /* -ENOMEM */ | |
1334 | em->start = cur_offset; | |
1335 | em->orig_start = found_key.offset - extent_offset; | |
1336 | em->len = num_bytes; | |
1337 | em->block_len = num_bytes; | |
1338 | em->block_start = disk_bytenr; | |
1339 | em->orig_block_len = disk_num_bytes; | |
1340 | em->bdev = root->fs_info->fs_devices->latest_bdev; | |
1341 | set_bit(EXTENT_FLAG_PINNED, &em->flags); | |
1342 | set_bit(EXTENT_FLAG_FILLING, &em->flags); | |
1343 | em->generation = -1; | |
1344 | while (1) { | |
1345 | write_lock(&em_tree->lock); | |
1346 | ret = add_extent_mapping(em_tree, em); | |
1347 | if (!ret) | |
1348 | list_move(&em->list, | |
1349 | &em_tree->modified_extents); | |
1350 | write_unlock(&em_tree->lock); | |
1351 | if (ret != -EEXIST) { | |
1352 | free_extent_map(em); | |
1353 | break; | |
1354 | } | |
1355 | btrfs_drop_extent_cache(inode, em->start, | |
1356 | em->start + em->len - 1, 0); | |
1357 | } | |
1358 | type = BTRFS_ORDERED_PREALLOC; | |
1359 | } else { | |
1360 | type = BTRFS_ORDERED_NOCOW; | |
1361 | } | |
1362 | ||
1363 | ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr, | |
1364 | num_bytes, num_bytes, type); | |
1365 | BUG_ON(ret); /* -ENOMEM */ | |
1366 | ||
1367 | if (root->root_key.objectid == | |
1368 | BTRFS_DATA_RELOC_TREE_OBJECTID) { | |
1369 | ret = btrfs_reloc_clone_csums(inode, cur_offset, | |
1370 | num_bytes); | |
1371 | if (ret) { | |
1372 | btrfs_abort_transaction(trans, root, ret); | |
1373 | goto error; | |
1374 | } | |
1375 | } | |
1376 | ||
1377 | extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, | |
1378 | cur_offset, cur_offset + num_bytes - 1, | |
1379 | locked_page, EXTENT_CLEAR_UNLOCK_PAGE | | |
1380 | EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC | | |
1381 | EXTENT_SET_PRIVATE2); | |
1382 | cur_offset = extent_end; | |
1383 | if (cur_offset > end) | |
1384 | break; | |
1385 | } | |
1386 | btrfs_release_path(path); | |
1387 | ||
1388 | if (cur_offset <= end && cow_start == (u64)-1) { | |
1389 | cow_start = cur_offset; | |
1390 | cur_offset = end; | |
1391 | } | |
1392 | ||
1393 | if (cow_start != (u64)-1) { | |
1394 | ret = __cow_file_range(trans, inode, root, locked_page, | |
1395 | cow_start, end, | |
1396 | page_started, nr_written, 1); | |
1397 | if (ret) { | |
1398 | btrfs_abort_transaction(trans, root, ret); | |
1399 | goto error; | |
1400 | } | |
1401 | } | |
1402 | ||
1403 | error: | |
1404 | err = btrfs_end_transaction(trans, root); | |
1405 | if (!ret) | |
1406 | ret = err; | |
1407 | ||
1408 | if (ret && cur_offset < end) | |
1409 | extent_clear_unlock_delalloc(inode, | |
1410 | &BTRFS_I(inode)->io_tree, | |
1411 | cur_offset, end, locked_page, | |
1412 | EXTENT_CLEAR_UNLOCK_PAGE | | |
1413 | EXTENT_CLEAR_UNLOCK | | |
1414 | EXTENT_CLEAR_DELALLOC | | |
1415 | EXTENT_CLEAR_DIRTY | | |
1416 | EXTENT_SET_WRITEBACK | | |
1417 | EXTENT_END_WRITEBACK); | |
1418 | ||
1419 | btrfs_free_path(path); | |
1420 | return ret; | |
1421 | } | |
1422 | ||
1423 | /* | |
1424 | * extent_io.c call back to do delayed allocation processing | |
1425 | */ | |
1426 | static int run_delalloc_range(struct inode *inode, struct page *locked_page, | |
1427 | u64 start, u64 end, int *page_started, | |
1428 | unsigned long *nr_written) | |
1429 | { | |
1430 | int ret; | |
1431 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
1432 | ||
1433 | if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) { | |
1434 | ret = run_delalloc_nocow(inode, locked_page, start, end, | |
1435 | page_started, 1, nr_written); | |
1436 | } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) { | |
1437 | ret = run_delalloc_nocow(inode, locked_page, start, end, | |
1438 | page_started, 0, nr_written); | |
1439 | } else if (!btrfs_test_opt(root, COMPRESS) && | |
1440 | !(BTRFS_I(inode)->force_compress) && | |
1441 | !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) { | |
1442 | ret = cow_file_range(inode, locked_page, start, end, | |
1443 | page_started, nr_written, 1); | |
1444 | } else { | |
1445 | set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, | |
1446 | &BTRFS_I(inode)->runtime_flags); | |
1447 | ret = cow_file_range_async(inode, locked_page, start, end, | |
1448 | page_started, nr_written); | |
1449 | } | |
1450 | return ret; | |
1451 | } | |
1452 | ||
1453 | static void btrfs_split_extent_hook(struct inode *inode, | |
1454 | struct extent_state *orig, u64 split) | |
1455 | { | |
1456 | /* not delalloc, ignore it */ | |
1457 | if (!(orig->state & EXTENT_DELALLOC)) | |
1458 | return; | |
1459 | ||
1460 | spin_lock(&BTRFS_I(inode)->lock); | |
1461 | BTRFS_I(inode)->outstanding_extents++; | |
1462 | spin_unlock(&BTRFS_I(inode)->lock); | |
1463 | } | |
1464 | ||
1465 | /* | |
1466 | * extent_io.c merge_extent_hook, used to track merged delayed allocation | |
1467 | * extents so we can keep track of new extents that are just merged onto old | |
1468 | * extents, such as when we are doing sequential writes, so we can properly | |
1469 | * account for the metadata space we'll need. | |
1470 | */ | |
1471 | static void btrfs_merge_extent_hook(struct inode *inode, | |
1472 | struct extent_state *new, | |
1473 | struct extent_state *other) | |
1474 | { | |
1475 | /* not delalloc, ignore it */ | |
1476 | if (!(other->state & EXTENT_DELALLOC)) | |
1477 | return; | |
1478 | ||
1479 | spin_lock(&BTRFS_I(inode)->lock); | |
1480 | BTRFS_I(inode)->outstanding_extents--; | |
1481 | spin_unlock(&BTRFS_I(inode)->lock); | |
1482 | } | |
1483 | ||
1484 | /* | |
1485 | * extent_io.c set_bit_hook, used to track delayed allocation | |
1486 | * bytes in this file, and to maintain the list of inodes that | |
1487 | * have pending delalloc work to be done. | |
1488 | */ | |
1489 | static void btrfs_set_bit_hook(struct inode *inode, | |
1490 | struct extent_state *state, int *bits) | |
1491 | { | |
1492 | ||
1493 | /* | |
1494 | * set_bit and clear bit hooks normally require _irqsave/restore | |
1495 | * but in this case, we are only testing for the DELALLOC | |
1496 | * bit, which is only set or cleared with irqs on | |
1497 | */ | |
1498 | if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { | |
1499 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
1500 | u64 len = state->end + 1 - state->start; | |
1501 | bool do_list = !btrfs_is_free_space_inode(inode); | |
1502 | ||
1503 | if (*bits & EXTENT_FIRST_DELALLOC) { | |
1504 | *bits &= ~EXTENT_FIRST_DELALLOC; | |
1505 | } else { | |
1506 | spin_lock(&BTRFS_I(inode)->lock); | |
1507 | BTRFS_I(inode)->outstanding_extents++; | |
1508 | spin_unlock(&BTRFS_I(inode)->lock); | |
1509 | } | |
1510 | ||
1511 | spin_lock(&root->fs_info->delalloc_lock); | |
1512 | BTRFS_I(inode)->delalloc_bytes += len; | |
1513 | root->fs_info->delalloc_bytes += len; | |
1514 | if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) { | |
1515 | list_add_tail(&BTRFS_I(inode)->delalloc_inodes, | |
1516 | &root->fs_info->delalloc_inodes); | |
1517 | } | |
1518 | spin_unlock(&root->fs_info->delalloc_lock); | |
1519 | } | |
1520 | } | |
1521 | ||
1522 | /* | |
1523 | * extent_io.c clear_bit_hook, see set_bit_hook for why | |
1524 | */ | |
1525 | static void btrfs_clear_bit_hook(struct inode *inode, | |
1526 | struct extent_state *state, int *bits) | |
1527 | { | |
1528 | /* | |
1529 | * set_bit and clear bit hooks normally require _irqsave/restore | |
1530 | * but in this case, we are only testing for the DELALLOC | |
1531 | * bit, which is only set or cleared with irqs on | |
1532 | */ | |
1533 | if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { | |
1534 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
1535 | u64 len = state->end + 1 - state->start; | |
1536 | bool do_list = !btrfs_is_free_space_inode(inode); | |
1537 | ||
1538 | if (*bits & EXTENT_FIRST_DELALLOC) { | |
1539 | *bits &= ~EXTENT_FIRST_DELALLOC; | |
1540 | } else if (!(*bits & EXTENT_DO_ACCOUNTING)) { | |
1541 | spin_lock(&BTRFS_I(inode)->lock); | |
1542 | BTRFS_I(inode)->outstanding_extents--; | |
1543 | spin_unlock(&BTRFS_I(inode)->lock); | |
1544 | } | |
1545 | ||
1546 | if (*bits & EXTENT_DO_ACCOUNTING) | |
1547 | btrfs_delalloc_release_metadata(inode, len); | |
1548 | ||
1549 | if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID | |
1550 | && do_list) | |
1551 | btrfs_free_reserved_data_space(inode, len); | |
1552 | ||
1553 | spin_lock(&root->fs_info->delalloc_lock); | |
1554 | root->fs_info->delalloc_bytes -= len; | |
1555 | BTRFS_I(inode)->delalloc_bytes -= len; | |
1556 | ||
1557 | if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 && | |
1558 | !list_empty(&BTRFS_I(inode)->delalloc_inodes)) { | |
1559 | list_del_init(&BTRFS_I(inode)->delalloc_inodes); | |
1560 | } | |
1561 | spin_unlock(&root->fs_info->delalloc_lock); | |
1562 | } | |
1563 | } | |
1564 | ||
1565 | /* | |
1566 | * extent_io.c merge_bio_hook, this must check the chunk tree to make sure | |
1567 | * we don't create bios that span stripes or chunks | |
1568 | */ | |
1569 | int btrfs_merge_bio_hook(struct page *page, unsigned long offset, | |
1570 | size_t size, struct bio *bio, | |
1571 | unsigned long bio_flags) | |
1572 | { | |
1573 | struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; | |
1574 | u64 logical = (u64)bio->bi_sector << 9; | |
1575 | u64 length = 0; | |
1576 | u64 map_length; | |
1577 | int ret; | |
1578 | ||
1579 | if (bio_flags & EXTENT_BIO_COMPRESSED) | |
1580 | return 0; | |
1581 | ||
1582 | length = bio->bi_size; | |
1583 | map_length = length; | |
1584 | ret = btrfs_map_block(root->fs_info, READ, logical, | |
1585 | &map_length, NULL, 0); | |
1586 | /* Will always return 0 with map_multi == NULL */ | |
1587 | BUG_ON(ret < 0); | |
1588 | if (map_length < length + size) | |
1589 | return 1; | |
1590 | return 0; | |
1591 | } | |
1592 | ||
1593 | /* | |
1594 | * in order to insert checksums into the metadata in large chunks, | |
1595 | * we wait until bio submission time. All the pages in the bio are | |
1596 | * checksummed and sums are attached onto the ordered extent record. | |
1597 | * | |
1598 | * At IO completion time the csums attached on the ordered extent record | |
1599 | * are inserted into the btree | |
1600 | */ | |
1601 | static int __btrfs_submit_bio_start(struct inode *inode, int rw, | |
1602 | struct bio *bio, int mirror_num, | |
1603 | unsigned long bio_flags, | |
1604 | u64 bio_offset) | |
1605 | { | |
1606 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
1607 | int ret = 0; | |
1608 | ||
1609 | ret = btrfs_csum_one_bio(root, inode, bio, 0, 0); | |
1610 | BUG_ON(ret); /* -ENOMEM */ | |
1611 | return 0; | |
1612 | } | |
1613 | ||
1614 | /* | |
1615 | * in order to insert checksums into the metadata in large chunks, | |
1616 | * we wait until bio submission time. All the pages in the bio are | |
1617 | * checksummed and sums are attached onto the ordered extent record. | |
1618 | * | |
1619 | * At IO completion time the csums attached on the ordered extent record | |
1620 | * are inserted into the btree | |
1621 | */ | |
1622 | static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio, | |
1623 | int mirror_num, unsigned long bio_flags, | |
1624 | u64 bio_offset) | |
1625 | { | |
1626 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
1627 | int ret; | |
1628 | ||
1629 | ret = btrfs_map_bio(root, rw, bio, mirror_num, 1); | |
1630 | if (ret) | |
1631 | bio_endio(bio, ret); | |
1632 | return ret; | |
1633 | } | |
1634 | ||
1635 | /* | |
1636 | * extent_io.c submission hook. This does the right thing for csum calculation | |
1637 | * on write, or reading the csums from the tree before a read | |
1638 | */ | |
1639 | static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, | |
1640 | int mirror_num, unsigned long bio_flags, | |
1641 | u64 bio_offset) | |
1642 | { | |
1643 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
1644 | int ret = 0; | |
1645 | int skip_sum; | |
1646 | int metadata = 0; | |
1647 | int async = !atomic_read(&BTRFS_I(inode)->sync_writers); | |
1648 | ||
1649 | skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; | |
1650 | ||
1651 | if (btrfs_is_free_space_inode(inode)) | |
1652 | metadata = 2; | |
1653 | ||
1654 | if (!(rw & REQ_WRITE)) { | |
1655 | ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata); | |
1656 | if (ret) | |
1657 | goto out; | |
1658 | ||
1659 | if (bio_flags & EXTENT_BIO_COMPRESSED) { | |
1660 | ret = btrfs_submit_compressed_read(inode, bio, | |
1661 | mirror_num, | |
1662 | bio_flags); | |
1663 | goto out; | |
1664 | } else if (!skip_sum) { | |
1665 | ret = btrfs_lookup_bio_sums(root, inode, bio, NULL); | |
1666 | if (ret) | |
1667 | goto out; | |
1668 | } | |
1669 | goto mapit; | |
1670 | } else if (async && !skip_sum) { | |
1671 | /* csum items have already been cloned */ | |
1672 | if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) | |
1673 | goto mapit; | |
1674 | /* we're doing a write, do the async checksumming */ | |
1675 | ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, | |
1676 | inode, rw, bio, mirror_num, | |
1677 | bio_flags, bio_offset, | |
1678 | __btrfs_submit_bio_start, | |
1679 | __btrfs_submit_bio_done); | |
1680 | goto out; | |
1681 | } else if (!skip_sum) { | |
1682 | ret = btrfs_csum_one_bio(root, inode, bio, 0, 0); | |
1683 | if (ret) | |
1684 | goto out; | |
1685 | } | |
1686 | ||
1687 | mapit: | |
1688 | ret = btrfs_map_bio(root, rw, bio, mirror_num, 0); | |
1689 | ||
1690 | out: | |
1691 | if (ret < 0) | |
1692 | bio_endio(bio, ret); | |
1693 | return ret; | |
1694 | } | |
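
/*
 * Flow of the submission hook above: for reads the bio is registered
 * with the end_io workqueue and then either handed to the compressed
 * read path, given csums looked up from the csum tree (unless the inode
 * is NODATASUM), or sent straight to btrfs_map_bio.  For writes the
 * csums are computed synchronously when async submission is not wanted,
 * or deferred to the worker threads where __btrfs_submit_bio_start
 * computes them and __btrfs_submit_bio_done does the actual mapping;
 * the data reloc tree skips checksumming here because its csum items
 * have already been cloned.
 */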
1695 | ||
1696 | /* | |
1697 | * given a list of ordered sums, record them in the inode. This happens | |
1698 | * at IO completion time based on sums calculated at bio submission time. | |
1699 | */ | |
1700 | static noinline int add_pending_csums(struct btrfs_trans_handle *trans, | |
1701 | struct inode *inode, u64 file_offset, | |
1702 | struct list_head *list) | |
1703 | { | |
1704 | struct btrfs_ordered_sum *sum; | |
1705 | ||
1706 | list_for_each_entry(sum, list, list) { | |
1707 | btrfs_csum_file_blocks(trans, | |
1708 | BTRFS_I(inode)->root->fs_info->csum_root, sum); | |
1709 | } | |
1710 | return 0; | |
1711 | } | |
1712 | ||
1713 | int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, | |
1714 | struct extent_state **cached_state) | |
1715 | { | |
1716 | WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0); | |
1717 | return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, | |
1718 | cached_state, GFP_NOFS); | |
1719 | } | |
1720 | ||
1721 | /* see btrfs_writepage_start_hook for details on why this is required */ | |
1722 | struct btrfs_writepage_fixup { | |
1723 | struct page *page; | |
1724 | struct btrfs_work work; | |
1725 | }; | |
1726 | ||
1727 | static void btrfs_writepage_fixup_worker(struct btrfs_work *work) | |
1728 | { | |
1729 | struct btrfs_writepage_fixup *fixup; | |
1730 | struct btrfs_ordered_extent *ordered; | |
1731 | struct extent_state *cached_state = NULL; | |
1732 | struct page *page; | |
1733 | struct inode *inode; | |
1734 | u64 page_start; | |
1735 | u64 page_end; | |
1736 | int ret; | |
1737 | ||
1738 | fixup = container_of(work, struct btrfs_writepage_fixup, work); | |
1739 | page = fixup->page; | |
1740 | again: | |
1741 | lock_page(page); | |
1742 | if (!page->mapping || !PageDirty(page) || !PageChecked(page)) { | |
1743 | ClearPageChecked(page); | |
1744 | goto out_page; | |
1745 | } | |
1746 | ||
1747 | inode = page->mapping->host; | |
1748 | page_start = page_offset(page); | |
1749 | page_end = page_offset(page) + PAGE_CACHE_SIZE - 1; | |
1750 | ||
1751 | lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0, | |
1752 | &cached_state); | |
1753 | ||
1754 | /* already ordered? We're done */ | |
1755 | if (PagePrivate2(page)) | |
1756 | goto out; | |
1757 | ||
1758 | ordered = btrfs_lookup_ordered_extent(inode, page_start); | |
1759 | if (ordered) { | |
1760 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, | |
1761 | page_end, &cached_state, GFP_NOFS); | |
1762 | unlock_page(page); | |
1763 | btrfs_start_ordered_extent(inode, ordered, 1); | |
1764 | btrfs_put_ordered_extent(ordered); | |
1765 | goto again; | |
1766 | } | |
1767 | ||
1768 | ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); | |
1769 | if (ret) { | |
1770 | mapping_set_error(page->mapping, ret); | |
1771 | end_extent_writepage(page, ret, page_start, page_end); | |
1772 | ClearPageChecked(page); | |
1773 | goto out; | |
1774 | } | |
1775 | ||
1776 | btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); | |
1777 | ClearPageChecked(page); | |
1778 | set_page_dirty(page); | |
1779 | out: | |
1780 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, | |
1781 | &cached_state, GFP_NOFS); | |
1782 | out_page: | |
1783 | unlock_page(page); | |
1784 | page_cache_release(page); | |
1785 | kfree(fixup); | |
1786 | } | |
1787 | ||
1788 | /* | |
1789 | * There are a few paths in the higher layers of the kernel that directly | |
1790 | * set the page dirty bit without asking the filesystem if it is a | |
1791 | * good idea. This causes problems because we want to make sure COW | |
1792 | * properly happens and the data=ordered rules are followed. | |
1793 | * | |
1794 | * In our case any range that doesn't have the ORDERED bit set | |
1795 | * hasn't been properly set up for IO. We kick off an async process | |
1796 | * to fix it up. The async helper will wait for ordered extents, set | |
1797 | * the delalloc bit and make it safe to write the page. | |
1798 | */ | |
1799 | static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) | |
1800 | { | |
1801 | struct inode *inode = page->mapping->host; | |
1802 | struct btrfs_writepage_fixup *fixup; | |
1803 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
1804 | ||
1805 | /* this page is properly in the ordered list */ | |
1806 | if (TestClearPagePrivate2(page)) | |
1807 | return 0; | |
1808 | ||
1809 | if (PageChecked(page)) | |
1810 | return -EAGAIN; | |
1811 | ||
1812 | fixup = kzalloc(sizeof(*fixup), GFP_NOFS); | |
1813 | if (!fixup) | |
1814 | return -EAGAIN; | |
1815 | ||
1816 | SetPageChecked(page); | |
1817 | page_cache_get(page); | |
1818 | fixup->work.func = btrfs_writepage_fixup_worker; | |
1819 | fixup->page = page; | |
1820 | btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work); | |
1821 | return -EBUSY; | |
1822 | } | |
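
/*
 * A note on the fixup path above: PagePrivate2 means the ordered extent
 * for the page has already been set up, so TestClearPagePrivate2 lets a
 * properly prepared page go through untouched.  PageChecked acts as a
 * "fixup already queued" marker; the worker re-takes the page lock,
 * waits out any ordered extent covering the range, reserves delalloc
 * space and re-marks the range delalloc before dropping the extra page
 * reference taken here.
 */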
1823 | ||
1824 | static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, | |
1825 | struct inode *inode, u64 file_pos, | |
1826 | u64 disk_bytenr, u64 disk_num_bytes, | |
1827 | u64 num_bytes, u64 ram_bytes, | |
1828 | u8 compression, u8 encryption, | |
1829 | u16 other_encoding, int extent_type) | |
1830 | { | |
1831 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
1832 | struct btrfs_file_extent_item *fi; | |
1833 | struct btrfs_path *path; | |
1834 | struct extent_buffer *leaf; | |
1835 | struct btrfs_key ins; | |
1836 | int ret; | |
1837 | ||
1838 | path = btrfs_alloc_path(); | |
1839 | if (!path) | |
1840 | return -ENOMEM; | |
1841 | ||
1842 | path->leave_spinning = 1; | |
1843 | ||
1844 | /* | |
1845 | * we may be replacing one extent in the tree with another. | |
1846 | * The new extent is pinned in the extent map, and we don't want | |
1847 | * to drop it from the cache until it is completely in the btree. | |
1848 | * | |
1849 | * So, tell btrfs_drop_extents to leave this extent in the cache. | |
1850 | * The caller is expected to unpin it and allow it to be merged | |
1851 | * with the others. | |
1852 | */ | |
1853 | ret = btrfs_drop_extents(trans, root, inode, file_pos, | |
1854 | file_pos + num_bytes, 0); | |
1855 | if (ret) | |
1856 | goto out; | |
1857 | ||
1858 | ins.objectid = btrfs_ino(inode); | |
1859 | ins.offset = file_pos; | |
1860 | ins.type = BTRFS_EXTENT_DATA_KEY; | |
1861 | ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi)); | |
1862 | if (ret) | |
1863 | goto out; | |
1864 | leaf = path->nodes[0]; | |
1865 | fi = btrfs_item_ptr(leaf, path->slots[0], | |
1866 | struct btrfs_file_extent_item); | |
1867 | btrfs_set_file_extent_generation(leaf, fi, trans->transid); | |
1868 | btrfs_set_file_extent_type(leaf, fi, extent_type); | |
1869 | btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr); | |
1870 | btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes); | |
1871 | btrfs_set_file_extent_offset(leaf, fi, 0); | |
1872 | btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes); | |
1873 | btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes); | |
1874 | btrfs_set_file_extent_compression(leaf, fi, compression); | |
1875 | btrfs_set_file_extent_encryption(leaf, fi, encryption); | |
1876 | btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding); | |
1877 | ||
1878 | btrfs_mark_buffer_dirty(leaf); | |
1879 | btrfs_release_path(path); | |
1880 | ||
1881 | inode_add_bytes(inode, num_bytes); | |
1882 | ||
1883 | ins.objectid = disk_bytenr; | |
1884 | ins.offset = disk_num_bytes; | |
1885 | ins.type = BTRFS_EXTENT_ITEM_KEY; | |
1886 | ret = btrfs_alloc_reserved_file_extent(trans, root, | |
1887 | root->root_key.objectid, | |
1888 | btrfs_ino(inode), file_pos, &ins); | |
1889 | out: | |
1890 | btrfs_free_path(path); | |
1891 | ||
1892 | return ret; | |
1893 | } | |
1894 | ||
1895 | /* | |
1896 | * helper function for btrfs_finish_ordered_io, this | |
1897 | * just reads in some of the csum leaves to prime them into ram | |
1898 | * before we start the transaction. It limits the amount of btree | |
1899 | * reads required while inside the transaction. | |
1900 | */ | |
1901 | /* as ordered data IO finishes, this gets called so we can finish | |
1902 | * an ordered extent if the range of bytes in the file it covers is | |
1903 | * fully written. | |
1904 | */ | |
1905 | static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) | |
1906 | { | |
1907 | struct inode *inode = ordered_extent->inode; | |
1908 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
1909 | struct btrfs_trans_handle *trans = NULL; | |
1910 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | |
1911 | struct extent_state *cached_state = NULL; | |
1912 | int compress_type = 0; | |
1913 | int ret; | |
1914 | bool nolock; | |
1915 | ||
1916 | nolock = btrfs_is_free_space_inode(inode); | |
1917 | ||
1918 | if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { | |
1919 | ret = -EIO; | |
1920 | goto out; | |
1921 | } | |
1922 | ||
1923 | if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { | |
1924 | BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */ | |
1925 | btrfs_ordered_update_i_size(inode, 0, ordered_extent); | |
1926 | if (nolock) | |
1927 | trans = btrfs_join_transaction_nolock(root); | |
1928 | else | |
1929 | trans = btrfs_join_transaction(root); | |
1930 | if (IS_ERR(trans)) { | |
1931 | ret = PTR_ERR(trans); | |
1932 | trans = NULL; | |
1933 | goto out; | |
1934 | } | |
1935 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; | |
1936 | ret = btrfs_update_inode_fallback(trans, root, inode); | |
1937 | if (ret) /* -ENOMEM or corruption */ | |
1938 | btrfs_abort_transaction(trans, root, ret); | |
1939 | goto out; | |
1940 | } | |
1941 | ||
1942 | lock_extent_bits(io_tree, ordered_extent->file_offset, | |
1943 | ordered_extent->file_offset + ordered_extent->len - 1, | |
1944 | 0, &cached_state); | |
1945 | ||
1946 | if (nolock) | |
1947 | trans = btrfs_join_transaction_nolock(root); | |
1948 | else | |
1949 | trans = btrfs_join_transaction(root); | |
1950 | if (IS_ERR(trans)) { | |
1951 | ret = PTR_ERR(trans); | |
1952 | trans = NULL; | |
1953 | goto out_unlock; | |
1954 | } | |
1955 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; | |
1956 | ||
1957 | if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) | |
1958 | compress_type = ordered_extent->compress_type; | |
1959 | if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { | |
1960 | BUG_ON(compress_type); | |
1961 | ret = btrfs_mark_extent_written(trans, inode, | |
1962 | ordered_extent->file_offset, | |
1963 | ordered_extent->file_offset + | |
1964 | ordered_extent->len); | |
1965 | } else { | |
1966 | BUG_ON(root == root->fs_info->tree_root); | |
1967 | ret = insert_reserved_file_extent(trans, inode, | |
1968 | ordered_extent->file_offset, | |
1969 | ordered_extent->start, | |
1970 | ordered_extent->disk_len, | |
1971 | ordered_extent->len, | |
1972 | ordered_extent->len, | |
1973 | compress_type, 0, 0, | |
1974 | BTRFS_FILE_EXTENT_REG); | |
1975 | } | |
1976 | unpin_extent_cache(&BTRFS_I(inode)->extent_tree, | |
1977 | ordered_extent->file_offset, ordered_extent->len, | |
1978 | trans->transid); | |
1979 | if (ret < 0) { | |
1980 | btrfs_abort_transaction(trans, root, ret); | |
1981 | goto out_unlock; | |
1982 | } | |
1983 | ||
1984 | add_pending_csums(trans, inode, ordered_extent->file_offset, | |
1985 | &ordered_extent->list); | |
1986 | ||
1987 | btrfs_ordered_update_i_size(inode, 0, ordered_extent); | |
1988 | ret = btrfs_update_inode_fallback(trans, root, inode); | |
1989 | if (ret) { /* -ENOMEM or corruption */ | |
1990 | btrfs_abort_transaction(trans, root, ret); | |
1991 | goto out_unlock; | |
1992 | } | |
1993 | ret = 0; | |
1994 | out_unlock: | |
1995 | unlock_extent_cached(io_tree, ordered_extent->file_offset, | |
1996 | ordered_extent->file_offset + | |
1997 | ordered_extent->len - 1, &cached_state, GFP_NOFS); | |
1998 | out: | |
1999 | if (root != root->fs_info->tree_root) | |
2000 | btrfs_delalloc_release_metadata(inode, ordered_extent->len); | |
2001 | if (trans) | |
2002 | btrfs_end_transaction(trans, root); | |
2003 | ||
2004 | if (ret) | |
2005 | clear_extent_uptodate(io_tree, ordered_extent->file_offset, | |
2006 | ordered_extent->file_offset + | |
2007 | ordered_extent->len - 1, NULL, GFP_NOFS); | |
2008 | ||
2009 | /* | |
2010 | * This needs to be done to make sure anybody waiting knows we are done | |
2011 | * updating everything for this ordered extent. | |
2012 | */ | |
2013 | btrfs_remove_ordered_extent(inode, ordered_extent); | |
2014 | ||
2015 | /* once for us */ | |
2016 | btrfs_put_ordered_extent(ordered_extent); | |
2017 | /* once for the tree */ | |
2018 | btrfs_put_ordered_extent(ordered_extent); | |
2019 | ||
2020 | return ret; | |
2021 | } | |
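
/*
 * Summary of the completion path above: NOCOW ordered extents only need
 * the inode size and inode item updated, since no new file extent item
 * is required.  Prealloc extents are converted to regular extents with
 * btrfs_mark_extent_written; everything else gets a file extent item
 * via insert_reserved_file_extent.  The csums collected at submit time
 * are then inserted, i_size is brought up to date, and on failure the
 * transaction is aborted and the range's uptodate bit is cleared before
 * the ordered extent is removed and its references are dropped.
 */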
2022 | ||
2023 | static void finish_ordered_fn(struct btrfs_work *work) | |
2024 | { | |
2025 | struct btrfs_ordered_extent *ordered_extent; | |
2026 | ordered_extent = container_of(work, struct btrfs_ordered_extent, work); | |
2027 | btrfs_finish_ordered_io(ordered_extent); | |
2028 | } | |
2029 | ||
2030 | static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, | |
2031 | struct extent_state *state, int uptodate) | |
2032 | { | |
2033 | struct inode *inode = page->mapping->host; | |
2034 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
2035 | struct btrfs_ordered_extent *ordered_extent = NULL; | |
2036 | struct btrfs_workers *workers; | |
2037 | ||
2038 | trace_btrfs_writepage_end_io_hook(page, start, end, uptodate); | |
2039 | ||
2040 | ClearPagePrivate2(page); | |
2041 | if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, | |
2042 | end - start + 1, uptodate)) | |
2043 | return 0; | |
2044 | ||
2045 | ordered_extent->work.func = finish_ordered_fn; | |
2046 | ordered_extent->work.flags = 0; | |
2047 | ||
2048 | if (btrfs_is_free_space_inode(inode)) | |
2049 | workers = &root->fs_info->endio_freespace_worker; | |
2050 | else | |
2051 | workers = &root->fs_info->endio_write_workers; | |
2052 | btrfs_queue_worker(workers, &ordered_extent->work); | |
2053 | ||
2054 | return 0; | |
2055 | } | |
2056 | ||
2057 | /* | |
2058 | * when reads are done, we need to check csums to verify the data is correct. | |
2059 | * If there's a match, we allow the bio to finish. If not, the code in | |
2060 | * extent_io.c will try to find good copies for us. | |
2061 | */ | |
2062 | static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, | |
2063 | struct extent_state *state, int mirror) | |
2064 | { | |
2065 | size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT); | |
2066 | struct inode *inode = page->mapping->host; | |
2067 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | |
2068 | char *kaddr; | |
2069 | u64 private = ~(u32)0; | |
2070 | int ret; | |
2071 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
2072 | u32 csum = ~(u32)0; | |
2073 | ||
2074 | if (PageChecked(page)) { | |
2075 | ClearPageChecked(page); | |
2076 | goto good; | |
2077 | } | |
2078 | ||
2079 | if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) | |
2080 | goto good; | |
2081 | ||
2082 | if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID && | |
2083 | test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) { | |
2084 | clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM, | |
2085 | GFP_NOFS); | |
2086 | return 0; | |
2087 | } | |
2088 | ||
2089 | if (state && state->start == start) { | |
2090 | private = state->private; | |
2091 | ret = 0; | |
2092 | } else { | |
2093 | ret = get_state_private(io_tree, start, &private); | |
2094 | } | |
2095 | kaddr = kmap_atomic(page); | |
2096 | if (ret) | |
2097 | goto zeroit; | |
2098 | ||
2099 | csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1); | |
2100 | btrfs_csum_final(csum, (char *)&csum); | |
2101 | if (csum != private) | |
2102 | goto zeroit; | |
2103 | ||
2104 | kunmap_atomic(kaddr); | |
2105 | good: | |
2106 | return 0; | |
2107 | ||
2108 | zeroit: | |
2109 | printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u " | |
2110 | "private %llu\n", | |
2111 | (unsigned long long)btrfs_ino(page->mapping->host), | |
2112 | (unsigned long long)start, csum, | |
2113 | (unsigned long long)private); | |
2114 | memset(kaddr + offset, 1, end - start + 1); | |
2115 | flush_dcache_page(page); | |
2116 | kunmap_atomic(kaddr); | |
2117 | if (private == 0) | |
2118 | return 0; | |
2119 | return -EIO; | |
2120 | } | |
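
/*
 * Verification order in the hook above: pages flagged PageChecked (set
 * by the writepage fixup path) and NODATASUM inodes are trusted as is,
 * and data reloc tree ranges carrying EXTENT_NODATASUM simply have that
 * bit cleared.  Otherwise the expected csum is read from the io_tree
 * private field (stashed there when the csums were looked up at submit
 * time), the page contents are checksummed, and a mismatch poisons the
 * page contents and returns -EIO so the caller can try another mirror.
 */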
2121 | ||
2122 | struct delayed_iput { | |
2123 | struct list_head list; | |
2124 | struct inode *inode; | |
2125 | }; | |
2126 | ||
2127 | /* JDM: If this is fs-wide, why can't we add a pointer to | |
2128 | * btrfs_inode instead and avoid the allocation? */ | |
2129 | void btrfs_add_delayed_iput(struct inode *inode) | |
2130 | { | |
2131 | struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; | |
2132 | struct delayed_iput *delayed; | |
2133 | ||
2134 | if (atomic_add_unless(&inode->i_count, -1, 1)) | |
2135 | return; | |
2136 | ||
2137 | delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL); | |
2138 | delayed->inode = inode; | |
2139 | ||
2140 | spin_lock(&fs_info->delayed_iput_lock); | |
2141 | list_add_tail(&delayed->list, &fs_info->delayed_iputs); | |
2142 | spin_unlock(&fs_info->delayed_iput_lock); | |
2143 | } | |
2144 | ||
2145 | void btrfs_run_delayed_iputs(struct btrfs_root *root) | |
2146 | { | |
2147 | LIST_HEAD(list); | |
2148 | struct btrfs_fs_info *fs_info = root->fs_info; | |
2149 | struct delayed_iput *delayed; | |
2150 | int empty; | |
2151 | ||
2152 | spin_lock(&fs_info->delayed_iput_lock); | |
2153 | empty = list_empty(&fs_info->delayed_iputs); | |
2154 | spin_unlock(&fs_info->delayed_iput_lock); | |
2155 | if (empty) | |
2156 | return; | |
2157 | ||
2158 | spin_lock(&fs_info->delayed_iput_lock); | |
2159 | list_splice_init(&fs_info->delayed_iputs, &list); | |
2160 | spin_unlock(&fs_info->delayed_iput_lock); | |
2161 | ||
2162 | while (!list_empty(&list)) { | |
2163 | delayed = list_entry(list.next, struct delayed_iput, list); | |
2164 | list_del(&delayed->list); | |
2165 | iput(delayed->inode); | |
2166 | kfree(delayed); | |
2167 | } | |
2168 | } | |
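
/*
 * The delayed iput machinery above lets the final iput on an inode be
 * deferred out of contexts where evicting the inode right away would be
 * unsafe.  btrfs_add_delayed_iput drops i_count directly whenever it is
 * not the last reference; otherwise the inode is parked on
 * fs_info->delayed_iputs and btrfs_run_delayed_iputs performs the real
 * iput later.
 */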
2169 | ||
2170 | enum btrfs_orphan_cleanup_state { | |
2171 | ORPHAN_CLEANUP_STARTED = 1, | |
2172 | ORPHAN_CLEANUP_DONE = 2, | |
2173 | }; | |
2174 | ||
2175 | /* | |
2176 | * This is called at transaction commit time. If there are no orphan | |
2177 | * files in the subvolume, it removes the orphan item and frees the block_rsv | |
2178 | * structure. | |
2179 | */ | |
2180 | void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, | |
2181 | struct btrfs_root *root) | |
2182 | { | |
2183 | struct btrfs_block_rsv *block_rsv; | |
2184 | int ret; | |
2185 | ||
2186 | if (atomic_read(&root->orphan_inodes) || | |
2187 | root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) | |
2188 | return; | |
2189 | ||
2190 | spin_lock(&root->orphan_lock); | |
2191 | if (atomic_read(&root->orphan_inodes)) { | |
2192 | spin_unlock(&root->orphan_lock); | |
2193 | return; | |
2194 | } | |
2195 | ||
2196 | if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) { | |
2197 | spin_unlock(&root->orphan_lock); | |
2198 | return; | |
2199 | } | |
2200 | ||
2201 | block_rsv = root->orphan_block_rsv; | |
2202 | root->orphan_block_rsv = NULL; | |
2203 | spin_unlock(&root->orphan_lock); | |
2204 | ||
2205 | if (root->orphan_item_inserted && | |
2206 | btrfs_root_refs(&root->root_item) > 0) { | |
2207 | ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root, | |
2208 | root->root_key.objectid); | |
2209 | BUG_ON(ret); | |
2210 | root->orphan_item_inserted = 0; | |
2211 | } | |
2212 | ||
2213 | if (block_rsv) { | |
2214 | WARN_ON(block_rsv->size > 0); | |
2215 | btrfs_free_block_rsv(root, block_rsv); | |
2216 | } | |
2217 | } | |
2218 | ||
2219 | /* | |
2220 | * This creates an orphan entry for the given inode in case something goes | |
2221 | * wrong in the middle of an unlink/truncate. | |
2222 | * | |
2223 | * NOTE: caller of this function should reserve 5 units of metadata for | |
2224 | * this function. | |
2225 | */ | |
2226 | int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) | |
2227 | { | |
2228 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
2229 | struct btrfs_block_rsv *block_rsv = NULL; | |
2230 | int reserve = 0; | |
2231 | int insert = 0; | |
2232 | int ret; | |
2233 | ||
2234 | if (!root->orphan_block_rsv) { | |
2235 | block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP); | |
2236 | if (!block_rsv) | |
2237 | return -ENOMEM; | |
2238 | } | |
2239 | ||
2240 | spin_lock(&root->orphan_lock); | |
2241 | if (!root->orphan_block_rsv) { | |
2242 | root->orphan_block_rsv = block_rsv; | |
2243 | } else if (block_rsv) { | |
2244 | btrfs_free_block_rsv(root, block_rsv); | |
2245 | block_rsv = NULL; | |
2246 | } | |
2247 | ||
2248 | if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, | |
2249 | &BTRFS_I(inode)->runtime_flags)) { | |
2250 | #if 0 | |
2251 | /* | |
2252 | * For proper ENOSPC handling, we should do orphan | |
2253 | * cleanup when mounting. But this introduces backward | |
2254 | * compatibility issue. | |
2255 | */ | |
2256 | if (!xchg(&root->orphan_item_inserted, 1)) | |
2257 | insert = 2; | |
2258 | else | |
2259 | insert = 1; | |
2260 | #endif | |
2261 | insert = 1; | |
2262 | atomic_inc(&root->orphan_inodes); | |
2263 | } | |
2264 | ||
2265 | if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED, | |
2266 | &BTRFS_I(inode)->runtime_flags)) | |
2267 | reserve = 1; | |
2268 | spin_unlock(&root->orphan_lock); | |
2269 | ||
2270 | /* grab metadata reservation from transaction handle */ | |
2271 | if (reserve) { | |
2272 | ret = btrfs_orphan_reserve_metadata(trans, inode); | |
2273 | BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */ | |
2274 | } | |
2275 | ||
2276 | /* insert an orphan item to track this unlinked/truncated file */ | |
2277 | if (insert >= 1) { | |
2278 | ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode)); | |
2279 | if (ret && ret != -EEXIST) { | |
2280 | clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, | |
2281 | &BTRFS_I(inode)->runtime_flags); | |
2282 | btrfs_abort_transaction(trans, root, ret); | |
2283 | return ret; | |
2284 | } | |
2285 | ret = 0; | |
2286 | } | |
2287 | ||
2288 | /* insert an orphan item to track that the subvolume contains orphan files */ | |
2289 | if (insert >= 2) { | |
2290 | ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root, | |
2291 | root->root_key.objectid); | |
2292 | if (ret && ret != -EEXIST) { | |
2293 | btrfs_abort_transaction(trans, root, ret); | |
2294 | return ret; | |
2295 | } | |
2296 | } | |
2297 | return 0; | |
2298 | } | |
2299 | ||
2300 | /* | |
2301 | * We have done the truncate/delete so we can go ahead and remove the orphan | |
2302 | * item for this particular inode. | |
2303 | */ | |
2304 | int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode) | |
2305 | { | |
2306 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
2307 | int delete_item = 0; | |
2308 | int release_rsv = 0; | |
2309 | int ret = 0; | |
2310 | ||
2311 | spin_lock(&root->orphan_lock); | |
2312 | if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, | |
2313 | &BTRFS_I(inode)->runtime_flags)) | |
2314 | delete_item = 1; | |
2315 | ||
2316 | if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, | |
2317 | &BTRFS_I(inode)->runtime_flags)) | |
2318 | release_rsv = 1; | |
2319 | spin_unlock(&root->orphan_lock); | |
2320 | ||
2321 | if (trans && delete_item) { | |
2322 | ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode)); | |
2323 | BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */ | |
2324 | } | |
2325 | ||
2326 | if (release_rsv) { | |
2327 | btrfs_orphan_release_metadata(inode); | |
2328 | atomic_dec(&root->orphan_inodes); | |
2329 | } | |
2330 | ||
2331 | return 0; | |
2332 | } | |
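
/*
 * Orphan item lifecycle, as implemented above: btrfs_orphan_add inserts
 * an orphan item (and reserves metadata for it) when an unlink or
 * truncate starts, so a crash mid-operation leaves a record for
 * btrfs_orphan_cleanup to finish the job on the next mount.
 * btrfs_orphan_del removes the item and releases the reservation once
 * the truncate/delete has completed, and btrfs_orphan_commit_root frees
 * the per-root orphan block reservation at commit time once no orphan
 * inodes remain.
 */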
2333 | ||
2334 | /* | |
2335 | * this cleans up any orphans that may be left on the list from the last use | |
2336 | * of this root. | |
2337 | */ | |
2338 | int btrfs_orphan_cleanup(struct btrfs_root *root) | |
2339 | { | |
2340 | struct btrfs_path *path; | |
2341 | struct extent_buffer *leaf; | |
2342 | struct btrfs_key key, found_key; | |
2343 | struct btrfs_trans_handle *trans; | |
2344 | struct inode *inode; | |
2345 | u64 last_objectid = 0; | |
2346 | int ret = 0, nr_unlink = 0, nr_truncate = 0; | |
2347 | ||
2348 | if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED)) | |
2349 | return 0; | |
2350 | ||
2351 | path = btrfs_alloc_path(); | |
2352 | if (!path) { | |
2353 | ret = -ENOMEM; | |
2354 | goto out; | |
2355 | } | |
2356 | path->reada = -1; | |
2357 | ||
2358 | key.objectid = BTRFS_ORPHAN_OBJECTID; | |
2359 | btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY); | |
2360 | key.offset = (u64)-1; | |
2361 | ||
2362 | while (1) { | |
2363 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
2364 | if (ret < 0) | |
2365 | goto out; | |
2366 | ||
2367 | /* | |
2368 | * ret == 0 means we found what we were searching for, which | |
2369 | * is weird, but possible. So only adjust the path if we didn't | |
2370 | * find the key, and see if we have stuff that matches | |
2371 | */ | |
2372 | if (ret > 0) { | |
2373 | ret = 0; | |
2374 | if (path->slots[0] == 0) | |
2375 | break; | |
2376 | path->slots[0]--; | |
2377 | } | |
2378 | ||
2379 | /* pull out the item */ | |
2380 | leaf = path->nodes[0]; | |
2381 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | |
2382 | ||
2383 | /* make sure the item matches what we want */ | |
2384 | if (found_key.objectid != BTRFS_ORPHAN_OBJECTID) | |
2385 | break; | |
2386 | if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY) | |
2387 | break; | |
2388 | ||
2389 | /* release the path since we're done with it */ | |
2390 | btrfs_release_path(path); | |
2391 | ||
2392 | /* | |
2393 | * this is where we basically do btrfs_lookup, without the | |
2394 | * crossing root thing. We store the inode number in the | |
2395 | * offset of the orphan item. | |
2396 | */ | |
2397 | ||
2398 | if (found_key.offset == last_objectid) { | |
2399 | printk(KERN_ERR "btrfs: Error removing orphan entry, " | |
2400 | "stopping orphan cleanup\n"); | |
2401 | ret = -EINVAL; | |
2402 | goto out; | |
2403 | } | |
2404 | ||
2405 | last_objectid = found_key.offset; | |
2406 | ||
2407 | found_key.objectid = found_key.offset; | |
2408 | found_key.type = BTRFS_INODE_ITEM_KEY; | |
2409 | found_key.offset = 0; | |
2410 | inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL); | |
2411 | ret = PTR_RET(inode); | |
2412 | if (ret && ret != -ESTALE) | |
2413 | goto out; | |
2414 | ||
2415 | if (ret == -ESTALE && root == root->fs_info->tree_root) { | |
2416 | struct btrfs_root *dead_root; | |
2417 | struct btrfs_fs_info *fs_info = root->fs_info; | |
2418 | int is_dead_root = 0; | |
2419 | ||
2420 | /* | |
2421 | * this is an orphan in the tree root. Currently these | |
2422 | * could come from 2 sources: | |
2423 | * a) a snapshot deletion in progress | |
2424 | * b) a free space cache inode | |
2425 | * We need to distinguish those two, as the snapshot | |
2426 | * orphan must not get deleted. | |
2427 | * find_dead_roots already ran before us, so if this | |
2428 | * is a snapshot deletion, we should find the root | |
2429 | * in the dead_roots list | |
2430 | */ | |
2431 | spin_lock(&fs_info->trans_lock); | |
2432 | list_for_each_entry(dead_root, &fs_info->dead_roots, | |
2433 | root_list) { | |
2434 | if (dead_root->root_key.objectid == | |
2435 | found_key.objectid) { | |
2436 | is_dead_root = 1; | |
2437 | break; | |
2438 | } | |
2439 | } | |
2440 | spin_unlock(&fs_info->trans_lock); | |
2441 | if (is_dead_root) { | |
2442 | /* prevent this orphan from being found again */ | |
2443 | key.offset = found_key.objectid - 1; | |
2444 | continue; | |
2445 | } | |
2446 | } | |
2447 | /* | |
2448 | * Inode is already gone but the orphan item is still there, | |
2449 | * kill the orphan item. | |
2450 | */ | |
2451 | if (ret == -ESTALE) { | |
2452 | trans = btrfs_start_transaction(root, 1); | |
2453 | if (IS_ERR(trans)) { | |
2454 | ret = PTR_ERR(trans); | |
2455 | goto out; | |
2456 | } | |
2457 | printk(KERN_ERR "auto deleting %Lu\n", | |
2458 | found_key.objectid); | |
2459 | ret = btrfs_del_orphan_item(trans, root, | |
2460 | found_key.objectid); | |
2461 | BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */ | |
2462 | btrfs_end_transaction(trans, root); | |
2463 | continue; | |
2464 | } | |
2465 | ||
2466 | /* | |
2467 | * add this inode to the orphan list so btrfs_orphan_del does | |
2468 | * the proper thing when we hit it | |
2469 | */ | |
2470 | set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, | |
2471 | &BTRFS_I(inode)->runtime_flags); | |
2472 | ||
2473 | /* if we have links, this was a truncate, let's do that */ | |
2474 | if (inode->i_nlink) { | |
2475 | if (!S_ISREG(inode->i_mode)) { | |
2476 | WARN_ON(1); | |
2477 | iput(inode); | |
2478 | continue; | |
2479 | } | |
2480 | nr_truncate++; | |
2481 | ||
2482 | /* 1 for the orphan item deletion. */ | |
2483 | trans = btrfs_start_transaction(root, 1); | |
2484 | if (IS_ERR(trans)) { | |
2485 | ret = PTR_ERR(trans); | |
2486 | goto out; | |
2487 | } | |
2488 | ret = btrfs_orphan_add(trans, inode); | |
2489 | btrfs_end_transaction(trans, root); | |
2490 | if (ret) | |
2491 | goto out; | |
2492 | ||
2493 | ret = btrfs_truncate(inode); | |
2494 | } else { | |
2495 | nr_unlink++; | |
2496 | } | |
2497 | ||
2498 | /* this will do delete_inode and everything for us */ | |
2499 | iput(inode); | |
2500 | if (ret) | |
2501 | goto out; | |
2502 | } | |
2503 | /* release the path since we're done with it */ | |
2504 | btrfs_release_path(path); | |
2505 | ||
2506 | root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE; | |
2507 | ||
2508 | if (root->orphan_block_rsv) | |
2509 | btrfs_block_rsv_release(root, root->orphan_block_rsv, | |
2510 | (u64)-1); | |
2511 | ||
2512 | if (root->orphan_block_rsv || root->orphan_item_inserted) { | |
2513 | trans = btrfs_join_transaction(root); | |
2514 | if (!IS_ERR(trans)) | |
2515 | btrfs_end_transaction(trans, root); | |
2516 | } | |
2517 | ||
2518 | if (nr_unlink) | |
2519 | printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink); | |
2520 | if (nr_truncate) | |
2521 | printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate); | |
2522 | ||
2523 | out: | |
2524 | if (ret) | |
2525 | printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret); | |
2526 | btrfs_free_path(path); | |
2527 | return ret; | |
2528 | } | |
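
/*
 * The cleanup loop above walks BTRFS_ORPHAN_OBJECTID keys from the end
 * of the tree, using the key offset as the inode number.  Stale entries
 * whose inode no longer exists are deleted outright; in the tree root a
 * missing inode may instead belong to a snapshot deletion still on the
 * dead_roots list, in which case the item is skipped rather than
 * removed.  Inodes that still have links were interrupted mid-truncate
 * and are truncated again, while unlinked inodes are simply iput so
 * that eviction deletes them.
 */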
2529 | ||
2530 | /* | |
2531 | * very simple check to peek ahead in the leaf looking for xattrs. If we | |
2532 | * don't find any xattrs, we know there can't be any acls. | |
2533 | * | |
2534 | * slot is the slot the inode is in, objectid is the objectid of the inode | |
2535 | */ | |
2536 | static noinline int acls_after_inode_item(struct extent_buffer *leaf, | |
2537 | int slot, u64 objectid) | |
2538 | { | |
2539 | u32 nritems = btrfs_header_nritems(leaf); | |
2540 | struct btrfs_key found_key; | |
2541 | int scanned = 0; | |
2542 | ||
2543 | slot++; | |
2544 | while (slot < nritems) { | |
2545 | btrfs_item_key_to_cpu(leaf, &found_key, slot); | |
2546 | ||
2547 | /* we found a different objectid, there must not be acls */ | |
2548 | if (found_key.objectid != objectid) | |
2549 | return 0; | |
2550 | ||
2551 | /* we found an xattr, assume we've got an acl */ | |
2552 | if (found_key.type == BTRFS_XATTR_ITEM_KEY) | |
2553 | return 1; | |
2554 | ||
2555 | /* | |
2556 | * we found a key greater than an xattr key, there can't | |
2557 | * be any acls later on | |
2558 | */ | |
2559 | if (found_key.type > BTRFS_XATTR_ITEM_KEY) | |
2560 | return 0; | |
2561 | ||
2562 | slot++; | |
2563 | scanned++; | |
2564 | ||
2565 | /* | |
2566 | * it goes inode, inode backrefs, xattrs, extents, | |
2567 | * so if there are a ton of hard links to an inode there can | |
2568 | * be a lot of backrefs. Don't waste time searching too hard, | |
2569 | * this is just an optimization | |
2570 | */ | |
2571 | if (scanned >= 8) | |
2572 | break; | |
2573 | } | |
2574 | /* we hit the end of the leaf before we found an xattr or | |
2575 | * something larger than an xattr. We have to assume the inode | |
2576 | * has acls | |
2577 | */ | |
2578 | return 1; | |
2579 | } | |
2580 | ||
2581 | /* | |
2582 | * read an inode from the btree into the in-memory inode | |
2583 | */ | |
2584 | static void btrfs_read_locked_inode(struct inode *inode) | |
2585 | { | |
2586 | struct btrfs_path *path; | |
2587 | struct extent_buffer *leaf; | |
2588 | struct btrfs_inode_item *inode_item; | |
2589 | struct btrfs_timespec *tspec; | |
2590 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
2591 | struct btrfs_key location; | |
2592 | int maybe_acls; | |
2593 | u32 rdev; | |
2594 | int ret; | |
2595 | bool filled = false; | |
2596 | ||
2597 | ret = btrfs_fill_inode(inode, &rdev); | |
2598 | if (!ret) | |
2599 | filled = true; | |
2600 | ||
2601 | path = btrfs_alloc_path(); | |
2602 | if (!path) | |
2603 | goto make_bad; | |
2604 | ||
2605 | path->leave_spinning = 1; | |
2606 | memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); | |
2607 | ||
2608 | ret = btrfs_lookup_inode(NULL, root, path, &location, 0); | |
2609 | if (ret) | |
2610 | goto make_bad; | |
2611 | ||
2612 | leaf = path->nodes[0]; | |
2613 | ||
2614 | if (filled) | |
2615 | goto cache_acl; | |
2616 | ||
2617 | inode_item = btrfs_item_ptr(leaf, path->slots[0], | |
2618 | struct btrfs_inode_item); | |
2619 | inode->i_mode = btrfs_inode_mode(leaf, inode_item); | |
2620 | set_nlink(inode, btrfs_inode_nlink(leaf, inode_item)); | |
2621 | i_uid_write(inode, btrfs_inode_uid(leaf, inode_item)); | |
2622 | i_gid_write(inode, btrfs_inode_gid(leaf, inode_item)); | |
2623 | btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item)); | |
2624 | ||
2625 | tspec = btrfs_inode_atime(inode_item); | |
2626 | inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec); | |
2627 | inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); | |
2628 | ||
2629 | tspec = btrfs_inode_mtime(inode_item); | |
2630 | inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec); | |
2631 | inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); | |
2632 | ||
2633 | tspec = btrfs_inode_ctime(inode_item); | |
2634 | inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec); | |
2635 | inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); | |
2636 | ||
2637 | inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); | |
2638 | BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); | |
2639 | BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); | |
2640 | ||
2641 | /* | |
2642 | * If we were modified in the current generation and evicted from memory | |
2643 | * and then re-read we need to do a full sync since we don't have any | |
2644 | * idea about which extents were modified before we were evicted from | |
2645 | * cache. | |
2646 | */ | |
2647 | if (BTRFS_I(inode)->last_trans == root->fs_info->generation) | |
2648 | set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, | |
2649 | &BTRFS_I(inode)->runtime_flags); | |
2650 | ||
2651 | inode->i_version = btrfs_inode_sequence(leaf, inode_item); | |
2652 | inode->i_generation = BTRFS_I(inode)->generation; | |
2653 | inode->i_rdev = 0; | |
2654 | rdev = btrfs_inode_rdev(leaf, inode_item); | |
2655 | ||
2656 | BTRFS_I(inode)->index_cnt = (u64)-1; | |
2657 | BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); | |
2658 | cache_acl: | |
2659 | /* | |
2660 | * try to precache a NULL acl entry for files that don't have | |
2661 | * any xattrs or acls | |
2662 | */ | |
2663 | maybe_acls = acls_after_inode_item(leaf, path->slots[0], | |
2664 | btrfs_ino(inode)); | |
2665 | if (!maybe_acls) | |
2666 | cache_no_acl(inode); | |
2667 | ||
2668 | btrfs_free_path(path); | |
2669 | ||
2670 | switch (inode->i_mode & S_IFMT) { | |
2671 | case S_IFREG: | |
2672 | inode->i_mapping->a_ops = &btrfs_aops; | |
2673 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; | |
2674 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; | |
2675 | inode->i_fop = &btrfs_file_operations; | |
2676 | inode->i_op = &btrfs_file_inode_operations; | |
2677 | break; | |
2678 | case S_IFDIR: | |
2679 | inode->i_fop = &btrfs_dir_file_operations; | |
2680 | if (root == root->fs_info->tree_root) | |
2681 | inode->i_op = &btrfs_dir_ro_inode_operations; | |
2682 | else | |
2683 | inode->i_op = &btrfs_dir_inode_operations; | |
2684 | break; | |
2685 | case S_IFLNK: | |
2686 | inode->i_op = &btrfs_symlink_inode_operations; | |
2687 | inode->i_mapping->a_ops = &btrfs_symlink_aops; | |
2688 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; | |
2689 | break; | |
2690 | default: | |
2691 | inode->i_op = &btrfs_special_inode_operations; | |
2692 | init_special_inode(inode, inode->i_mode, rdev); | |
2693 | break; | |
2694 | } | |
2695 | ||
2696 | btrfs_update_iflags(inode); | |
2697 | return; | |
2698 | ||
2699 | make_bad: | |
2700 | btrfs_free_path(path); | |
2701 | make_bad_inode(inode); | |
2702 | } | |
2703 | ||
2704 | /* | |
2705 | * given a leaf and an inode, copy the inode fields into the leaf | |
2706 | */ | |
2707 | static void fill_inode_item(struct btrfs_trans_handle *trans, | |
2708 | struct extent_buffer *leaf, | |
2709 | struct btrfs_inode_item *item, | |
2710 | struct inode *inode) | |
2711 | { | |
2712 | btrfs_set_inode_uid(leaf, item, i_uid_read(inode)); | |
2713 | btrfs_set_inode_gid(leaf, item, i_gid_read(inode)); | |
2714 | btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size); | |
2715 | btrfs_set_inode_mode(leaf, item, inode->i_mode); | |
2716 | btrfs_set_inode_nlink(leaf, item, inode->i_nlink); | |
2717 | ||
2718 | btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item), | |
2719 | inode->i_atime.tv_sec); | |
2720 | btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item), | |
2721 | inode->i_atime.tv_nsec); | |
2722 | ||
2723 | btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item), | |
2724 | inode->i_mtime.tv_sec); | |
2725 | btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item), | |
2726 | inode->i_mtime.tv_nsec); | |
2727 | ||
2728 | btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item), | |
2729 | inode->i_ctime.tv_sec); | |
2730 | btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item), | |
2731 | inode->i_ctime.tv_nsec); | |
2732 | ||
2733 | btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode)); | |
2734 | btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation); | |
2735 | btrfs_set_inode_sequence(leaf, item, inode->i_version); | |
2736 | btrfs_set_inode_transid(leaf, item, trans->transid); | |
2737 | btrfs_set_inode_rdev(leaf, item, inode->i_rdev); | |
2738 | btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags); | |
2739 | btrfs_set_inode_block_group(leaf, item, 0); | |
2740 | } | |
2741 | ||
2742 | /* | |
2743 | * copy everything in the in-memory inode into the btree. | |
2744 | */ | |
2745 | static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, | |
2746 | struct btrfs_root *root, struct inode *inode) | |
2747 | { | |
2748 | struct btrfs_inode_item *inode_item; | |
2749 | struct btrfs_path *path; | |
2750 | struct extent_buffer *leaf; | |
2751 | int ret; | |
2752 | ||
2753 | path = btrfs_alloc_path(); | |
2754 | if (!path) | |
2755 | return -ENOMEM; | |
2756 | ||
2757 | path->leave_spinning = 1; | |
2758 | ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location, | |
2759 | 1); | |
2760 | if (ret) { | |
2761 | if (ret > 0) | |
2762 | ret = -ENOENT; | |
2763 | goto failed; | |
2764 | } | |
2765 | ||
2766 | btrfs_unlock_up_safe(path, 1); | |
2767 | leaf = path->nodes[0]; | |
2768 | inode_item = btrfs_item_ptr(leaf, path->slots[0], | |
2769 | struct btrfs_inode_item); | |
2770 | ||
2771 | fill_inode_item(trans, leaf, inode_item, inode); | |
2772 | btrfs_mark_buffer_dirty(leaf); | |
2773 | btrfs_set_inode_last_trans(trans, inode); | |
2774 | ret = 0; | |
2775 | failed: | |
2776 | btrfs_free_path(path); | |
2777 | return ret; | |
2778 | } | |
2779 | ||
2780 | /* | |
2781 | * copy everything in the in-memory inode into the btree. | |
2782 | */ | |
2783 | noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, | |
2784 | struct btrfs_root *root, struct inode *inode) | |
2785 | { | |
2786 | int ret; | |
2787 | ||
2788 | /* | |
2789 | * If the inode is a free space inode, we can deadlock during commit | |
2790 | * if we put it into the delayed code. | |
2791 | * | |
2792 | * The data relocation inode should also be directly updated | |
2793 | * without delay | |
2794 | */ | |
2795 | if (!btrfs_is_free_space_inode(inode) | |
2796 | && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) { | |
2797 | btrfs_update_root_times(trans, root); | |
2798 | ||
2799 | ret = btrfs_delayed_update_inode(trans, root, inode); | |
2800 | if (!ret) | |
2801 | btrfs_set_inode_last_trans(trans, inode); | |
2802 | return ret; | |
2803 | } | |
2804 | ||
2805 | return btrfs_update_inode_item(trans, root, inode); | |
2806 | } | |
2807 | ||
2808 | noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, | |
2809 | struct btrfs_root *root, | |
2810 | struct inode *inode) | |
2811 | { | |
2812 | int ret; | |
2813 | ||
2814 | ret = btrfs_update_inode(trans, root, inode); | |
2815 | if (ret == -ENOSPC) | |
2816 | return btrfs_update_inode_item(trans, root, inode); | |
2817 | return ret; | |
2818 | } | |
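
/*
 * btrfs_update_inode prefers the delayed-inode path, which batches the
 * inode item update, but the free space cache inode (which could
 * deadlock during commit) and the data relocation inode are updated
 * directly.  btrfs_update_inode_fallback is for callers that must not
 * fail on -ENOSPC from the delayed path; it retries by writing the
 * inode item in place.
 */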
2819 | ||
2820 | /* | |
2821 | * unlink helper that gets used here in inode.c and in the tree logging | |
2822 | * recovery code. It removes a link in a directory with a given name, and | |
2823 | * also drops the back refs in the inode to the directory | |
2824 | */ | |
2825 | static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, | |
2826 | struct btrfs_root *root, | |
2827 | struct inode *dir, struct inode *inode, | |
2828 | const char *name, int name_len) | |
2829 | { | |
2830 | struct btrfs_path *path; | |
2831 | int ret = 0; | |
2832 | struct extent_buffer *leaf; | |
2833 | struct btrfs_dir_item *di; | |
2834 | struct btrfs_key key; | |
2835 | u64 index; | |
2836 | u64 ino = btrfs_ino(inode); | |
2837 | u64 dir_ino = btrfs_ino(dir); | |
2838 | ||
2839 | path = btrfs_alloc_path(); | |
2840 | if (!path) { | |
2841 | ret = -ENOMEM; | |
2842 | goto out; | |
2843 | } | |
2844 | ||
2845 | path->leave_spinning = 1; | |
2846 | di = btrfs_lookup_dir_item(trans, root, path, dir_ino, | |
2847 | name, name_len, -1); | |
2848 | if (IS_ERR(di)) { | |
2849 | ret = PTR_ERR(di); | |
2850 | goto err; | |
2851 | } | |
2852 | if (!di) { | |
2853 | ret = -ENOENT; | |
2854 | goto err; | |
2855 | } | |
2856 | leaf = path->nodes[0]; | |
2857 | btrfs_dir_item_key_to_cpu(leaf, di, &key); | |
2858 | ret = btrfs_delete_one_dir_name(trans, root, path, di); | |
2859 | if (ret) | |
2860 | goto err; | |
2861 | btrfs_release_path(path); | |
2862 | ||
2863 | ret = btrfs_del_inode_ref(trans, root, name, name_len, ino, | |
2864 | dir_ino, &index); | |
2865 | if (ret) { | |
2866 | printk(KERN_INFO "btrfs failed to delete reference to %.*s, " | |
2867 | "inode %llu parent %llu\n", name_len, name, | |
2868 | (unsigned long long)ino, (unsigned long long)dir_ino); | |
2869 | btrfs_abort_transaction(trans, root, ret); | |
2870 | goto err; | |
2871 | } | |
2872 | ||
2873 | ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); | |
2874 | if (ret) { | |
2875 | btrfs_abort_transaction(trans, root, ret); | |
2876 | goto err; | |
2877 | } | |
2878 | ||
2879 | ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, | |
2880 | inode, dir_ino); | |
2881 | if (ret != 0 && ret != -ENOENT) { | |
2882 | btrfs_abort_transaction(trans, root, ret); | |
2883 | goto err; | |
2884 | } | |
2885 | ||
2886 | ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, | |
2887 | dir, index); | |
2888 | if (ret == -ENOENT) | |
2889 | ret = 0; | |
2890 | err: | |
2891 | btrfs_free_path(path); | |
2892 | if (ret) | |
2893 | goto out; | |
2894 | ||
2895 | btrfs_i_size_write(dir, dir->i_size - name_len * 2); | |
2896 | inode_inc_iversion(inode); | |
2897 | inode_inc_iversion(dir); | |
2898 | inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME; | |
2899 | ret = btrfs_update_inode(trans, root, dir); | |
2900 | out: | |
2901 | return ret; | |
2902 | } | |
2903 | ||
2904 | int btrfs_unlink_inode(struct btrfs_trans_handle *trans, | |
2905 | struct btrfs_root *root, | |
2906 | struct inode *dir, struct inode *inode, | |
2907 | const char *name, int name_len) | |
2908 | { | |
2909 | int ret; | |
2910 | ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len); | |
2911 | if (!ret) { | |
2912 | btrfs_drop_nlink(inode); | |
2913 | ret = btrfs_update_inode(trans, root, inode); | |
2914 | } | |
2915 | return ret; | |
2916 | } | |
2917 | ||
2918 | ||
2919 | /* helper to check if there is any shared block in the path */ | |
2920 | static int check_path_shared(struct btrfs_root *root, | |
2921 | struct btrfs_path *path) | |
2922 | { | |
2923 | struct extent_buffer *eb; | |
2924 | int level; | |
2925 | u64 refs = 1; | |
2926 | ||
2927 | for (level = 0; level < BTRFS_MAX_LEVEL; level++) { | |
2928 | int ret; | |
2929 | ||
2930 | if (!path->nodes[level]) | |
2931 | break; | |
2932 | eb = path->nodes[level]; | |
2933 | if (!btrfs_block_can_be_shared(root, eb)) | |
2934 | continue; | |
2935 | ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len, | |
2936 | &refs, NULL); | |
2937 | if (refs > 1) | |
2938 | return 1; | |
2939 | } | |
2940 | return 0; | |
2941 | } | |
2942 | ||
2943 | /* | |
2944 | * helper to start transaction for unlink and rmdir. | |
2945 | * | |
2946 | * unlink and rmdir are special in btrfs, they do not always free space. | |
2947 | * so in the enospc case, we should make sure they will free space before | |
2948 | * allowing them to use the global metadata reservation. | |
2949 | */ | |
2950 | static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, | |
2951 | struct dentry *dentry) | |
2952 | { | |
2953 | struct btrfs_trans_handle *trans; | |
2954 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
2955 | struct btrfs_path *path; | |
2956 | struct btrfs_dir_item *di; | |
2957 | struct inode *inode = dentry->d_inode; | |
2958 | u64 index; | |
2959 | int check_link = 1; | |
2960 | int err = -ENOSPC; | |
2961 | int ret; | |
2962 | u64 ino = btrfs_ino(inode); | |
2963 | u64 dir_ino = btrfs_ino(dir); | |
2964 | ||
2965 | /* | |
2966 | * 1 for the possible orphan item | |
2967 | * 1 for the dir item | |
2968 | * 1 for the dir index | |
2969 | * 1 for the inode ref | |
2970 | * 1 for the inode ref in the tree log | |
2971 | * 2 for the dir entries in the log | |
2972 | * 1 for the inode | |
2973 | */ | |
2974 | trans = btrfs_start_transaction(root, 8); | |
2975 | if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) | |
2976 | return trans; | |
2977 | ||
2978 | if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) | |
2979 | return ERR_PTR(-ENOSPC); | |
2980 | ||
2981 | /* check if someone else holds a reference */ | |
2982 | if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1) | |
2983 | return ERR_PTR(-ENOSPC); | |
2984 | ||
2985 | if (atomic_read(&inode->i_count) > 2) | |
2986 | return ERR_PTR(-ENOSPC); | |
2987 | ||
2988 | if (xchg(&root->fs_info->enospc_unlink, 1)) | |
2989 | return ERR_PTR(-ENOSPC); | |
2990 | ||
2991 | path = btrfs_alloc_path(); | |
2992 | if (!path) { | |
2993 | root->fs_info->enospc_unlink = 0; | |
2994 | return ERR_PTR(-ENOMEM); | |
2995 | } | |
2996 | ||
2997 | /* 1 for the orphan item */ | |
2998 | trans = btrfs_start_transaction(root, 1); | |
2999 | if (IS_ERR(trans)) { | |
3000 | btrfs_free_path(path); | |
3001 | root->fs_info->enospc_unlink = 0; | |
3002 | return trans; | |
3003 | } | |
3004 | ||
3005 | path->skip_locking = 1; | |
3006 | path->search_commit_root = 1; | |
3007 | ||
3008 | ret = btrfs_lookup_inode(trans, root, path, | |
3009 | &BTRFS_I(dir)->location, 0); | |
3010 | if (ret < 0) { | |
3011 | err = ret; | |
3012 | goto out; | |
3013 | } | |
3014 | if (ret == 0) { | |
3015 | if (check_path_shared(root, path)) | |
3016 | goto out; | |
3017 | } else { | |
3018 | check_link = 0; | |
3019 | } | |
3020 | btrfs_release_path(path); | |
3021 | ||
3022 | ret = btrfs_lookup_inode(trans, root, path, | |
3023 | &BTRFS_I(inode)->location, 0); | |
3024 | if (ret < 0) { | |
3025 | err = ret; | |
3026 | goto out; | |
3027 | } | |
3028 | if (ret == 0) { | |
3029 | if (check_path_shared(root, path)) | |
3030 | goto out; | |
3031 | } else { | |
3032 | check_link = 0; | |
3033 | } | |
3034 | btrfs_release_path(path); | |
3035 | ||
3036 | if (ret == 0 && S_ISREG(inode->i_mode)) { | |
3037 | ret = btrfs_lookup_file_extent(trans, root, path, | |
3038 | ino, (u64)-1, 0); | |
3039 | if (ret < 0) { | |
3040 | err = ret; | |
3041 | goto out; | |
3042 | } | |
3043 | BUG_ON(ret == 0); /* Corruption */ | |
3044 | if (check_path_shared(root, path)) | |
3045 | goto out; | |
3046 | btrfs_release_path(path); | |
3047 | } | |
3048 | ||
3049 | if (!check_link) { | |
3050 | err = 0; | |
3051 | goto out; | |
3052 | } | |
3053 | ||
3054 | di = btrfs_lookup_dir_item(trans, root, path, dir_ino, | |
3055 | dentry->d_name.name, dentry->d_name.len, 0); | |
3056 | if (IS_ERR(di)) { | |
3057 | err = PTR_ERR(di); | |
3058 | goto out; | |
3059 | } | |
3060 | if (di) { | |
3061 | if (check_path_shared(root, path)) | |
3062 | goto out; | |
3063 | } else { | |
3064 | err = 0; | |
3065 | goto out; | |
3066 | } | |
3067 | btrfs_release_path(path); | |
3068 | ||
3069 | ret = btrfs_get_inode_ref_index(trans, root, path, dentry->d_name.name, | |
3070 | dentry->d_name.len, ino, dir_ino, 0, | |
3071 | &index); | |
3072 | if (ret) { | |
3073 | err = ret; | |
3074 | goto out; | |
3075 | } | |
3076 | ||
3077 | if (check_path_shared(root, path)) | |
3078 | goto out; | |
3079 | ||
3080 | btrfs_release_path(path); | |
3081 | ||
3082 | /* | |
3083 | * This is a commit root search; if we can look up the inode item and other | |
3084 | * related items in the commit root, it means the transaction of | |
3085 | * dir/file creation has been committed, and the dir index item that we | |
3086 | * delay to insert has also been inserted into the commit root. So | |
3087 | * we needn't worry about the delayed insertion of the dir index item | |
3088 | * here. | |
3089 | */ | |
3090 | di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index, | |
3091 | dentry->d_name.name, dentry->d_name.len, 0); | |
3092 | if (IS_ERR(di)) { | |
3093 | err = PTR_ERR(di); | |
3094 | goto out; | |
3095 | } | |
3096 | BUG_ON(ret == -ENOENT); | |
3097 | if (check_path_shared(root, path)) | |
3098 | goto out; | |
3099 | ||
3100 | err = 0; | |
3101 | out: | |
3102 | btrfs_free_path(path); | |
3103 | /* Migrate the orphan reservation over */ | |
3104 | if (!err) | |
3105 | err = btrfs_block_rsv_migrate(trans->block_rsv, | |
3106 | &root->fs_info->global_block_rsv, | |
3107 | trans->bytes_reserved); | |
3108 | ||
3109 | if (err) { | |
3110 | btrfs_end_transaction(trans, root); | |
3111 | root->fs_info->enospc_unlink = 0; | |
3112 | return ERR_PTR(err); | |
3113 | } | |
3114 | ||
3115 | trans->block_rsv = &root->fs_info->global_block_rsv; | |
3116 | return trans; | |
3117 | } | |
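
/*
 * What the helper above is doing: a normal unlink reserves 8 units up
 * front and only falls back to the slow path on -ENOSPC.  The slow path
 * searches the commit root to check that none of the blocks holding the
 * dir item, dir index, inode item or inode ref are shared
 * (check_path_shared).  If nothing is shared, the unlink is expected to
 * free more space than it consumes, so it is allowed to use the global
 * metadata reservation; fs_info->enospc_unlink serializes this so only
 * one such unlink runs at a time.
 */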
3118 | ||
3119 | static void __unlink_end_trans(struct btrfs_trans_handle *trans, | |
3120 | struct btrfs_root *root) | |
3121 | { | |
3122 | if (trans->block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL) { | |
3123 | btrfs_block_rsv_release(root, trans->block_rsv, | |
3124 | trans->bytes_reserved); | |
3125 | trans->block_rsv = &root->fs_info->trans_block_rsv; | |
3126 | BUG_ON(!root->fs_info->enospc_unlink); | |
3127 | root->fs_info->enospc_unlink = 0; | |
3128 | } | |
3129 | btrfs_end_transaction(trans, root); | |
3130 | } | |
3131 | ||
3132 | static int btrfs_unlink(struct inode *dir, struct dentry *dentry) | |
3133 | { | |
3134 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
3135 | struct btrfs_trans_handle *trans; | |
3136 | struct inode *inode = dentry->d_inode; | |
3137 | int ret; | |
3138 | ||
3139 | trans = __unlink_start_trans(dir, dentry); | |
3140 | if (IS_ERR(trans)) | |
3141 | return PTR_ERR(trans); | |
3142 | ||
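	/*
	 * btrfs_record_unlink_dir() updates the tree-log bookkeeping
	 * (last_unlink_trans) for the directory and the victim inode so
	 * that a later fsync knows it cannot rely on an existing log tree.
	 */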
3143 | btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0); | |
3144 | ||
3145 | ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, | |
3146 | dentry->d_name.name, dentry->d_name.len); | |
3147 | if (ret) | |
3148 | goto out; | |
3149 | ||
3150 | if (inode->i_nlink == 0) { | |
3151 | ret = btrfs_orphan_add(trans, inode); | |
3152 | if (ret) | |
3153 | goto out; | |
3154 | } | |
3155 | ||
3156 | out: | |
3157 | __unlink_end_trans(trans, root); | |
3158 | btrfs_btree_balance_dirty(root); | |
3159 | return ret; | |
3160 | } | |
3161 | ||
3162 | int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, | |
3163 | struct btrfs_root *root, | |
3164 | struct inode *dir, u64 objectid, | |
3165 | const char *name, int name_len) | |
3166 | { | |
3167 | struct btrfs_path *path; | |
3168 | struct extent_buffer *leaf; | |
3169 | struct btrfs_dir_item *di; | |
3170 | struct btrfs_key key; | |
3171 | u64 index; | |
3172 | int ret; | |
3173 | u64 dir_ino = btrfs_ino(dir); | |
3174 | ||
3175 | path = btrfs_alloc_path(); | |
3176 | if (!path) | |
3177 | return -ENOMEM; | |
3178 | ||
3179 | di = btrfs_lookup_dir_item(trans, root, path, dir_ino, | |
3180 | name, name_len, -1); | |
3181 | if (IS_ERR_OR_NULL(di)) { | |
3182 | if (!di) | |
3183 | ret = -ENOENT; | |
3184 | else | |
3185 | ret = PTR_ERR(di); | |
3186 | goto out; | |
3187 | } | |
3188 | ||
3189 | leaf = path->nodes[0]; | |
3190 | btrfs_dir_item_key_to_cpu(leaf, di, &key); | |
3191 | WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); | |
3192 | ret = btrfs_delete_one_dir_name(trans, root, path, di); | |
3193 | if (ret) { | |
3194 | btrfs_abort_transaction(trans, root, ret); | |
3195 | goto out; | |
3196 | } | |
3197 | btrfs_release_path(path); | |
3198 | ||
3199 | ret = btrfs_del_root_ref(trans, root->fs_info->tree_root, | |
3200 | objectid, root->root_key.objectid, | |
3201 | dir_ino, &index, name, name_len); | |
3202 | if (ret < 0) { | |
3203 | if (ret != -ENOENT) { | |
3204 | btrfs_abort_transaction(trans, root, ret); | |
3205 | goto out; | |
3206 | } | |
3207 | di = btrfs_search_dir_index_item(root, path, dir_ino, | |
3208 | name, name_len); | |
3209 | if (IS_ERR_OR_NULL(di)) { | |
3210 | if (!di) | |
3211 | ret = -ENOENT; | |
3212 | else | |
3213 | ret = PTR_ERR(di); | |
3214 | btrfs_abort_transaction(trans, root, ret); | |
3215 | goto out; | |
3216 | } | |
3217 | ||
3218 | leaf = path->nodes[0]; | |
3219 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); | |
3220 | btrfs_release_path(path); | |
3221 | index = key.offset; | |
3222 | } | |
3223 | btrfs_release_path(path); | |
3224 | ||
3225 | ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); | |
3226 | if (ret) { | |
3227 | btrfs_abort_transaction(trans, root, ret); | |
3228 | goto out; | |
3229 | } | |
3230 | ||
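	/*
	 * Directory i_size accounts for each name twice: once for the dir
	 * item and once for the dir index item, hence name_len * 2 below.
	 */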
3231 | btrfs_i_size_write(dir, dir->i_size - name_len * 2); | |
3232 | inode_inc_iversion(dir); | |
3233 | dir->i_mtime = dir->i_ctime = CURRENT_TIME; | |
3234 | ret = btrfs_update_inode_fallback(trans, root, dir); | |
3235 | if (ret) | |
3236 | btrfs_abort_transaction(trans, root, ret); | |
3237 | out: | |
3238 | btrfs_free_path(path); | |
3239 | return ret; | |
3240 | } | |
3241 | ||
3242 | static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) | |
3243 | { | |
3244 | struct inode *inode = dentry->d_inode; | |
3245 | int err = 0; | |
3246 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
3247 | struct btrfs_trans_handle *trans; | |
3248 | ||
3249 | if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) | |
3250 | return -ENOTEMPTY; | |
3251 | if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) | |
3252 | return -EPERM; | |
3253 | ||
3254 | trans = __unlink_start_trans(dir, dentry); | |
3255 | if (IS_ERR(trans)) | |
3256 | return PTR_ERR(trans); | |
3257 | ||
3258 | if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { | |
3259 | err = btrfs_unlink_subvol(trans, root, dir, | |
3260 | BTRFS_I(inode)->location.objectid, | |
3261 | dentry->d_name.name, | |
3262 | dentry->d_name.len); | |
3263 | goto out; | |
3264 | } | |
3265 | ||
3266 | err = btrfs_orphan_add(trans, inode); | |
3267 | if (err) | |
3268 | goto out; | |
3269 | ||
3270 | /* now the directory is empty */ | |
3271 | err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, | |
3272 | dentry->d_name.name, dentry->d_name.len); | |
3273 | if (!err) | |
3274 | btrfs_i_size_write(inode, 0); | |
3275 | out: | |
3276 | __unlink_end_trans(trans, root); | |
3277 | btrfs_btree_balance_dirty(root); | |
3278 | ||
3279 | return err; | |
3280 | } | |
3281 | ||
3282 | /* | |
3283 | * This can truncate away extent items, csum items and directory items. | 
3284 | * It starts at a high offset and removes keys until it can't find | 
3285 | * any higher than new_size. | 
3286 | * | |
3287 | * csum items that cross the new i_size are truncated to the new size | |
3288 | * as well. | |
3289 | * | |
3290 | * min_type is the minimum key type to truncate down to. If set to 0, this | |
3291 | * will kill all the items on this inode, including the INODE_ITEM_KEY. | |
3292 | */ | |
3293 | int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, | |
3294 | struct btrfs_root *root, | |
3295 | struct inode *inode, | |
3296 | u64 new_size, u32 min_type) | |
3297 | { | |
3298 | struct btrfs_path *path; | |
3299 | struct extent_buffer *leaf; | |
3300 | struct btrfs_file_extent_item *fi; | |
3301 | struct btrfs_key key; | |
3302 | struct btrfs_key found_key; | |
3303 | u64 extent_start = 0; | |
3304 | u64 extent_num_bytes = 0; | |
3305 | u64 extent_offset = 0; | |
3306 | u64 item_end = 0; | |
3307 | u64 mask = root->sectorsize - 1; | |
3308 | u32 found_type = (u8)-1; | |
3309 | int found_extent; | |
3310 | int del_item; | |
3311 | int pending_del_nr = 0; | |
3312 | int pending_del_slot = 0; | |
3313 | int extent_type = -1; | |
3314 | int ret; | |
3315 | int err = 0; | |
3316 | u64 ino = btrfs_ino(inode); | |
3317 | ||
3318 | BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); | |
3319 | ||
3320 | path = btrfs_alloc_path(); | |
3321 | if (!path) | |
3322 | return -ENOMEM; | |
3323 | path->reada = -1; | |
3324 | ||
3325 | /* | |
3326 | * We want to drop from the next block forward in case this new size is | |
3327 | * not block aligned since we will be keeping the last block of the | |
3328 | * extent just the way it is. | |
3329 | */ | |
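	/*
	 * Illustrative numbers: with a 4096 byte sectorsize the mask is
	 * 0xfff, so a new_size of 10000 rounds up to
	 * (10000 + 0xfff) & ~0xfff == 12288, and the cached extent mappings
	 * from 12288 onwards are dropped.
	 */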
3330 | if (root->ref_cows || root == root->fs_info->tree_root) | |
3331 | btrfs_drop_extent_cache(inode, (new_size + mask) & (~mask), (u64)-1, 0); | |
3332 | ||
3333 | /* | |
3334 | * This function is also used to drop the items in the log tree before | 
3335 | * we relog the inode, so if root != BTRFS_I(inode)->root, it means we | 
3336 | * are dropping the logged items. So we shouldn't kill the delayed | 
3337 | * items. | 
3338 | */ | |
3339 | if (min_type == 0 && root == BTRFS_I(inode)->root) | |
3340 | btrfs_kill_delayed_inode_items(inode); | |
3341 | ||
3342 | key.objectid = ino; | |
3343 | key.offset = (u64)-1; | |
3344 | key.type = (u8)-1; | |
3345 | ||
3346 | search_again: | |
3347 | path->leave_spinning = 1; | |
3348 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); | |
3349 | if (ret < 0) { | |
3350 | err = ret; | |
3351 | goto out; | |
3352 | } | |
3353 | ||
3354 | if (ret > 0) { | |
3355 | /* there are no items in the tree for us to truncate, we're | |
3356 | * done | |
3357 | */ | |
3358 | if (path->slots[0] == 0) | |
3359 | goto out; | |
3360 | path->slots[0]--; | |
3361 | } | |
3362 | ||
3363 | while (1) { | |
3364 | fi = NULL; | |
3365 | leaf = path->nodes[0]; | |
3366 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | |
3367 | found_type = btrfs_key_type(&found_key); | |
3368 | ||
3369 | if (found_key.objectid != ino) | |
3370 | break; | |
3371 | ||
3372 | if (found_type < min_type) | |
3373 | break; | |
3374 | ||
3375 | item_end = found_key.offset; | |
3376 | if (found_type == BTRFS_EXTENT_DATA_KEY) { | |
3377 | fi = btrfs_item_ptr(leaf, path->slots[0], | |
3378 | struct btrfs_file_extent_item); | |
3379 | extent_type = btrfs_file_extent_type(leaf, fi); | |
3380 | if (extent_type != BTRFS_FILE_EXTENT_INLINE) { | |
3381 | item_end += | |
3382 | btrfs_file_extent_num_bytes(leaf, fi); | |
3383 | } else { | 
3384 | item_end += btrfs_file_extent_inline_len(leaf, | |
3385 | fi); | |
3386 | } | |
3387 | item_end--; | |
3388 | } | |
3389 | if (found_type > min_type) { | |
3390 | del_item = 1; | |
3391 | } else { | |
3392 | if (item_end < new_size) | |
3393 | break; | |
3394 | if (found_key.offset >= new_size) | |
3395 | del_item = 1; | |
3396 | else | |
3397 | del_item = 0; | |
3398 | } | |
3399 | found_extent = 0; | |
3400 | /* FIXME, shrink the extent if the ref count is only 1 */ | |
3401 | if (found_type != BTRFS_EXTENT_DATA_KEY) | |
3402 | goto delete; | |
3403 | ||
3404 | if (extent_type != BTRFS_FILE_EXTENT_INLINE) { | |
3405 | u64 num_dec; | |
3406 | extent_start = btrfs_file_extent_disk_bytenr(leaf, fi); | |
3407 | if (!del_item) { | |
3408 | u64 orig_num_bytes = | |
3409 | btrfs_file_extent_num_bytes(leaf, fi); | |
3410 | extent_num_bytes = new_size - | |
3411 | found_key.offset + root->sectorsize - 1; | |
3412 | extent_num_bytes = extent_num_bytes & | |
3413 | ~((u64)root->sectorsize - 1); | |
3414 | btrfs_set_file_extent_num_bytes(leaf, fi, | |
3415 | extent_num_bytes); | |
3416 | num_dec = (orig_num_bytes - | |
3417 | extent_num_bytes); | |
3418 | if (root->ref_cows && extent_start != 0) | |
3419 | inode_sub_bytes(inode, num_dec); | |
3420 | btrfs_mark_buffer_dirty(leaf); | |
3421 | } else { | |
3422 | extent_num_bytes = | |
3423 | btrfs_file_extent_disk_num_bytes(leaf, | |
3424 | fi); | |
3425 | extent_offset = found_key.offset - | |
3426 | btrfs_file_extent_offset(leaf, fi); | |
3427 | ||
3428 | /* FIXME blocksize != 4096 */ | |
3429 | num_dec = btrfs_file_extent_num_bytes(leaf, fi); | |
3430 | if (extent_start != 0) { | |
3431 | found_extent = 1; | |
3432 | if (root->ref_cows) | |
3433 | inode_sub_bytes(inode, num_dec); | |
3434 | } | |
3435 | } | |
3436 | } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { | |
3437 | /* | |
3438 | * we can't truncate inline items that have had | |
3439 | * special encodings | |
3440 | */ | |
3441 | if (!del_item && | |
3442 | btrfs_file_extent_compression(leaf, fi) == 0 && | |
3443 | btrfs_file_extent_encryption(leaf, fi) == 0 && | |
3444 | btrfs_file_extent_other_encoding(leaf, fi) == 0) { | |
3445 | u32 size = new_size - found_key.offset; | |
3446 | ||
3447 | if (root->ref_cows) { | |
3448 | inode_sub_bytes(inode, item_end + 1 - | |
3449 | new_size); | |
3450 | } | |
3451 | size = | |
3452 | btrfs_file_extent_calc_inline_size(size); | |
3453 | btrfs_truncate_item(trans, root, path, | |
3454 | size, 1); | |
3455 | } else if (root->ref_cows) { | |
3456 | inode_sub_bytes(inode, item_end + 1 - | |
3457 | found_key.offset); | |
3458 | } | |
3459 | } | |
3460 | delete: | |
3461 | if (del_item) { | |
3462 | if (!pending_del_nr) { | |
3463 | /* no pending yet, add ourselves */ | |
3464 | pending_del_slot = path->slots[0]; | |
3465 | pending_del_nr = 1; | |
3466 | } else if (pending_del_nr && | |
3467 | path->slots[0] + 1 == pending_del_slot) { | |
3468 | /* hop on the pending chunk */ | |
3469 | pending_del_nr++; | |
3470 | pending_del_slot = path->slots[0]; | |
3471 | } else { | |
3472 | BUG(); | |
3473 | } | |
3474 | } else { | |
3475 | break; | |
3476 | } | |
3477 | if (found_extent && (root->ref_cows || | |
3478 | root == root->fs_info->tree_root)) { | |
3479 | btrfs_set_path_blocking(path); | |
3480 | ret = btrfs_free_extent(trans, root, extent_start, | |
3481 | extent_num_bytes, 0, | |
3482 | btrfs_header_owner(leaf), | |
3483 | ino, extent_offset, 0); | |
3484 | BUG_ON(ret); | |
3485 | } | |
3486 | ||
3487 | if (found_type == BTRFS_INODE_ITEM_KEY) | |
3488 | break; | |
3489 | ||
3490 | if (path->slots[0] == 0 || | |
3491 | path->slots[0] != pending_del_slot) { | |
3492 | if (pending_del_nr) { | |
3493 | ret = btrfs_del_items(trans, root, path, | |
3494 | pending_del_slot, | |
3495 | pending_del_nr); | |
3496 | if (ret) { | |
3497 | btrfs_abort_transaction(trans, | |
3498 | root, ret); | |
3499 | goto error; | |
3500 | } | |
3501 | pending_del_nr = 0; | |
3502 | } | |
3503 | btrfs_release_path(path); | |
3504 | goto search_again; | |
3505 | } else { | |
3506 | path->slots[0]--; | |
3507 | } | |
3508 | } | |
3509 | out: | |
3510 | if (pending_del_nr) { | |
3511 | ret = btrfs_del_items(trans, root, path, pending_del_slot, | |
3512 | pending_del_nr); | |
3513 | if (ret) | |
3514 | btrfs_abort_transaction(trans, root, ret); | |
3515 | } | |
3516 | error: | |
3517 | btrfs_free_path(path); | |
3518 | return err; | |
3519 | } | |
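/*
 * Illustrative sketch, not part of the original file: the two common ways
 * callers drive btrfs_truncate_inode_items().  The wrapper below is
 * hypothetical; it only reuses functions whose definitions appear in this
 * file.
 */
static int example_truncate_usage(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct inode *inode, u64 new_size,
				  int delete_everything)
{
	if (delete_everything) {
		/*
		 * min_type == 0 removes every item the inode owns, including
		 * the INODE_ITEM itself; btrfs_evict_inode() below uses this
		 * to empty a deleted inode.
		 */
		return btrfs_truncate_inode_items(trans, root, inode, 0, 0);
	}

	/*
	 * min_type == BTRFS_EXTENT_DATA_KEY drops only file extents (and
	 * items with higher key types) beyond new_size; the inode item and
	 * xattrs are left alone.
	 */
	return btrfs_truncate_inode_items(trans, root, inode, new_size,
					  BTRFS_EXTENT_DATA_KEY);
}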
3520 | ||
3521 | /* | |
3522 | * btrfs_truncate_page - read, zero a chunk and write a page | |
3523 | * @inode - inode that we're zeroing | |
3524 | * @from - the offset to start zeroing | |
3525 | * @len - the length to zero, or 0 to zero everything from the offset to the | 
3526 | *  end of the page | 
3527 | * @front - zero up to the offset instead of from the offset on | |
3528 | * | |
3529 | * This will find the page for the "from" offset, COW it and zero the part | 
3530 | * we want to zero.  This is used with truncate and hole punching. | 
3531 | */ | |
3532 | int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len, | |
3533 | int front) | |
3534 | { | |
3535 | struct address_space *mapping = inode->i_mapping; | |
3536 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
3537 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | |
3538 | struct btrfs_ordered_extent *ordered; | |
3539 | struct extent_state *cached_state = NULL; | |
3540 | char *kaddr; | |
3541 | u32 blocksize = root->sectorsize; | |
3542 | pgoff_t index = from >> PAGE_CACHE_SHIFT; | |
3543 | unsigned offset = from & (PAGE_CACHE_SIZE-1); | |
3544 | struct page *page; | |
3545 | gfp_t mask = btrfs_alloc_write_mask(mapping); | |
3546 | int ret = 0; | |
3547 | u64 page_start; | |
3548 | u64 page_end; | |
3549 | ||
3550 | if ((offset & (blocksize - 1)) == 0 && | |
3551 | (!len || ((len & (blocksize - 1)) == 0))) | |
3552 | goto out; | |
3553 | ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); | |
3554 | if (ret) | |
3555 | goto out; | |
3556 | ||
3557 | again: | |
3558 | page = find_or_create_page(mapping, index, mask); | |
3559 | if (!page) { | |
3560 | btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); | |
3561 | ret = -ENOMEM; | |
3562 | goto out; | |
3563 | } | |
3564 | ||
3565 | page_start = page_offset(page); | |
3566 | page_end = page_start + PAGE_CACHE_SIZE - 1; | |
3567 | ||
3568 | if (!PageUptodate(page)) { | |
3569 | ret = btrfs_readpage(NULL, page); | |
3570 | lock_page(page); | |
3571 | if (page->mapping != mapping) { | |
3572 | unlock_page(page); | |
3573 | page_cache_release(page); | |
3574 | goto again; | |
3575 | } | |
3576 | if (!PageUptodate(page)) { | |
3577 | ret = -EIO; | |
3578 | goto out_unlock; | |
3579 | } | |
3580 | } | |
3581 | wait_on_page_writeback(page); | |
3582 | ||
3583 | lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state); | |
3584 | set_page_extent_mapped(page); | |
3585 | ||
3586 | ordered = btrfs_lookup_ordered_extent(inode, page_start); | |
3587 | if (ordered) { | |
3588 | unlock_extent_cached(io_tree, page_start, page_end, | |
3589 | &cached_state, GFP_NOFS); | |
3590 | unlock_page(page); | |
3591 | page_cache_release(page); | |
3592 | btrfs_start_ordered_extent(inode, ordered, 1); | |
3593 | btrfs_put_ordered_extent(ordered); | |
3594 | goto again; | |
3595 | } | |
3596 | ||
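	/*
	 * Clear any dirty/delalloc accounting already attached to this
	 * range so the btrfs_set_extent_delalloc() call below does not
	 * account for the same page twice.
	 */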
3597 | clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, | |
3598 | EXTENT_DIRTY | EXTENT_DELALLOC | | |
3599 | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, | |
3600 | 0, 0, &cached_state, GFP_NOFS); | |
3601 | ||
3602 | ret = btrfs_set_extent_delalloc(inode, page_start, page_end, | |
3603 | &cached_state); | |
3604 | if (ret) { | |
3605 | unlock_extent_cached(io_tree, page_start, page_end, | |
3606 | &cached_state, GFP_NOFS); | |
3607 | goto out_unlock; | |
3608 | } | |
3609 | ||
3610 | if (offset != PAGE_CACHE_SIZE) { | |
3611 | if (!len) | |
3612 | len = PAGE_CACHE_SIZE - offset; | |
3613 | kaddr = kmap(page); | |
3614 | if (front) | |
3615 | memset(kaddr, 0, offset); | |
3616 | else | |
3617 | memset(kaddr + offset, 0, len); | |
3618 | flush_dcache_page(page); | |
3619 | kunmap(page); | |
3620 | } | |
3621 | ClearPageChecked(page); | |
3622 | set_page_dirty(page); | |
3623 | unlock_extent_cached(io_tree, page_start, page_end, &cached_state, | |
3624 | GFP_NOFS); | |
3625 | ||
3626 | out_unlock: | |
3627 | if (ret) | |
3628 | btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); | |
3629 | unlock_page(page); | |
3630 | page_cache_release(page); | |
3631 | out: | |
3632 | return ret; | |
3633 | } | |
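/*
 * Illustrative sketch, not part of the original file: how a caller might use
 * btrfs_truncate_page() when truncating down or punching a hole.  The wrapper
 * and its offsets are hypothetical; only the btrfs_truncate_page() calls
 * follow the helper defined above.
 */
static int example_zero_partial_blocks(struct inode *inode, loff_t new_size,
				       loff_t offset, loff_t len)
{
	int ret;

	/* truncate down: zero from the new EOF to the end of its page */
	ret = btrfs_truncate_page(inode, new_size, 0, 0);
	if (ret)
		return ret;

	/* hole punch: zero the tail of the page at the start of the hole... */
	ret = btrfs_truncate_page(inode, offset, 0, 0);
	if (ret)
		return ret;

	/* ...and, with front == 1, the head of the page at the end of it */
	return btrfs_truncate_page(inode, offset + len, 0, 1);
}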
3634 | ||
3635 | /* | |
3636 | * This function puts in dummy file extents for the area we're creating a hole | |
3637 | * for. So if we are truncating this file to a larger size we need to insert | |
3638 | * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE | 
3639 | * for the range between oldsize and size. | 
3640 | */ | |
3641 | int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) | |
3642 | { | |
3643 | struct btrfs_trans_handle *trans; | |
3644 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
3645 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | |
3646 | struct extent_map *em = NULL; | |
3647 | struct extent_state *cached_state = NULL; | |
3648 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | |
3649 | u64 mask = root->sectorsize - 1; | |
3650 | u64 hole_start = (oldsize + mask) & ~mask; | |
3651 | u64 block_end = (size + mask) & ~mask; | |
3652 | u64 last_byte; | |
3653 | u64 cur_offset; | |
3654 | u64 hole_size; | |
3655 | int err = 0; | |
3656 | ||
3657 | if (size <= hole_start) | |
3658 | return 0; | |
3659 | ||
3660 | while (1) { | |
3661 | struct btrfs_ordered_extent *ordered; | |
3662 | btrfs_wait_ordered_range(inode, hole_start, | |
3663 | block_end - hole_start); | |
3664 | lock_extent_bits(io_tree, hole_start, block_end - 1, 0, | |
3665 | &cached_state); | |
3666 | ordered = btrfs_lookup_ordered_extent(inode, hole_start); | |
3667 | if (!ordered) | |
3668 | break; | |
3669 | unlock_extent_cached(io_tree, hole_start, block_end - 1, | |
3670 | &cached_state, GFP_NOFS); | |
3671 | btrfs_put_ordered_extent(ordered); | |
3672 | } | |
3673 | ||
3674 | cur_offset = hole_start; | |
3675 | while (1) { | |
3676 | em = btrfs_get_extent(inode, NULL, 0, cur_offset, | |
3677 | block_end - cur_offset, 0); | |
3678 | if (IS_ERR(em)) { | |
3679 | err = PTR_ERR(em); | |
3680 | break; | |
3681 | } | |
3682 | last_byte = min(extent_map_end(em), block_end); | |
3683 | last_byte = (last_byte + mask) & ~mask; | |
3684 | if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { | |
3685 | struct extent_map *hole_em; | |
3686 | hole_size = last_byte - cur_offset; | |
3687 | ||
3688 | trans = btrfs_start_transaction(root, 3); | |
3689 | if (IS_ERR(trans)) { | |
3690 | err = PTR_ERR(trans); | |
3691 | break; | |
3692 | } | |
3693 | ||
3694 | err = btrfs_drop_extents(trans, root, inode, | |
3695 | cur_offset, | |
3696 | cur_offset + hole_size, 1); | |
3697 | if (err) { | |
3698 | btrfs_abort_transaction(trans, root, err); | |
3699 | btrfs_end_transaction(trans, root); | |
3700 | break; | |
3701 | } | |
3702 | ||
3703 | err = btrfs_insert_file_extent(trans, root, | |
3704 | btrfs_ino(inode), cur_offset, 0, | |
3705 | 0, hole_size, 0, hole_size, | |
3706 | 0, 0, 0); | |
3707 | if (err) { | |
3708 | btrfs_abort_transaction(trans, root, err); | |
3709 | btrfs_end_transaction(trans, root); | |
3710 | break; | |
3711 | } | |
3712 | ||
3713 | btrfs_drop_extent_cache(inode, cur_offset, | |
3714 | cur_offset + hole_size - 1, 0); | |
3715 | hole_em = alloc_extent_map(); | |
3716 | if (!hole_em) { | |
3717 | set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, | |
3718 | &BTRFS_I(inode)->runtime_flags); | |
3719 | goto next; | |
3720 | } | |
3721 | hole_em->start = cur_offset; | |
3722 | hole_em->len = hole_size; | |
3723 | hole_em->orig_start = cur_offset; | |
3724 | ||
3725 | hole_em->block_start = EXTENT_MAP_HOLE; | |
3726 | hole_em->block_len = 0; | |
3727 | hole_em->orig_block_len = 0; | |
3728 | hole_em->bdev = root->fs_info->fs_devices->latest_bdev; | |
3729 | hole_em->compress_type = BTRFS_COMPRESS_NONE; | |
3730 | hole_em->generation = trans->transid; | |
3731 | ||
3732 | while (1) { | |
3733 | write_lock(&em_tree->lock); | |
3734 | err = add_extent_mapping(em_tree, hole_em); | |
3735 | if (!err) | |
3736 | list_move(&hole_em->list, | |
3737 | &em_tree->modified_extents); | |
3738 | write_unlock(&em_tree->lock); | |
3739 | if (err != -EEXIST) | |
3740 | break; | |
3741 | btrfs_drop_extent_cache(inode, cur_offset, | |
3742 | cur_offset + | |
3743 | hole_size - 1, 0); | |
3744 | } | |
3745 | free_extent_map(hole_em); | |
3746 | next: | |
3747 | btrfs_update_inode(trans, root, inode); | |
3748 | btrfs_end_transaction(trans, root); | |
3749 | } | |
3750 | free_extent_map(em); | |
3751 | em = NULL; | |
3752 | cur_offset = last_byte; | |
3753 | if (cur_offset >= block_end) | |
3754 | break; | |
3755 | } | |
3756 | ||
3757 | free_extent_map(em); | |
3758 | unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state, | |
3759 | GFP_NOFS); | |
3760 | return err; | |
3761 | } | |
3762 | ||
3763 | static int btrfs_setsize(struct inode *inode, loff_t newsize) | |
3764 | { | |
3765 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
3766 | struct btrfs_trans_handle *trans; | |
3767 | loff_t oldsize = i_size_read(inode); | |
3768 | int ret; | |
3769 | ||
3770 | if (newsize == oldsize) | |
3771 | return 0; | |
3772 | ||
3773 | if (newsize > oldsize) { | |
3774 | truncate_pagecache(inode, oldsize, newsize); | |
3775 | ret = btrfs_cont_expand(inode, oldsize, newsize); | |
3776 | if (ret) | |
3777 | return ret; | |
3778 | ||
3779 | trans = btrfs_start_transaction(root, 1); | |
3780 | if (IS_ERR(trans)) | |
3781 | return PTR_ERR(trans); | |
3782 | ||
3783 | i_size_write(inode, newsize); | |
3784 | btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL); | |
3785 | ret = btrfs_update_inode(trans, root, inode); | |
3786 | btrfs_end_transaction(trans, root); | |
3787 | } else { | |
3788 | ||
3789 | /* | |
3790 | * We're truncating a file that used to have good data down to | |
3791 | * zero. Make sure it gets into the ordered flush list so that | |
3792 | * any new writes get down to disk quickly. | |
3793 | */ | |
3794 | if (newsize == 0) | |
3795 | set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE, | |
3796 | &BTRFS_I(inode)->runtime_flags); | |
3797 | ||
3798 | /* | |
3799 | * 1 for the orphan item we're going to add | |
3800 | * 1 for the orphan item deletion. | |
3801 | */ | |
3802 | trans = btrfs_start_transaction(root, 2); | |
3803 | if (IS_ERR(trans)) | |
3804 | return PTR_ERR(trans); | |
3805 | ||
3806 | /* | |
3807 | * We need to do this in case we fail at _any_ point during the | |
3808 | * actual truncate. Once we do the truncate_setsize we could | |
3809 | * invalidate pages which forces any outstanding ordered io to | |
3810 | * be instantly completed which will give us extents that need | |
3811 | * to be truncated. If we fail to get an orphan inode down we | |
3812 | * could have left over extents that were never meant to live, | |
3813 | * so we need to guarantee from this point on that everything | 
3814 | * will be consistent. | |
3815 | */ | |
3816 | ret = btrfs_orphan_add(trans, inode); | |
3817 | btrfs_end_transaction(trans, root); | |
3818 | if (ret) | |
3819 | return ret; | |
3820 | ||
3821 | /* we don't support swapfiles, so vmtruncate shouldn't fail */ | |
3822 | truncate_setsize(inode, newsize); | |
3823 | ret = btrfs_truncate(inode); | |
3824 | if (ret && inode->i_nlink) | |
3825 | btrfs_orphan_del(NULL, inode); | |
3826 | } | |
3827 | ||
3828 | return ret; | |
3829 | } | |
3830 | ||
3831 | static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) | |
3832 | { | |
3833 | struct inode *inode = dentry->d_inode; | |
3834 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
3835 | int err; | |
3836 | ||
3837 | if (btrfs_root_readonly(root)) | |
3838 | return -EROFS; | |
3839 | ||
3840 | err = inode_change_ok(inode, attr); | |
3841 | if (err) | |
3842 | return err; | |
3843 | ||
3844 | if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { | |
3845 | err = btrfs_setsize(inode, attr->ia_size); | |
3846 | if (err) | |
3847 | return err; | |
3848 | } | |
3849 | ||
3850 | if (attr->ia_valid) { | |
3851 | setattr_copy(inode, attr); | |
3852 | inode_inc_iversion(inode); | |
3853 | err = btrfs_dirty_inode(inode); | |
3854 | ||
3855 | if (!err && attr->ia_valid & ATTR_MODE) | |
3856 | err = btrfs_acl_chmod(inode); | |
3857 | } | |
3858 | ||
3859 | return err; | |
3860 | } | |
3861 | ||
3862 | void btrfs_evict_inode(struct inode *inode) | |
3863 | { | |
3864 | struct btrfs_trans_handle *trans; | |
3865 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
3866 | struct btrfs_block_rsv *rsv, *global_rsv; | |
3867 | u64 min_size = btrfs_calc_trunc_metadata_size(root, 1); | |
3868 | int ret; | |
3869 | ||
3870 | trace_btrfs_inode_evict(inode); | |
3871 | ||
3872 | truncate_inode_pages(&inode->i_data, 0); | |
3873 | if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 || | |
3874 | btrfs_is_free_space_inode(inode))) | |
3875 | goto no_delete; | |
3876 | ||
3877 | if (is_bad_inode(inode)) { | |
3878 | btrfs_orphan_del(NULL, inode); | |
3879 | goto no_delete; | |
3880 | } | |
3881 | /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */ | |
3882 | btrfs_wait_ordered_range(inode, 0, (u64)-1); | |
3883 | ||
3884 | if (root->fs_info->log_root_recovering) { | |
3885 | BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, | |
3886 | &BTRFS_I(inode)->runtime_flags)); | |
3887 | goto no_delete; | |
3888 | } | |
3889 | ||
3890 | if (inode->i_nlink > 0) { | |
3891 | BUG_ON(btrfs_root_refs(&root->root_item) != 0); | |
3892 | goto no_delete; | |
3893 | } | |
3894 | ||
3895 | rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP); | |
3896 | if (!rsv) { | |
3897 | btrfs_orphan_del(NULL, inode); | |
3898 | goto no_delete; | |
3899 | } | |
3900 | rsv->size = min_size; | |
3901 | rsv->failfast = 1; | |
3902 | global_rsv = &root->fs_info->global_block_rsv; | |
3903 | ||
3904 | btrfs_i_size_write(inode, 0); | |
3905 | ||
3906 | /* | |
3907 | * This is a bit simpler than btrfs_truncate since we've already | |
3908 | * reserved our space for our orphan item in the unlink, so we just | |
3909 | * need to reserve some slack space in case we add bytes and update | |
3910 | * inode item when doing the truncate. | |
3911 | */ | |
3912 | while (1) { | |
3913 | ret = btrfs_block_rsv_refill(root, rsv, min_size, | |
3914 | BTRFS_RESERVE_FLUSH_LIMIT); | |
3915 | ||
3916 | /* | |
3917 | * Try and steal from the global reserve since we will | |
3918 | * likely not use this space anyway, we want to try as | |
3919 | * hard as possible to get this to work. | |
3920 | */ | |
3921 | if (ret) | |
3922 | ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size); | |
3923 | ||
3924 | if (ret) { | |
3925 | printk(KERN_WARNING "Could not get space for a " | |
3926 | "delete, will truncate on mount %d\n", ret); | |
3927 | btrfs_orphan_del(NULL, inode); | |
3928 | btrfs_free_block_rsv(root, rsv); | |
3929 | goto no_delete; | |
3930 | } | |
3931 | ||
3932 | trans = btrfs_start_transaction_lflush(root, 1); | |
3933 | if (IS_ERR(trans)) { | |
3934 | btrfs_orphan_del(NULL, inode); | |
3935 | btrfs_free_block_rsv(root, rsv); | |
3936 | goto no_delete; | |
3937 | } | |
3938 | ||
3939 | trans->block_rsv = rsv; | |
3940 | ||
3941 | ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0); | |
3942 | if (ret != -ENOSPC) | |
3943 | break; | |
3944 | ||
3945 | trans->block_rsv = &root->fs_info->trans_block_rsv; | |
3946 | ret = btrfs_update_inode(trans, root, inode); | |
3947 | BUG_ON(ret); | |
3948 | ||
3949 | btrfs_end_transaction(trans, root); | |
3950 | trans = NULL; | |
3951 | btrfs_btree_balance_dirty(root); | |
3952 | } | |
3953 | ||
3954 | btrfs_free_block_rsv(root, rsv); | |
3955 | ||
3956 | if (ret == 0) { | |
3957 | trans->block_rsv = root->orphan_block_rsv; | |
3958 | ret = btrfs_orphan_del(trans, inode); | |
3959 | BUG_ON(ret); | |
3960 | } | |
3961 | ||
3962 | trans->block_rsv = &root->fs_info->trans_block_rsv; | |
3963 | if (!(root == root->fs_info->tree_root || | |
3964 | root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)) | |
3965 | btrfs_return_ino(root, btrfs_ino(inode)); | |
3966 | ||
3967 | btrfs_end_transaction(trans, root); | |
3968 | btrfs_btree_balance_dirty(root); | |
3969 | no_delete: | |
3970 | clear_inode(inode); | |
3971 | return; | |
3972 | } | |
3973 | ||
3974 | /* | |
3975 | * this returns the key found in the dir entry in the location pointer. | |
3976 | * If no dir entries were found, location->objectid is 0. | |
3977 | */ | |
3978 | static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, | |
3979 | struct btrfs_key *location) | |
3980 | { | |
3981 | const char *name = dentry->d_name.name; | |
3982 | int namelen = dentry->d_name.len; | |
3983 | struct btrfs_dir_item *di; | |
3984 | struct btrfs_path *path; | |
3985 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
3986 | int ret = 0; | |
3987 | ||
3988 | path = btrfs_alloc_path(); | |
3989 | if (!path) | |
3990 | return -ENOMEM; | |
3991 | ||
3992 | di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name, | |
3993 | namelen, 0); | |
3994 | if (IS_ERR(di)) | |
3995 | ret = PTR_ERR(di); | |
3996 | ||
3997 | if (IS_ERR_OR_NULL(di)) | |
3998 | goto out_err; | |
3999 | ||
4000 | btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); | |
4001 | out: | |
4002 | btrfs_free_path(path); | |
4003 | return ret; | |
4004 | out_err: | |
4005 | location->objectid = 0; | |
4006 | goto out; | |
4007 | } | |
4008 | ||
4009 | /* | |
4010 | * when we hit a tree root in a directory, the btrfs part of the inode | |
4011 | * needs to be changed to reflect the root directory of the tree root. This | |
4012 | * is kind of like crossing a mount point. | |
4013 | */ | |
4014 | static int fixup_tree_root_location(struct btrfs_root *root, | |
4015 | struct inode *dir, | |
4016 | struct dentry *dentry, | |
4017 | struct btrfs_key *location, | |
4018 | struct btrfs_root **sub_root) | |
4019 | { | |
4020 | struct btrfs_path *path; | |
4021 | struct btrfs_root *new_root; | |
4022 | struct btrfs_root_ref *ref; | |
4023 | struct extent_buffer *leaf; | |
4024 | int ret; | |
4025 | int err = 0; | |
4026 | ||
4027 | path = btrfs_alloc_path(); | |
4028 | if (!path) { | |
4029 | err = -ENOMEM; | |
4030 | goto out; | |
4031 | } | |
4032 | ||
4033 | err = -ENOENT; | |
4034 | ret = btrfs_find_root_ref(root->fs_info->tree_root, path, | |
4035 | BTRFS_I(dir)->root->root_key.objectid, | |
4036 | location->objectid); | |
4037 | if (ret) { | |
4038 | if (ret < 0) | |
4039 | err = ret; | |
4040 | goto out; | |
4041 | } | |
4042 | ||
4043 | leaf = path->nodes[0]; | |
4044 | ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); | |
4045 | if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) || | |
4046 | btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len) | |
4047 | goto out; | |
4048 | ||
4049 | ret = memcmp_extent_buffer(leaf, dentry->d_name.name, | |
4050 | (unsigned long)(ref + 1), | |
4051 | dentry->d_name.len); | |
4052 | if (ret) | |
4053 | goto out; | |
4054 | ||
4055 | btrfs_release_path(path); | |
4056 | ||
4057 | new_root = btrfs_read_fs_root_no_name(root->fs_info, location); | |
4058 | if (IS_ERR(new_root)) { | |
4059 | err = PTR_ERR(new_root); | |
4060 | goto out; | |
4061 | } | |
4062 | ||
4063 | if (btrfs_root_refs(&new_root->root_item) == 0) { | |
4064 | err = -ENOENT; | |
4065 | goto out; | |
4066 | } | |
4067 | ||
4068 | *sub_root = new_root; | |
4069 | location->objectid = btrfs_root_dirid(&new_root->root_item); | |
4070 | location->type = BTRFS_INODE_ITEM_KEY; | |
4071 | location->offset = 0; | |
4072 | err = 0; | |
4073 | out: | |
4074 | btrfs_free_path(path); | |
4075 | return err; | |
4076 | } | |
4077 | ||
4078 | static void inode_tree_add(struct inode *inode) | |
4079 | { | |
4080 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
4081 | struct btrfs_inode *entry; | |
4082 | struct rb_node **p; | |
4083 | struct rb_node *parent; | |
4084 | u64 ino = btrfs_ino(inode); | |
4085 | again: | |
4086 | p = &root->inode_tree.rb_node; | |
4087 | parent = NULL; | |
4088 | ||
4089 | if (inode_unhashed(inode)) | |
4090 | return; | |
4091 | ||
4092 | spin_lock(&root->inode_lock); | |
4093 | while (*p) { | |
4094 | parent = *p; | |
4095 | entry = rb_entry(parent, struct btrfs_inode, rb_node); | |
4096 | ||
4097 | if (ino < btrfs_ino(&entry->vfs_inode)) | |
4098 | p = &parent->rb_left; | |
4099 | else if (ino > btrfs_ino(&entry->vfs_inode)) | |
4100 | p = &parent->rb_right; | |
4101 | else { | |
4102 | WARN_ON(!(entry->vfs_inode.i_state & | |
4103 | (I_WILL_FREE | I_FREEING))); | |
4104 | rb_erase(parent, &root->inode_tree); | |
4105 | RB_CLEAR_NODE(parent); | |
4106 | spin_unlock(&root->inode_lock); | |
4107 | goto again; | |
4108 | } | |
4109 | } | |
4110 | rb_link_node(&BTRFS_I(inode)->rb_node, parent, p); | |
4111 | rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree); | |
4112 | spin_unlock(&root->inode_lock); | |
4113 | } | |
4114 | ||
4115 | static void inode_tree_del(struct inode *inode) | |
4116 | { | |
4117 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
4118 | int empty = 0; | |
4119 | ||
4120 | spin_lock(&root->inode_lock); | |
4121 | if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) { | |
4122 | rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree); | |
4123 | RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node); | |
4124 | empty = RB_EMPTY_ROOT(&root->inode_tree); | |
4125 | } | |
4126 | spin_unlock(&root->inode_lock); | |
4127 | ||
4128 | /* | |
4129 | * Free space cache has inodes in the tree root, but the tree root has a | |
4130 | * root_refs of 0, so this could end up dropping the tree root as a | |
4131 | * snapshot, so we need the extra root != root->fs_info->tree_root check to | 
4132 | * make sure we don't drop it. | |
4133 | */ | |
4134 | if (empty && btrfs_root_refs(&root->root_item) == 0 && | |
4135 | root != root->fs_info->tree_root) { | |
4136 | synchronize_srcu(&root->fs_info->subvol_srcu); | |
4137 | spin_lock(&root->inode_lock); | |
4138 | empty = RB_EMPTY_ROOT(&root->inode_tree); | |
4139 | spin_unlock(&root->inode_lock); | |
4140 | if (empty) | |
4141 | btrfs_add_dead_root(root); | |
4142 | } | |
4143 | } | |
4144 | ||
4145 | void btrfs_invalidate_inodes(struct btrfs_root *root) | |
4146 | { | |
4147 | struct rb_node *node; | |
4148 | struct rb_node *prev; | |
4149 | struct btrfs_inode *entry; | |
4150 | struct inode *inode; | |
4151 | u64 objectid = 0; | |
4152 | ||
4153 | WARN_ON(btrfs_root_refs(&root->root_item) != 0); | |
4154 | ||
4155 | spin_lock(&root->inode_lock); | |
4156 | again: | |
4157 | node = root->inode_tree.rb_node; | |
4158 | prev = NULL; | |
4159 | while (node) { | |
4160 | prev = node; | |
4161 | entry = rb_entry(node, struct btrfs_inode, rb_node); | |
4162 | ||
4163 | if (objectid < btrfs_ino(&entry->vfs_inode)) | |
4164 | node = node->rb_left; | |
4165 | else if (objectid > btrfs_ino(&entry->vfs_inode)) | |
4166 | node = node->rb_right; | |
4167 | else | |
4168 | break; | |
4169 | } | |
4170 | if (!node) { | |
4171 | while (prev) { | |
4172 | entry = rb_entry(prev, struct btrfs_inode, rb_node); | |
4173 | if (objectid <= btrfs_ino(&entry->vfs_inode)) { | |
4174 | node = prev; | |
4175 | break; | |
4176 | } | |
4177 | prev = rb_next(prev); | |
4178 | } | |
4179 | } | |
4180 | while (node) { | |
4181 | entry = rb_entry(node, struct btrfs_inode, rb_node); | |
4182 | objectid = btrfs_ino(&entry->vfs_inode) + 1; | |
4183 | inode = igrab(&entry->vfs_inode); | |
4184 | if (inode) { | |
4185 | spin_unlock(&root->inode_lock); | |
4186 | if (atomic_read(&inode->i_count) > 1) | |
4187 | d_prune_aliases(inode); | |
4188 | /* | |
4189 | * btrfs_drop_inode will have it removed from | |
4190 | * the inode cache when its usage count | |
4191 | * hits zero. | |
4192 | */ | |
4193 | iput(inode); | |
4194 | cond_resched(); | |
4195 | spin_lock(&root->inode_lock); | |
4196 | goto again; | |
4197 | } | |
4198 | ||
4199 | if (cond_resched_lock(&root->inode_lock)) | |
4200 | goto again; | |
4201 | ||
4202 | node = rb_next(node); | |
4203 | } | |
4204 | spin_unlock(&root->inode_lock); | |
4205 | } | |
4206 | ||
4207 | static int btrfs_init_locked_inode(struct inode *inode, void *p) | |
4208 | { | |
4209 | struct btrfs_iget_args *args = p; | |
4210 | inode->i_ino = args->ino; | |
4211 | BTRFS_I(inode)->root = args->root; | |
4212 | return 0; | |
4213 | } | |
4214 | ||
4215 | static int btrfs_find_actor(struct inode *inode, void *opaque) | |
4216 | { | |
4217 | struct btrfs_iget_args *args = opaque; | |
4218 | return args->ino == btrfs_ino(inode) && | |
4219 | args->root == BTRFS_I(inode)->root; | |
4220 | } | |
4221 | ||
4222 | static struct inode *btrfs_iget_locked(struct super_block *s, | |
4223 | u64 objectid, | |
4224 | struct btrfs_root *root) | |
4225 | { | |
4226 | struct inode *inode; | |
4227 | struct btrfs_iget_args args; | |
4228 | args.ino = objectid; | |
4229 | args.root = root; | |
4230 | ||
4231 | inode = iget5_locked(s, objectid, btrfs_find_actor, | |
4232 | btrfs_init_locked_inode, | |
4233 | (void *)&args); | |
4234 | return inode; | |
4235 | } | |
4236 | ||
4237 | /* Get an inode object given its location and corresponding root. | |
4238 | * Returns in *new whether the inode was read from disk | 
4239 | */ | |
4240 | struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, | |
4241 | struct btrfs_root *root, int *new) | |
4242 | { | |
4243 | struct inode *inode; | |
4244 | ||
4245 | inode = btrfs_iget_locked(s, location->objectid, root); | |
4246 | if (!inode) | |
4247 | return ERR_PTR(-ENOMEM); | |
4248 | ||
4249 | if (inode->i_state & I_NEW) { | |
4250 | BTRFS_I(inode)->root = root; | |
4251 | memcpy(&BTRFS_I(inode)->location, location, sizeof(*location)); | |
4252 | btrfs_read_locked_inode(inode); | |
4253 | if (!is_bad_inode(inode)) { | |
4254 | inode_tree_add(inode); | |
4255 | unlock_new_inode(inode); | |
4256 | if (new) | |
4257 | *new = 1; | |
4258 | } else { | |
4259 | unlock_new_inode(inode); | |
4260 | iput(inode); | |
4261 | inode = ERR_PTR(-ESTALE); | |
4262 | } | |
4263 | } | |
4264 | ||
4265 | return inode; | |
4266 | } | |
4267 | ||
4268 | static struct inode *new_simple_dir(struct super_block *s, | |
4269 | struct btrfs_key *key, | |
4270 | struct btrfs_root *root) | |
4271 | { | |
4272 | struct inode *inode = new_inode(s); | |
4273 | ||
4274 | if (!inode) | |
4275 | return ERR_PTR(-ENOMEM); | |
4276 | ||
4277 | BTRFS_I(inode)->root = root; | |
4278 | memcpy(&BTRFS_I(inode)->location, key, sizeof(*key)); | |
4279 | set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); | |
4280 | ||
4281 | inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; | |
4282 | inode->i_op = &btrfs_dir_ro_inode_operations; | |
4283 | inode->i_fop = &simple_dir_operations; | |
4284 | inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; | |
4285 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; | |
4286 | ||
4287 | return inode; | |
4288 | } | |
4289 | ||
4290 | struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) | |
4291 | { | |
4292 | struct inode *inode; | |
4293 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
4294 | struct btrfs_root *sub_root = root; | |
4295 | struct btrfs_key location; | |
4296 | int index; | |
4297 | int ret = 0; | |
4298 | ||
4299 | if (dentry->d_name.len > BTRFS_NAME_LEN) | |
4300 | return ERR_PTR(-ENAMETOOLONG); | |
4301 | ||
4302 | if (unlikely(d_need_lookup(dentry))) { | |
4303 | memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key)); | |
4304 | kfree(dentry->d_fsdata); | |
4305 | dentry->d_fsdata = NULL; | |
4306 | /* This thing is hashed, drop it for now */ | |
4307 | d_drop(dentry); | |
4308 | } else { | |
4309 | ret = btrfs_inode_by_name(dir, dentry, &location); | |
4310 | } | |
4311 | ||
4312 | if (ret < 0) | |
4313 | return ERR_PTR(ret); | |
4314 | ||
4315 | if (location.objectid == 0) | |
4316 | return NULL; | |
4317 | ||
4318 | if (location.type == BTRFS_INODE_ITEM_KEY) { | |
4319 | inode = btrfs_iget(dir->i_sb, &location, root, NULL); | |
4320 | return inode; | |
4321 | } | |
4322 | ||
4323 | BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY); | |
4324 | ||
4325 | index = srcu_read_lock(&root->fs_info->subvol_srcu); | |
4326 | ret = fixup_tree_root_location(root, dir, dentry, | |
4327 | &location, &sub_root); | |
4328 | if (ret < 0) { | |
4329 | if (ret != -ENOENT) | |
4330 | inode = ERR_PTR(ret); | |
4331 | else | |
4332 | inode = new_simple_dir(dir->i_sb, &location, sub_root); | |
4333 | } else { | |
4334 | inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL); | |
4335 | } | |
4336 | srcu_read_unlock(&root->fs_info->subvol_srcu, index); | |
4337 | ||
4338 | if (!IS_ERR(inode) && root != sub_root) { | |
4339 | down_read(&root->fs_info->cleanup_work_sem); | |
4340 | if (!(inode->i_sb->s_flags & MS_RDONLY)) | |
4341 | ret = btrfs_orphan_cleanup(sub_root); | |
4342 | up_read(&root->fs_info->cleanup_work_sem); | |
4343 | if (ret) | |
4344 | inode = ERR_PTR(ret); | |
4345 | } | |
4346 | ||
4347 | return inode; | |
4348 | } | |
4349 | ||
4350 | static int btrfs_dentry_delete(const struct dentry *dentry) | |
4351 | { | |
4352 | struct btrfs_root *root; | |
4353 | struct inode *inode = dentry->d_inode; | |
4354 | ||
4355 | if (!inode && !IS_ROOT(dentry)) | |
4356 | inode = dentry->d_parent->d_inode; | |
4357 | ||
4358 | if (inode) { | |
4359 | root = BTRFS_I(inode)->root; | |
4360 | if (btrfs_root_refs(&root->root_item) == 0) | |
4361 | return 1; | |
4362 | ||
4363 | if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) | |
4364 | return 1; | |
4365 | } | |
4366 | return 0; | |
4367 | } | |
4368 | ||
4369 | static void btrfs_dentry_release(struct dentry *dentry) | |
4370 | { | |
4371 | if (dentry->d_fsdata) | |
4372 | kfree(dentry->d_fsdata); | |
4373 | } | |
4374 | ||
4375 | static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, | |
4376 | unsigned int flags) | |
4377 | { | |
4378 | struct dentry *ret; | |
4379 | ||
4380 | ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry); | |
4381 | if (unlikely(d_need_lookup(dentry))) { | |
4382 | spin_lock(&dentry->d_lock); | |
4383 | dentry->d_flags &= ~DCACHE_NEED_LOOKUP; | |
4384 | spin_unlock(&dentry->d_lock); | |
4385 | } | |
4386 | return ret; | |
4387 | } | |
4388 | ||
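/*
 * Maps the on-disk BTRFS_FT_* directory entry types to the DT_* values that
 * filldir expects; indexed by btrfs_dir_type().
 */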
4389 | unsigned char btrfs_filetype_table[] = { | |
4390 | DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK | |
4391 | }; | |
4392 | ||
4393 | static int btrfs_real_readdir(struct file *filp, void *dirent, | |
4394 | filldir_t filldir) | |
4395 | { | |
4396 | struct inode *inode = filp->f_dentry->d_inode; | |
4397 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
4398 | struct btrfs_item *item; | |
4399 | struct btrfs_dir_item *di; | |
4400 | struct btrfs_key key; | |
4401 | struct btrfs_key found_key; | |
4402 | struct btrfs_path *path; | |
4403 | struct list_head ins_list; | |
4404 | struct list_head del_list; | |
4405 | int ret; | |
4406 | struct extent_buffer *leaf; | |
4407 | int slot; | |
4408 | unsigned char d_type; | |
4409 | int over = 0; | |
4410 | u32 di_cur; | |
4411 | u32 di_total; | |
4412 | u32 di_len; | |
4413 | int key_type = BTRFS_DIR_INDEX_KEY; | |
4414 | char tmp_name[32]; | |
4415 | char *name_ptr; | |
4416 | int name_len; | |
4417 | int is_curr = 0; /* filp->f_pos points to the current index? */ | |
4418 | ||
4419 | /* FIXME, use a real flag for deciding about the key type */ | |
4420 | if (root->fs_info->tree_root == root) | |
4421 | key_type = BTRFS_DIR_ITEM_KEY; | |
4422 | ||
4423 | /* special case for "." */ | |
4424 | if (filp->f_pos == 0) { | |
4425 | over = filldir(dirent, ".", 1, | |
4426 | filp->f_pos, btrfs_ino(inode), DT_DIR); | |
4427 | if (over) | |
4428 | return 0; | |
4429 | filp->f_pos = 1; | |
4430 | } | |
4431 | /* special case for .., just use the back ref */ | |
4432 | if (filp->f_pos == 1) { | |
4433 | u64 pino = parent_ino(filp->f_path.dentry); | |
4434 | over = filldir(dirent, "..", 2, | |
4435 | filp->f_pos, pino, DT_DIR); | |
4436 | if (over) | |
4437 | return 0; | |
4438 | filp->f_pos = 2; | |
4439 | } | |
4440 | path = btrfs_alloc_path(); | |
4441 | if (!path) | |
4442 | return -ENOMEM; | |
4443 | ||
4444 | path->reada = 1; | |
4445 | ||
4446 | if (key_type == BTRFS_DIR_INDEX_KEY) { | |
4447 | INIT_LIST_HEAD(&ins_list); | |
4448 | INIT_LIST_HEAD(&del_list); | |
4449 | btrfs_get_delayed_items(inode, &ins_list, &del_list); | |
4450 | } | |
4451 | ||
4452 | btrfs_set_key_type(&key, key_type); | |
4453 | key.offset = filp->f_pos; | |
4454 | key.objectid = btrfs_ino(inode); | |
4455 | ||
4456 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
4457 | if (ret < 0) | |
4458 | goto err; | |
4459 | ||
4460 | while (1) { | |
4461 | leaf = path->nodes[0]; | |
4462 | slot = path->slots[0]; | |
4463 | if (slot >= btrfs_header_nritems(leaf)) { | |
4464 | ret = btrfs_next_leaf(root, path); | |
4465 | if (ret < 0) | |
4466 | goto err; | |
4467 | else if (ret > 0) | |
4468 | break; | |
4469 | continue; | |
4470 | } | |
4471 | ||
4472 | item = btrfs_item_nr(leaf, slot); | |
4473 | btrfs_item_key_to_cpu(leaf, &found_key, slot); | |
4474 | ||
4475 | if (found_key.objectid != key.objectid) | |
4476 | break; | |
4477 | if (btrfs_key_type(&found_key) != key_type) | |
4478 | break; | |
4479 | if (found_key.offset < filp->f_pos) | |
4480 | goto next; | |
4481 | if (key_type == BTRFS_DIR_INDEX_KEY && | |
4482 | btrfs_should_delete_dir_index(&del_list, | |
4483 | found_key.offset)) | |
4484 | goto next; | |
4485 | ||
4486 | filp->f_pos = found_key.offset; | |
4487 | is_curr = 1; | |
4488 | ||
4489 | di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); | |
4490 | di_cur = 0; | |
4491 | di_total = btrfs_item_size(leaf, item); | |
4492 | ||
4493 | while (di_cur < di_total) { | |
4494 | struct btrfs_key location; | |
4495 | ||
4496 | if (verify_dir_item(root, leaf, di)) | |
4497 | break; | |
4498 | ||
4499 | name_len = btrfs_dir_name_len(leaf, di); | |
4500 | if (name_len <= sizeof(tmp_name)) { | |
4501 | name_ptr = tmp_name; | |
4502 | } else { | |
4503 | name_ptr = kmalloc(name_len, GFP_NOFS); | |
4504 | if (!name_ptr) { | |
4505 | ret = -ENOMEM; | |
4506 | goto err; | |
4507 | } | |
4508 | } | |
4509 | read_extent_buffer(leaf, name_ptr, | |
4510 | (unsigned long)(di + 1), name_len); | |
4511 | ||
4512 | d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)]; | |
4513 | btrfs_dir_item_key_to_cpu(leaf, di, &location); | |
4514 | ||
4515 | ||
4516 | /* is this a reference to our own snapshot? If so | |
4517 | * skip it. | |
4518 | * | |
4519 | * In contrast to old kernels, we insert the snapshot's | |
4520 | * dir item and dir index after it has been created, so | |
4521 | * we won't find a reference to our own snapshot. We | |
4522 | * still keep the following code for backward | |
4523 | * compatibility. | |
4524 | */ | |
4525 | if (location.type == BTRFS_ROOT_ITEM_KEY && | |
4526 | location.objectid == root->root_key.objectid) { | |
4527 | over = 0; | |
4528 | goto skip; | |
4529 | } | |
4530 | over = filldir(dirent, name_ptr, name_len, | |
4531 | found_key.offset, location.objectid, | |
4532 | d_type); | |
4533 | ||
4534 | skip: | |
4535 | if (name_ptr != tmp_name) | |
4536 | kfree(name_ptr); | |
4537 | ||
4538 | if (over) | |
4539 | goto nopos; | |
4540 | di_len = btrfs_dir_name_len(leaf, di) + | |
4541 | btrfs_dir_data_len(leaf, di) + sizeof(*di); | |
4542 | di_cur += di_len; | |
4543 | di = (struct btrfs_dir_item *)((char *)di + di_len); | |
4544 | } | |
4545 | next: | |
4546 | path->slots[0]++; | |
4547 | } | |
4548 | ||
4549 | if (key_type == BTRFS_DIR_INDEX_KEY) { | |
4550 | if (is_curr) | |
4551 | filp->f_pos++; | |
4552 | ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir, | |
4553 | &ins_list); | |
4554 | if (ret) | |
4555 | goto nopos; | |
4556 | } | |
4557 | ||
4558 | /* Reached end of directory/root. Bump pos past the last item. */ | |
4559 | if (key_type == BTRFS_DIR_INDEX_KEY) | |
4560 | /* | |
4561 | * 32-bit glibc will use getdents64, but then parse the offset with | 
4562 | * strtol, so the largest f_pos we can serve is this. | 
4563 | */ | |
4564 | filp->f_pos = 0x7fffffff; | |
4565 | else | |
4566 | filp->f_pos++; | |
4567 | nopos: | |
4568 | ret = 0; | |
4569 | err: | |
4570 | if (key_type == BTRFS_DIR_INDEX_KEY) | |
4571 | btrfs_put_delayed_items(&ins_list, &del_list); | |
4572 | btrfs_free_path(path); | |
4573 | return ret; | |
4574 | } | |
4575 | ||
4576 | int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) | |
4577 | { | |
4578 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
4579 | struct btrfs_trans_handle *trans; | |
4580 | int ret = 0; | |
4581 | bool nolock = false; | |
4582 | ||
4583 | if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags)) | |
4584 | return 0; | |
4585 | ||
4586 | if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode)) | |
4587 | nolock = true; | |
4588 | ||
4589 | if (wbc->sync_mode == WB_SYNC_ALL) { | |
4590 | if (nolock) | |
4591 | trans = btrfs_join_transaction_nolock(root); | |
4592 | else | |
4593 | trans = btrfs_join_transaction(root); | |
4594 | if (IS_ERR(trans)) | |
4595 | return PTR_ERR(trans); | |
4596 | ret = btrfs_commit_transaction(trans, root); | |
4597 | } | |
4598 | return ret; | |
4599 | } | |
4600 | ||
4601 | /* | |
4602 | * This is somewhat expensive, updating the tree every time the | |
4603 | * inode changes.  But, it is most likely to find the inode in cache. | 
4604 | * FIXME: needs more benchmarking... there is no reason other than | 
4605 | * performance to keep or drop this code. | 
4606 | */ | |
4607 | int btrfs_dirty_inode(struct inode *inode) | |
4608 | { | |
4609 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
4610 | struct btrfs_trans_handle *trans; | |
4611 | int ret; | |
4612 | ||
4613 | if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags)) | |
4614 | return 0; | |
4615 | ||
4616 | trans = btrfs_join_transaction(root); | |
4617 | if (IS_ERR(trans)) | |
4618 | return PTR_ERR(trans); | |
4619 | ||
4620 | ret = btrfs_update_inode(trans, root, inode); | |
4621 | if (ret == -ENOSPC) { | 
4622 | /* whoops, lets try again with the full transaction */ | |
4623 | btrfs_end_transaction(trans, root); | |
4624 | trans = btrfs_start_transaction(root, 1); | |
4625 | if (IS_ERR(trans)) | |
4626 | return PTR_ERR(trans); | |
4627 | ||
4628 | ret = btrfs_update_inode(trans, root, inode); | |
4629 | } | |
4630 | btrfs_end_transaction(trans, root); | |
4631 | if (BTRFS_I(inode)->delayed_node) | |
4632 | btrfs_balance_delayed_items(root); | |
4633 | ||
4634 | return ret; | |
4635 | } | |
4636 | ||
4637 | /* | |
4638 | * This is a copy of file_update_time.  We need it so we can return an error | 
4639 | * on ENOSPC when updating the inode during file writes and mmap writes. | 
4640 | */ | |
4641 | static int btrfs_update_time(struct inode *inode, struct timespec *now, | |
4642 | int flags) | |
4643 | { | |
4644 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
4645 | ||
4646 | if (btrfs_root_readonly(root)) | |
4647 | return -EROFS; | |
4648 | ||
4649 | if (flags & S_VERSION) | |
4650 | inode_inc_iversion(inode); | |
4651 | if (flags & S_CTIME) | |
4652 | inode->i_ctime = *now; | |
4653 | if (flags & S_MTIME) | |
4654 | inode->i_mtime = *now; | |
4655 | if (flags & S_ATIME) | |
4656 | inode->i_atime = *now; | |
4657 | return btrfs_dirty_inode(inode); | |
4658 | } | |
4659 | ||
4660 | /* | |
4661 | * find the highest existing sequence number in a directory | |
4662 | * and then set the in-memory index_cnt variable to reflect | |
4663 | * free sequence numbers | |
4664 | */ | |
4665 | static int btrfs_set_inode_index_count(struct inode *inode) | |
4666 | { | |
4667 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
4668 | struct btrfs_key key, found_key; | |
4669 | struct btrfs_path *path; | |
4670 | struct extent_buffer *leaf; | |
4671 | int ret; | |
4672 | ||
4673 | key.objectid = btrfs_ino(inode); | |
4674 | btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY); | |
4675 | key.offset = (u64)-1; | |
4676 | ||
4677 | path = btrfs_alloc_path(); | |
4678 | if (!path) | |
4679 | return -ENOMEM; | |
4680 | ||
4681 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
4682 | if (ret < 0) | |
4683 | goto out; | |
4684 | /* FIXME: we should be able to handle this */ | |
4685 | if (ret == 0) | |
4686 | goto out; | |
4687 | ret = 0; | |
4688 | ||
4689 | /* | |
4690 | * MAGIC NUMBER EXPLANATION: | |
4691 | * since we search a directory based on f_pos, and '.' and '..' have | 
4692 | * f_pos of 0 and 1 respectively, every other entry has to start at | 
4693 | * index 2 or above | 
4694 | */ | |
4695 | if (path->slots[0] == 0) { | |
4696 | BTRFS_I(inode)->index_cnt = 2; | |
4697 | goto out; | |
4698 | } | |
4699 | ||
4700 | path->slots[0]--; | |
4701 | ||
4702 | leaf = path->nodes[0]; | |
4703 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | |
4704 | ||
4705 | if (found_key.objectid != btrfs_ino(inode) || | |
4706 | btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) { | |
4707 | BTRFS_I(inode)->index_cnt = 2; | |
4708 | goto out; | |
4709 | } | |
4710 | ||
4711 | BTRFS_I(inode)->index_cnt = found_key.offset + 1; | |
4712 | out: | |
4713 | btrfs_free_path(path); | |
4714 | return ret; | |
4715 | } | |
4716 | ||
4717 | /* | |
4718 | * helper to find a free sequence number in a given directory.  The current | 
4719 | * code is very simple; later versions will do smarter things in the btree. | 
4720 | */ | |
4721 | int btrfs_set_inode_index(struct inode *dir, u64 *index) | |
4722 | { | |
4723 | int ret = 0; | |
4724 | ||
4725 | if (BTRFS_I(dir)->index_cnt == (u64)-1) { | |
4726 | ret = btrfs_inode_delayed_dir_index_count(dir); | |
4727 | if (ret) { | |
4728 | ret = btrfs_set_inode_index_count(dir); | |
4729 | if (ret) | |
4730 | return ret; | |
4731 | } | |
4732 | } | |
4733 | ||
4734 | *index = BTRFS_I(dir)->index_cnt; | |
4735 | BTRFS_I(dir)->index_cnt++; | |
4736 | ||
4737 | return ret; | |
4738 | } | |
4739 | ||
4740 | static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, | |
4741 | struct btrfs_root *root, | |
4742 | struct inode *dir, | |
4743 | const char *name, int name_len, | |
4744 | u64 ref_objectid, u64 objectid, | |
4745 | umode_t mode, u64 *index) | |
4746 | { | |
4747 | struct inode *inode; | |
4748 | struct btrfs_inode_item *inode_item; | |
4749 | struct btrfs_key *location; | |
4750 | struct btrfs_path *path; | |
4751 | struct btrfs_inode_ref *ref; | |
4752 | struct btrfs_key key[2]; | |
4753 | u32 sizes[2]; | |
4754 | unsigned long ptr; | |
4755 | int ret; | |
4756 | int owner; | |
4757 | ||
4758 | path = btrfs_alloc_path(); | |
4759 | if (!path) | |
4760 | return ERR_PTR(-ENOMEM); | |
4761 | ||
4762 | inode = new_inode(root->fs_info->sb); | |
4763 | if (!inode) { | |
4764 | btrfs_free_path(path); | |
4765 | return ERR_PTR(-ENOMEM); | |
4766 | } | |
4767 | ||
4768 | /* | |
4769 | * we have to initialize this early, so we can reclaim the inode | |
4770 | * number if we fail afterwards in this function. | |
4771 | */ | |
4772 | inode->i_ino = objectid; | |
4773 | ||
4774 | if (dir) { | |
4775 | trace_btrfs_inode_request(dir); | |
4776 | ||
4777 | ret = btrfs_set_inode_index(dir, index); | |
4778 | if (ret) { | |
4779 | btrfs_free_path(path); | |
4780 | iput(inode); | |
4781 | return ERR_PTR(ret); | |
4782 | } | |
4783 | } | |
4784 | /* | |
4785 | * index_cnt is ignored for everything but a dir, | |
4786 | * btrfs_set_inode_index_count has an explanation for the magic | 
4787 | * number | |
4788 | */ | |
4789 | BTRFS_I(inode)->index_cnt = 2; | |
4790 | BTRFS_I(inode)->root = root; | |
4791 | BTRFS_I(inode)->generation = trans->transid; | |
4792 | inode->i_generation = BTRFS_I(inode)->generation; | |
4793 | ||
4794 | /* | |
4795 | * We could have gotten an inode number from somebody who was fsynced | |
4796 | * and then removed in this same transaction, so let's just set full | |
4797 | * sync since it will be a full sync anyway and this will blow away the | |
4798 | * old info in the log. | |
4799 | */ | |
4800 | set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); | |
4801 | ||
4802 | if (S_ISDIR(mode)) | |
4803 | owner = 0; | |
4804 | else | |
4805 | owner = 1; | |
4806 | ||
4807 | key[0].objectid = objectid; | |
4808 | btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY); | |
4809 | key[0].offset = 0; | |
4810 | ||
4811 | /* | |
4812 | * Start new inodes with an inode_ref. This is slightly more | |
4813 | * efficient for small numbers of hard links since they will | |
4814 | * be packed into one item. Extended refs will kick in if we | |
4815 | * add more hard links than can fit in the ref item. | |
4816 | */ | |
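| /* | |
| * Editor's note (illustrative, not in the original source): the INODE_REF | |
| * item created below stores the backref index and the name length followed | |
| * by the name bytes themselves, which is why sizes[1] is computed as | |
| * name_len + sizeof(*ref).  For a 5 character name that works out to | |
| * 5 + sizeof(struct btrfs_inode_ref) bytes of item data in the leaf. | |
| */ | |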
4817 | key[1].objectid = objectid; | |
4818 | btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY); | |
4819 | key[1].offset = ref_objectid; | |
4820 | ||
4821 | sizes[0] = sizeof(struct btrfs_inode_item); | |
4822 | sizes[1] = name_len + sizeof(*ref); | |
4823 | ||
4824 | path->leave_spinning = 1; | |
4825 | ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2); | |
4826 | if (ret != 0) | |
4827 | goto fail; | |
4828 | ||
4829 | inode_init_owner(inode, dir, mode); | |
4830 | inode_set_bytes(inode, 0); | |
4831 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; | |
4832 | inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], | |
4833 | struct btrfs_inode_item); | |
4834 | memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item, | |
4835 | sizeof(*inode_item)); | |
4836 | fill_inode_item(trans, path->nodes[0], inode_item, inode); | |
4837 | ||
4838 | ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, | |
4839 | struct btrfs_inode_ref); | |
4840 | btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len); | |
4841 | btrfs_set_inode_ref_index(path->nodes[0], ref, *index); | |
4842 | ptr = (unsigned long)(ref + 1); | |
4843 | write_extent_buffer(path->nodes[0], name, ptr, name_len); | |
4844 | ||
4845 | btrfs_mark_buffer_dirty(path->nodes[0]); | |
4846 | btrfs_free_path(path); | |
4847 | ||
4848 | location = &BTRFS_I(inode)->location; | |
4849 | location->objectid = objectid; | |
4850 | location->offset = 0; | |
4851 | btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY); | |
4852 | ||
4853 | btrfs_inherit_iflags(inode, dir); | |
4854 | ||
4855 | if (S_ISREG(mode)) { | |
4856 | if (btrfs_test_opt(root, NODATASUM)) | |
4857 | BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; | |
4858 | if (btrfs_test_opt(root, NODATACOW)) | |
4859 | BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW; | |
4860 | } | |
4861 | ||
4862 | insert_inode_hash(inode); | |
4863 | inode_tree_add(inode); | |
4864 | ||
4865 | trace_btrfs_inode_new(inode); | |
4866 | btrfs_set_inode_last_trans(trans, inode); | |
4867 | ||
4868 | btrfs_update_root_times(trans, root); | |
4869 | ||
4870 | return inode; | |
4871 | fail: | |
4872 | if (dir) | |
4873 | BTRFS_I(dir)->index_cnt--; | |
4874 | btrfs_free_path(path); | |
4875 | iput(inode); | |
4876 | return ERR_PTR(ret); | |
4877 | } | |
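||
| /* | |
| * Editor's note: a minimal caller sketch, not part of the original file. | |
| * The create paths in this file (e.g. btrfs_mknod() below) follow the same | |
| * pattern: | |
| * | |
| *	err = btrfs_find_free_ino(root, &objectid); | |
| *	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, | |
| *				dentry->d_name.len, btrfs_ino(dir), | |
| *				objectid, mode, &index); | |
| *	if (IS_ERR(inode)) | |
| *		err = PTR_ERR(inode); | |
| * | |
| * On failure btrfs_new_inode() cleans up after itself: it gives back the | |
| * directory index it consumed and iputs the VFS inode, so callers only | |
| * need to end the transaction. | |
| */ | |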
4878 | ||
4879 | static inline u8 btrfs_inode_type(struct inode *inode) | |
4880 | { | |
4881 | return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT]; | |
4882 | } | |
4883 | ||
4884 | /* | |
4885 | * utility function to add 'inode' into 'parent_inode' with | |
4886 | * a given name and a given sequence number. | |
4887 | * if 'add_backref' is true, also insert a backref from the | |
4888 | * inode to the parent directory. | |
4889 | */ | |
4890 | int btrfs_add_link(struct btrfs_trans_handle *trans, | |
4891 | struct inode *parent_inode, struct inode *inode, | |
4892 | const char *name, int name_len, int add_backref, u64 index) | |
4893 | { | |
4894 | int ret = 0; | |
4895 | struct btrfs_key key; | |
4896 | struct btrfs_root *root = BTRFS_I(parent_inode)->root; | |
4897 | u64 ino = btrfs_ino(inode); | |
4898 | u64 parent_ino = btrfs_ino(parent_inode); | |
4899 | ||
4900 | if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { | |
4901 | memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key)); | |
4902 | } else { | |
4903 | key.objectid = ino; | |
4904 | btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); | |
4905 | key.offset = 0; | |
4906 | } | |
4907 | ||
4908 | if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { | |
4909 | ret = btrfs_add_root_ref(trans, root->fs_info->tree_root, | |
4910 | key.objectid, root->root_key.objectid, | |
4911 | parent_ino, index, name, name_len); | |
4912 | } else if (add_backref) { | |
4913 | ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino, | |
4914 | parent_ino, index); | |
4915 | } | |
4916 | ||
4917 | /* Nothing to clean up yet */ | |
4918 | if (ret) | |
4919 | return ret; | |
4920 | ||
4921 | ret = btrfs_insert_dir_item(trans, root, name, name_len, | |
4922 | parent_inode, &key, | |
4923 | btrfs_inode_type(inode), index); | |
4924 | if (ret == -EEXIST || ret == -EOVERFLOW) | |
4925 | goto fail_dir_item; | |
4926 | else if (ret) { | |
4927 | btrfs_abort_transaction(trans, root, ret); | |
4928 | return ret; | |
4929 | } | |
4930 | ||
4931 | btrfs_i_size_write(parent_inode, parent_inode->i_size + | |
4932 | name_len * 2); | |
4933 | inode_inc_iversion(parent_inode); | |
4934 | parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME; | |
4935 | ret = btrfs_update_inode(trans, root, parent_inode); | |
4936 | if (ret) | |
4937 | btrfs_abort_transaction(trans, root, ret); | |
4938 | return ret; | |
4939 | ||
4940 | fail_dir_item: | |
4941 | if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { | |
4942 | u64 local_index; | |
4943 | int err; | |
4944 | err = btrfs_del_root_ref(trans, root->fs_info->tree_root, | |
4945 | key.objectid, root->root_key.objectid, | |
4946 | parent_ino, &local_index, name, name_len); | |
4947 | ||
4948 | } else if (add_backref) { | |
4949 | u64 local_index; | |
4950 | int err; | |
4951 | ||
4952 | err = btrfs_del_inode_ref(trans, root, name, name_len, | |
4953 | ino, parent_ino, &local_index); | |
4954 | } | |
4955 | return ret; | |
4956 | } | |
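||
| /* | |
| * Editor's note (not in the original source): the i_size update above adds | |
| * name_len * 2 because, as far as I can tell, each name is accounted twice | |
| * in the directory -- once for the DIR_ITEM and once for the DIR_INDEX | |
| * entry that btrfs_insert_dir_item() creates.  Linking "foo" into a | |
| * directory therefore grows its i_size by 6 bytes. | |
| */ | |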
4957 | ||
4958 | static int btrfs_add_nondir(struct btrfs_trans_handle *trans, | |
4959 | struct inode *dir, struct dentry *dentry, | |
4960 | struct inode *inode, int backref, u64 index) | |
4961 | { | |
4962 | int err = btrfs_add_link(trans, dir, inode, | |
4963 | dentry->d_name.name, dentry->d_name.len, | |
4964 | backref, index); | |
4965 | if (err > 0) | |
4966 | err = -EEXIST; | |
4967 | return err; | |
4968 | } | |
4969 | ||
4970 | static int btrfs_mknod(struct inode *dir, struct dentry *dentry, | |
4971 | umode_t mode, dev_t rdev) | |
4972 | { | |
4973 | struct btrfs_trans_handle *trans; | |
4974 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
4975 | struct inode *inode = NULL; | |
4976 | int err; | |
4977 | int drop_inode = 0; | |
4978 | u64 objectid; | |
4979 | u64 index = 0; | |
4980 | ||
4981 | if (!new_valid_dev(rdev)) | |
4982 | return -EINVAL; | |
4983 | ||
4984 | /* | |
4985 | * 2 for inode item and ref | |
4986 | * 2 for dir items | |
4987 | * 1 for xattr if selinux is on | |
4988 | */ | |
4989 | trans = btrfs_start_transaction(root, 5); | |
4990 | if (IS_ERR(trans)) | |
4991 | return PTR_ERR(trans); | |
4992 | ||
4993 | err = btrfs_find_free_ino(root, &objectid); | |
4994 | if (err) | |
4995 | goto out_unlock; | |
4996 | ||
4997 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, | |
4998 | dentry->d_name.len, btrfs_ino(dir), objectid, | |
4999 | mode, &index); | |
5000 | if (IS_ERR(inode)) { | |
5001 | err = PTR_ERR(inode); | |
5002 | goto out_unlock; | |
5003 | } | |
5004 | ||
5005 | err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); | |
5006 | if (err) { | |
5007 | drop_inode = 1; | |
5008 | goto out_unlock; | |
5009 | } | |
5010 | ||
5011 | err = btrfs_update_inode(trans, root, inode); | |
5012 | if (err) { | |
5013 | drop_inode = 1; | |
5014 | goto out_unlock; | |
5015 | } | |
5016 | ||
5017 | /* | |
5018 | * If the active LSM wants to access the inode during | |
5019 | * d_instantiate it needs these. Smack checks to see | |
5020 | * if the filesystem supports xattrs by looking at the | |
5021 | * ops vector. | |
5022 | */ | |
5023 | ||
5024 | inode->i_op = &btrfs_special_inode_operations; | |
5025 | err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); | |
5026 | if (err) | |
5027 | drop_inode = 1; | |
5028 | else { | |
5029 | init_special_inode(inode, inode->i_mode, rdev); | |
5030 | btrfs_update_inode(trans, root, inode); | |
5031 | d_instantiate(dentry, inode); | |
5032 | } | |
5033 | out_unlock: | |
5034 | btrfs_end_transaction(trans, root); | |
5035 | btrfs_btree_balance_dirty(root); | |
5036 | if (drop_inode) { | |
5037 | inode_dec_link_count(inode); | |
5038 | iput(inode); | |
5039 | } | |
5040 | return err; | |
5041 | } | |
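||
| /* | |
| * Editor's note (illustrative arithmetic, not original): the "5" passed to | |
| * btrfs_start_transaction() above is just the sum of the units listed in | |
| * the comment: 2 (inode item + inode ref) + 2 (dir item + dir index) + | |
| * 1 (the security xattr, if SELinux is on) = 5 reserved items. | |
| * btrfs_create() and btrfs_mkdir() below reserve the same amount for the | |
| * same reason; btrfs_link() also uses 5 but counts the parent inode update | |
| * instead of the xattr. | |
| */ | |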
5042 | ||
5043 | static int btrfs_create(struct inode *dir, struct dentry *dentry, | |
5044 | umode_t mode, bool excl) | |
5045 | { | |
5046 | struct btrfs_trans_handle *trans; | |
5047 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
5048 | struct inode *inode = NULL; | |
5049 | int drop_inode_on_err = 0; | |
5050 | int err; | |
5051 | u64 objectid; | |
5052 | u64 index = 0; | |
5053 | ||
5054 | /* | |
5055 | * 2 for inode item and ref | |
5056 | * 2 for dir items | |
5057 | * 1 for xattr if selinux is on | |
5058 | */ | |
5059 | trans = btrfs_start_transaction(root, 5); | |
5060 | if (IS_ERR(trans)) | |
5061 | return PTR_ERR(trans); | |
5062 | ||
5063 | err = btrfs_find_free_ino(root, &objectid); | |
5064 | if (err) | |
5065 | goto out_unlock; | |
5066 | ||
5067 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, | |
5068 | dentry->d_name.len, btrfs_ino(dir), objectid, | |
5069 | mode, &index); | |
5070 | if (IS_ERR(inode)) { | |
5071 | err = PTR_ERR(inode); | |
5072 | goto out_unlock; | |
5073 | } | |
5074 | drop_inode_on_err = 1; | |
5075 | ||
5076 | err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); | |
5077 | if (err) | |
5078 | goto out_unlock; | |
5079 | ||
5080 | err = btrfs_update_inode(trans, root, inode); | |
5081 | if (err) | |
5082 | goto out_unlock; | |
5083 | ||
5084 | /* | |
5085 | * If the active LSM wants to access the inode during | |
5086 | * d_instantiate it needs these. Smack checks to see | |
5087 | * if the filesystem supports xattrs by looking at the | |
5088 | * ops vector. | |
5089 | */ | |
5090 | inode->i_fop = &btrfs_file_operations; | |
5091 | inode->i_op = &btrfs_file_inode_operations; | |
5092 | ||
5093 | err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); | |
5094 | if (err) | |
5095 | goto out_unlock; | |
5096 | ||
5097 | inode->i_mapping->a_ops = &btrfs_aops; | |
5098 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; | |
5099 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; | |
5100 | d_instantiate(dentry, inode); | |
5101 | ||
5102 | out_unlock: | |
5103 | btrfs_end_transaction(trans, root); | |
5104 | if (err && drop_inode_on_err) { | |
5105 | inode_dec_link_count(inode); | |
5106 | iput(inode); | |
5107 | } | |
5108 | btrfs_btree_balance_dirty(root); | |
5109 | return err; | |
5110 | } | |
5111 | ||
5112 | static int btrfs_link(struct dentry *old_dentry, struct inode *dir, | |
5113 | struct dentry *dentry) | |
5114 | { | |
5115 | struct btrfs_trans_handle *trans; | |
5116 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
5117 | struct inode *inode = old_dentry->d_inode; | |
5118 | u64 index; | |
5119 | int err; | |
5120 | int drop_inode = 0; | |
5121 | ||
5122 | /* do not allow sys_link across subvolumes of the same device */ | |
5123 | if (root->objectid != BTRFS_I(inode)->root->objectid) | |
5124 | return -EXDEV; | |
5125 | ||
5126 | if (inode->i_nlink >= BTRFS_LINK_MAX) | |
5127 | return -EMLINK; | |
5128 | ||
5129 | err = btrfs_set_inode_index(dir, &index); | |
5130 | if (err) | |
5131 | goto fail; | |
5132 | ||
5133 | /* | |
5134 | * 2 items for inode and inode ref | |
5135 | * 2 items for dir items | |
5136 | * 1 item for parent inode | |
5137 | */ | |
5138 | trans = btrfs_start_transaction(root, 5); | |
5139 | if (IS_ERR(trans)) { | |
5140 | err = PTR_ERR(trans); | |
5141 | goto fail; | |
5142 | } | |
5143 | ||
5144 | btrfs_inc_nlink(inode); | |
5145 | inode_inc_iversion(inode); | |
5146 | inode->i_ctime = CURRENT_TIME; | |
5147 | ihold(inode); | |
5148 | set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); | |
5149 | ||
5150 | err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index); | |
5151 | ||
5152 | if (err) { | |
5153 | drop_inode = 1; | |
5154 | } else { | |
5155 | struct dentry *parent = dentry->d_parent; | |
5156 | err = btrfs_update_inode(trans, root, inode); | |
5157 | if (err) | |
5158 | goto fail; | |
5159 | d_instantiate(dentry, inode); | |
5160 | btrfs_log_new_name(trans, inode, NULL, parent); | |
5161 | } | |
5162 | ||
5163 | btrfs_end_transaction(trans, root); | |
5164 | fail: | |
5165 | if (drop_inode) { | |
5166 | inode_dec_link_count(inode); | |
5167 | iput(inode); | |
5168 | } | |
5169 | btrfs_btree_balance_dirty(root); | |
5170 | return err; | |
5171 | } | |
5172 | ||
5173 | static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) | |
5174 | { | |
5175 | struct inode *inode = NULL; | |
5176 | struct btrfs_trans_handle *trans; | |
5177 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
5178 | int err = 0; | |
5179 | int drop_on_err = 0; | |
5180 | u64 objectid = 0; | |
5181 | u64 index = 0; | |
5182 | ||
5183 | /* | |
5184 | * 2 items for inode and ref | |
5185 | * 2 items for dir items | |
5186 | * 1 for xattr if selinux is on | |
5187 | */ | |
5188 | trans = btrfs_start_transaction(root, 5); | |
5189 | if (IS_ERR(trans)) | |
5190 | return PTR_ERR(trans); | |
5191 | ||
5192 | err = btrfs_find_free_ino(root, &objectid); | |
5193 | if (err) | |
5194 | goto out_fail; | |
5195 | ||
5196 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, | |
5197 | dentry->d_name.len, btrfs_ino(dir), objectid, | |
5198 | S_IFDIR | mode, &index); | |
5199 | if (IS_ERR(inode)) { | |
5200 | err = PTR_ERR(inode); | |
5201 | goto out_fail; | |
5202 | } | |
5203 | ||
5204 | drop_on_err = 1; | |
5205 | ||
5206 | err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); | |
5207 | if (err) | |
5208 | goto out_fail; | |
5209 | ||
5210 | inode->i_op = &btrfs_dir_inode_operations; | |
5211 | inode->i_fop = &btrfs_dir_file_operations; | |
5212 | ||
5213 | btrfs_i_size_write(inode, 0); | |
5214 | err = btrfs_update_inode(trans, root, inode); | |
5215 | if (err) | |
5216 | goto out_fail; | |
5217 | ||
5218 | err = btrfs_add_link(trans, dir, inode, dentry->d_name.name, | |
5219 | dentry->d_name.len, 0, index); | |
5220 | if (err) | |
5221 | goto out_fail; | |
5222 | ||
5223 | d_instantiate(dentry, inode); | |
5224 | drop_on_err = 0; | |
5225 | ||
5226 | out_fail: | |
5227 | btrfs_end_transaction(trans, root); | |
5228 | if (drop_on_err) | |
5229 | iput(inode); | |
5230 | btrfs_btree_balance_dirty(root); | |
5231 | return err; | |
5232 | } | |
5233 | ||
5234 | /* helper for btrfs_get_extent. Given an existing extent in the tree, | |
5235 | * and an extent that you want to insert, deal with overlap and insert | |
5236 | * the new extent into the tree. | |
5237 | */ | |
5238 | static int merge_extent_mapping(struct extent_map_tree *em_tree, | |
5239 | struct extent_map *existing, | |
5240 | struct extent_map *em, | |
5241 | u64 map_start, u64 map_len) | |
5242 | { | |
5243 | u64 start_diff; | |
5244 | ||
5245 | BUG_ON(map_start < em->start || map_start >= extent_map_end(em)); | |
5246 | start_diff = map_start - em->start; | |
5247 | em->start = map_start; | |
5248 | em->len = map_len; | |
5249 | if (em->block_start < EXTENT_MAP_LAST_BYTE && | |
5250 | !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { | |
5251 | em->block_start += start_diff; | |
5252 | em->block_len -= start_diff; | |
5253 | } | |
5254 | return add_extent_mapping(em_tree, em); | |
5255 | } | |
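||
| /* | |
| * Editor's note: a worked example (not in the original source) of the | |
| * adjustment above.  Suppose the extent being inserted covers file range | |
| * [0, 16K) with block_start 100K, but only [4K, 8K) is actually free in | |
| * the tree (map_start = 4K, map_len = 4K).  Then start_diff = 4K and the | |
| * trimmed mapping becomes [4K, 8K) with block_start 104K and block_len | |
| * shrunk by 4K, so the disk bytes still line up with the file offset. | |
| */ | |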
5256 | ||
5257 | static noinline int uncompress_inline(struct btrfs_path *path, | |
5258 | struct inode *inode, struct page *page, | |
5259 | size_t pg_offset, u64 extent_offset, | |
5260 | struct btrfs_file_extent_item *item) | |
5261 | { | |
5262 | int ret; | |
5263 | struct extent_buffer *leaf = path->nodes[0]; | |
5264 | char *tmp; | |
5265 | size_t max_size; | |
5266 | unsigned long inline_size; | |
5267 | unsigned long ptr; | |
5268 | int compress_type; | |
5269 | ||
5270 | WARN_ON(pg_offset != 0); | |
5271 | compress_type = btrfs_file_extent_compression(leaf, item); | |
5272 | max_size = btrfs_file_extent_ram_bytes(leaf, item); | |
5273 | inline_size = btrfs_file_extent_inline_item_len(leaf, | |
5274 | btrfs_item_nr(leaf, path->slots[0])); | |
5275 | tmp = kmalloc(inline_size, GFP_NOFS); | |
5276 | if (!tmp) | |
5277 | return -ENOMEM; | |
5278 | ptr = btrfs_file_extent_inline_start(item); | |
5279 | ||
5280 | read_extent_buffer(leaf, tmp, ptr, inline_size); | |
5281 | ||
5282 | max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size); | |
5283 | ret = btrfs_decompress(compress_type, tmp, page, | |
5284 | extent_offset, inline_size, max_size); | |
5285 | if (ret) { | |
5286 | char *kaddr = kmap_atomic(page); | |
5287 | unsigned long copy_size = min_t(u64, | |
5288 | PAGE_CACHE_SIZE - pg_offset, | |
5289 | max_size - extent_offset); | |
5290 | memset(kaddr + pg_offset, 0, copy_size); | |
5291 | kunmap_atomic(kaddr); | |
5292 | } | |
5293 | kfree(tmp); | |
5294 | return 0; | |
5295 | } | |
5296 | ||
5297 | /* | |
5298 | * a bit scary, this does extent mapping from logical file offset to the disk. | |
5299 | * the ugly parts come from merging extents from the disk with the in-ram | |
5300 | * representation. This gets more complex because of the data=ordered code, | |
5301 | * where the in-ram extents might be locked pending data=ordered completion. | |
5302 | * | |
5303 | * This also copies inline extents directly into the page. | |
5304 | */ | |
5305 | ||
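| /* | |
| * Editor's note: a hedged usage sketch, not part of the original file. | |
| * Readers of the mapping typically follow the pattern used by the DIO | |
| * path further down: | |
| * | |
| *	em = btrfs_get_extent(inode, NULL, 0, start, len, 0); | |
| *	if (IS_ERR(em)) | |
| *		return PTR_ERR(em); | |
| *	if (em->block_start == EXTENT_MAP_HOLE) | |
| *		handle a hole; | |
| *	free_extent_map(em); | |
| * | |
| * The returned map may be larger than the requested range; callers clamp | |
| * with em->len - (start - em->start) as btrfs_get_blocks_direct() does. | |
| */ | |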
5306 | struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, | |
5307 | size_t pg_offset, u64 start, u64 len, | |
5308 | int create) | |
5309 | { | |
5310 | int ret; | |
5311 | int err = 0; | |
5312 | u64 bytenr; | |
5313 | u64 extent_start = 0; | |
5314 | u64 extent_end = 0; | |
5315 | u64 objectid = btrfs_ino(inode); | |
5316 | u32 found_type; | |
5317 | struct btrfs_path *path = NULL; | |
5318 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
5319 | struct btrfs_file_extent_item *item; | |
5320 | struct extent_buffer *leaf; | |
5321 | struct btrfs_key found_key; | |
5322 | struct extent_map *em = NULL; | |
5323 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | |
5324 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | |
5325 | struct btrfs_trans_handle *trans = NULL; | |
5326 | int compress_type; | |
5327 | ||
5328 | again: | |
5329 | read_lock(&em_tree->lock); | |
5330 | em = lookup_extent_mapping(em_tree, start, len); | |
5331 | if (em) | |
5332 | em->bdev = root->fs_info->fs_devices->latest_bdev; | |
5333 | read_unlock(&em_tree->lock); | |
5334 | ||
5335 | if (em) { | |
5336 | if (em->start > start || em->start + em->len <= start) | |
5337 | free_extent_map(em); | |
5338 | else if (em->block_start == EXTENT_MAP_INLINE && page) | |
5339 | free_extent_map(em); | |
5340 | else | |
5341 | goto out; | |
5342 | } | |
5343 | em = alloc_extent_map(); | |
5344 | if (!em) { | |
5345 | err = -ENOMEM; | |
5346 | goto out; | |
5347 | } | |
5348 | em->bdev = root->fs_info->fs_devices->latest_bdev; | |
5349 | em->start = EXTENT_MAP_HOLE; | |
5350 | em->orig_start = EXTENT_MAP_HOLE; | |
5351 | em->len = (u64)-1; | |
5352 | em->block_len = (u64)-1; | |
5353 | ||
5354 | if (!path) { | |
5355 | path = btrfs_alloc_path(); | |
5356 | if (!path) { | |
5357 | err = -ENOMEM; | |
5358 | goto out; | |
5359 | } | |
5360 | /* | |
5361 | * Chances are we'll be called again, so go ahead and do | |
5362 | * readahead | |
5363 | */ | |
5364 | path->reada = 1; | |
5365 | } | |
5366 | ||
5367 | ret = btrfs_lookup_file_extent(trans, root, path, | |
5368 | objectid, start, trans != NULL); | |
5369 | if (ret < 0) { | |
5370 | err = ret; | |
5371 | goto out; | |
5372 | } | |
5373 | ||
5374 | if (ret != 0) { | |
5375 | if (path->slots[0] == 0) | |
5376 | goto not_found; | |
5377 | path->slots[0]--; | |
5378 | } | |
5379 | ||
5380 | leaf = path->nodes[0]; | |
5381 | item = btrfs_item_ptr(leaf, path->slots[0], | |
5382 | struct btrfs_file_extent_item); | |
5383 | /* are we inside the extent that was found? */ | |
5384 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | |
5385 | found_type = btrfs_key_type(&found_key); | |
5386 | if (found_key.objectid != objectid || | |
5387 | found_type != BTRFS_EXTENT_DATA_KEY) { | |
5388 | goto not_found; | |
5389 | } | |
5390 | ||
5391 | found_type = btrfs_file_extent_type(leaf, item); | |
5392 | extent_start = found_key.offset; | |
5393 | compress_type = btrfs_file_extent_compression(leaf, item); | |
5394 | if (found_type == BTRFS_FILE_EXTENT_REG || | |
5395 | found_type == BTRFS_FILE_EXTENT_PREALLOC) { | |
5396 | extent_end = extent_start + | |
5397 | btrfs_file_extent_num_bytes(leaf, item); | |
5398 | } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { | |
5399 | size_t size; | |
5400 | size = btrfs_file_extent_inline_len(leaf, item); | |
5401 | extent_end = (extent_start + size + root->sectorsize - 1) & | |
5402 | ~((u64)root->sectorsize - 1); | |
5403 | } | |
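||
| /* | |
| * Editor's note (worked example, not original): the rounding above is the | |
| * usual power-of-two round-up.  With a 4096 byte sectorsize and an inline | |
| * extent of 5000 bytes starting at offset 0: | |
| * | |
| *	extent_end = (0 + 5000 + 4095) & ~4095ULL = 8192 | |
| * | |
| * so the in-memory extent always covers whole sectors. | |
| */ | |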
5404 | ||
5405 | if (start >= extent_end) { | |
5406 | path->slots[0]++; | |
5407 | if (path->slots[0] >= btrfs_header_nritems(leaf)) { | |
5408 | ret = btrfs_next_leaf(root, path); | |
5409 | if (ret < 0) { | |
5410 | err = ret; | |
5411 | goto out; | |
5412 | } | |
5413 | if (ret > 0) | |
5414 | goto not_found; | |
5415 | leaf = path->nodes[0]; | |
5416 | } | |
5417 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | |
5418 | if (found_key.objectid != objectid || | |
5419 | found_key.type != BTRFS_EXTENT_DATA_KEY) | |
5420 | goto not_found; | |
5421 | if (start + len <= found_key.offset) | |
5422 | goto not_found; | |
5423 | em->start = start; | |
5424 | em->orig_start = start; | |
5425 | em->len = found_key.offset - start; | |
5426 | goto not_found_em; | |
5427 | } | |
5428 | ||
5429 | if (found_type == BTRFS_FILE_EXTENT_REG || | |
5430 | found_type == BTRFS_FILE_EXTENT_PREALLOC) { | |
5431 | em->start = extent_start; | |
5432 | em->len = extent_end - extent_start; | |
5433 | em->orig_start = extent_start - | |
5434 | btrfs_file_extent_offset(leaf, item); | |
5435 | em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, | |
5436 | item); | |
5437 | bytenr = btrfs_file_extent_disk_bytenr(leaf, item); | |
5438 | if (bytenr == 0) { | |
5439 | em->block_start = EXTENT_MAP_HOLE; | |
5440 | goto insert; | |
5441 | } | |
5442 | if (compress_type != BTRFS_COMPRESS_NONE) { | |
5443 | set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); | |
5444 | em->compress_type = compress_type; | |
5445 | em->block_start = bytenr; | |
5446 | em->block_len = em->orig_block_len; | |
5447 | } else { | |
5448 | bytenr += btrfs_file_extent_offset(leaf, item); | |
5449 | em->block_start = bytenr; | |
5450 | em->block_len = em->len; | |
5451 | if (found_type == BTRFS_FILE_EXTENT_PREALLOC) | |
5452 | set_bit(EXTENT_FLAG_PREALLOC, &em->flags); | |
5453 | } | |
5454 | goto insert; | |
5455 | } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { | |
5456 | unsigned long ptr; | |
5457 | char *map; | |
5458 | size_t size; | |
5459 | size_t extent_offset; | |
5460 | size_t copy_size; | |
5461 | ||
5462 | em->block_start = EXTENT_MAP_INLINE; | |
5463 | if (!page || create) { | |
5464 | em->start = extent_start; | |
5465 | em->len = extent_end - extent_start; | |
5466 | goto out; | |
5467 | } | |
5468 | ||
5469 | size = btrfs_file_extent_inline_len(leaf, item); | |
5470 | extent_offset = page_offset(page) + pg_offset - extent_start; | |
5471 | copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset, | |
5472 | size - extent_offset); | |
5473 | em->start = extent_start + extent_offset; | |
5474 | em->len = (copy_size + root->sectorsize - 1) & | |
5475 | ~((u64)root->sectorsize - 1); | |
5476 | em->orig_block_len = em->len; | |
5477 | em->orig_start = em->start; | |
5478 | if (compress_type) { | |
5479 | set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); | |
5480 | em->compress_type = compress_type; | |
5481 | } | |
5482 | ptr = btrfs_file_extent_inline_start(item) + extent_offset; | |
5483 | if (create == 0 && !PageUptodate(page)) { | |
5484 | if (btrfs_file_extent_compression(leaf, item) != | |
5485 | BTRFS_COMPRESS_NONE) { | |
5486 | ret = uncompress_inline(path, inode, page, | |
5487 | pg_offset, | |
5488 | extent_offset, item); | |
5489 | BUG_ON(ret); /* -ENOMEM */ | |
5490 | } else { | |
5491 | map = kmap(page); | |
5492 | read_extent_buffer(leaf, map + pg_offset, ptr, | |
5493 | copy_size); | |
5494 | if (pg_offset + copy_size < PAGE_CACHE_SIZE) { | |
5495 | memset(map + pg_offset + copy_size, 0, | |
5496 | PAGE_CACHE_SIZE - pg_offset - | |
5497 | copy_size); | |
5498 | } | |
5499 | kunmap(page); | |
5500 | } | |
5501 | flush_dcache_page(page); | |
5502 | } else if (create && PageUptodate(page)) { | |
5503 | BUG(); | |
5504 | if (!trans) { | |
5505 | kunmap(page); | |
5506 | free_extent_map(em); | |
5507 | em = NULL; | |
5508 | ||
5509 | btrfs_release_path(path); | |
5510 | trans = btrfs_join_transaction(root); | |
5511 | ||
5512 | if (IS_ERR(trans)) | |
5513 | return ERR_CAST(trans); | |
5514 | goto again; | |
5515 | } | |
5516 | map = kmap(page); | |
5517 | write_extent_buffer(leaf, map + pg_offset, ptr, | |
5518 | copy_size); | |
5519 | kunmap(page); | |
5520 | btrfs_mark_buffer_dirty(leaf); | |
5521 | } | |
5522 | set_extent_uptodate(io_tree, em->start, | |
5523 | extent_map_end(em) - 1, NULL, GFP_NOFS); | |
5524 | goto insert; | |
5525 | } else { | |
5526 | WARN(1, KERN_ERR "btrfs unknown found_type %d\n", found_type); | |
5527 | } | |
5528 | not_found: | |
5529 | em->start = start; | |
5530 | em->orig_start = start; | |
5531 | em->len = len; | |
5532 | not_found_em: | |
5533 | em->block_start = EXTENT_MAP_HOLE; | |
5534 | set_bit(EXTENT_FLAG_VACANCY, &em->flags); | |
5535 | insert: | |
5536 | btrfs_release_path(path); | |
5537 | if (em->start > start || extent_map_end(em) <= start) { | |
5538 | printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed " | |
5539 | "[%llu %llu]\n", (unsigned long long)em->start, | |
5540 | (unsigned long long)em->len, | |
5541 | (unsigned long long)start, | |
5542 | (unsigned long long)len); | |
5543 | err = -EIO; | |
5544 | goto out; | |
5545 | } | |
5546 | ||
5547 | err = 0; | |
5548 | write_lock(&em_tree->lock); | |
5549 | ret = add_extent_mapping(em_tree, em); | |
5550 | /* it is possible that someone inserted the extent into the tree | |
5551 | * while we had the lock dropped. It is also possible that | |
5552 | * an overlapping map exists in the tree | |
5553 | */ | |
5554 | if (ret == -EEXIST) { | |
5555 | struct extent_map *existing; | |
5556 | ||
5557 | ret = 0; | |
5558 | ||
5559 | existing = lookup_extent_mapping(em_tree, start, len); | |
5560 | if (existing && (existing->start > start || | |
5561 | existing->start + existing->len <= start)) { | |
5562 | free_extent_map(existing); | |
5563 | existing = NULL; | |
5564 | } | |
5565 | if (!existing) { | |
5566 | existing = lookup_extent_mapping(em_tree, em->start, | |
5567 | em->len); | |
5568 | if (existing) { | |
5569 | err = merge_extent_mapping(em_tree, existing, | |
5570 | em, start, | |
5571 | root->sectorsize); | |
5572 | free_extent_map(existing); | |
5573 | if (err) { | |
5574 | free_extent_map(em); | |
5575 | em = NULL; | |
5576 | } | |
5577 | } else { | |
5578 | err = -EIO; | |
5579 | free_extent_map(em); | |
5580 | em = NULL; | |
5581 | } | |
5582 | } else { | |
5583 | free_extent_map(em); | |
5584 | em = existing; | |
5585 | err = 0; | |
5586 | } | |
5587 | } | |
5588 | write_unlock(&em_tree->lock); | |
5589 | out: | |
5590 | ||
5591 | if (em) | |
5592 | trace_btrfs_get_extent(root, em); | |
5593 | ||
5594 | if (path) | |
5595 | btrfs_free_path(path); | |
5596 | if (trans) { | |
5597 | ret = btrfs_end_transaction(trans, root); | |
5598 | if (!err) | |
5599 | err = ret; | |
5600 | } | |
5601 | if (err) { | |
5602 | free_extent_map(em); | |
5603 | return ERR_PTR(err); | |
5604 | } | |
5605 | BUG_ON(!em); /* Error is always set */ | |
5606 | return em; | |
5607 | } | |
5608 | ||
5609 | struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, | |
5610 | size_t pg_offset, u64 start, u64 len, | |
5611 | int create) | |
5612 | { | |
5613 | struct extent_map *em; | |
5614 | struct extent_map *hole_em = NULL; | |
5615 | u64 range_start = start; | |
5616 | u64 end; | |
5617 | u64 found; | |
5618 | u64 found_end; | |
5619 | int err = 0; | |
5620 | ||
5621 | em = btrfs_get_extent(inode, page, pg_offset, start, len, create); | |
5622 | if (IS_ERR(em)) | |
5623 | return em; | |
5624 | if (em) { | |
5625 | /* | |
5626 | * if our em maps to a hole, there might | |
5627 | * actually be delalloc bytes behind it | |
5628 | */ | |
5629 | if (em->block_start != EXTENT_MAP_HOLE) | |
5630 | return em; | |
5631 | else | |
5632 | hole_em = em; | |
5633 | } | |
5634 | ||
5635 | /* check to see if we've wrapped (len == -1 or similar) */ | |
5636 | end = start + len; | |
5637 | if (end < start) | |
5638 | end = (u64)-1; | |
5639 | else | |
5640 | end -= 1; | |
5641 | ||
5642 | em = NULL; | |
5643 | ||
5644 | /* ok, we didn't find anything, let's look for delalloc */ | |
5645 | found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start, | |
5646 | end, len, EXTENT_DELALLOC, 1); | |
5647 | found_end = range_start + found; | |
5648 | if (found_end < range_start) | |
5649 | found_end = (u64)-1; | |
5650 | ||
5651 | /* | |
5652 | * we didn't find anything useful, return | |
5653 | * the original results from get_extent() | |
5654 | */ | |
5655 | if (range_start > end || found_end <= start) { | |
5656 | em = hole_em; | |
5657 | hole_em = NULL; | |
5658 | goto out; | |
5659 | } | |
5660 | ||
5661 | /* adjust the range_start to make sure it doesn't | |
5662 | * go backwards from the start they passed in | |
5663 | */ | |
5664 | range_start = max(start, range_start); | |
5665 | found = found_end - range_start; | |
5666 | ||
5667 | if (found > 0) { | |
5668 | u64 hole_start = start; | |
5669 | u64 hole_len = len; | |
5670 | ||
5671 | em = alloc_extent_map(); | |
5672 | if (!em) { | |
5673 | err = -ENOMEM; | |
5674 | goto out; | |
5675 | } | |
5676 | /* | |
5677 | * when btrfs_get_extent can't find anything it | |
5678 | * returns one huge hole | |
5679 | * | |
5680 | * make sure what it found really fits our range, and | |
5681 | * adjust to make sure it is based on the start from | |
5682 | * the caller | |
5683 | */ | |
5684 | if (hole_em) { | |
5685 | u64 calc_end = extent_map_end(hole_em); | |
5686 | ||
5687 | if (calc_end <= start || (hole_em->start > end)) { | |
5688 | free_extent_map(hole_em); | |
5689 | hole_em = NULL; | |
5690 | } else { | |
5691 | hole_start = max(hole_em->start, start); | |
5692 | hole_len = calc_end - hole_start; | |
5693 | } | |
5694 | } | |
5695 | em->bdev = NULL; | |
5696 | if (hole_em && range_start > hole_start) { | |
5697 | /* our hole starts before our delalloc, so we | |
5698 | * have to return just the parts of the hole | |
5699 | * that go until the delalloc starts | |
5700 | */ | |
5701 | em->len = min(hole_len, | |
5702 | range_start - hole_start); | |
5703 | em->start = hole_start; | |
5704 | em->orig_start = hole_start; | |
5705 | /* | |
5706 | * don't adjust block start at all, | |
5707 | * it is fixed at EXTENT_MAP_HOLE | |
5708 | */ | |
5709 | em->block_start = hole_em->block_start; | |
5710 | em->block_len = hole_len; | |
5711 | } else { | |
5712 | em->start = range_start; | |
5713 | em->len = found; | |
5714 | em->orig_start = range_start; | |
5715 | em->block_start = EXTENT_MAP_DELALLOC; | |
5716 | em->block_len = found; | |
5717 | } | |
5718 | } else if (hole_em) { | |
5719 | return hole_em; | |
5720 | } | |
5721 | out: | |
5722 | ||
5723 | free_extent_map(hole_em); | |
5724 | if (err) { | |
5725 | free_extent_map(em); | |
5726 | return ERR_PTR(err); | |
5727 | } | |
5728 | return em; | |
5729 | } | |
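||
| /* | |
| * Editor's note (not in the original source): the EXTENT_MAP_DELALLOC | |
| * block_start used above is a synthetic marker.  My understanding is that | |
| * it lets the fiemap code report ranges that are dirty in memory but not | |
| * yet allocated on disk, instead of presenting them as plain holes. | |
| */ | |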
5730 | ||
5731 | static struct extent_map *btrfs_new_extent_direct(struct inode *inode, | |
5732 | u64 start, u64 len) | |
5733 | { | |
5734 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
5735 | struct btrfs_trans_handle *trans; | |
5736 | struct extent_map *em; | |
5737 | struct btrfs_key ins; | |
5738 | u64 alloc_hint; | |
5739 | int ret; | |
5740 | ||
5741 | trans = btrfs_join_transaction(root); | |
5742 | if (IS_ERR(trans)) | |
5743 | return ERR_CAST(trans); | |
5744 | ||
5745 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; | |
5746 | ||
5747 | alloc_hint = get_extent_allocation_hint(inode, start, len); | |
5748 | ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0, | |
5749 | alloc_hint, &ins, 1); | |
5750 | if (ret) { | |
5751 | em = ERR_PTR(ret); | |
5752 | goto out; | |
5753 | } | |
5754 | ||
5755 | em = create_pinned_em(inode, start, ins.offset, start, ins.objectid, | |
5756 | ins.offset, ins.offset, 0); | |
5757 | if (IS_ERR(em)) | |
5758 | goto out; | |
5759 | ||
5760 | ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid, | |
5761 | ins.offset, ins.offset, 0); | |
5762 | if (ret) { | |
5763 | btrfs_free_reserved_extent(root, ins.objectid, ins.offset); | |
5764 | em = ERR_PTR(ret); | |
5765 | } | |
5766 | out: | |
5767 | btrfs_end_transaction(trans, root); | |
5768 | return em; | |
5769 | } | |
5770 | ||
5771 | /* | |
5772 | * returns 1 when nocow is safe, < 0 on error, and 0 if the | |
5773 | * block must be cow'd | |
5774 | */ | |
5775 | static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans, | |
5776 | struct inode *inode, u64 offset, u64 len) | |
5777 | { | |
5778 | struct btrfs_path *path; | |
5779 | int ret; | |
5780 | struct extent_buffer *leaf; | |
5781 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
5782 | struct btrfs_file_extent_item *fi; | |
5783 | struct btrfs_key key; | |
5784 | u64 disk_bytenr; | |
5785 | u64 backref_offset; | |
5786 | u64 extent_end; | |
5787 | u64 num_bytes; | |
5788 | int slot; | |
5789 | int found_type; | |
5790 | ||
5791 | path = btrfs_alloc_path(); | |
5792 | if (!path) | |
5793 | return -ENOMEM; | |
5794 | ||
5795 | ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode), | |
5796 | offset, 0); | |
5797 | if (ret < 0) | |
5798 | goto out; | |
5799 | ||
5800 | slot = path->slots[0]; | |
5801 | if (ret == 1) { | |
5802 | if (slot == 0) { | |
5803 | /* can't find the item, must cow */ | |
5804 | ret = 0; | |
5805 | goto out; | |
5806 | } | |
5807 | slot--; | |
5808 | } | |
5809 | ret = 0; | |
5810 | leaf = path->nodes[0]; | |
5811 | btrfs_item_key_to_cpu(leaf, &key, slot); | |
5812 | if (key.objectid != btrfs_ino(inode) || | |
5813 | key.type != BTRFS_EXTENT_DATA_KEY) { | |
5814 | /* not our file or wrong item type, must cow */ | |
5815 | goto out; | |
5816 | } | |
5817 | ||
5818 | if (key.offset > offset) { | |
5819 | /* Wrong offset, must cow */ | |
5820 | goto out; | |
5821 | } | |
5822 | ||
5823 | fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); | |
5824 | found_type = btrfs_file_extent_type(leaf, fi); | |
5825 | if (found_type != BTRFS_FILE_EXTENT_REG && | |
5826 | found_type != BTRFS_FILE_EXTENT_PREALLOC) { | |
5827 | /* not a regular extent, must cow */ | |
5828 | goto out; | |
5829 | } | |
5830 | disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); | |
5831 | backref_offset = btrfs_file_extent_offset(leaf, fi); | |
5832 | ||
5833 | extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); | |
5834 | if (extent_end < offset + len) { | |
5835 | /* extent doesn't include our full range, must cow */ | |
5836 | goto out; | |
5837 | } | |
5838 | ||
5839 | if (btrfs_extent_readonly(root, disk_bytenr)) | |
5840 | goto out; | |
5841 | ||
5842 | /* | |
5843 | * look for other files referencing this extent, if we | |
5844 | * find any we must cow | |
5845 | */ | |
5846 | if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode), | |
5847 | key.offset - backref_offset, disk_bytenr)) | |
5848 | goto out; | |
5849 | ||
5850 | /* | |
5851 | * adjust disk_bytenr and num_bytes to cover just the bytes | |
5852 | * in this extent we are about to write. If there | |
5853 | * are any csums in that range we have to cow in order | |
5854 | * to keep the csums correct | |
5855 | */ | |
5856 | disk_bytenr += backref_offset; | |
5857 | disk_bytenr += offset - key.offset; | |
5858 | num_bytes = min(offset + len, extent_end) - offset; | |
5859 | if (csum_exist_in_range(root, disk_bytenr, num_bytes)) | |
5860 | goto out; | |
5861 | /* | |
5862 | * all of the above have passed, it is safe to overwrite this extent | |
5863 | * without cow | |
5864 | */ | |
5865 | ret = 1; | |
5866 | out: | |
5867 | btrfs_free_path(path); | |
5868 | return ret; | |
5869 | } | |
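||
| /* | |
| * Editor's summary (added for readability, not original): the checks above | |
| * amount to a checklist -- the extent item must belong to this inode, be a | |
| * REG or PREALLOC extent, cover the whole [offset, offset + len) range, not | |
| * sit in a read-only block group, have no other references, and have no | |
| * checksums over the bytes we are about to rewrite.  Only then is a nocow | |
| * overwrite considered safe. | |
| */ | |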
5870 | ||
5871 | static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, | |
5872 | struct extent_state **cached_state, int writing) | |
5873 | { | |
5874 | struct btrfs_ordered_extent *ordered; | |
5875 | int ret = 0; | |
5876 | ||
5877 | while (1) { | |
5878 | lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, | |
5879 | 0, cached_state); | |
5880 | /* | |
5881 | * We're concerned with the entire range that we're going to be | |
5882 | * doing DIO to, so we need to make sure there are no ordered | |
5883 | * extents in this range. | |
5884 | */ | |
5885 | ordered = btrfs_lookup_ordered_range(inode, lockstart, | |
5886 | lockend - lockstart + 1); | |
5887 | ||
5888 | /* | |
5889 | * We need to make sure there are no buffered pages in this | |
5890 | * range either, we could have raced between the invalidate in | |
5891 | * generic_file_direct_write and locking the extent. The | |
5892 | * invalidate needs to happen so that reads after a write do not | |
5893 | * get stale data. | |
5894 | */ | |
5895 | if (!ordered && (!writing || | |
5896 | !test_range_bit(&BTRFS_I(inode)->io_tree, | |
5897 | lockstart, lockend, EXTENT_UPTODATE, 0, | |
5898 | *cached_state))) | |
5899 | break; | |
5900 | ||
5901 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, | |
5902 | cached_state, GFP_NOFS); | |
5903 | ||
5904 | if (ordered) { | |
5905 | btrfs_start_ordered_extent(inode, ordered, 1); | |
5906 | btrfs_put_ordered_extent(ordered); | |
5907 | } else { | |
5908 | /* Screw you mmap */ | |
5909 | ret = filemap_write_and_wait_range(inode->i_mapping, | |
5910 | lockstart, | |
5911 | lockend); | |
5912 | if (ret) | |
5913 | break; | |
5914 | ||
5915 | /* | |
5916 | * If we found a page that couldn't be invalidated just | |
5917 | * fall back to buffered. | |
5918 | */ | |
5919 | ret = invalidate_inode_pages2_range(inode->i_mapping, | |
5920 | lockstart >> PAGE_CACHE_SHIFT, | |
5921 | lockend >> PAGE_CACHE_SHIFT); | |
5922 | if (ret) | |
5923 | break; | |
5924 | } | |
5925 | ||
5926 | cond_resched(); | |
5927 | } | |
5928 | ||
5929 | return ret; | |
5930 | } | |
5931 | ||
5932 | static struct extent_map *create_pinned_em(struct inode *inode, u64 start, | |
5933 | u64 len, u64 orig_start, | |
5934 | u64 block_start, u64 block_len, | |
5935 | u64 orig_block_len, int type) | |
5936 | { | |
5937 | struct extent_map_tree *em_tree; | |
5938 | struct extent_map *em; | |
5939 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
5940 | int ret; | |
5941 | ||
5942 | em_tree = &BTRFS_I(inode)->extent_tree; | |
5943 | em = alloc_extent_map(); | |
5944 | if (!em) | |
5945 | return ERR_PTR(-ENOMEM); | |
5946 | ||
5947 | em->start = start; | |
5948 | em->orig_start = orig_start; | |
5949 | em->len = len; | |
5950 | em->block_len = block_len; | |
5951 | em->block_start = block_start; | |
5952 | em->bdev = root->fs_info->fs_devices->latest_bdev; | |
5953 | em->orig_block_len = orig_block_len; | |
5954 | em->generation = -1; | |
5955 | set_bit(EXTENT_FLAG_PINNED, &em->flags); | |
5956 | if (type == BTRFS_ORDERED_PREALLOC) | |
5957 | set_bit(EXTENT_FLAG_FILLING, &em->flags); | |
5958 | ||
5959 | do { | |
5960 | btrfs_drop_extent_cache(inode, em->start, | |
5961 | em->start + em->len - 1, 0); | |
5962 | write_lock(&em_tree->lock); | |
5963 | ret = add_extent_mapping(em_tree, em); | |
5964 | if (!ret) | |
5965 | list_move(&em->list, | |
5966 | &em_tree->modified_extents); | |
5967 | write_unlock(&em_tree->lock); | |
5968 | } while (ret == -EEXIST); | |
5969 | ||
5970 | if (ret) { | |
5971 | free_extent_map(em); | |
5972 | return ERR_PTR(ret); | |
5973 | } | |
5974 | ||
5975 | return em; | |
5976 | } | |
5977 | ||
5978 | ||
5979 | static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, | |
5980 | struct buffer_head *bh_result, int create) | |
5981 | { | |
5982 | struct extent_map *em; | |
5983 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
5984 | struct extent_state *cached_state = NULL; | |
5985 | u64 start = iblock << inode->i_blkbits; | |
5986 | u64 lockstart, lockend; | |
5987 | u64 len = bh_result->b_size; | |
5988 | struct btrfs_trans_handle *trans; | |
5989 | int unlock_bits = EXTENT_LOCKED; | |
5990 | int ret; | |
5991 | ||
5992 | if (create) { | |
5993 | ret = btrfs_delalloc_reserve_space(inode, len); | |
5994 | if (ret) | |
5995 | return ret; | |
5996 | unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY; | |
5997 | } else { | |
5998 | len = min_t(u64, len, root->sectorsize); | |
5999 | } | |
6000 | ||
6001 | lockstart = start; | |
6002 | lockend = start + len - 1; | |
6003 | ||
6004 | /* | |
6005 | * If this errors out it's because we couldn't invalidate pagecache for | |
6006 | * this range and we need to fall back to buffered. | |
6007 | */ | |
6008 | if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create)) | |
6009 | return -ENOTBLK; | |
6010 | ||
6011 | if (create) { | |
6012 | ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, | |
6013 | lockend, EXTENT_DELALLOC, NULL, | |
6014 | &cached_state, GFP_NOFS); | |
6015 | if (ret) | |
6016 | goto unlock_err; | |
6017 | } | |
6018 | ||
6019 | em = btrfs_get_extent(inode, NULL, 0, start, len, 0); | |
6020 | if (IS_ERR(em)) { | |
6021 | ret = PTR_ERR(em); | |
6022 | goto unlock_err; | |
6023 | } | |
6024 | ||
6025 | /* | |
6026 | * Ok, for INLINE and COMPRESSED extents we need to fall back to buffered | |
6027 | * IO. INLINE is special, and we could probably kludge it in here, but | |
6028 | * it's still buffered so for safety let's just fall back to the generic | |
6029 | * buffered path. | |
6030 | * | |
6031 | * For COMPRESSED we _have_ to read the entire extent in so we can | |
6032 | * decompress it, so there will be buffering required no matter what we | |
6033 | * do, so go ahead and fallback to buffered. | |
6034 | * | |
6035 | * We return -ENOTBLK because that's what makes DIO go ahead and go back | |
6036 | * to buffered IO. Don't blame me, this is the price we pay for using | |
6037 | * the generic code. | |
6038 | */ | |
6039 | if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || | |
6040 | em->block_start == EXTENT_MAP_INLINE) { | |
6041 | free_extent_map(em); | |
6042 | ret = -ENOTBLK; | |
6043 | goto unlock_err; | |
6044 | } | |
6045 | ||
6046 | /* Just a good old fashioned hole, return */ | |
6047 | if (!create && (em->block_start == EXTENT_MAP_HOLE || | |
6048 | test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { | |
6049 | free_extent_map(em); | |
6050 | ret = 0; | |
6051 | goto unlock_err; | |
6052 | } | |
6053 | ||
6054 | /* | |
6055 | * We don't allocate a new extent in the following cases | |
6056 | * | |
6057 | * 1) The inode is marked as NODATACOW. In this case we'll just use the | |
6058 | * existing extent. | |
6059 | * 2) The extent is marked as PREALLOC. We're good to go here and can | |
6060 | * just use the extent. | |
6061 | * | |
6062 | */ | |
6063 | if (!create) { | |
6064 | len = min(len, em->len - (start - em->start)); | |
6065 | lockstart = start + len; | |
6066 | goto unlock; | |
6067 | } | |
6068 | ||
6069 | if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || | |
6070 | ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && | |
6071 | em->block_start != EXTENT_MAP_HOLE)) { | |
6072 | int type; | |
6073 | int ret; | |
6074 | u64 block_start; | |
6075 | ||
6076 | if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) | |
6077 | type = BTRFS_ORDERED_PREALLOC; | |
6078 | else | |
6079 | type = BTRFS_ORDERED_NOCOW; | |
6080 | len = min(len, em->len - (start - em->start)); | |
6081 | block_start = em->block_start + (start - em->start); | |
6082 | ||
6083 | /* | |
6084 | * we're not going to log anything, but we do need | |
6085 | * to make sure the current transaction stays open | |
6086 | * while we look for nocow cross refs | |
6087 | */ | |
6088 | trans = btrfs_join_transaction(root); | |
6089 | if (IS_ERR(trans)) | |
6090 | goto must_cow; | |
6091 | ||
6092 | if (can_nocow_odirect(trans, inode, start, len) == 1) { | |
6093 | u64 orig_start = em->orig_start; | |
6094 | u64 orig_block_len = em->orig_block_len; | |
6095 | ||
6096 | if (type == BTRFS_ORDERED_PREALLOC) { | |
6097 | free_extent_map(em); | |
6098 | em = create_pinned_em(inode, start, len, | |
6099 | orig_start, | |
6100 | block_start, len, | |
6101 | orig_block_len, type); | |
6102 | if (IS_ERR(em)) { | |
6103 | btrfs_end_transaction(trans, root); | |
6104 | goto unlock_err; | |
6105 | } | |
6106 | } | |
6107 | ||
6108 | ret = btrfs_add_ordered_extent_dio(inode, start, | |
6109 | block_start, len, len, type); | |
6110 | btrfs_end_transaction(trans, root); | |
6111 | if (ret) { | |
6112 | free_extent_map(em); | |
6113 | goto unlock_err; | |
6114 | } | |
6115 | goto unlock; | |
6116 | } | |
6117 | btrfs_end_transaction(trans, root); | |
6118 | } | |
6119 | must_cow: | |
6120 | /* | |
6121 | * this will cow the extent, reset the len in case we changed | |
6122 | * it above | |
6123 | */ | |
6124 | len = bh_result->b_size; | |
6125 | free_extent_map(em); | |
6126 | em = btrfs_new_extent_direct(inode, start, len); | |
6127 | if (IS_ERR(em)) { | |
6128 | ret = PTR_ERR(em); | |
6129 | goto unlock_err; | |
6130 | } | |
6131 | len = min(len, em->len - (start - em->start)); | |
6132 | unlock: | |
6133 | bh_result->b_blocknr = (em->block_start + (start - em->start)) >> | |
6134 | inode->i_blkbits; | |
6135 | bh_result->b_size = len; | |
6136 | bh_result->b_bdev = em->bdev; | |
6137 | set_buffer_mapped(bh_result); | |
6138 | if (create) { | |
6139 | if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) | |
6140 | set_buffer_new(bh_result); | |
6141 | ||
6142 | /* | |
6143 | * Need to update the i_size under the extent lock so buffered | |
6144 | * readers will get the updated i_size when we unlock. | |
6145 | */ | |
6146 | if (start + len > i_size_read(inode)) | |
6147 | i_size_write(inode, start + len); | |
6148 | } | |
6149 | ||
6150 | /* | |
6151 | * In the case of write we need to clear and unlock the entire range, | |
6152 | * in the case of read we need to unlock only the end area that we | |
6153 | * aren't using if there is any left over space. | |
6154 | */ | |
6155 | if (lockstart < lockend) { | |
6156 | if (create && len < lockend - lockstart) { | |
6157 | clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, | |
6158 | lockstart + len - 1, | |
6159 | unlock_bits | EXTENT_DEFRAG, 1, 0, | |
6160 | &cached_state, GFP_NOFS); | |
6161 | /* | |
6162 | * Besides unlocking, we also need to clean up the reserved space | |
6163 | * for the remaining range by attaching EXTENT_DO_ACCOUNTING. | |
6164 | */ | |
6165 | clear_extent_bit(&BTRFS_I(inode)->io_tree, | |
6166 | lockstart + len, lockend, | |
6167 | unlock_bits | EXTENT_DO_ACCOUNTING | | |
6168 | EXTENT_DEFRAG, 1, 0, NULL, GFP_NOFS); | |
6169 | } else { | |
6170 | clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, | |
6171 | lockend, unlock_bits, 1, 0, | |
6172 | &cached_state, GFP_NOFS); | |
6173 | } | |
6174 | } else { | |
6175 | free_extent_state(cached_state); | |
6176 | } | |
6177 | ||
6178 | free_extent_map(em); | |
6179 | ||
6180 | return 0; | |
6181 | ||
6182 | unlock_err: | |
6183 | if (create) | |
6184 | unlock_bits |= EXTENT_DO_ACCOUNTING; | |
6185 | ||
6186 | clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, | |
6187 | unlock_bits, 1, 0, &cached_state, GFP_NOFS); | |
6188 | return ret; | |
6189 | } | |
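||
| /* | |
| * Editor's note: an illustrative sketch of the buffer_head contract, not | |
| * part of the original source.  With 4K blocks (i_blkbits == 12), a | |
| * request for iblock 16 with bh_result->b_size of 64K maps to | |
| * start = 65536, len = 65536.  On success the helper fills in: | |
| * | |
| *	bh_result->b_blocknr = (em->block_start + (start - em->start)) >> 12; | |
| *	bh_result->b_size    = len (possibly shortened to fit the extent); | |
| * | |
| * The generic direct-IO code is assumed to use these values to build the | |
| * bios it later hands back through btrfs_submit_direct(). | |
| */ | |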
6190 | ||
6191 | struct btrfs_dio_private { | |
6192 | struct inode *inode; | |
6193 | u64 logical_offset; | |
6194 | u64 disk_bytenr; | |
6195 | u64 bytes; | |
6196 | void *private; | |
6197 | ||
6198 | /* number of bios pending for this dio */ | |
6199 | atomic_t pending_bios; | |
6200 | ||
6201 | /* IO errors */ | |
6202 | int errors; | |
6203 | ||
6204 | struct bio *orig_bio; | |
6205 | }; | |
6206 | ||
6207 | static void btrfs_endio_direct_read(struct bio *bio, int err) | |
6208 | { | |
6209 | struct btrfs_dio_private *dip = bio->bi_private; | |
6210 | struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1; | |
6211 | struct bio_vec *bvec = bio->bi_io_vec; | |
6212 | struct inode *inode = dip->inode; | |
6213 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
6214 | u64 start; | |
6215 | ||
6216 | start = dip->logical_offset; | |
6217 | do { | |
6218 | if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { | |
6219 | struct page *page = bvec->bv_page; | |
6220 | char *kaddr; | |
6221 | u32 csum = ~(u32)0; | |
6222 | u64 private = ~(u32)0; | |
6223 | unsigned long flags; | |
6224 | ||
6225 | if (get_state_private(&BTRFS_I(inode)->io_tree, | |
6226 | start, &private)) | |
6227 | goto failed; | |
6228 | local_irq_save(flags); | |
6229 | kaddr = kmap_atomic(page); | |
6230 | csum = btrfs_csum_data(root, kaddr + bvec->bv_offset, | |
6231 | csum, bvec->bv_len); | |
6232 | btrfs_csum_final(csum, (char *)&csum); | |
6233 | kunmap_atomic(kaddr); | |
6234 | local_irq_restore(flags); | |
6235 | ||
6236 | flush_dcache_page(bvec->bv_page); | |
6237 | if (csum != private) { | |
6238 | failed: | |
6239 | printk(KERN_ERR "btrfs csum failed ino %llu off" | |
6240 | " %llu csum %u private %u\n", | |
6241 | (unsigned long long)btrfs_ino(inode), | |
6242 | (unsigned long long)start, | |
6243 | csum, (unsigned)private); | |
6244 | err = -EIO; | |
6245 | } | |
6246 | } | |
6247 | ||
6248 | start += bvec->bv_len; | |
6249 | bvec++; | |
6250 | } while (bvec <= bvec_end); | |
6251 | ||
6252 | unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, | |
6253 | dip->logical_offset + dip->bytes - 1); | |
6254 | bio->bi_private = dip->private; | |
6255 | ||
6256 | kfree(dip); | |
6257 | ||
6258 | /* If we had a csum failure make sure to clear the uptodate flag */ | |
6259 | if (err) | |
6260 | clear_bit(BIO_UPTODATE, &bio->bi_flags); | |
6261 | dio_end_io(bio, err); | |
6262 | } | |
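||
| /* | |
| * Editor's note (not in the original source): the read completion above | |
| * recomputes a checksum over every bio_vec and compares it against the | |
| * value stored as the extent state "private".  That value is assumed to | |
| * have been stashed at submit time by btrfs_lookup_bio_sums_dio(), so a | |
| * mismatch here means the data read back from disk is corrupt and the bio | |
| * is completed with -EIO. | |
| */ | |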
6263 | ||
6264 | static void btrfs_endio_direct_write(struct bio *bio, int err) | |
6265 | { | |
6266 | struct btrfs_dio_private *dip = bio->bi_private; | |
6267 | struct inode *inode = dip->inode; | |
6268 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
6269 | struct btrfs_ordered_extent *ordered = NULL; | |
6270 | u64 ordered_offset = dip->logical_offset; | |
6271 | u64 ordered_bytes = dip->bytes; | |
6272 | int ret; | |
6273 | ||
6274 | if (err) | |
6275 | goto out_done; | |
6276 | again: | |
6277 | ret = btrfs_dec_test_first_ordered_pending(inode, &ordered, | |
6278 | &ordered_offset, | |
6279 | ordered_bytes, !err); | |
6280 | if (!ret) | |
6281 | goto out_test; | |
6282 | ||
6283 | ordered->work.func = finish_ordered_fn; | |
6284 | ordered->work.flags = 0; | |
6285 | btrfs_queue_worker(&root->fs_info->endio_write_workers, | |
6286 | &ordered->work); | |
6287 | out_test: | |
6288 | /* | |
6289 | * our bio might span multiple ordered extents. If we haven't | |
6290 | * completed the accounting for the whole dio, go back and try again | |
6291 | */ | |
6292 | if (ordered_offset < dip->logical_offset + dip->bytes) { | |
6293 | ordered_bytes = dip->logical_offset + dip->bytes - | |
6294 | ordered_offset; | |
6295 | ordered = NULL; | |
6296 | goto again; | |
6297 | } | |
6298 | out_done: | |
6299 | bio->bi_private = dip->private; | |
6300 | ||
6301 | kfree(dip); | |
6302 | ||
6303 | /* If we had an error make sure to clear the uptodate flag */ | |
6304 | if (err) | |
6305 | clear_bit(BIO_UPTODATE, &bio->bi_flags); | |
6306 | dio_end_io(bio, err); | |
6307 | } | |
6308 | ||
6309 | static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw, | |
6310 | struct bio *bio, int mirror_num, | |
6311 | unsigned long bio_flags, u64 offset) | |
6312 | { | |
6313 | int ret; | |
6314 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
6315 | ret = btrfs_csum_one_bio(root, inode, bio, offset, 1); | |
6316 | BUG_ON(ret); /* -ENOMEM */ | |
6317 | return 0; | |
6318 | } | |
6319 | ||
6320 | static void btrfs_end_dio_bio(struct bio *bio, int err) | |
6321 | { | |
6322 | struct btrfs_dio_private *dip = bio->bi_private; | |
6323 | ||
6324 | if (err) { | |
6325 | printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu " | |
6326 | "sector %#Lx len %u err no %d\n", | |
6327 | (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw, | |
6328 | (unsigned long long)bio->bi_sector, bio->bi_size, err); | |
6329 | dip->errors = 1; | |
6330 | ||
6331 | /* | |
6332 | * before the atomic variable goes to zero, we must make sure | |
6333 | * dip->errors is perceived to be set. | |
6334 | */ | |
6335 | smp_mb__before_atomic_dec(); | |
6336 | } | |
6337 | ||
6338 | /* if there are more bios still pending for this dio, just exit */ | |
6339 | if (!atomic_dec_and_test(&dip->pending_bios)) | |
6340 | goto out; | |
6341 | ||
6342 | if (dip->errors) | |
6343 | bio_io_error(dip->orig_bio); | |
6344 | else { | |
6345 | set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags); | |
6346 | bio_endio(dip->orig_bio, 0); | |
6347 | } | |
6348 | out: | |
6349 | bio_put(bio); | |
6350 | } | |
6351 | ||
6352 | static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev, | |
6353 | u64 first_sector, gfp_t gfp_flags) | |
6354 | { | |
6355 | int nr_vecs = bio_get_nr_vecs(bdev); | |
6356 | return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags); | |
6357 | } | |
6358 | ||
6359 | static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, | |
6360 | int rw, u64 file_offset, int skip_sum, | |
6361 | int async_submit) | |
6362 | { | |
6363 | int write = rw & REQ_WRITE; | |
6364 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
6365 | int ret; | |
6366 | ||
6367 | if (async_submit) | |
6368 | async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers); | |
6369 | ||
6370 | bio_get(bio); | |
6371 | ||
6372 | if (!write) { | |
6373 | ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); | |
6374 | if (ret) | |
6375 | goto err; | |
6376 | } | |
6377 | ||
6378 | if (skip_sum) | |
6379 | goto map; | |
6380 | ||
6381 | if (write && async_submit) { | |
6382 | ret = btrfs_wq_submit_bio(root->fs_info, | |
6383 | inode, rw, bio, 0, 0, | |
6384 | file_offset, | |
6385 | __btrfs_submit_bio_start_direct_io, | |
6386 | __btrfs_submit_bio_done); | |
6387 | goto err; | |
6388 | } else if (write) { | |
6389 | /* | |
6390 | * If we aren't doing async submit, calculate the csum of the | |
6391 | * bio now. | |
6392 | */ | |
6393 | ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1); | |
6394 | if (ret) | |
6395 | goto err; | |
6396 | } else if (!skip_sum) { | |
6397 | ret = btrfs_lookup_bio_sums_dio(root, inode, bio, file_offset); | |
6398 | if (ret) | |
6399 | goto err; | |
6400 | } | |
6401 | ||
6402 | map: | |
6403 | ret = btrfs_map_bio(root, rw, bio, 0, async_submit); | |
6404 | err: | |
6405 | bio_put(bio); | |
6406 | return ret; | |
6407 | } | |
6408 | ||
6409 | static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, | |
6410 | int skip_sum) | |
6411 | { | |
6412 | struct inode *inode = dip->inode; | |
6413 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
6414 | struct bio *bio; | |
6415 | struct bio *orig_bio = dip->orig_bio; | |
6416 | struct bio_vec *bvec = orig_bio->bi_io_vec; | |
6417 | u64 start_sector = orig_bio->bi_sector; | |
6418 | u64 file_offset = dip->logical_offset; | |
6419 | u64 submit_len = 0; | |
6420 | u64 map_length; | |
6421 | int nr_pages = 0; | |
6422 | int ret = 0; | |
6423 | int async_submit = 0; | |
6424 | ||
6425 | map_length = orig_bio->bi_size; | |
6426 | ret = btrfs_map_block(root->fs_info, READ, start_sector << 9, | |
6427 | &map_length, NULL, 0); | |
6428 | if (ret) { | |
6429 | bio_put(orig_bio); | |
6430 | return -EIO; | |
6431 | } | |
6432 | ||
6433 | if (map_length >= orig_bio->bi_size) { | |
6434 | bio = orig_bio; | |
6435 | goto submit; | |
6436 | } | |
6437 | ||
6438 | async_submit = 1; | |
6439 | bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); | |
6440 | if (!bio) | |
6441 | return -ENOMEM; | |
6442 | bio->bi_private = dip; | |
6443 | bio->bi_end_io = btrfs_end_dio_bio; | |
6444 | atomic_inc(&dip->pending_bios); | |
6445 | ||
6446 | while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) { | |
6447 | if (unlikely(map_length < submit_len + bvec->bv_len || | |
6448 | bio_add_page(bio, bvec->bv_page, bvec->bv_len, | |
6449 | bvec->bv_offset) < bvec->bv_len)) { | |
6450 | /* | |
6451 | * inc the count before we submit the bio so | |
6452 | * we know the end IO handler can't drop the | |
6453 | * count to zero and free the dip before we're | |
6454 | * done setting it up | |
6455 | */ | |
6456 | atomic_inc(&dip->pending_bios); | |
6457 | ret = __btrfs_submit_dio_bio(bio, inode, rw, | |
6458 | file_offset, skip_sum, | |
6459 | async_submit); | |
6460 | if (ret) { | |
6461 | bio_put(bio); | |
6462 | atomic_dec(&dip->pending_bios); | |
6463 | goto out_err; | |
6464 | } | |
6465 | ||
6466 | start_sector += submit_len >> 9; | |
6467 | file_offset += submit_len; | |
6468 | ||
6469 | submit_len = 0; | |
6470 | nr_pages = 0; | |
6471 | ||
6472 | bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, | |
6473 | start_sector, GFP_NOFS); | |
6474 | if (!bio) | |
6475 | goto out_err; | |
6476 | bio->bi_private = dip; | |
6477 | bio->bi_end_io = btrfs_end_dio_bio; | |
6478 | ||
6479 | map_length = orig_bio->bi_size; | |
6480 | ret = btrfs_map_block(root->fs_info, READ, | |
6481 | start_sector << 9, | |
6482 | &map_length, NULL, 0); | |
6483 | if (ret) { | |
6484 | bio_put(bio); | |
6485 | goto out_err; | |
6486 | } | |
6487 | } else { | |
6488 | submit_len += bvec->bv_len; | |
6489 | nr_pages++; | |
6490 | bvec++; | |
6491 | } | |
6492 | } | |
6493 | ||
6494 | submit: | |
6495 | ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, | |
6496 | async_submit); | |
6497 | if (!ret) | |
6498 | return 0; | |
6499 | ||
6500 | bio_put(bio); | |
6501 | out_err: | |
6502 | dip->errors = 1; | |
6503 | /* | |
6504 | * before the atomic variable goes to zero, we must | |
6505 | * make sure dip->errors is perceived to be set. | |
6506 | */ | |
6507 | smp_mb__before_atomic_dec(); | |
6508 | if (atomic_dec_and_test(&dip->pending_bios)) | |
6509 | bio_io_error(dip->orig_bio); | |
6510 | ||
6511 | /* bio_end_io() will handle error, so we needn't return it */ | |
6512 | return 0; | |
6513 | } | |
6514 | ||
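| /* | |
|  * Entry point for direct IO submission: build the btrfs_dio_private | |
|  * that tracks this IO, point the bio at our read/write completion | |
|  * handlers and hand everything to the submit hook.  On failure we fall | |
|  * through to free_ordered to undo the write-side reservations. | |
|  */ | |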
6515 | static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, | |
6516 | loff_t file_offset) | |
6517 | { | |
6518 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
6519 | struct btrfs_dio_private *dip; | |
6520 | struct bio_vec *bvec = bio->bi_io_vec; | |
6521 | int skip_sum; | |
6522 | int write = rw & REQ_WRITE; | |
6523 | int ret = 0; | |
6524 | ||
6525 | skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; | |
6526 | ||
6527 | dip = kmalloc(sizeof(*dip), GFP_NOFS); | |
6528 | if (!dip) { | |
6529 | ret = -ENOMEM; | |
6530 | goto free_ordered; | |
6531 | } | |
6532 | ||
6533 | dip->private = bio->bi_private; | |
6534 | dip->inode = inode; | |
6535 | dip->logical_offset = file_offset; | |
6536 | ||
6537 | dip->bytes = 0; | |
6538 | do { | |
6539 | dip->bytes += bvec->bv_len; | |
6540 | bvec++; | |
6541 | } while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1)); | |
6542 | ||
6543 | dip->disk_bytenr = (u64)bio->bi_sector << 9; | |
6544 | bio->bi_private = dip; | |
6545 | dip->errors = 0; | |
6546 | dip->orig_bio = bio; | |
6547 | atomic_set(&dip->pending_bios, 0); | |
6548 | ||
6549 | if (write) | |
6550 | bio->bi_end_io = btrfs_endio_direct_write; | |
6551 | else | |
6552 | bio->bi_end_io = btrfs_endio_direct_read; | |
6553 | ||
6554 | ret = btrfs_submit_direct_hook(rw, dip, skip_sum); | |
6555 | if (!ret) | |
6556 | return; | |
6557 | free_ordered: | |
6558 | /* | |
6559 | * If this is a write, we need to clean up the reserved space and kill | |
6560 | * the ordered extent. | |
6561 | */ | |
6562 | if (write) { | |
6563 | struct btrfs_ordered_extent *ordered; | |
6564 | ordered = btrfs_lookup_ordered_extent(inode, file_offset); | |
6565 | if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) && | |
6566 | !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) | |
6567 | btrfs_free_reserved_extent(root, ordered->start, | |
6568 | ordered->disk_len); | |
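| /* | |
|  * put twice: once for our lookup ref, and once more because the | |
|  * ordered IO will never run to drop the ref it was created with | |
|  */ | |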
6569 | btrfs_put_ordered_extent(ordered); | |
6570 | btrfs_put_ordered_extent(ordered); | |
6571 | } | |
6572 | bio_endio(bio, ret); | |
6573 | } | |
6574 | ||
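| /* | |
|  * O_DIRECT on btrfs needs the file offset, every iovec base and every | |
|  * iovec length sector aligned; e.g. with a 4K sectorsize an iov_base | |
|  * of buf + 512 or an iov_len of 1000 is rejected.  Reads also reject | |
|  * iovecs that repeat a base address.  A non-zero return here makes | |
|  * btrfs_direct_IO fall back to buffered IO. | |
|  */ | |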
6575 | static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb, | |
6576 | const struct iovec *iov, loff_t offset, | |
6577 | unsigned long nr_segs) | |
6578 | { | |
6579 | int seg; | |
6580 | int i; | |
6581 | size_t size; | |
6582 | unsigned long addr; | |
6583 | unsigned blocksize_mask = root->sectorsize - 1; | |
6584 | ssize_t retval = -EINVAL; | |
6585 | loff_t end = offset; | |
6586 | ||
6587 | if (offset & blocksize_mask) | |
6588 | goto out; | |
6589 | ||
6590 | /* Check the memory alignment. Blocks cannot straddle pages */ | |
6591 | for (seg = 0; seg < nr_segs; seg++) { | |
6592 | addr = (unsigned long)iov[seg].iov_base; | |
6593 | size = iov[seg].iov_len; | |
6594 | end += size; | |
6595 | if ((addr & blocksize_mask) || (size & blocksize_mask)) | |
6596 | goto out; | |
6597 | ||
6598 | /* If this is a write we don't need to check any further */ | |
6599 | if (rw & WRITE) | |
6600 | continue; | |
6601 | ||
6602 | /* | |
6603 | * Check to make sure we don't have duplicate iov_base's in this | |
6604 | * iovec; if we do, return -EINVAL, otherwise we'll get csum | |
6605 | * errors when reading back. | |
6606 | */ | |
6607 | for (i = seg + 1; i < nr_segs; i++) { | |
6608 | if (iov[seg].iov_base == iov[i].iov_base) | |
6609 | goto out; | |
6610 | } | |
6611 | } | |
6612 | retval = 0; | |
6613 | out: | |
6614 | return retval; | |
6615 | } | |
6616 | ||
6617 | static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, | |
6618 | const struct iovec *iov, loff_t offset, | |
6619 | unsigned long nr_segs) | |
6620 | { | |
6621 | struct file *file = iocb->ki_filp; | |
6622 | struct inode *inode = file->f_mapping->host; | |
6623 | ||
6624 | if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov, | |
6625 | offset, nr_segs)) | |
6626 | return 0; | |
6627 | ||
6628 | return __blockdev_direct_IO(rw, iocb, inode, | |
6629 | BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev, | |
6630 | iov, offset, nr_segs, btrfs_get_blocks_direct, NULL, | |
6631 | btrfs_submit_direct, 0); | |
6632 | } | |
6633 | ||
6634 | #define BTRFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC) | |
6635 | ||
6636 | static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |
6637 | __u64 start, __u64 len) | |
6638 | { | |
6639 | int ret; | |
6640 | ||
6641 | ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS); | |
6642 | if (ret) | |
6643 | return ret; | |
6644 | ||
6645 | return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap); | |
6646 | } | |
6647 | ||
6648 | int btrfs_readpage(struct file *file, struct page *page) | |
6649 | { | |
6650 | struct extent_io_tree *tree; | |
6651 | tree = &BTRFS_I(page->mapping->host)->io_tree; | |
6652 | return extent_read_full_page(tree, page, btrfs_get_extent, 0); | |
6653 | } | |
6654 | ||
6655 | static int btrfs_writepage(struct page *page, struct writeback_control *wbc) | |
6656 | { | |
6657 | struct extent_io_tree *tree; | |
6658 | ||
6659 | ||
6660 | if (current->flags & PF_MEMALLOC) { | |
6661 | redirty_page_for_writepage(wbc, page); | |
6662 | unlock_page(page); | |
6663 | return 0; | |
6664 | } | |
6665 | tree = &BTRFS_I(page->mapping->host)->io_tree; | |
6666 | return extent_write_full_page(tree, page, btrfs_get_extent, wbc); | |
6667 | } | |
6668 | ||
6669 | int btrfs_writepages(struct address_space *mapping, | |
6670 | struct writeback_control *wbc) | |
6671 | { | |
6672 | struct extent_io_tree *tree; | |
6673 | ||
6674 | tree = &BTRFS_I(mapping->host)->io_tree; | |
6675 | return extent_writepages(tree, mapping, btrfs_get_extent, wbc); | |
6676 | } | |
6677 | ||
6678 | static int | |
6679 | btrfs_readpages(struct file *file, struct address_space *mapping, | |
6680 | struct list_head *pages, unsigned nr_pages) | |
6681 | { | |
6682 | struct extent_io_tree *tree; | |
6683 | tree = &BTRFS_I(mapping->host)->io_tree; | |
6684 | return extent_readpages(tree, mapping, pages, nr_pages, | |
6685 | btrfs_get_extent); | |
6686 | } | |
6687 | static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags) | |
6688 | { | |
6689 | struct extent_io_tree *tree; | |
6690 | struct extent_map_tree *map; | |
6691 | int ret; | |
6692 | ||
6693 | tree = &BTRFS_I(page->mapping->host)->io_tree; | |
6694 | map = &BTRFS_I(page->mapping->host)->extent_tree; | |
6695 | ret = try_release_extent_mapping(map, tree, page, gfp_flags); | |
6696 | if (ret == 1) { | |
6697 | ClearPagePrivate(page); | |
6698 | set_page_private(page, 0); | |
6699 | page_cache_release(page); | |
6700 | } | |
6701 | return ret; | |
6702 | } | |
6703 | ||
6704 | static int btrfs_releasepage(struct page *page, gfp_t gfp_flags) | |
6705 | { | |
6706 | if (PageWriteback(page) || PageDirty(page)) | |
6707 | return 0; | |
6708 | return __btrfs_releasepage(page, gfp_flags & GFP_NOFS); | |
6709 | } | |
6710 | ||
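| /* | |
|  * Called as a page is dropped from the page cache.  For a partial | |
|  * invalidate we only try to release it; for a full one we also clear | |
|  * the extent state and settle any ordered extent whose IO will now | |
|  * never be started. | |
|  */ | |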
6711 | static void btrfs_invalidatepage(struct page *page, unsigned long offset) | |
6712 | { | |
6713 | struct inode *inode = page->mapping->host; | |
6714 | struct extent_io_tree *tree; | |
6715 | struct btrfs_ordered_extent *ordered; | |
6716 | struct extent_state *cached_state = NULL; | |
6717 | u64 page_start = page_offset(page); | |
6718 | u64 page_end = page_start + PAGE_CACHE_SIZE - 1; | |
6719 | ||
6720 | /* | |
6721 | * we have the page locked, so new writeback can't start, | |
6722 | * and the dirty bit won't be cleared while we are here. | |
6723 | * | |
6724 | * Wait for IO on this page so that we can safely clear | |
6725 | * the PagePrivate2 bit and do ordered accounting | |
6726 | */ | |
6727 | wait_on_page_writeback(page); | |
6728 | ||
6729 | tree = &BTRFS_I(inode)->io_tree; | |
6730 | if (offset) { | |
6731 | btrfs_releasepage(page, GFP_NOFS); | |
6732 | return; | |
6733 | } | |
6734 | lock_extent_bits(tree, page_start, page_end, 0, &cached_state); | |
6735 | ordered = btrfs_lookup_ordered_extent(inode, | |
6736 | page_offset(page)); | |
6737 | if (ordered) { | |
6738 | /* | |
6739 | * IO on this page will never be started, so we need | |
6740 | * to account for any ordered extents now | |
6741 | */ | |
6742 | clear_extent_bit(tree, page_start, page_end, | |
6743 | EXTENT_DIRTY | EXTENT_DELALLOC | | |
6744 | EXTENT_LOCKED | EXTENT_DO_ACCOUNTING | | |
6745 | EXTENT_DEFRAG, 1, 0, &cached_state, GFP_NOFS); | |
6746 | /* | |
6747 | * whoever cleared the private bit is responsible | |
6748 | * for the finish_ordered_io | |
6749 | */ | |
6750 | if (TestClearPagePrivate2(page) && | |
6751 | btrfs_dec_test_ordered_pending(inode, &ordered, page_start, | |
6752 | PAGE_CACHE_SIZE, 1)) { | |
6753 | btrfs_finish_ordered_io(ordered); | |
6754 | } | |
6755 | btrfs_put_ordered_extent(ordered); | |
6756 | cached_state = NULL; | |
6757 | lock_extent_bits(tree, page_start, page_end, 0, &cached_state); | |
6758 | } | |
6759 | clear_extent_bit(tree, page_start, page_end, | |
6760 | EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | | |
6761 | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1, | |
6762 | &cached_state, GFP_NOFS); | |
6763 | __btrfs_releasepage(page, GFP_NOFS); | |
6764 | ||
6765 | ClearPageChecked(page); | |
6766 | if (PagePrivate(page)) { | |
6767 | ClearPagePrivate(page); | |
6768 | set_page_private(page, 0); | |
6769 | page_cache_release(page); | |
6770 | } | |
6771 | } | |
6772 | ||
6773 | /* | |
6774 | * btrfs_page_mkwrite() is not allowed to change the file size as it gets | |
6775 | * called from a page fault handler when a page is first dirtied. Hence we must | |
6776 | * be careful to check for EOF conditions here. We set the page up correctly | |
6777 | * for a written page which means we get ENOSPC checking when writing into | |
6778 | * holes and correct delalloc and unwritten extent mapping on filesystems that | |
6779 | * support these features. | |
6780 | * | |
6781 | * We are not allowed to take the i_mutex here so we have to play games to | |
6782 | * protect against truncate races as the page could now be beyond EOF. Because | |
6783 | * vmtruncate() writes the inode size before removing pages, once we have the | |
6784 | * page lock we can determine safely if the page is beyond EOF. If it is not | |
6785 | * beyond EOF, then the page is guaranteed safe against truncation until we | |
6786 | * unlock the page. | |
6787 | */ | |
6788 | int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | |
6789 | { | |
6790 | struct page *page = vmf->page; | |
6791 | struct inode *inode = fdentry(vma->vm_file)->d_inode; | |
6792 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
6793 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | |
6794 | struct btrfs_ordered_extent *ordered; | |
6795 | struct extent_state *cached_state = NULL; | |
6796 | char *kaddr; | |
6797 | unsigned long zero_start; | |
6798 | loff_t size; | |
6799 | int ret; | |
6800 | int reserved = 0; | |
6801 | u64 page_start; | |
6802 | u64 page_end; | |
6803 | ||
6804 | sb_start_pagefault(inode->i_sb); | |
6805 | ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); | |
6806 | if (!ret) { | |
6807 | ret = file_update_time(vma->vm_file); | |
6808 | reserved = 1; | |
6809 | } | |
6810 | if (ret) { | |
6811 | if (ret == -ENOMEM) | |
6812 | ret = VM_FAULT_OOM; | |
6813 | else /* -ENOSPC, -EIO, etc */ | |
6814 | ret = VM_FAULT_SIGBUS; | |
6815 | if (reserved) | |
6816 | goto out; | |
6817 | goto out_noreserve; | |
6818 | } | |
6819 | ||
6820 | ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */ | |
6821 | again: | |
6822 | lock_page(page); | |
6823 | size = i_size_read(inode); | |
6824 | page_start = page_offset(page); | |
6825 | page_end = page_start + PAGE_CACHE_SIZE - 1; | |
6826 | ||
6827 | if ((page->mapping != inode->i_mapping) || | |
6828 | (page_start >= size)) { | |
6829 | /* page got truncated out from underneath us */ | |
6830 | goto out_unlock; | |
6831 | } | |
6832 | wait_on_page_writeback(page); | |
6833 | ||
6834 | lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state); | |
6835 | set_page_extent_mapped(page); | |
6836 | ||
6837 | /* | |
6838 | * we can't set the delalloc bits if there are pending ordered | |
6839 | * extents. Drop our locks and wait for them to finish | |
6840 | */ | |
6841 | ordered = btrfs_lookup_ordered_extent(inode, page_start); | |
6842 | if (ordered) { | |
6843 | unlock_extent_cached(io_tree, page_start, page_end, | |
6844 | &cached_state, GFP_NOFS); | |
6845 | unlock_page(page); | |
6846 | btrfs_start_ordered_extent(inode, ordered, 1); | |
6847 | btrfs_put_ordered_extent(ordered); | |
6848 | goto again; | |
6849 | } | |
6850 | ||
6851 | /* | |
6852 | * XXX - page_mkwrite gets called every time the page is dirtied, even | |
6853 | * if it was already dirty, so for space accounting reasons we need to | |
6854 | * clear any delalloc bits for the range we are fixing to save. There | |
6855 | * is probably a better way to do this, but for now keep consistent with | |
6856 | * prepare_pages in the normal write path. | |
6857 | */ | |
6858 | clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, | |
6859 | EXTENT_DIRTY | EXTENT_DELALLOC | | |
6860 | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, | |
6861 | 0, 0, &cached_state, GFP_NOFS); | |
6862 | ||
6863 | ret = btrfs_set_extent_delalloc(inode, page_start, page_end, | |
6864 | &cached_state); | |
6865 | if (ret) { | |
6866 | unlock_extent_cached(io_tree, page_start, page_end, | |
6867 | &cached_state, GFP_NOFS); | |
6868 | ret = VM_FAULT_SIGBUS; | |
6869 | goto out_unlock; | |
6870 | } | |
6871 | ret = 0; | |
6872 | ||
6873 | /* page is wholly or partially inside EOF */ | |
6874 | if (page_start + PAGE_CACHE_SIZE > size) | |
6875 | zero_start = size & ~PAGE_CACHE_MASK; | |
6876 | else | |
6877 | zero_start = PAGE_CACHE_SIZE; | |
6878 | ||
6879 | if (zero_start != PAGE_CACHE_SIZE) { | |
6880 | kaddr = kmap(page); | |
6881 | memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start); | |
6882 | flush_dcache_page(page); | |
6883 | kunmap(page); | |
6884 | } | |
6885 | ClearPageChecked(page); | |
6886 | set_page_dirty(page); | |
6887 | SetPageUptodate(page); | |
6888 | ||
6889 | BTRFS_I(inode)->last_trans = root->fs_info->generation; | |
6890 | BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; | |
6891 | BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit; | |
6892 | ||
6893 | unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS); | |
6894 | ||
6895 | out_unlock: | |
6896 | if (!ret) { | |
6897 | sb_end_pagefault(inode->i_sb); | |
6898 | return VM_FAULT_LOCKED; | |
6899 | } | |
6900 | unlock_page(page); | |
6901 | out: | |
6902 | btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); | |
6903 | out_noreserve: | |
6904 | sb_end_pagefault(inode->i_sb); | |
6905 | return ret; | |
6906 | } | |
6907 | ||
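| /* | |
|  * Shrink the inode down to i_size: zero out the tail of the last | |
|  * block, wait for ordered IO past the new size, then drop the items | |
|  * beyond i_size, restarting the transaction whenever the reservation | |
|  * runs dry (see the reservation notes below). | |
|  */ | |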
6908 | static int btrfs_truncate(struct inode *inode) | |
6909 | { | |
6910 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
6911 | struct btrfs_block_rsv *rsv; | |
6912 | int ret; | |
6913 | int err = 0; | |
6914 | struct btrfs_trans_handle *trans; | |
6915 | u64 mask = root->sectorsize - 1; | |
6916 | u64 min_size = btrfs_calc_trunc_metadata_size(root, 1); | |
6917 | ||
6918 | ret = btrfs_truncate_page(inode, inode->i_size, 0, 0); | |
6919 | if (ret) | |
6920 | return ret; | |
6921 | ||
6922 | btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); | |
6923 | btrfs_ordered_update_i_size(inode, inode->i_size, NULL); | |
6924 | ||
6925 | /* | |
6926 | * Yes, ladies and gentlemen, this is indeed ugly. The fact is we have | |
6927 | * 3 things going on here | |
6928 | * | |
6929 | * 1) We need to reserve space for our orphan item and the space to | |
6930 | * delete our orphan item. Lord knows we don't want to have a dangling | |
6931 | * orphan item because we didn't reserve space to remove it. | |
6932 | * | |
6933 | * 2) We need to reserve space to update our inode. | |
6934 | * | |
6935 | * 3) We need to have something to cache all the space that is going to | |
6936 | * be freed up by the truncate operation, but also have some slack | |
6937 | * space reserved in case it uses space during the truncate (thank you | |
6938 | * very much snapshotting). | |
6939 | * | |
6940 | * And we need these to all be separate. The fact is we can use a lot of | |
6941 | * space doing the truncate, and we have no earthly idea how much space | |
6942 | * we will use, so we need the truncate reservation to be separate so it | |
6943 | * doesn't end up using space reserved for updating the inode or | |
6944 | * removing the orphan item. We also need to be able to stop the | |
6945 | * transaction and start a new one, which means we need to be able to | |
6946 | * update the inode several times, and we have no way of knowing how | |
6947 | * many times that will be, so we can't just reserve 1 item for the | |
6948 | * entirety of the operation, so that has to be done separately as well. | |
6949 | * Then there is the orphan item, which does indeed need to be held on | |
6950 | * to for the whole operation, and we need nobody to touch this reserved | |
6951 | * space except the orphan code. | |
6952 | * | |
6953 | * So that leaves us with | |
6954 | * | |
6955 | * 1) root->orphan_block_rsv - for the orphan deletion. | |
6956 | * 2) rsv - for the truncate reservation, which we will steal from the | |
6957 | * transaction reservation. | |
6958 | * 3) fs_info->trans_block_rsv - this will have 1 item's worth left for | |
6959 | * updating the inode. | |
6960 | */ | |
6961 | rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP); | |
6962 | if (!rsv) | |
6963 | return -ENOMEM; | |
6964 | rsv->size = min_size; | |
6965 | rsv->failfast = 1; | |
6966 | ||
6967 | /* | |
6968 | * 1 for the truncate slack space | |
6969 | * 1 for updating the inode. | |
6970 | */ | |
6971 | trans = btrfs_start_transaction(root, 2); | |
6972 | if (IS_ERR(trans)) { | |
6973 | err = PTR_ERR(trans); | |
6974 | goto out; | |
6975 | } | |
6976 | ||
6977 | /* Migrate the slack space for the truncate to our reserve */ | |
6978 | ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv, | |
6979 | min_size); | |
6980 | BUG_ON(ret); | |
6981 | ||
6982 | /* | |
6983 | * setattr is responsible for setting the ordered_data_close flag, | |
6984 | * but that is only tested during the last file release. That | |
6985 | * could happen well after the next commit, leaving a great big | |
6986 | * window where new writes may get lost if someone chooses to write | |
6987 | * to this file after truncating to zero | |
6988 | * | |
6989 | * The inode doesn't have any dirty data here, and so if we commit | |
6990 | * this is a noop. If someone immediately starts writing to the inode | |
6991 | * it is very likely we'll catch some of their writes in this | |
6992 | * transaction, and the commit will find this file on the ordered | |
6993 | * data list with good things to send down. | |
6994 | * | |
6995 | * This is a best effort solution, there is still a window where | |
6996 | * using truncate to replace the contents of the file will | |
6997 | * end up with a zero length file after a crash. | |
6998 | */ | |
6999 | if (inode->i_size == 0 && test_bit(BTRFS_INODE_ORDERED_DATA_CLOSE, | |
7000 | &BTRFS_I(inode)->runtime_flags)) | |
7001 | btrfs_add_ordered_operation(trans, root, inode); | |
7002 | ||
7003 | /* | |
7004 | * So if we truncate and then write and fsync we normally would just | |
7005 | * write the extents that changed, which is a problem if we need to | |
7006 | * first truncate that entire inode. So set this flag so we write out | |
7007 | * all of the extents in the inode to the sync log so we're completely | |
7008 | * safe. | |
7009 | */ | |
7010 | set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); | |
7011 | trans->block_rsv = rsv; | |
7012 | ||
7013 | while (1) { | |
7014 | ret = btrfs_truncate_inode_items(trans, root, inode, | |
7015 | inode->i_size, | |
7016 | BTRFS_EXTENT_DATA_KEY); | |
7017 | if (ret != -ENOSPC) { | |
7018 | err = ret; | |
7019 | break; | |
7020 | } | |
7021 | ||
7022 | trans->block_rsv = &root->fs_info->trans_block_rsv; | |
7023 | ret = btrfs_update_inode(trans, root, inode); | |
7024 | if (ret) { | |
7025 | err = ret; | |
7026 | break; | |
7027 | } | |
7028 | ||
7029 | btrfs_end_transaction(trans, root); | |
7030 | btrfs_btree_balance_dirty(root); | |
7031 | ||
7032 | trans = btrfs_start_transaction(root, 2); | |
7033 | if (IS_ERR(trans)) { | |
7034 | ret = err = PTR_ERR(trans); | |
7035 | trans = NULL; | |
7036 | break; | |
7037 | } | |
7038 | ||
7039 | ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, | |
7040 | rsv, min_size); | |
7041 | BUG_ON(ret); /* shouldn't happen */ | |
7042 | trans->block_rsv = rsv; | |
7043 | } | |
7044 | ||
7045 | if (ret == 0 && inode->i_nlink > 0) { | |
7046 | trans->block_rsv = root->orphan_block_rsv; | |
7047 | ret = btrfs_orphan_del(trans, inode); | |
7048 | if (ret) | |
7049 | err = ret; | |
7050 | } | |
7051 | ||
7052 | if (trans) { | |
7053 | trans->block_rsv = &root->fs_info->trans_block_rsv; | |
7054 | ret = btrfs_update_inode(trans, root, inode); | |
7055 | if (ret && !err) | |
7056 | err = ret; | |
7057 | ||
7058 | ret = btrfs_end_transaction(trans, root); | |
7059 | btrfs_btree_balance_dirty(root); | |
7060 | } | |
7061 | ||
7062 | out: | |
7063 | btrfs_free_block_rsv(root, rsv); | |
7064 | ||
7065 | if (ret && !err) | |
7066 | err = ret; | |
7067 | ||
7068 | return err; | |
7069 | } | |
7070 | ||
7071 | /* | |
7072 | * create a new subvolume directory/inode (helper for the ioctl). | |
7073 | */ | |
7074 | int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, | |
7075 | struct btrfs_root *new_root, u64 new_dirid) | |
7076 | { | |
7077 | struct inode *inode; | |
7078 | int err; | |
7079 | u64 index = 0; | |
7080 | ||
7081 | inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, | |
7082 | new_dirid, new_dirid, | |
7083 | S_IFDIR | (~current_umask() & S_IRWXUGO), | |
7084 | &index); | |
7085 | if (IS_ERR(inode)) | |
7086 | return PTR_ERR(inode); | |
7087 | inode->i_op = &btrfs_dir_inode_operations; | |
7088 | inode->i_fop = &btrfs_dir_file_operations; | |
7089 | ||
7090 | set_nlink(inode, 1); | |
7091 | btrfs_i_size_write(inode, 0); | |
7092 | ||
7093 | err = btrfs_update_inode(trans, new_root, inode); | |
7094 | ||
7095 | iput(inode); | |
7096 | return err; | |
7097 | } | |
7098 | ||
7099 | struct inode *btrfs_alloc_inode(struct super_block *sb) | |
7100 | { | |
7101 | struct btrfs_inode *ei; | |
7102 | struct inode *inode; | |
7103 | ||
7104 | ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS); | |
7105 | if (!ei) | |
7106 | return NULL; | |
7107 | ||
7108 | ei->root = NULL; | |
7109 | ei->generation = 0; | |
7110 | ei->last_trans = 0; | |
7111 | ei->last_sub_trans = 0; | |
7112 | ei->logged_trans = 0; | |
7113 | ei->delalloc_bytes = 0; | |
7114 | ei->disk_i_size = 0; | |
7115 | ei->flags = 0; | |
7116 | ei->csum_bytes = 0; | |
7117 | ei->index_cnt = (u64)-1; | |
7118 | ei->last_unlink_trans = 0; | |
7119 | ei->last_log_commit = 0; | |
7120 | ||
7121 | spin_lock_init(&ei->lock); | |
7122 | ei->outstanding_extents = 0; | |
7123 | ei->reserved_extents = 0; | |
7124 | ||
7125 | ei->runtime_flags = 0; | |
7126 | ei->force_compress = BTRFS_COMPRESS_NONE; | |
7127 | ||
7128 | ei->delayed_node = NULL; | |
7129 | ||
7130 | inode = &ei->vfs_inode; | |
7131 | extent_map_tree_init(&ei->extent_tree); | |
7132 | extent_io_tree_init(&ei->io_tree, &inode->i_data); | |
7133 | extent_io_tree_init(&ei->io_failure_tree, &inode->i_data); | |
7134 | ei->io_tree.track_uptodate = 1; | |
7135 | ei->io_failure_tree.track_uptodate = 1; | |
7136 | atomic_set(&ei->sync_writers, 0); | |
7137 | mutex_init(&ei->log_mutex); | |
7138 | mutex_init(&ei->delalloc_mutex); | |
7139 | btrfs_ordered_inode_tree_init(&ei->ordered_tree); | |
7140 | INIT_LIST_HEAD(&ei->delalloc_inodes); | |
7141 | INIT_LIST_HEAD(&ei->ordered_operations); | |
7142 | RB_CLEAR_NODE(&ei->rb_node); | |
7143 | ||
7144 | return inode; | |
7145 | } | |
7146 | ||
7147 | static void btrfs_i_callback(struct rcu_head *head) | |
7148 | { | |
7149 | struct inode *inode = container_of(head, struct inode, i_rcu); | |
7150 | kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); | |
7151 | } | |
7152 | ||
7153 | void btrfs_destroy_inode(struct inode *inode) | |
7154 | { | |
7155 | struct btrfs_ordered_extent *ordered; | |
7156 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
7157 | ||
7158 | WARN_ON(!hlist_empty(&inode->i_dentry)); | |
7159 | WARN_ON(inode->i_data.nrpages); | |
7160 | WARN_ON(BTRFS_I(inode)->outstanding_extents); | |
7161 | WARN_ON(BTRFS_I(inode)->reserved_extents); | |
7162 | WARN_ON(BTRFS_I(inode)->delalloc_bytes); | |
7163 | WARN_ON(BTRFS_I(inode)->csum_bytes); | |
7164 | ||
7165 | /* | |
7166 | * This can happen when we create an inode, but somebody else also | |
7167 | * created the same inode and we need to destroy the one we already | |
7168 | * created. | |
7169 | */ | |
7170 | if (!root) | |
7171 | goto free; | |
7172 | ||
7173 | /* | |
7174 | * Make sure we're properly removed from the ordered operation | |
7175 | * lists. | |
7176 | */ | |
7177 | smp_mb(); | |
7178 | if (!list_empty(&BTRFS_I(inode)->ordered_operations)) { | |
7179 | spin_lock(&root->fs_info->ordered_extent_lock); | |
7180 | list_del_init(&BTRFS_I(inode)->ordered_operations); | |
7181 | spin_unlock(&root->fs_info->ordered_extent_lock); | |
7182 | } | |
7183 | ||
7184 | if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, | |
7185 | &BTRFS_I(inode)->runtime_flags)) { | |
7186 | printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n", | |
7187 | (unsigned long long)btrfs_ino(inode)); | |
7188 | atomic_dec(&root->orphan_inodes); | |
7189 | } | |
7190 | ||
7191 | while (1) { | |
7192 | ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); | |
7193 | if (!ordered) | |
7194 | break; | |
7195 | else { | |
7196 | printk(KERN_ERR "btrfs found ordered " | |
7197 | "extent %llu %llu on inode cleanup\n", | |
7198 | (unsigned long long)ordered->file_offset, | |
7199 | (unsigned long long)ordered->len); | |
7200 | btrfs_remove_ordered_extent(inode, ordered); | |
7201 | btrfs_put_ordered_extent(ordered); | |
7202 | btrfs_put_ordered_extent(ordered); | |
7203 | } | |
7204 | } | |
7205 | inode_tree_del(inode); | |
7206 | btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); | |
7207 | free: | |
7208 | btrfs_remove_delayed_node(inode); | |
7209 | call_rcu(&inode->i_rcu, btrfs_i_callback); | |
7210 | } | |
7211 | ||
7212 | int btrfs_drop_inode(struct inode *inode) | |
7213 | { | |
7214 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
7215 | ||
7216 | if (btrfs_root_refs(&root->root_item) == 0 && | |
7217 | !btrfs_is_free_space_inode(inode)) | |
7218 | return 1; | |
7219 | else | |
7220 | return generic_drop_inode(inode); | |
7221 | } | |
7222 | ||
7223 | static void init_once(void *foo) | |
7224 | { | |
7225 | struct btrfs_inode *ei = (struct btrfs_inode *) foo; | |
7226 | ||
7227 | inode_init_once(&ei->vfs_inode); | |
7228 | } | |
7229 | ||
7230 | void btrfs_destroy_cachep(void) | |
7231 | { | |
7232 | /* | |
7233 | * Make sure all delayed rcu free inodes are flushed before we | |
7234 | * destroy the caches. | |
7235 | */ | |
7236 | rcu_barrier(); | |
7237 | if (btrfs_inode_cachep) | |
7238 | kmem_cache_destroy(btrfs_inode_cachep); | |
7239 | if (btrfs_trans_handle_cachep) | |
7240 | kmem_cache_destroy(btrfs_trans_handle_cachep); | |
7241 | if (btrfs_transaction_cachep) | |
7242 | kmem_cache_destroy(btrfs_transaction_cachep); | |
7243 | if (btrfs_path_cachep) | |
7244 | kmem_cache_destroy(btrfs_path_cachep); | |
7245 | if (btrfs_free_space_cachep) | |
7246 | kmem_cache_destroy(btrfs_free_space_cachep); | |
7247 | if (btrfs_delalloc_work_cachep) | |
7248 | kmem_cache_destroy(btrfs_delalloc_work_cachep); | |
7249 | } | |
7250 | ||
7251 | int btrfs_init_cachep(void) | |
7252 | { | |
7253 | btrfs_inode_cachep = kmem_cache_create("btrfs_inode", | |
7254 | sizeof(struct btrfs_inode), 0, | |
7255 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once); | |
7256 | if (!btrfs_inode_cachep) | |
7257 | goto fail; | |
7258 | ||
7259 | btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle", | |
7260 | sizeof(struct btrfs_trans_handle), 0, | |
7261 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); | |
7262 | if (!btrfs_trans_handle_cachep) | |
7263 | goto fail; | |
7264 | ||
7265 | btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction", | |
7266 | sizeof(struct btrfs_transaction), 0, | |
7267 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); | |
7268 | if (!btrfs_transaction_cachep) | |
7269 | goto fail; | |
7270 | ||
7271 | btrfs_path_cachep = kmem_cache_create("btrfs_path", | |
7272 | sizeof(struct btrfs_path), 0, | |
7273 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); | |
7274 | if (!btrfs_path_cachep) | |
7275 | goto fail; | |
7276 | ||
7277 | btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space", | |
7278 | sizeof(struct btrfs_free_space), 0, | |
7279 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); | |
7280 | if (!btrfs_free_space_cachep) | |
7281 | goto fail; | |
7282 | ||
7283 | btrfs_delalloc_work_cachep = kmem_cache_create("btrfs_delalloc_work", | |
7284 | sizeof(struct btrfs_delalloc_work), 0, | |
7285 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, | |
7286 | NULL); | |
7287 | if (!btrfs_delalloc_work_cachep) | |
7288 | goto fail; | |
7289 | ||
7290 | return 0; | |
7291 | fail: | |
7292 | btrfs_destroy_cachep(); | |
7293 | return -ENOMEM; | |
7294 | } | |
7295 | ||
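| /* | |
|  * stat(2) for btrfs: st_dev is the per-subvolume anonymous device, and | |
|  * st_blocks includes not-yet-written delalloc bytes so freshly dirtied | |
|  * data already shows up in the block count. | |
|  */ | |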
7296 | static int btrfs_getattr(struct vfsmount *mnt, | |
7297 | struct dentry *dentry, struct kstat *stat) | |
7298 | { | |
7299 | struct inode *inode = dentry->d_inode; | |
7300 | u32 blocksize = inode->i_sb->s_blocksize; | |
7301 | ||
7302 | generic_fillattr(inode, stat); | |
7303 | stat->dev = BTRFS_I(inode)->root->anon_dev; | |
7304 | stat->blksize = PAGE_CACHE_SIZE; | |
7305 | stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) + | |
7306 | ALIGN(BTRFS_I(inode)->delalloc_bytes, blocksize)) >> 9; | |
7307 | return 0; | |
7308 | } | |
7309 | ||
7310 | /* | |
7311 | * If a file is moved, it will inherit the cow and compression flags of the new | |
7312 | * directory. | |
7313 | */ | |
7314 | static void fixup_inode_flags(struct inode *dir, struct inode *inode) | |
7315 | { | |
7316 | struct btrfs_inode *b_dir = BTRFS_I(dir); | |
7317 | struct btrfs_inode *b_inode = BTRFS_I(inode); | |
7318 | ||
7319 | if (b_dir->flags & BTRFS_INODE_NODATACOW) | |
7320 | b_inode->flags |= BTRFS_INODE_NODATACOW; | |
7321 | else | |
7322 | b_inode->flags &= ~BTRFS_INODE_NODATACOW; | |
7323 | ||
7324 | if (b_dir->flags & BTRFS_INODE_COMPRESS) { | |
7325 | b_inode->flags |= BTRFS_INODE_COMPRESS; | |
7326 | b_inode->flags &= ~BTRFS_INODE_NOCOMPRESS; | |
7327 | } else { | |
7328 | b_inode->flags &= ~(BTRFS_INODE_COMPRESS | | |
7329 | BTRFS_INODE_NOCOMPRESS); | |
7330 | } | |
7331 | } | |
7332 | ||
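| /* | |
|  * Rename within a subvolume (only subvolume links may cross | |
|  * subvolumes).  Roughly: insert the new inode ref and pin the log, | |
|  * unlink the old name and any existing target, then add the new link | |
|  * and log the new name, all inside one transaction. | |
|  */ | |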
7333 | static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |
7334 | struct inode *new_dir, struct dentry *new_dentry) | |
7335 | { | |
7336 | struct btrfs_trans_handle *trans; | |
7337 | struct btrfs_root *root = BTRFS_I(old_dir)->root; | |
7338 | struct btrfs_root *dest = BTRFS_I(new_dir)->root; | |
7339 | struct inode *new_inode = new_dentry->d_inode; | |
7340 | struct inode *old_inode = old_dentry->d_inode; | |
7341 | struct timespec ctime = CURRENT_TIME; | |
7342 | u64 index = 0; | |
7343 | u64 root_objectid; | |
7344 | int ret; | |
7345 | u64 old_ino = btrfs_ino(old_inode); | |
7346 | ||
7347 | if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) | |
7348 | return -EPERM; | |
7349 | ||
7350 | /* we only allow a subvolume link to be renamed between subvolumes */ | |
7351 | if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) | |
7352 | return -EXDEV; | |
7353 | ||
7354 | if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || | |
7355 | (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID)) | |
7356 | return -ENOTEMPTY; | |
7357 | ||
7358 | if (S_ISDIR(old_inode->i_mode) && new_inode && | |
7359 | new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) | |
7360 | return -ENOTEMPTY; | |
7361 | ||
7362 | ||
7363 | /* check for collisions, even if the name isn't there */ | |
7364 | ret = btrfs_check_dir_item_collision(root, new_dir->i_ino, | |
7365 | new_dentry->d_name.name, | |
7366 | new_dentry->d_name.len); | |
7367 | ||
7368 | if (ret) { | |
7369 | if (ret == -EEXIST) { | |
7370 | /* we shouldn't get | |
7371 | * -EEXIST without a new_inode */ | |
7372 | if (!new_inode) { | |
7373 | WARN_ON(1); | |
7374 | return ret; | |
7375 | } | |
7376 | } else { | |
7377 | /* maybe -EOVERFLOW */ | |
7378 | return ret; | |
7379 | } | |
7380 | } | |
7381 | ret = 0; | |
7382 | ||
7383 | /* | |
7384 | * we're using rename to replace one file with another, | |
7385 | * and the replacement file is large. Start IO on it now so | |
7386 | * we don't add too much work to the end of the transaction | |
7387 | */ | |
7388 | if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size && | |
7389 | old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT) | |
7390 | filemap_flush(old_inode->i_mapping); | |
7391 | ||
7392 | /* close the racy window with snapshot create/destroy ioctl */ | |
7393 | if (old_ino == BTRFS_FIRST_FREE_OBJECTID) | |
7394 | down_read(&root->fs_info->subvol_sem); | |
7395 | /* | |
7396 | * We want to reserve the absolute worst case number of items. So if | |
7397 | * both inodes are subvols and we need to unlink them then that would | |
7398 | * require 4 item modifications, but if they are both normal inodes it | |
7399 | * would require 5 item modifications, so we'll assume they're normal | |
7400 | * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items | |
7401 | * should cover the worst case number of items we'll modify. | |
7402 | */ | |
7403 | trans = btrfs_start_transaction(root, 20); | |
7404 | if (IS_ERR(trans)) { | |
7405 | ret = PTR_ERR(trans); | |
7406 | goto out_notrans; | |
7407 | } | |
7408 | ||
7409 | if (dest != root) | |
7410 | btrfs_record_root_in_trans(trans, dest); | |
7411 | ||
7412 | ret = btrfs_set_inode_index(new_dir, &index); | |
7413 | if (ret) | |
7414 | goto out_fail; | |
7415 | ||
7416 | if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { | |
7417 | /* force full log commit if subvolume involved. */ | |
7418 | root->fs_info->last_trans_log_full_commit = trans->transid; | |
7419 | } else { | |
7420 | ret = btrfs_insert_inode_ref(trans, dest, | |
7421 | new_dentry->d_name.name, | |
7422 | new_dentry->d_name.len, | |
7423 | old_ino, | |
7424 | btrfs_ino(new_dir), index); | |
7425 | if (ret) | |
7426 | goto out_fail; | |
7427 | /* | |
7428 | * this is an ugly little race, but the rename is required | |
7429 | * to make sure that if we crash, the inode is either at the | |
7430 | * old name or the new one. pinning the log transaction lets | |
7431 | * us make sure we don't allow a log commit to come in after | |
7432 | * we unlink the name but before we add the new name back in. | |
7433 | */ | |
7434 | btrfs_pin_log_trans(root); | |
7435 | } | |
7436 | /* | |
7437 | * make sure the inode gets flushed if it is replacing | |
7438 | * something. | |
7439 | */ | |
7440 | if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode)) | |
7441 | btrfs_add_ordered_operation(trans, root, old_inode); | |
7442 | ||
7443 | inode_inc_iversion(old_dir); | |
7444 | inode_inc_iversion(new_dir); | |
7445 | inode_inc_iversion(old_inode); | |
7446 | old_dir->i_ctime = old_dir->i_mtime = ctime; | |
7447 | new_dir->i_ctime = new_dir->i_mtime = ctime; | |
7448 | old_inode->i_ctime = ctime; | |
7449 | ||
7450 | if (old_dentry->d_parent != new_dentry->d_parent) | |
7451 | btrfs_record_unlink_dir(trans, old_dir, old_inode, 1); | |
7452 | ||
7453 | if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { | |
7454 | root_objectid = BTRFS_I(old_inode)->root->root_key.objectid; | |
7455 | ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid, | |
7456 | old_dentry->d_name.name, | |
7457 | old_dentry->d_name.len); | |
7458 | } else { | |
7459 | ret = __btrfs_unlink_inode(trans, root, old_dir, | |
7460 | old_dentry->d_inode, | |
7461 | old_dentry->d_name.name, | |
7462 | old_dentry->d_name.len); | |
7463 | if (!ret) | |
7464 | ret = btrfs_update_inode(trans, root, old_inode); | |
7465 | } | |
7466 | if (ret) { | |
7467 | btrfs_abort_transaction(trans, root, ret); | |
7468 | goto out_fail; | |
7469 | } | |
7470 | ||
7471 | if (new_inode) { | |
7472 | inode_inc_iversion(new_inode); | |
7473 | new_inode->i_ctime = CURRENT_TIME; | |
7474 | if (unlikely(btrfs_ino(new_inode) == | |
7475 | BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { | |
7476 | root_objectid = BTRFS_I(new_inode)->location.objectid; | |
7477 | ret = btrfs_unlink_subvol(trans, dest, new_dir, | |
7478 | root_objectid, | |
7479 | new_dentry->d_name.name, | |
7480 | new_dentry->d_name.len); | |
7481 | BUG_ON(new_inode->i_nlink == 0); | |
7482 | } else { | |
7483 | ret = btrfs_unlink_inode(trans, dest, new_dir, | |
7484 | new_dentry->d_inode, | |
7485 | new_dentry->d_name.name, | |
7486 | new_dentry->d_name.len); | |
7487 | } | |
7488 | if (!ret && new_inode->i_nlink == 0) { | |
7489 | ret = btrfs_orphan_add(trans, new_dentry->d_inode); | |
7490 | BUG_ON(ret); | |
7491 | } | |
7492 | if (ret) { | |
7493 | btrfs_abort_transaction(trans, root, ret); | |
7494 | goto out_fail; | |
7495 | } | |
7496 | } | |
7497 | ||
7498 | fixup_inode_flags(new_dir, old_inode); | |
7499 | ||
7500 | ret = btrfs_add_link(trans, new_dir, old_inode, | |
7501 | new_dentry->d_name.name, | |
7502 | new_dentry->d_name.len, 0, index); | |
7503 | if (ret) { | |
7504 | btrfs_abort_transaction(trans, root, ret); | |
7505 | goto out_fail; | |
7506 | } | |
7507 | ||
7508 | if (old_ino != BTRFS_FIRST_FREE_OBJECTID) { | |
7509 | struct dentry *parent = new_dentry->d_parent; | |
7510 | btrfs_log_new_name(trans, old_inode, old_dir, parent); | |
7511 | btrfs_end_log_trans(root); | |
7512 | } | |
7513 | out_fail: | |
7514 | btrfs_end_transaction(trans, root); | |
7515 | out_notrans: | |
7516 | if (old_ino == BTRFS_FIRST_FREE_OBJECTID) | |
7517 | up_read(&root->fs_info->subvol_sem); | |
7518 | ||
7519 | return ret; | |
7520 | } | |
7521 | ||
7522 | static void btrfs_run_delalloc_work(struct btrfs_work *work) | |
7523 | { | |
7524 | struct btrfs_delalloc_work *delalloc_work; | |
7525 | ||
7526 | delalloc_work = container_of(work, struct btrfs_delalloc_work, | |
7527 | work); | |
7528 | if (delalloc_work->wait) | |
7529 | btrfs_wait_ordered_range(delalloc_work->inode, 0, (u64)-1); | |
7530 | else | |
7531 | filemap_flush(delalloc_work->inode->i_mapping); | |
7532 | ||
7533 | if (delalloc_work->delay_iput) | |
7534 | btrfs_add_delayed_iput(delalloc_work->inode); | |
7535 | else | |
7536 | iput(delalloc_work->inode); | |
7537 | complete(&delalloc_work->completion); | |
7538 | } | |
7539 | ||
7540 | struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode, | |
7541 | int wait, int delay_iput) | |
7542 | { | |
7543 | struct btrfs_delalloc_work *work; | |
7544 | ||
7545 | work = kmem_cache_zalloc(btrfs_delalloc_work_cachep, GFP_NOFS); | |
7546 | if (!work) | |
7547 | return NULL; | |
7548 | ||
7549 | init_completion(&work->completion); | |
7550 | INIT_LIST_HEAD(&work->list); | |
7551 | work->inode = inode; | |
7552 | work->wait = wait; | |
7553 | work->delay_iput = delay_iput; | |
7554 | work->work.func = btrfs_run_delalloc_work; | |
7555 | ||
7556 | return work; | |
7557 | } | |
7558 | ||
7559 | void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work) | |
7560 | { | |
7561 | wait_for_completion(&work->completion); | |
7562 | kmem_cache_free(btrfs_delalloc_work_cachep, work); | |
7563 | } | |
7564 | ||
7565 | /* | |
7566 | * some fairly slow code that needs optimization. This walks the list | |
7567 | * of all the inodes with pending delalloc and forces them to disk. | |
7568 | */ | |
7569 | int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput) | |
7570 | { | |
7571 | struct list_head *head = &root->fs_info->delalloc_inodes; | |
7572 | struct btrfs_inode *binode; | |
7573 | struct inode *inode; | |
7574 | struct btrfs_delalloc_work *work, *next; | |
7575 | struct list_head works; | |
7576 | int ret = 0; | |
7577 | ||
7578 | if (root->fs_info->sb->s_flags & MS_RDONLY) | |
7579 | return -EROFS; | |
7580 | ||
7581 | INIT_LIST_HEAD(&works); | |
7582 | ||
7583 | spin_lock(&root->fs_info->delalloc_lock); | |
7584 | while (!list_empty(head)) { | |
7585 | binode = list_entry(head->next, struct btrfs_inode, | |
7586 | delalloc_inodes); | |
7587 | inode = igrab(&binode->vfs_inode); | |
7588 | if (!inode) | |
7589 | list_del_init(&binode->delalloc_inodes); | |
7590 | spin_unlock(&root->fs_info->delalloc_lock); | |
7591 | if (inode) { | |
7592 | work = btrfs_alloc_delalloc_work(inode, 0, delay_iput); | |
7593 | if (!work) { | |
7594 | ret = -ENOMEM; | |
7595 | goto out; | |
7596 | } | |
7597 | list_add_tail(&work->list, &works); | |
7598 | btrfs_queue_worker(&root->fs_info->flush_workers, | |
7599 | &work->work); | |
7600 | } | |
7601 | cond_resched(); | |
7602 | spin_lock(&root->fs_info->delalloc_lock); | |
7603 | } | |
7604 | spin_unlock(&root->fs_info->delalloc_lock); | |
7605 | ||
7606 | /* the filemap_flush will queue IO into the worker threads, but | |
7607 | * we have to make sure the IO is actually started and that | |
7608 | * ordered extents get created before we return | |
7609 | */ | |
7610 | atomic_inc(&root->fs_info->async_submit_draining); | |
7611 | while (atomic_read(&root->fs_info->nr_async_submits) || | |
7612 | atomic_read(&root->fs_info->async_delalloc_pages)) { | |
7613 | wait_event(root->fs_info->async_submit_wait, | |
7614 | (atomic_read(&root->fs_info->nr_async_submits) == 0 && | |
7615 | atomic_read(&root->fs_info->async_delalloc_pages) == 0)); | |
7616 | } | |
7617 | atomic_dec(&root->fs_info->async_submit_draining); | |
7618 | out: | |
7619 | list_for_each_entry_safe(work, next, &works, list) { | |
7620 | list_del_init(&work->list); | |
7621 | btrfs_wait_and_free_delalloc_work(work); | |
7622 | } | |
7623 | return ret; | |
7624 | } | |
7625 | ||
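| /* | |
|  * Symlinks are stored much like small regular files: the target string | |
|  * lives in an inline file extent item, so its length is capped at | |
|  * BTRFS_MAX_INLINE_DATA_SIZE for this root. | |
|  */ | |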
7626 | static int btrfs_symlink(struct inode *dir, struct dentry *dentry, | |
7627 | const char *symname) | |
7628 | { | |
7629 | struct btrfs_trans_handle *trans; | |
7630 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
7631 | struct btrfs_path *path; | |
7632 | struct btrfs_key key; | |
7633 | struct inode *inode = NULL; | |
7634 | int err; | |
7635 | int drop_inode = 0; | |
7636 | u64 objectid; | |
7637 | u64 index = 0; | |
7638 | int name_len; | |
7639 | int datasize; | |
7640 | unsigned long ptr; | |
7641 | struct btrfs_file_extent_item *ei; | |
7642 | struct extent_buffer *leaf; | |
7643 | ||
7644 | name_len = strlen(symname) + 1; | |
7645 | if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) | |
7646 | return -ENAMETOOLONG; | |
7647 | ||
7648 | /* | |
7649 | * 2 items for inode item and ref | |
7650 | * 2 items for dir items | |
7651 | * 1 item for xattr if selinux is on | |
7652 | */ | |
7653 | trans = btrfs_start_transaction(root, 5); | |
7654 | if (IS_ERR(trans)) | |
7655 | return PTR_ERR(trans); | |
7656 | ||
7657 | err = btrfs_find_free_ino(root, &objectid); | |
7658 | if (err) | |
7659 | goto out_unlock; | |
7660 | ||
7661 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, | |
7662 | dentry->d_name.len, btrfs_ino(dir), objectid, | |
7663 | S_IFLNK|S_IRWXUGO, &index); | |
7664 | if (IS_ERR(inode)) { | |
7665 | err = PTR_ERR(inode); | |
7666 | goto out_unlock; | |
7667 | } | |
7668 | ||
7669 | err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); | |
7670 | if (err) { | |
7671 | drop_inode = 1; | |
7672 | goto out_unlock; | |
7673 | } | |
7674 | ||
7675 | /* | |
7676 | * If the active LSM wants to access the inode during | |
7677 | * d_instantiate it needs these. Smack checks to see | |
7678 | * if the filesystem supports xattrs by looking at the | |
7679 | * ops vector. | |
7680 | */ | |
7681 | inode->i_fop = &btrfs_file_operations; | |
7682 | inode->i_op = &btrfs_file_inode_operations; | |
7683 | ||
7684 | err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); | |
7685 | if (err) | |
7686 | drop_inode = 1; | |
7687 | else { | |
7688 | inode->i_mapping->a_ops = &btrfs_aops; | |
7689 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; | |
7690 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; | |
7691 | } | |
7692 | if (drop_inode) | |
7693 | goto out_unlock; | |
7694 | ||
7695 | path = btrfs_alloc_path(); | |
7696 | if (!path) { | |
7697 | err = -ENOMEM; | |
7698 | drop_inode = 1; | |
7699 | goto out_unlock; | |
7700 | } | |
7701 | key.objectid = btrfs_ino(inode); | |
7702 | key.offset = 0; | |
7703 | btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); | |
7704 | datasize = btrfs_file_extent_calc_inline_size(name_len); | |
7705 | err = btrfs_insert_empty_item(trans, root, path, &key, | |
7706 | datasize); | |
7707 | if (err) { | |
7708 | drop_inode = 1; | |
7709 | btrfs_free_path(path); | |
7710 | goto out_unlock; | |
7711 | } | |
7712 | leaf = path->nodes[0]; | |
7713 | ei = btrfs_item_ptr(leaf, path->slots[0], | |
7714 | struct btrfs_file_extent_item); | |
7715 | btrfs_set_file_extent_generation(leaf, ei, trans->transid); | |
7716 | btrfs_set_file_extent_type(leaf, ei, | |
7717 | BTRFS_FILE_EXTENT_INLINE); | |
7718 | btrfs_set_file_extent_encryption(leaf, ei, 0); | |
7719 | btrfs_set_file_extent_compression(leaf, ei, 0); | |
7720 | btrfs_set_file_extent_other_encoding(leaf, ei, 0); | |
7721 | btrfs_set_file_extent_ram_bytes(leaf, ei, name_len); | |
7722 | ||
7723 | ptr = btrfs_file_extent_inline_start(ei); | |
7724 | write_extent_buffer(leaf, symname, ptr, name_len); | |
7725 | btrfs_mark_buffer_dirty(leaf); | |
7726 | btrfs_free_path(path); | |
7727 | ||
7728 | inode->i_op = &btrfs_symlink_inode_operations; | |
7729 | inode->i_mapping->a_ops = &btrfs_symlink_aops; | |
7730 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; | |
7731 | inode_set_bytes(inode, name_len); | |
7732 | btrfs_i_size_write(inode, name_len - 1); | |
7733 | err = btrfs_update_inode(trans, root, inode); | |
7734 | if (err) | |
7735 | drop_inode = 1; | |
7736 | ||
7737 | out_unlock: | |
7738 | if (!err) | |
7739 | d_instantiate(dentry, inode); | |
7740 | btrfs_end_transaction(trans, root); | |
7741 | if (drop_inode) { | |
7742 | inode_dec_link_count(inode); | |
7743 | iput(inode); | |
7744 | } | |
7745 | btrfs_btree_balance_dirty(root); | |
7746 | return err; | |
7747 | } | |
7748 | ||
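| /* | |
|  * Core of preallocation: reserve extents covering num_bytes, insert | |
|  * them as PREALLOC file extents, prime the extent map cache and, | |
|  * unless FALLOC_FL_KEEP_SIZE is set, push i_size forward as we go. | |
|  * A transaction is started per chunk unless the caller passed one in. | |
|  */ | |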
7749 | static int __btrfs_prealloc_file_range(struct inode *inode, int mode, | |
7750 | u64 start, u64 num_bytes, u64 min_size, | |
7751 | loff_t actual_len, u64 *alloc_hint, | |
7752 | struct btrfs_trans_handle *trans) | |
7753 | { | |
7754 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | |
7755 | struct extent_map *em; | |
7756 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
7757 | struct btrfs_key ins; | |
7758 | u64 cur_offset = start; | |
7759 | u64 i_size; | |
7760 | int ret = 0; | |
7761 | bool own_trans = true; | |
7762 | ||
7763 | if (trans) | |
7764 | own_trans = false; | |
7765 | while (num_bytes > 0) { | |
7766 | if (own_trans) { | |
7767 | trans = btrfs_start_transaction(root, 3); | |
7768 | if (IS_ERR(trans)) { | |
7769 | ret = PTR_ERR(trans); | |
7770 | break; | |
7771 | } | |
7772 | } | |
7773 | ||
7774 | ret = btrfs_reserve_extent(trans, root, num_bytes, min_size, | |
7775 | 0, *alloc_hint, &ins, 1); | |
7776 | if (ret) { | |
7777 | if (own_trans) | |
7778 | btrfs_end_transaction(trans, root); | |
7779 | break; | |
7780 | } | |
7781 | ||
7782 | ret = insert_reserved_file_extent(trans, inode, | |
7783 | cur_offset, ins.objectid, | |
7784 | ins.offset, ins.offset, | |
7785 | ins.offset, 0, 0, 0, | |
7786 | BTRFS_FILE_EXTENT_PREALLOC); | |
7787 | if (ret) { | |
7788 | btrfs_abort_transaction(trans, root, ret); | |
7789 | if (own_trans) | |
7790 | btrfs_end_transaction(trans, root); | |
7791 | break; | |
7792 | } | |
7793 | btrfs_drop_extent_cache(inode, cur_offset, | |
7794 | cur_offset + ins.offset - 1, 0); | |
7795 | ||
7796 | em = alloc_extent_map(); | |
7797 | if (!em) { | |
7798 | set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, | |
7799 | &BTRFS_I(inode)->runtime_flags); | |
7800 | goto next; | |
7801 | } | |
7802 | ||
7803 | em->start = cur_offset; | |
7804 | em->orig_start = cur_offset; | |
7805 | em->len = ins.offset; | |
7806 | em->block_start = ins.objectid; | |
7807 | em->block_len = ins.offset; | |
7808 | em->orig_block_len = ins.offset; | |
7809 | em->bdev = root->fs_info->fs_devices->latest_bdev; | |
7810 | set_bit(EXTENT_FLAG_PREALLOC, &em->flags); | |
7811 | em->generation = trans->transid; | |
7812 | ||
7813 | while (1) { | |
7814 | write_lock(&em_tree->lock); | |
7815 | ret = add_extent_mapping(em_tree, em); | |
7816 | if (!ret) | |
7817 | list_move(&em->list, | |
7818 | &em_tree->modified_extents); | |
7819 | write_unlock(&em_tree->lock); | |
7820 | if (ret != -EEXIST) | |
7821 | break; | |
7822 | btrfs_drop_extent_cache(inode, cur_offset, | |
7823 | cur_offset + ins.offset - 1, | |
7824 | 0); | |
7825 | } | |
7826 | free_extent_map(em); | |
7827 | next: | |
7828 | num_bytes -= ins.offset; | |
7829 | cur_offset += ins.offset; | |
7830 | *alloc_hint = ins.objectid + ins.offset; | |
7831 | ||
7832 | inode_inc_iversion(inode); | |
7833 | inode->i_ctime = CURRENT_TIME; | |
7834 | BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; | |
7835 | if (!(mode & FALLOC_FL_KEEP_SIZE) && | |
7836 | (actual_len > inode->i_size) && | |
7837 | (cur_offset > inode->i_size)) { | |
7838 | if (cur_offset > actual_len) | |
7839 | i_size = actual_len; | |
7840 | else | |
7841 | i_size = cur_offset; | |
7842 | i_size_write(inode, i_size); | |
7843 | btrfs_ordered_update_i_size(inode, i_size, NULL); | |
7844 | } | |
7845 | ||
7846 | ret = btrfs_update_inode(trans, root, inode); | |
7847 | ||
7848 | if (ret) { | |
7849 | btrfs_abort_transaction(trans, root, ret); | |
7850 | if (own_trans) | |
7851 | btrfs_end_transaction(trans, root); | |
7852 | break; | |
7853 | } | |
7854 | ||
7855 | if (own_trans) | |
7856 | btrfs_end_transaction(trans, root); | |
7857 | } | |
7858 | return ret; | |
7859 | } | |
7860 | ||
7861 | int btrfs_prealloc_file_range(struct inode *inode, int mode, | |
7862 | u64 start, u64 num_bytes, u64 min_size, | |
7863 | loff_t actual_len, u64 *alloc_hint) | |
7864 | { | |
7865 | return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, | |
7866 | min_size, actual_len, alloc_hint, | |
7867 | NULL); | |
7868 | } | |
7869 | ||
7870 | int btrfs_prealloc_file_range_trans(struct inode *inode, | |
7871 | struct btrfs_trans_handle *trans, int mode, | |
7872 | u64 start, u64 num_bytes, u64 min_size, | |
7873 | loff_t actual_len, u64 *alloc_hint) | |
7874 | { | |
7875 | return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, | |
7876 | min_size, actual_len, alloc_hint, trans); | |
7877 | } | |
7878 | ||
7879 | static int btrfs_set_page_dirty(struct page *page) | |
7880 | { | |
7881 | return __set_page_dirty_nobuffers(page); | |
7882 | } | |
7883 | ||
7884 | static int btrfs_permission(struct inode *inode, int mask) | |
7885 | { | |
7886 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
7887 | umode_t mode = inode->i_mode; | |
7888 | ||
7889 | if (mask & MAY_WRITE && | |
7890 | (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) { | |
7891 | if (btrfs_root_readonly(root)) | |
7892 | return -EROFS; | |
7893 | if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) | |
7894 | return -EACCES; | |
7895 | } | |
7896 | return generic_permission(inode, mask); | |
7897 | } | |
7898 | ||
7899 | static const struct inode_operations btrfs_dir_inode_operations = { | |
7900 | .getattr = btrfs_getattr, | |
7901 | .lookup = btrfs_lookup, | |
7902 | .create = btrfs_create, | |
7903 | .unlink = btrfs_unlink, | |
7904 | .link = btrfs_link, | |
7905 | .mkdir = btrfs_mkdir, | |
7906 | .rmdir = btrfs_rmdir, | |
7907 | .rename = btrfs_rename, | |
7908 | .symlink = btrfs_symlink, | |
7909 | .setattr = btrfs_setattr, | |
7910 | .mknod = btrfs_mknod, | |
7911 | .setxattr = btrfs_setxattr, | |
7912 | .getxattr = btrfs_getxattr, | |
7913 | .listxattr = btrfs_listxattr, | |
7914 | .removexattr = btrfs_removexattr, | |
7915 | .permission = btrfs_permission, | |
7916 | .get_acl = btrfs_get_acl, | |
7917 | }; | |
7918 | static const struct inode_operations btrfs_dir_ro_inode_operations = { | |
7919 | .lookup = btrfs_lookup, | |
7920 | .permission = btrfs_permission, | |
7921 | .get_acl = btrfs_get_acl, | |
7922 | }; | |
7923 | ||
7924 | static const struct file_operations btrfs_dir_file_operations = { | |
7925 | .llseek = generic_file_llseek, | |
7926 | .read = generic_read_dir, | |
7927 | .readdir = btrfs_real_readdir, | |
7928 | .unlocked_ioctl = btrfs_ioctl, | |
7929 | #ifdef CONFIG_COMPAT | |
7930 | .compat_ioctl = btrfs_ioctl, | |
7931 | #endif | |
7932 | .release = btrfs_release_file, | |
7933 | .fsync = btrfs_sync_file, | |
7934 | }; | |
7935 | ||
7936 | static struct extent_io_ops btrfs_extent_io_ops = { | |
7937 | .fill_delalloc = run_delalloc_range, | |
7938 | .submit_bio_hook = btrfs_submit_bio_hook, | |
7939 | .merge_bio_hook = btrfs_merge_bio_hook, | |
7940 | .readpage_end_io_hook = btrfs_readpage_end_io_hook, | |
7941 | .writepage_end_io_hook = btrfs_writepage_end_io_hook, | |
7942 | .writepage_start_hook = btrfs_writepage_start_hook, | |
7943 | .set_bit_hook = btrfs_set_bit_hook, | |
7944 | .clear_bit_hook = btrfs_clear_bit_hook, | |
7945 | .merge_extent_hook = btrfs_merge_extent_hook, | |
7946 | .split_extent_hook = btrfs_split_extent_hook, | |
7947 | }; | |
7948 | ||
7949 | /* | |
7950 | * btrfs doesn't support the bmap operation because swapfiles | |
7951 | * use bmap to make a mapping of extents in the file. They assume | |
7952 | * these extents won't change over the life of the file and they | |
7953 | * use the bmap result to do IO directly to the drive. | |
7954 | * | |
7955 | * the btrfs bmap call would return logical addresses that aren't | |
7956 | * suitable for IO and they also will change frequently as COW | |
7957 | * operations happen. So, swapfile + btrfs == corruption. | |
7958 | * | |
7959 | * For now we're avoiding this by dropping bmap. | |
7960 | */ | |
7961 | static const struct address_space_operations btrfs_aops = { | |
7962 | .readpage = btrfs_readpage, | |
7963 | .writepage = btrfs_writepage, | |
7964 | .writepages = btrfs_writepages, | |
7965 | .readpages = btrfs_readpages, | |
7966 | .direct_IO = btrfs_direct_IO, | |
7967 | .invalidatepage = btrfs_invalidatepage, | |
7968 | .releasepage = btrfs_releasepage, | |
7969 | .set_page_dirty = btrfs_set_page_dirty, | |
7970 | .error_remove_page = generic_error_remove_page, | |
7971 | }; | |
7972 | ||
7973 | static const struct address_space_operations btrfs_symlink_aops = { | |
7974 | .readpage = btrfs_readpage, | |
7975 | .writepage = btrfs_writepage, | |
7976 | .invalidatepage = btrfs_invalidatepage, | |
7977 | .releasepage = btrfs_releasepage, | |
7978 | }; | |
7979 | ||
7980 | static const struct inode_operations btrfs_file_inode_operations = { | |
7981 | .getattr = btrfs_getattr, | |
7982 | .setattr = btrfs_setattr, | |
7983 | .setxattr = btrfs_setxattr, | |
7984 | .getxattr = btrfs_getxattr, | |
7985 | .listxattr = btrfs_listxattr, | |
7986 | .removexattr = btrfs_removexattr, | |
7987 | .permission = btrfs_permission, | |
7988 | .fiemap = btrfs_fiemap, | |
7989 | .get_acl = btrfs_get_acl, | |
7990 | .update_time = btrfs_update_time, | |
7991 | }; | |
7992 | static const struct inode_operations btrfs_special_inode_operations = { | |
7993 | .getattr = btrfs_getattr, | |
7994 | .setattr = btrfs_setattr, | |
7995 | .permission = btrfs_permission, | |
7996 | .setxattr = btrfs_setxattr, | |
7997 | .getxattr = btrfs_getxattr, | |
7998 | .listxattr = btrfs_listxattr, | |
7999 | .removexattr = btrfs_removexattr, | |
8000 | .get_acl = btrfs_get_acl, | |
8001 | .update_time = btrfs_update_time, | |
8002 | }; | |
8003 | static const struct inode_operations btrfs_symlink_inode_operations = { | |
8004 | .readlink = generic_readlink, | |
8005 | .follow_link = page_follow_link_light, | |
8006 | .put_link = page_put_link, | |
8007 | .getattr = btrfs_getattr, | |
8008 | .setattr = btrfs_setattr, | |
8009 | .permission = btrfs_permission, | |
8010 | .setxattr = btrfs_setxattr, | |
8011 | .getxattr = btrfs_getxattr, | |
8012 | .listxattr = btrfs_listxattr, | |
8013 | .removexattr = btrfs_removexattr, | |
8014 | .get_acl = btrfs_get_acl, | |
8015 | .update_time = btrfs_update_time, | |
8016 | }; | |
8017 | ||
8018 | const struct dentry_operations btrfs_dentry_operations = { | |
8019 | .d_delete = btrfs_dentry_delete, | |
8020 | .d_release = btrfs_dentry_release, | |
8021 | }; |