1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
23 #include <linux/fs.h>
24 #include <linux/pagemap.h>
25 #include <linux/highmem.h>
26 #include <linux/time.h>
27 #include <linux/init.h>
28 #include <linux/string.h>
29 #include <linux/smp_lock.h>
30 #include <linux/backing-dev.h>
31 #include <linux/mpage.h>
32 #include <linux/swap.h>
33 #include <linux/writeback.h>
34 #include <linux/statfs.h>
35 #include <linux/compat.h>
36 #include <linux/bit_spinlock.h>
37 #include <linux/version.h>
38 #include <linux/xattr.h>
39 #include "ctree.h"
40 #include "disk-io.h"
41 #include "transaction.h"
42 #include "btrfs_inode.h"
43 #include "ioctl.h"
44 #include "print-tree.h"
45 #include "volumes.h"
46
47 struct btrfs_iget_args {
48 u64 ino;
49 struct btrfs_root *root;
50 };
51
52 static struct inode_operations btrfs_dir_inode_operations;
53 static struct inode_operations btrfs_symlink_inode_operations;
54 static struct inode_operations btrfs_dir_ro_inode_operations;
55 static struct inode_operations btrfs_special_inode_operations;
56 static struct inode_operations btrfs_file_inode_operations;
57 static struct address_space_operations btrfs_aops;
58 static struct address_space_operations btrfs_symlink_aops;
59 static struct file_operations btrfs_dir_file_operations;
60 static struct extent_io_ops btrfs_extent_io_ops;
61
62 static struct kmem_cache *btrfs_inode_cachep;
63 struct kmem_cache *btrfs_trans_handle_cachep;
64 struct kmem_cache *btrfs_transaction_cachep;
65 struct kmem_cache *btrfs_bit_radix_cachep;
66 struct kmem_cache *btrfs_path_cachep;
67
68 #define S_SHIFT 12
69 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
70 [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
71 [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
72 [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
73 [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
74 [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
75 [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
76 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
77 };
78
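/*
 * very loose free-space check: return -ENOSPC if the bytes already
 * used plus outstanding delalloc plus num_required would push the FS
 * past 90% full (for deletes) or 85% full (for everything else).
 */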
79 int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
80 int for_del)
81 {
82 u64 total = btrfs_super_total_bytes(&root->fs_info->super_copy);
83 u64 used = btrfs_super_bytes_used(&root->fs_info->super_copy);
84 u64 thresh;
85 unsigned long flags;
86 int ret = 0;
87
88 if (for_del)
89 thresh = total * 90;
90 else
91 thresh = total * 85;
92
93 do_div(thresh, 100);
94
95 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
96 if (used + root->fs_info->delalloc_bytes + num_required > thresh)
97 ret = -ENOSPC;
98 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
99 return ret;
100 }
101
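/*
 * allocate on-disk extents for the range [start, end]: drop any old
 * file extents that overlap, then allocate new extents (each capped
 * at max_extent), insert the matching file extent items and update
 * the inode.  The inode is also added to the ordered-data list.
 */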
102 static int cow_file_range(struct inode *inode, u64 start, u64 end)
103 {
104 struct btrfs_root *root = BTRFS_I(inode)->root;
105 struct btrfs_trans_handle *trans;
106 u64 alloc_hint = 0;
107 u64 num_bytes;
108 u64 cur_alloc_size;
109 u64 blocksize = root->sectorsize;
110 u64 orig_start = start;
111 u64 orig_num_bytes;
112 struct btrfs_key ins;
113 int ret;
114
115 trans = btrfs_start_transaction(root, 1);
116 BUG_ON(!trans);
117 btrfs_set_trans_block_group(trans, inode);
118
119 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
120 num_bytes = max(blocksize, num_bytes);
121 ret = btrfs_drop_extents(trans, root, inode,
122 start, start + num_bytes, start, &alloc_hint);
123 orig_num_bytes = num_bytes;
124
125 if (alloc_hint == EXTENT_MAP_INLINE)
126 goto out;
127
128 BUG_ON(num_bytes > btrfs_super_total_bytes(&root->fs_info->super_copy));
129
130 while(num_bytes > 0) {
131 cur_alloc_size = min(num_bytes, root->fs_info->max_extent);
132 ret = btrfs_alloc_extent(trans, root, cur_alloc_size,
133 root->sectorsize,
134 root->root_key.objectid,
135 trans->transid,
136 inode->i_ino, start, 0,
137 alloc_hint, (u64)-1, &ins, 1);
138 if (ret) {
139 WARN_ON(1);
140 goto out;
141 }
142 cur_alloc_size = ins.offset;
143 ret = btrfs_insert_file_extent(trans, root, inode->i_ino,
144 start, ins.objectid, ins.offset,
145 ins.offset, 0);
146 inode->i_blocks += ins.offset >> 9;
147 btrfs_check_file(root, inode);
148 if (num_bytes < cur_alloc_size) {
149 printk("num_bytes %Lu cur_alloc %Lu\n", num_bytes,
150 cur_alloc_size);
151 break;
152 }
153 num_bytes -= cur_alloc_size;
154 alloc_hint = ins.objectid + ins.offset;
155 start += cur_alloc_size;
156 }
157 btrfs_drop_extent_cache(inode, orig_start,
158 orig_start + orig_num_bytes - 1);
159 btrfs_add_ordered_inode(inode);
160 btrfs_update_inode(trans, root, inode);
161 out:
162 btrfs_end_transaction(trans, root);
163 return ret;
164 }
165
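/*
 * the nodatacow path: try to reuse the existing on-disk extent for
 * this range instead of COWing it.  The extent is only reused when it
 * is a regular, non-hole extent with a single reference that sits in
 * a writable block group; anything else falls back to cow_file_range.
 */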
166 static int run_delalloc_nocow(struct inode *inode, u64 start, u64 end)
167 {
168 u64 extent_start;
169 u64 extent_end;
170 u64 bytenr;
171 u64 cow_end;
172 u64 loops = 0;
173 u64 total_fs_bytes;
174 struct btrfs_root *root = BTRFS_I(inode)->root;
175 struct btrfs_block_group_cache *block_group;
176 struct extent_buffer *leaf;
177 int found_type;
178 struct btrfs_path *path;
179 struct btrfs_file_extent_item *item;
180 int ret;
181 int err;
182 struct btrfs_key found_key;
183
184 total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
185 path = btrfs_alloc_path();
186 BUG_ON(!path);
187 again:
188 ret = btrfs_lookup_file_extent(NULL, root, path,
189 inode->i_ino, start, 0);
190 if (ret < 0) {
191 btrfs_free_path(path);
192 return ret;
193 }
194
195 cow_end = end;
196 if (ret != 0) {
197 if (path->slots[0] == 0)
198 goto not_found;
199 path->slots[0]--;
200 }
201
202 leaf = path->nodes[0];
203 item = btrfs_item_ptr(leaf, path->slots[0],
204 struct btrfs_file_extent_item);
205
206 /* are we inside the extent that was found? */
207 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
208 found_type = btrfs_key_type(&found_key);
209 if (found_key.objectid != inode->i_ino ||
210 found_type != BTRFS_EXTENT_DATA_KEY)
211 goto not_found;
212
213 found_type = btrfs_file_extent_type(leaf, item);
214 extent_start = found_key.offset;
215 if (found_type == BTRFS_FILE_EXTENT_REG) {
216 u64 extent_num_bytes;
217
218 extent_num_bytes = btrfs_file_extent_num_bytes(leaf, item);
219 extent_end = extent_start + extent_num_bytes;
220 err = 0;
221
222 if (loops && start != extent_start)
223 goto not_found;
224
225 if (start < extent_start || start >= extent_end)
226 goto not_found;
227
228 cow_end = min(end, extent_end - 1);
229 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
230 if (bytenr == 0)
231 goto not_found;
232
233 if (btrfs_count_snapshots_in_path(root, path, inode->i_ino,
234 bytenr) != 1) {
235 goto not_found;
236 }
237
238 /*
239 * we may be called by the resizer; make sure we're inside
240 * the limits of the FS
241 */
242 block_group = btrfs_lookup_block_group(root->fs_info,
243 bytenr);
244 if (!block_group || block_group->ro)
245 goto not_found;
246
247 start = extent_end;
248 } else {
249 goto not_found;
250 }
251 loop:
252 if (start > end) {
253 btrfs_free_path(path);
254 return 0;
255 }
256 btrfs_release_path(root, path);
257 loops++;
258 goto again;
259
260 not_found:
261 cow_file_range(inode, start, end);
262 start = end + 1;
263 goto loop;
264 }
265
266 static int run_delalloc_range(struct inode *inode, u64 start, u64 end)
267 {
268 struct btrfs_root *root = BTRFS_I(inode)->root;
269 int ret;
270 mutex_lock(&root->fs_info->fs_mutex);
271 if (btrfs_test_opt(root, NODATACOW) ||
272 btrfs_test_flag(inode, NODATACOW))
273 ret = run_delalloc_nocow(inode, start, end);
274 else
275 ret = cow_file_range(inode, start, end);
276
277 mutex_unlock(&root->fs_info->fs_mutex);
278 return ret;
279 }
280
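/*
 * called as bits are set in the io_tree.  When EXTENT_DELALLOC is
 * newly set on a range, add its length to the per-inode and fs-wide
 * delalloc byte counters.
 */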
281 int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
282 unsigned long old, unsigned long bits)
283 {
284 unsigned long flags;
285 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
286 struct btrfs_root *root = BTRFS_I(inode)->root;
287 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
288 BTRFS_I(inode)->delalloc_bytes += end - start + 1;
289 root->fs_info->delalloc_bytes += end - start + 1;
290 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
291 }
292 return 0;
293 }
294
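/*
 * the inverse of btrfs_set_bit_hook: when EXTENT_DELALLOC is cleared
 * from a range, subtract its length again, warning and clamping to
 * zero if the accounting would go negative.
 */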
295 int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
296 unsigned long old, unsigned long bits)
297 {
298 if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
299 struct btrfs_root *root = BTRFS_I(inode)->root;
300 unsigned long flags;
301
302 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
303 if (end - start + 1 > root->fs_info->delalloc_bytes) {
304 printk("warning: delalloc account %Lu %Lu\n",
305 end - start + 1, root->fs_info->delalloc_bytes);
306 root->fs_info->delalloc_bytes = 0;
307 BTRFS_I(inode)->delalloc_bytes = 0;
308 } else {
309 root->fs_info->delalloc_bytes -= end - start + 1;
310 BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
311 }
312 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
313 }
314 return 0;
315 }
316
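/*
 * return 1 if merging this page into the bio would make the resulting
 * i/o span more than one chunk mapping, in which case the caller must
 * not add the page to this bio.
 */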
317 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
318 size_t size, struct bio *bio)
319 {
320 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
321 struct btrfs_mapping_tree *map_tree;
322 u64 logical = bio->bi_sector << 9;
323 u64 length = 0;
324 u64 map_length;
325 int ret;
326
327 length = bio->bi_size;
328 map_tree = &root->fs_info->mapping_tree;
329 map_length = length;
330 ret = btrfs_map_block(map_tree, READ, logical,
331 &map_length, NULL, 0);
332
333 if (map_length < length + size) {
334 return 1;
335 }
336 return 0;
337 }
338
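/*
 * the worker-thread side of a checksummed write: compute the sums for
 * the data in the bio, record them in the csum tree inside a
 * transaction, then hand the bio to btrfs_map_bio for submission.
 */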
339 int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
340 int mirror_num)
341 {
342 struct btrfs_root *root = BTRFS_I(inode)->root;
343 struct btrfs_trans_handle *trans;
344 int ret = 0;
345 char *sums = NULL;
346
347 ret = btrfs_csum_one_bio(root, bio, &sums);
348 BUG_ON(ret);
349
350 mutex_lock(&root->fs_info->fs_mutex);
351 trans = btrfs_start_transaction(root, 1);
352
353 btrfs_set_trans_block_group(trans, inode);
354 btrfs_csum_file_blocks(trans, root, inode, bio, sums);
355
356 ret = btrfs_end_transaction(trans, root);
357 BUG_ON(ret);
358 mutex_unlock(&root->fs_info->fs_mutex);
359
360 kfree(sums);
361
362 return btrfs_map_bio(root, rw, bio, mirror_num);
363 }
364
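/*
 * main bio submission hook.  Reads are sent straight to btrfs_map_bio
 * after workqueue end_io handling is set up; writes that need
 * checksumming are queued to the worker threads, which then call
 * __btrfs_submit_bio_hook above.
 */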
365 int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
366 int mirror_num)
367 {
368 struct btrfs_root *root = BTRFS_I(inode)->root;
369 int ret = 0;
370
371 if (!(rw & (1 << BIO_RW))) {
372 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
373 BUG_ON(ret);
374 goto mapit;
375 }
376
377 if (btrfs_test_opt(root, NODATASUM) ||
378 btrfs_test_flag(inode, NODATASUM)) {
379 goto mapit;
380 }
381
382 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
383 inode, rw, bio, mirror_num,
384 __btrfs_submit_bio_hook);
385 mapit:
386 return btrfs_map_bio(root, rw, bio, mirror_num);
387 }
388
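/*
 * before a read is issued, look up the checksum for this block in the
 * csum tree and stash it in the io_tree private field so the end_io
 * hook can verify the data against it.
 */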
389 int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end)
390 {
391 int ret = 0;
392 struct inode *inode = page->mapping->host;
393 struct btrfs_root *root = BTRFS_I(inode)->root;
394 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
395 struct btrfs_csum_item *item;
396 struct btrfs_path *path = NULL;
397 u32 csum;
398
399 if (btrfs_test_opt(root, NODATASUM) ||
400 btrfs_test_flag(inode, NODATASUM))
401 return 0;
402
403 mutex_lock(&root->fs_info->fs_mutex);
404 path = btrfs_alloc_path();
405 item = btrfs_lookup_csum(NULL, root, path, inode->i_ino, start, 0);
406 if (IS_ERR(item)) {
407 ret = PTR_ERR(item);
408 /* a csum that isn't present is a preallocated region. */
409 if (ret == -ENOENT || ret == -EFBIG)
410 ret = 0;
411 csum = 0;
412 printk("no csum found for inode %lu start %Lu\n", inode->i_ino, start);
413 goto out;
414 }
415 read_extent_buffer(path->nodes[0], &csum, (unsigned long)item,
416 BTRFS_CRC32_SIZE);
417 set_state_private(io_tree, start, csum);
418 out:
419 if (path)
420 btrfs_free_path(path);
421 mutex_unlock(&root->fs_info->fs_mutex);
422 return ret;
423 }
424
425 struct io_failure_record {
426 struct page *page;
427 u64 start;
428 u64 len;
429 u64 logical;
430 int last_mirror;
431 };
432
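/*
 * called when a read fails (bad checksum or i/o error).  Record the
 * range and its logical disk address in the inode's io_failure_tree,
 * then resubmit the bio against the next mirror.  Once every copy has
 * been tried, give up and return -EIO.
 */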
433 int btrfs_io_failed_hook(struct bio *failed_bio,
434 struct page *page, u64 start, u64 end,
435 struct extent_state *state)
436 {
437 struct io_failure_record *failrec = NULL;
438 u64 private;
439 struct extent_map *em;
440 struct inode *inode = page->mapping->host;
441 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
442 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
443 struct bio *bio;
444 int num_copies;
445 int ret;
446 int rw;
447 u64 logical;
448
449 ret = get_state_private(failure_tree, start, &private);
450 if (ret) {
451 failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
452 if (!failrec)
453 return -ENOMEM;
454 failrec->start = start;
455 failrec->len = end - start + 1;
456 failrec->last_mirror = 0;
457
458 spin_lock(&em_tree->lock);
459 em = lookup_extent_mapping(em_tree, start, failrec->len);
460 if (em && (em->start > start || em->start + em->len < start)) {
461 free_extent_map(em);
462 em = NULL;
463 }
464 spin_unlock(&em_tree->lock);
465
466 if (!em || IS_ERR(em)) {
467 kfree(failrec);
468 return -EIO;
469 }
470 logical = start - em->start;
471 logical = em->block_start + logical;
472 failrec->logical = logical;
473 free_extent_map(em);
474 set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
475 EXTENT_DIRTY, GFP_NOFS);
476 set_state_private(failure_tree, start,
477 (u64)(unsigned long)failrec);
478 } else {
479 failrec = (struct io_failure_record *)(unsigned long)private;
480 }
481 num_copies = btrfs_num_copies(
482 &BTRFS_I(inode)->root->fs_info->mapping_tree,
483 failrec->logical, failrec->len);
484 failrec->last_mirror++;
485 if (!state) {
486 spin_lock_irq(&BTRFS_I(inode)->io_tree.lock);
487 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
488 failrec->start,
489 EXTENT_LOCKED);
490 if (state && state->start != failrec->start)
491 state = NULL;
492 spin_unlock_irq(&BTRFS_I(inode)->io_tree.lock);
493 }
494 if (!state || failrec->last_mirror > num_copies) {
495 set_state_private(failure_tree, failrec->start, 0);
496 clear_extent_bits(failure_tree, failrec->start,
497 failrec->start + failrec->len - 1,
498 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
499 kfree(failrec);
500 return -EIO;
501 }
502 bio = bio_alloc(GFP_NOFS, 1);
503 bio->bi_private = state;
504 bio->bi_end_io = failed_bio->bi_end_io;
505 bio->bi_sector = failrec->logical >> 9;
506 bio->bi_bdev = failed_bio->bi_bdev;
507 bio->bi_size = 0;
508 bio_add_page(bio, page, failrec->len, start - page_offset(page));
509 if (failed_bio->bi_rw & (1 << BIO_RW))
510 rw = WRITE;
511 else
512 rw = READ;
513
514 BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
515 failrec->last_mirror);
516 return 0;
517 }
518
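/*
 * a range that previously failed to read has now verified cleanly;
 * drop any failure record we were keeping for it.
 */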
519 int btrfs_clean_io_failures(struct inode *inode, u64 start)
520 {
521 u64 private;
522 u64 private_failure;
523 struct io_failure_record *failure;
524 int ret;
525
526 private = 0;
527 if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
528 (u64)-1, 1, EXTENT_DIRTY)) {
529 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
530 start, &private_failure);
531 if (ret == 0) {
532 failure = (struct io_failure_record *)(unsigned long)
533 private_failure;
534 set_state_private(&BTRFS_I(inode)->io_failure_tree,
535 failure->start, 0);
536 clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
537 failure->start,
538 failure->start + failure->len - 1,
539 EXTENT_DIRTY | EXTENT_LOCKED,
540 GFP_NOFS);
541 kfree(failure);
542 }
543 }
544 return 0;
545 }
546
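/*
 * end_io verification: recompute the checksum of the data that was
 * just read and compare it with the value saved by
 * btrfs_readpage_io_hook.  On a mismatch the buffer is poisoned and
 * -EIO is returned so the read can be retried from another copy.
 */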
547 int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
548 struct extent_state *state)
549 {
550 size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
551 struct inode *inode = page->mapping->host;
552 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
553 char *kaddr;
554 u64 private = ~(u32)0;
555 int ret;
556 struct btrfs_root *root = BTRFS_I(inode)->root;
557 u32 csum = ~(u32)0;
558 unsigned long flags;
559
560 if (btrfs_test_opt(root, NODATASUM) ||
561 btrfs_test_flag(inode, NODATASUM))
562 return 0;
563 if (state && state->start == start) {
564 private = state->private;
565 ret = 0;
566 } else {
567 ret = get_state_private(io_tree, start, &private);
568 }
569 local_irq_save(flags);
570 kaddr = kmap_atomic(page, KM_IRQ0);
571 if (ret) {
572 goto zeroit;
573 }
574 csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
575 btrfs_csum_final(csum, (char *)&csum);
576 if (csum != private) {
577 goto zeroit;
578 }
579 kunmap_atomic(kaddr, KM_IRQ0);
580 local_irq_restore(flags);
581
582 /* if the io failure tree for this inode is non-empty,
583 * check to see if we've recovered from a failed IO
584 */
585 btrfs_clean_io_failures(inode, start);
586 return 0;
587
588 zeroit:
589 printk("btrfs csum failed ino %lu off %llu csum %u private %Lu\n",
590 page->mapping->host->i_ino, (unsigned long long)start, csum,
591 private);
592 memset(kaddr + offset, 1, end - start + 1);
593 flush_dcache_page(page);
594 kunmap_atomic(kaddr, KM_IRQ0);
595 local_irq_restore(flags);
596 if (private == 0)
597 return 0;
598 return -EIO;
599 }
600
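/*
 * read the inode item found via BTRFS_I(inode)->location, copy it
 * into the VFS inode, and wire up the address space and inode
 * operations based on the file type.
 */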
601 void btrfs_read_locked_inode(struct inode *inode)
602 {
603 struct btrfs_path *path;
604 struct extent_buffer *leaf;
605 struct btrfs_inode_item *inode_item;
606 struct btrfs_timespec *tspec;
607 struct btrfs_root *root = BTRFS_I(inode)->root;
608 struct btrfs_key location;
609 u64 alloc_group_block;
610 u32 rdev;
611 int ret;
612
613 path = btrfs_alloc_path();
614 BUG_ON(!path);
615 mutex_lock(&root->fs_info->fs_mutex);
616 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
617
618 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
619 if (ret)
620 goto make_bad;
621
622 leaf = path->nodes[0];
623 inode_item = btrfs_item_ptr(leaf, path->slots[0],
624 struct btrfs_inode_item);
625
626 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
627 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
628 inode->i_uid = btrfs_inode_uid(leaf, inode_item);
629 inode->i_gid = btrfs_inode_gid(leaf, inode_item);
630 inode->i_size = btrfs_inode_size(leaf, inode_item);
631
632 tspec = btrfs_inode_atime(inode_item);
633 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
634 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
635
636 tspec = btrfs_inode_mtime(inode_item);
637 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
638 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
639
640 tspec = btrfs_inode_ctime(inode_item);
641 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
642 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
643
644 inode->i_blocks = btrfs_inode_nblocks(leaf, inode_item);
645 inode->i_generation = btrfs_inode_generation(leaf, inode_item);
646 inode->i_rdev = 0;
647 rdev = btrfs_inode_rdev(leaf, inode_item);
648
649 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
650 BTRFS_I(inode)->block_group = btrfs_lookup_block_group(root->fs_info,
651 alloc_group_block);
652 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
653 if (!BTRFS_I(inode)->block_group) {
654 BTRFS_I(inode)->block_group = btrfs_find_block_group(root,
655 NULL, 0,
656 BTRFS_BLOCK_GROUP_METADATA, 0);
657 }
658 btrfs_free_path(path);
659 inode_item = NULL;
660
661 mutex_unlock(&root->fs_info->fs_mutex);
662
663 switch (inode->i_mode & S_IFMT) {
664 case S_IFREG:
665 inode->i_mapping->a_ops = &btrfs_aops;
666 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
667 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
668 inode->i_fop = &btrfs_file_operations;
669 inode->i_op = &btrfs_file_inode_operations;
670 break;
671 case S_IFDIR:
672 inode->i_fop = &btrfs_dir_file_operations;
673 if (root == root->fs_info->tree_root)
674 inode->i_op = &btrfs_dir_ro_inode_operations;
675 else
676 inode->i_op = &btrfs_dir_inode_operations;
677 break;
678 case S_IFLNK:
679 inode->i_op = &btrfs_symlink_inode_operations;
680 inode->i_mapping->a_ops = &btrfs_symlink_aops;
681 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
682 break;
683 default:
684 init_special_inode(inode, inode->i_mode, rdev);
685 break;
686 }
687 return;
688
689 make_bad:
690 btrfs_release_path(root, path);
691 btrfs_free_path(path);
692 mutex_unlock(&root->fs_info->fs_mutex);
693 make_bad_inode(inode);
694 }
695
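/* copy the in-memory inode fields into an inode item in the given leaf */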
696 static void fill_inode_item(struct extent_buffer *leaf,
697 struct btrfs_inode_item *item,
698 struct inode *inode)
699 {
700 btrfs_set_inode_uid(leaf, item, inode->i_uid);
701 btrfs_set_inode_gid(leaf, item, inode->i_gid);
702 btrfs_set_inode_size(leaf, item, inode->i_size);
703 btrfs_set_inode_mode(leaf, item, inode->i_mode);
704 btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
705
706 btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
707 inode->i_atime.tv_sec);
708 btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
709 inode->i_atime.tv_nsec);
710
711 btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
712 inode->i_mtime.tv_sec);
713 btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
714 inode->i_mtime.tv_nsec);
715
716 btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
717 inode->i_ctime.tv_sec);
718 btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
719 inode->i_ctime.tv_nsec);
720
721 btrfs_set_inode_nblocks(leaf, item, inode->i_blocks);
722 btrfs_set_inode_generation(leaf, item, inode->i_generation);
723 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
724 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
725 btrfs_set_inode_block_group(leaf, item,
726 BTRFS_I(inode)->block_group->key.objectid);
727 }
728
729 int btrfs_update_inode(struct btrfs_trans_handle *trans,
730 struct btrfs_root *root,
731 struct inode *inode)
732 {
733 struct btrfs_inode_item *inode_item;
734 struct btrfs_path *path;
735 struct extent_buffer *leaf;
736 int ret;
737
738 path = btrfs_alloc_path();
739 BUG_ON(!path);
740 ret = btrfs_lookup_inode(trans, root, path,
741 &BTRFS_I(inode)->location, 1);
742 if (ret) {
743 if (ret > 0)
744 ret = -ENOENT;
745 goto failed;
746 }
747
748 leaf = path->nodes[0];
749 inode_item = btrfs_item_ptr(leaf, path->slots[0],
750 struct btrfs_inode_item);
751
752 fill_inode_item(leaf, inode_item, inode);
753 btrfs_mark_buffer_dirty(leaf);
754 btrfs_set_inode_last_trans(trans, inode);
755 ret = 0;
756 failed:
757 btrfs_release_path(root, path);
758 btrfs_free_path(path);
759 return ret;
760 }
761
762
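/*
 * remove a name from a directory: delete the dir item, the matching
 * dir index item and the inode backref, then fix up the directory's
 * size and timestamps and drop the victim's link count.
 */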
763 static int btrfs_unlink_trans(struct btrfs_trans_handle *trans,
764 struct btrfs_root *root,
765 struct inode *dir,
766 struct dentry *dentry)
767 {
768 struct btrfs_path *path;
769 const char *name = dentry->d_name.name;
770 int name_len = dentry->d_name.len;
771 int ret = 0;
772 struct extent_buffer *leaf;
773 struct btrfs_dir_item *di;
774 struct btrfs_key key;
775
776 path = btrfs_alloc_path();
777 if (!path) {
778 ret = -ENOMEM;
779 goto err;
780 }
781
782 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
783 name, name_len, -1);
784 if (IS_ERR(di)) {
785 ret = PTR_ERR(di);
786 goto err;
787 }
788 if (!di) {
789 ret = -ENOENT;
790 goto err;
791 }
792 leaf = path->nodes[0];
793 btrfs_dir_item_key_to_cpu(leaf, di, &key);
794 ret = btrfs_delete_one_dir_name(trans, root, path, di);
795 if (ret)
796 goto err;
797 btrfs_release_path(root, path);
798
799 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
800 key.objectid, name, name_len, -1);
801 if (IS_ERR(di)) {
802 ret = PTR_ERR(di);
803 goto err;
804 }
805 if (!di) {
806 ret = -ENOENT;
807 goto err;
808 }
809 ret = btrfs_delete_one_dir_name(trans, root, path, di);
810
811 dentry->d_inode->i_ctime = dir->i_ctime;
812 ret = btrfs_del_inode_ref(trans, root, name, name_len,
813 dentry->d_inode->i_ino,
814 dentry->d_parent->d_inode->i_ino);
815 if (ret) {
816 printk("failed to delete reference to %.*s, "
817 "inode %lu parent %lu\n", name_len, name,
818 dentry->d_inode->i_ino,
819 dentry->d_parent->d_inode->i_ino);
820 }
821 err:
822 btrfs_free_path(path);
823 if (!ret) {
824 dir->i_size -= name_len * 2;
825 dir->i_mtime = dir->i_ctime = CURRENT_TIME;
826 btrfs_update_inode(trans, root, dir);
827 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
828 dentry->d_inode->i_nlink--;
829 #else
830 drop_nlink(dentry->d_inode);
831 #endif
832 ret = btrfs_update_inode(trans, root, dentry->d_inode);
833 dir->i_sb->s_dirt = 1;
834 }
835 return ret;
836 }
837
838 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
839 {
840 struct btrfs_root *root;
841 struct btrfs_trans_handle *trans;
842 struct inode *inode = dentry->d_inode;
843 int ret;
844 unsigned long nr = 0;
845
846 root = BTRFS_I(dir)->root;
847 mutex_lock(&root->fs_info->fs_mutex);
848
849 ret = btrfs_check_free_space(root, 1, 1);
850 if (ret)
851 goto fail;
852
853 trans = btrfs_start_transaction(root, 1);
854
855 btrfs_set_trans_block_group(trans, dir);
856 ret = btrfs_unlink_trans(trans, root, dir, dentry);
857 nr = trans->blocks_used;
858
859 if (inode->i_nlink == 0) {
860 int found;
861 /* if the inode isn't linked anywhere,
862 * we don't need to worry about
863 * data=ordered
864 */
865 found = btrfs_del_ordered_inode(inode);
866 if (found == 1) {
867 atomic_dec(&inode->i_count);
868 }
869 }
870
871 btrfs_end_transaction(trans, root);
872 fail:
873 mutex_unlock(&root->fs_info->fs_mutex);
874 btrfs_btree_balance_dirty(root, nr);
875 btrfs_throttle(root);
876 return ret;
877 }
878
879 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
880 {
881 struct inode *inode = dentry->d_inode;
882 int err = 0;
883 int ret;
884 struct btrfs_root *root = BTRFS_I(dir)->root;
885 struct btrfs_trans_handle *trans;
886 unsigned long nr = 0;
887
888 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
889 return -ENOTEMPTY;
890
891 mutex_lock(&root->fs_info->fs_mutex);
892 ret = btrfs_check_free_space(root, 1, 1);
893 if (ret)
894 goto fail;
895
896 trans = btrfs_start_transaction(root, 1);
897 btrfs_set_trans_block_group(trans, dir);
898
899 /* now the directory is empty */
900 err = btrfs_unlink_trans(trans, root, dir, dentry);
901 if (!err) {
902 inode->i_size = 0;
903 }
904
905 nr = trans->blocks_used;
906 ret = btrfs_end_transaction(trans, root);
907 fail:
908 mutex_unlock(&root->fs_info->fs_mutex);
909 btrfs_btree_balance_dirty(root, nr);
910 btrfs_throttle(root);
911
912 if (ret && !err)
913 err = ret;
914 return err;
915 }
916
917 /*
918 * this can truncate away extent items, csum items and directory items.
919 * It starts at a high offset and removes keys until it can't find
920 * any higher than i_size.
921 *
922 * csum items that cross the new i_size are truncated to the new size
923 * as well.
924 */
925 static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans,
926 struct btrfs_root *root,
927 struct inode *inode,
928 u32 min_type)
929 {
930 int ret;
931 struct btrfs_path *path;
932 struct btrfs_key key;
933 struct btrfs_key found_key;
934 u32 found_type;
935 struct extent_buffer *leaf;
936 struct btrfs_file_extent_item *fi;
937 u64 extent_start = 0;
938 u64 extent_num_bytes = 0;
939 u64 item_end = 0;
940 u64 root_gen = 0;
941 u64 root_owner = 0;
942 int found_extent;
943 int del_item;
944 int pending_del_nr = 0;
945 int pending_del_slot = 0;
946 int extent_type = -1;
947 u64 mask = root->sectorsize - 1;
948
949 btrfs_drop_extent_cache(inode, inode->i_size & (~mask), (u64)-1);
950 path = btrfs_alloc_path();
951 path->reada = -1;
952 BUG_ON(!path);
953
954 /* FIXME, add redo link to tree so we don't leak on crash */
955 key.objectid = inode->i_ino;
956 key.offset = (u64)-1;
957 key.type = (u8)-1;
958
959 btrfs_init_path(path);
960 search_again:
961 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
962 if (ret < 0) {
963 goto error;
964 }
965 if (ret > 0) {
966 BUG_ON(path->slots[0] == 0);
967 path->slots[0]--;
968 }
969
970 while(1) {
971 fi = NULL;
972 leaf = path->nodes[0];
973 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
974 found_type = btrfs_key_type(&found_key);
975
976 if (found_key.objectid != inode->i_ino)
977 break;
978
979 if (found_type < min_type)
980 break;
981
982 item_end = found_key.offset;
983 if (found_type == BTRFS_EXTENT_DATA_KEY) {
984 fi = btrfs_item_ptr(leaf, path->slots[0],
985 struct btrfs_file_extent_item);
986 extent_type = btrfs_file_extent_type(leaf, fi);
987 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
988 item_end +=
989 btrfs_file_extent_num_bytes(leaf, fi);
990 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
991 struct btrfs_item *item = btrfs_item_nr(leaf,
992 path->slots[0]);
993 item_end += btrfs_file_extent_inline_len(leaf,
994 item);
995 }
996 item_end--;
997 }
998 if (found_type == BTRFS_CSUM_ITEM_KEY) {
999 ret = btrfs_csum_truncate(trans, root, path,
1000 inode->i_size);
1001 BUG_ON(ret);
1002 }
1003 if (item_end < inode->i_size) {
1004 if (found_type == BTRFS_DIR_ITEM_KEY) {
1005 found_type = BTRFS_INODE_ITEM_KEY;
1006 } else if (found_type == BTRFS_EXTENT_ITEM_KEY) {
1007 found_type = BTRFS_CSUM_ITEM_KEY;
1008 } else if (found_type == BTRFS_EXTENT_DATA_KEY) {
1009 found_type = BTRFS_XATTR_ITEM_KEY;
1010 } else if (found_type == BTRFS_XATTR_ITEM_KEY) {
1011 found_type = BTRFS_INODE_REF_KEY;
1012 } else if (found_type) {
1013 found_type--;
1014 } else {
1015 break;
1016 }
1017 btrfs_set_key_type(&key, found_type);
1018 goto next;
1019 }
1020 if (found_key.offset >= inode->i_size)
1021 del_item = 1;
1022 else
1023 del_item = 0;
1024 found_extent = 0;
1025
1026 /* FIXME, shrink the extent if the ref count is only 1 */
1027 if (found_type != BTRFS_EXTENT_DATA_KEY)
1028 goto delete;
1029
1030 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
1031 u64 num_dec;
1032 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
1033 if (!del_item) {
1034 u64 orig_num_bytes =
1035 btrfs_file_extent_num_bytes(leaf, fi);
1036 extent_num_bytes = inode->i_size -
1037 found_key.offset + root->sectorsize - 1;
1038 extent_num_bytes = extent_num_bytes &
1039 ~((u64)root->sectorsize - 1);
1040 btrfs_set_file_extent_num_bytes(leaf, fi,
1041 extent_num_bytes);
1042 num_dec = (orig_num_bytes -
1043 extent_num_bytes);
1044 if (extent_start != 0)
1045 dec_i_blocks(inode, num_dec);
1046 btrfs_mark_buffer_dirty(leaf);
1047 } else {
1048 extent_num_bytes =
1049 btrfs_file_extent_disk_num_bytes(leaf,
1050 fi);
1051 /* FIXME blocksize != 4096 */
1052 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
1053 if (extent_start != 0) {
1054 found_extent = 1;
1055 dec_i_blocks(inode, num_dec);
1056 }
1057 root_gen = btrfs_header_generation(leaf);
1058 root_owner = btrfs_header_owner(leaf);
1059 }
1060 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1061 if (!del_item) {
1062 u32 newsize = inode->i_size - found_key.offset;
1063 dec_i_blocks(inode, item_end + 1 -
1064 found_key.offset - newsize);
1065 newsize =
1066 btrfs_file_extent_calc_inline_size(newsize);
1067 ret = btrfs_truncate_item(trans, root, path,
1068 newsize, 1);
1069 BUG_ON(ret);
1070 } else {
1071 dec_i_blocks(inode, item_end + 1 -
1072 found_key.offset);
1073 }
1074 }
1075 delete:
1076 if (del_item) {
1077 if (!pending_del_nr) {
1078 /* no pending yet, add ourselves */
1079 pending_del_slot = path->slots[0];
1080 pending_del_nr = 1;
1081 } else if (pending_del_nr &&
1082 path->slots[0] + 1 == pending_del_slot) {
1083 /* hop on the pending chunk */
1084 pending_del_nr++;
1085 pending_del_slot = path->slots[0];
1086 } else {
1087 printk("bad pending slot %d pending_del_nr %d pending_del_slot %d\n", path->slots[0], pending_del_nr, pending_del_slot);
1088 }
1089 } else {
1090 break;
1091 }
1092 if (found_extent) {
1093 ret = btrfs_free_extent(trans, root, extent_start,
1094 extent_num_bytes,
1095 root_owner,
1096 root_gen, inode->i_ino,
1097 found_key.offset, 0);
1098 BUG_ON(ret);
1099 }
1100 next:
1101 if (path->slots[0] == 0) {
1102 if (pending_del_nr)
1103 goto del_pending;
1104 btrfs_release_path(root, path);
1105 goto search_again;
1106 }
1107
1108 path->slots[0]--;
1109 if (pending_del_nr &&
1110 path->slots[0] + 1 != pending_del_slot) {
1111 struct btrfs_key debug;
1112 del_pending:
1113 btrfs_item_key_to_cpu(path->nodes[0], &debug,
1114 pending_del_slot);
1115 ret = btrfs_del_items(trans, root, path,
1116 pending_del_slot,
1117 pending_del_nr);
1118 BUG_ON(ret);
1119 pending_del_nr = 0;
1120 btrfs_release_path(root, path);
1121 goto search_again;
1122 }
1123 }
1124 ret = 0;
1125 error:
1126 if (pending_del_nr) {
1127 ret = btrfs_del_items(trans, root, path, pending_del_slot,
1128 pending_del_nr);
1129 }
1130 btrfs_release_path(root, path);
1131 btrfs_free_path(path);
1132 inode->i_sb->s_dirt = 1;
1133 return ret;
1134 }
1135
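/*
 * mark a single page delalloc and zero it from zero_start to the end
 * of the page.
 */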
1136 static int btrfs_cow_one_page(struct inode *inode, struct page *page,
1137 size_t zero_start)
1138 {
1139 char *kaddr;
1140 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1141 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
1142 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
1143 int ret = 0;
1144
1145 WARN_ON(!PageLocked(page));
1146 set_page_extent_mapped(page);
1147
1148 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
1149 set_extent_delalloc(&BTRFS_I(inode)->io_tree, page_start,
1150 page_end, GFP_NOFS);
1151
1152 if (zero_start != PAGE_CACHE_SIZE) {
1153 kaddr = kmap(page);
1154 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
1155 flush_dcache_page(page);
1156 kunmap(page);
1157 }
1158 set_page_dirty(page);
1159 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
1160
1161 return ret;
1162 }
1163
1164 /*
1165 * taken from block_truncate_page, but does cow as it zeros out
1166 * any bytes left in the last page in the file.
1167 */
1168 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
1169 {
1170 struct inode *inode = mapping->host;
1171 struct btrfs_root *root = BTRFS_I(inode)->root;
1172 u32 blocksize = root->sectorsize;
1173 pgoff_t index = from >> PAGE_CACHE_SHIFT;
1174 unsigned offset = from & (PAGE_CACHE_SIZE-1);
1175 struct page *page;
1176 int ret = 0;
1177 u64 page_start;
1178
1179 if ((offset & (blocksize - 1)) == 0)
1180 goto out;
1181
1182 ret = -ENOMEM;
1183 page = grab_cache_page(mapping, index);
1184 if (!page)
1185 goto out;
1186 if (!PageUptodate(page)) {
1187 ret = btrfs_readpage(NULL, page);
1188 lock_page(page);
1189 if (!PageUptodate(page)) {
1190 ret = -EIO;
     unlock_page(page);
     page_cache_release(page);
1191 goto out;
1192 }
1193 }
1194 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
1195
1196 ret = btrfs_cow_one_page(inode, page, offset);
1197
1198 unlock_page(page);
1199 page_cache_release(page);
1200 out:
1201 return ret;
1202 }
1203
1204 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
1205 {
1206 struct inode *inode = dentry->d_inode;
1207 int err;
1208
1209 err = inode_change_ok(inode, attr);
1210 if (err)
1211 return err;
1212
1213 if (S_ISREG(inode->i_mode) &&
1214 attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) {
1215 struct btrfs_trans_handle *trans;
1216 struct btrfs_root *root = BTRFS_I(inode)->root;
1217 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1218
1219 u64 mask = root->sectorsize - 1;
1220 u64 hole_start = (inode->i_size + mask) & ~mask;
1221 u64 block_end = (attr->ia_size + mask) & ~mask;
1222 u64 hole_size;
1223 u64 alloc_hint = 0;
1224
1225 if (attr->ia_size <= hole_start)
1226 goto out;
1227
1228 mutex_lock(&root->fs_info->fs_mutex);
1229 err = btrfs_check_free_space(root, 1, 0);
1230 mutex_unlock(&root->fs_info->fs_mutex);
1231 if (err)
1232 goto fail;
1233
1234 btrfs_truncate_page(inode->i_mapping, inode->i_size);
1235
1236 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
1237 hole_size = block_end - hole_start;
1238
1239 mutex_lock(&root->fs_info->fs_mutex);
1240 trans = btrfs_start_transaction(root, 1);
1241 btrfs_set_trans_block_group(trans, inode);
1242 err = btrfs_drop_extents(trans, root, inode,
1243 hole_start, block_end, hole_start,
1244 &alloc_hint);
1245
1246 if (alloc_hint != EXTENT_MAP_INLINE) {
1247 err = btrfs_insert_file_extent(trans, root,
1248 inode->i_ino,
1249 hole_start, 0, 0,
1250 hole_size, 0);
1251 btrfs_drop_extent_cache(inode, hole_start,
1252 (u64)-1);
1253 btrfs_check_file(root, inode);
1254 }
1255 btrfs_end_transaction(trans, root);
1256 mutex_unlock(&root->fs_info->fs_mutex);
1257 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
1258 if (err)
1259 return err;
1260 }
1261 out:
1262 err = inode_setattr(inode, attr);
1263 fail:
1264 return err;
1265 }
1266
1267 void btrfs_put_inode(struct inode *inode)
1268 {
1269 int ret;
1270
1271 if (!BTRFS_I(inode)->ordered_trans) {
1272 return;
1273 }
1274
1275 if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY) ||
1276 mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))
1277 return;
1278
1279 ret = btrfs_del_ordered_inode(inode);
1280 if (ret == 1) {
1281 atomic_dec(&inode->i_count);
1282 }
1283 }
1284
1285 void btrfs_delete_inode(struct inode *inode)
1286 {
1287 struct btrfs_trans_handle *trans;
1288 struct btrfs_root *root = BTRFS_I(inode)->root;
1289 unsigned long nr;
1290 int ret;
1291
1292 truncate_inode_pages(&inode->i_data, 0);
1293 if (is_bad_inode(inode)) {
1294 goto no_delete;
1295 }
1296
1297 inode->i_size = 0;
1298 mutex_lock(&root->fs_info->fs_mutex);
1299 trans = btrfs_start_transaction(root, 1);
1300
1301 btrfs_set_trans_block_group(trans, inode);
1302 ret = btrfs_truncate_in_trans(trans, root, inode, 0);
1303 if (ret)
1304 goto no_delete_lock;
1305
1306 nr = trans->blocks_used;
1307 clear_inode(inode);
1308
1309 btrfs_end_transaction(trans, root);
1310 mutex_unlock(&root->fs_info->fs_mutex);
1311 btrfs_btree_balance_dirty(root, nr);
1312 btrfs_throttle(root);
1313 return;
1314
1315 no_delete_lock:
1316 nr = trans->blocks_used;
1317 btrfs_end_transaction(trans, root);
1318 mutex_unlock(&root->fs_info->fs_mutex);
1319 btrfs_btree_balance_dirty(root, nr);
1320 btrfs_throttle(root);
1321 no_delete:
1322 clear_inode(inode);
1323 }
1324
1325 /*
1326 * this returns the key found in the dir entry in the location pointer.
1327 * If no dir entries were found, location->objectid is 0.
1328 */
1329 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
1330 struct btrfs_key *location)
1331 {
1332 const char *name = dentry->d_name.name;
1333 int namelen = dentry->d_name.len;
1334 struct btrfs_dir_item *di;
1335 struct btrfs_path *path;
1336 struct btrfs_root *root = BTRFS_I(dir)->root;
1337 int ret = 0;
1338
1339 if (namelen == 1 && strcmp(name, ".") == 0) {
1340 location->objectid = dir->i_ino;
1341 location->type = BTRFS_INODE_ITEM_KEY;
1342 location->offset = 0;
1343 return 0;
1344 }
1345 path = btrfs_alloc_path();
1346 BUG_ON(!path);
1347
1348 if (namelen == 2 && strcmp(name, "..") == 0) {
1349 struct btrfs_key key;
1350 struct extent_buffer *leaf;
1351 u32 nritems;
1352 int slot;
1353
1354 key.objectid = dir->i_ino;
1355 btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);
1356 key.offset = 0;
1357 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1358 BUG_ON(ret == 0);
1359 ret = 0;
1360
1361 leaf = path->nodes[0];
1362 slot = path->slots[0];
1363 nritems = btrfs_header_nritems(leaf);
1364 if (slot >= nritems)
1365 goto out_err;
1366
1367 btrfs_item_key_to_cpu(leaf, &key, slot);
1368 if (key.objectid != dir->i_ino ||
1369 key.type != BTRFS_INODE_REF_KEY) {
1370 goto out_err;
1371 }
1372 location->objectid = key.offset;
1373 location->type = BTRFS_INODE_ITEM_KEY;
1374 location->offset = 0;
1375 goto out;
1376 }
1377
1378 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
1379 namelen, 0);
1380 if (IS_ERR(di))
1381 ret = PTR_ERR(di);
1382 if (!di || IS_ERR(di)) {
1383 goto out_err;
1384 }
1385 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
1386 out:
1387 btrfs_free_path(path);
1388 return ret;
1389 out_err:
1390 location->objectid = 0;
1391 goto out;
1392 }
1393
1394 /*
1395 * when we hit a tree root in a directory, the btrfs part of the inode
1396 * needs to be changed to reflect the root directory of the tree root. This
1397 * is kind of like crossing a mount point.
1398 */
1399 static int fixup_tree_root_location(struct btrfs_root *root,
1400 struct btrfs_key *location,
1401 struct btrfs_root **sub_root,
1402 struct dentry *dentry)
1403 {
1404 struct btrfs_path *path;
1405 struct btrfs_root_item *ri;
1406
1407 if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
1408 return 0;
1409 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1410 return 0;
1411
1412 path = btrfs_alloc_path();
1413 BUG_ON(!path);
1414 mutex_lock(&root->fs_info->fs_mutex);
1415
1416 *sub_root = btrfs_read_fs_root(root->fs_info, location,
1417 dentry->d_name.name,
1418 dentry->d_name.len);
1419 if (IS_ERR(*sub_root)) {
     btrfs_free_path(path);
     mutex_unlock(&root->fs_info->fs_mutex);
1420 return PTR_ERR(*sub_root);
     }
1421
1422 ri = &(*sub_root)->root_item;
1423 location->objectid = btrfs_root_dirid(ri);
1424 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
1425 location->offset = 0;
1426
1427 btrfs_free_path(path);
1428 mutex_unlock(&root->fs_info->fs_mutex);
1429 return 0;
1430 }
1431
1432 static int btrfs_init_locked_inode(struct inode *inode, void *p)
1433 {
1434 struct btrfs_iget_args *args = p;
1435 inode->i_ino = args->ino;
1436 BTRFS_I(inode)->root = args->root;
1437 BTRFS_I(inode)->delalloc_bytes = 0;
1438 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
1439 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
1440 inode->i_mapping, GFP_NOFS);
1441 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
1442 inode->i_mapping, GFP_NOFS);
1443 atomic_set(&BTRFS_I(inode)->ordered_writeback, 0);
1444 return 0;
1445 }
1446
1447 static int btrfs_find_actor(struct inode *inode, void *opaque)
1448 {
1449 struct btrfs_iget_args *args = opaque;
1450 return (args->ino == inode->i_ino &&
1451 args->root == BTRFS_I(inode)->root);
1452 }
1453
1454 struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
1455 u64 root_objectid)
1456 {
1457 struct btrfs_iget_args args;
1458 args.ino = objectid;
1459 args.root = btrfs_lookup_fs_root(btrfs_sb(s)->fs_info, root_objectid);
1460
1461 if (!args.root)
1462 return NULL;
1463
1464 return ilookup5(s, objectid, btrfs_find_actor, (void *)&args);
1465 }
1466
1467 struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
1468 struct btrfs_root *root)
1469 {
1470 struct inode *inode;
1471 struct btrfs_iget_args args;
1472 args.ino = objectid;
1473 args.root = root;
1474
1475 inode = iget5_locked(s, objectid, btrfs_find_actor,
1476 btrfs_init_locked_inode,
1477 (void *)&args);
1478 return inode;
1479 }
1480
1481 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
1482 struct nameidata *nd)
1483 {
1484 struct inode * inode;
1485 struct btrfs_inode *bi = BTRFS_I(dir);
1486 struct btrfs_root *root = bi->root;
1487 struct btrfs_root *sub_root = root;
1488 struct btrfs_key location;
1489 int ret;
1490
1491 if (dentry->d_name.len > BTRFS_NAME_LEN)
1492 return ERR_PTR(-ENAMETOOLONG);
1493
1494 mutex_lock(&root->fs_info->fs_mutex);
1495 ret = btrfs_inode_by_name(dir, dentry, &location);
1496 mutex_unlock(&root->fs_info->fs_mutex);
1497
1498 if (ret < 0)
1499 return ERR_PTR(ret);
1500
1501 inode = NULL;
1502 if (location.objectid) {
1503 ret = fixup_tree_root_location(root, &location, &sub_root,
1504 dentry);
1505 if (ret < 0)
1506 return ERR_PTR(ret);
1507 if (ret > 0)
1508 return ERR_PTR(-ENOENT);
1509 inode = btrfs_iget_locked(dir->i_sb, location.objectid,
1510 sub_root);
1511 if (!inode)
1512 return ERR_PTR(-EACCES);
1513 if (inode->i_state & I_NEW) {
1514 /* the inode and parent dir are two different roots */
1515 if (sub_root != root) {
1516 igrab(inode);
1517 sub_root->inode = inode;
1518 }
1519 BTRFS_I(inode)->root = sub_root;
1520 memcpy(&BTRFS_I(inode)->location, &location,
1521 sizeof(location));
1522 btrfs_read_locked_inode(inode);
1523 unlock_new_inode(inode);
1524 }
1525 }
1526 return d_splice_alias(inode, dentry);
1527 }
1528
1529 static unsigned char btrfs_filetype_table[] = {
1530 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
1531 };
1532
1533 static int btrfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
1534 {
1535 struct inode *inode = filp->f_dentry->d_inode;
1536 struct btrfs_root *root = BTRFS_I(inode)->root;
1537 struct btrfs_item *item;
1538 struct btrfs_dir_item *di;
1539 struct btrfs_key key;
1540 struct btrfs_key found_key;
1541 struct btrfs_path *path;
1542 int ret;
1543 u32 nritems;
1544 struct extent_buffer *leaf;
1545 int slot;
1546 int advance;
1547 unsigned char d_type;
1548 int over = 0;
1549 u32 di_cur;
1550 u32 di_total;
1551 u32 di_len;
1552 int key_type = BTRFS_DIR_INDEX_KEY;
1553 char tmp_name[32];
1554 char *name_ptr;
1555 int name_len;
1556
1557 /* FIXME, use a real flag for deciding about the key type */
1558 if (root->fs_info->tree_root == root)
1559 key_type = BTRFS_DIR_ITEM_KEY;
1560
1561 /* special case for "." */
1562 if (filp->f_pos == 0) {
1563 over = filldir(dirent, ".", 1,
1564 1, inode->i_ino,
1565 DT_DIR);
1566 if (over)
1567 return 0;
1568 filp->f_pos = 1;
1569 }
1570
1571 mutex_lock(&root->fs_info->fs_mutex);
1572 key.objectid = inode->i_ino;
1573 path = btrfs_alloc_path();
1574 path->reada = 2;
1575
1576 /* special case for .., just use the back ref */
1577 if (filp->f_pos == 1) {
1578 btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);
1579 key.offset = 0;
1580 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1581 BUG_ON(ret == 0);
1582 leaf = path->nodes[0];
1583 slot = path->slots[0];
1584 nritems = btrfs_header_nritems(leaf);
1585 if (slot >= nritems) {
1586 btrfs_release_path(root, path);
1587 goto read_dir_items;
1588 }
1589 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1590 btrfs_release_path(root, path);
1591 if (found_key.objectid != key.objectid ||
1592 found_key.type != BTRFS_INODE_REF_KEY)
1593 goto read_dir_items;
1594 over = filldir(dirent, "..", 2,
1595 2, found_key.offset, DT_DIR);
1596 if (over)
1597 goto nopos;
1598 filp->f_pos = 2;
1599 }
1600
1601 read_dir_items:
1602 btrfs_set_key_type(&key, key_type);
1603 key.offset = filp->f_pos;
1604
1605 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1606 if (ret < 0)
1607 goto err;
1608 advance = 0;
1609 while(1) {
1610 leaf = path->nodes[0];
1611 nritems = btrfs_header_nritems(leaf);
1612 slot = path->slots[0];
1613 if (advance || slot >= nritems) {
1614 if (slot >= nritems - 1) {
1615 ret = btrfs_next_leaf(root, path);
1616 if (ret)
1617 break;
1618 leaf = path->nodes[0];
1619 nritems = btrfs_header_nritems(leaf);
1620 slot = path->slots[0];
1621 } else {
1622 slot++;
1623 path->slots[0]++;
1624 }
1625 }
1626 advance = 1;
1627 item = btrfs_item_nr(leaf, slot);
1628 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1629
1630 if (found_key.objectid != key.objectid)
1631 break;
1632 if (btrfs_key_type(&found_key) != key_type)
1633 break;
1634 if (found_key.offset < filp->f_pos)
1635 continue;
1636
1637 filp->f_pos = found_key.offset;
1638 advance = 1;
1639 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
1640 di_cur = 0;
1641 di_total = btrfs_item_size(leaf, item);
1642 while(di_cur < di_total) {
1643 struct btrfs_key location;
1644
1645 name_len = btrfs_dir_name_len(leaf, di);
1646 if (name_len < 32) {
1647 name_ptr = tmp_name;
1648 } else {
1649 name_ptr = kmalloc(name_len, GFP_NOFS);
1650 BUG_ON(!name_ptr);
1651 }
1652 read_extent_buffer(leaf, name_ptr,
1653 (unsigned long)(di + 1), name_len);
1654
1655 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
1656 btrfs_dir_item_key_to_cpu(leaf, di, &location);
1657 over = filldir(dirent, name_ptr, name_len,
1658 found_key.offset,
1659 location.objectid,
1660 d_type);
1661
1662 if (name_ptr != tmp_name)
1663 kfree(name_ptr);
1664
1665 if (over)
1666 goto nopos;
1667 di_len = btrfs_dir_name_len(leaf, di) +
1668 btrfs_dir_data_len(leaf, di) + sizeof(*di);
1669 di_cur += di_len;
1670 di = (struct btrfs_dir_item *)((char *)di + di_len);
1671 }
1672 }
1673 if (key_type == BTRFS_DIR_INDEX_KEY)
1674 filp->f_pos = INT_LIMIT(typeof(filp->f_pos));
1675 else
1676 filp->f_pos++;
1677 nopos:
1678 ret = 0;
1679 err:
1680 btrfs_release_path(root, path);
1681 btrfs_free_path(path);
1682 mutex_unlock(&root->fs_info->fs_mutex);
1683 return ret;
1684 }
1685
1686 int btrfs_write_inode(struct inode *inode, int wait)
1687 {
1688 struct btrfs_root *root = BTRFS_I(inode)->root;
1689 struct btrfs_trans_handle *trans;
1690 int ret = 0;
1691
1692 if (wait) {
1693 mutex_lock(&root->fs_info->fs_mutex);
1694 trans = btrfs_start_transaction(root, 1);
1695 btrfs_set_trans_block_group(trans, inode);
1696 ret = btrfs_commit_transaction(trans, root);
1697 mutex_unlock(&root->fs_info->fs_mutex);
1698 }
1699 return ret;
1700 }
1701
1702 /*
1703 * This is somewhat expensive, updating the tree every time the
1704 * inode changes. But, the update will most likely find the inode
1705 * item already in cache. FIXME: needs more benchmarking; there are no
1706 * reasons other than performance to keep or drop this code.
1707 */
1708 void btrfs_dirty_inode(struct inode *inode)
1709 {
1710 struct btrfs_root *root = BTRFS_I(inode)->root;
1711 struct btrfs_trans_handle *trans;
1712
1713 mutex_lock(&root->fs_info->fs_mutex);
1714 trans = btrfs_start_transaction(root, 1);
1715 btrfs_set_trans_block_group(trans, inode);
1716 btrfs_update_inode(trans, root, inode);
1717 btrfs_end_transaction(trans, root);
1718 mutex_unlock(&root->fs_info->fs_mutex);
1719 }
1720
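/*
 * create a new inode: insert the inode item and the first inode ref
 * (a back reference to the parent directory) in one batch, pick a
 * metadata block group for it and fill in the VFS inode fields.
 */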
1721 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
1722 struct btrfs_root *root,
1723 const char *name, int name_len,
1724 u64 ref_objectid,
1725 u64 objectid,
1726 struct btrfs_block_group_cache *group,
1727 int mode)
1728 {
1729 struct inode *inode;
1730 struct btrfs_inode_item *inode_item;
1731 struct btrfs_block_group_cache *new_inode_group;
1732 struct btrfs_key *location;
1733 struct btrfs_path *path;
1734 struct btrfs_inode_ref *ref;
1735 struct btrfs_key key[2];
1736 u32 sizes[2];
1737 unsigned long ptr;
1738 int ret;
1739 int owner;
1740
1741 path = btrfs_alloc_path();
1742 BUG_ON(!path);
1743
1744 inode = new_inode(root->fs_info->sb);
1745 if (!inode)
1746 return ERR_PTR(-ENOMEM);
1747
1748 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
1749 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
1750 inode->i_mapping, GFP_NOFS);
1751 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
1752 inode->i_mapping, GFP_NOFS);
1753 atomic_set(&BTRFS_I(inode)->ordered_writeback, 0);
1754 BTRFS_I(inode)->delalloc_bytes = 0;
1755 BTRFS_I(inode)->root = root;
1756
1757 if (mode & S_IFDIR)
1758 owner = 0;
1759 else
1760 owner = 1;
1761 new_inode_group = btrfs_find_block_group(root, group, 0,
1762 BTRFS_BLOCK_GROUP_METADATA, owner);
1763 if (!new_inode_group) {
1764 printk("find_block group failed\n");
1765 new_inode_group = group;
1766 }
1767 BTRFS_I(inode)->block_group = new_inode_group;
1768 BTRFS_I(inode)->flags = 0;
1769
1770 key[0].objectid = objectid;
1771 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
1772 key[0].offset = 0;
1773
1774 key[1].objectid = objectid;
1775 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
1776 key[1].offset = ref_objectid;
1777
1778 sizes[0] = sizeof(struct btrfs_inode_item);
1779 sizes[1] = name_len + sizeof(*ref);
1780
1781 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
1782 if (ret != 0)
1783 goto fail;
1784
1785 if (objectid > root->highest_inode)
1786 root->highest_inode = objectid;
1787
1788 inode->i_uid = current->fsuid;
1789 inode->i_gid = current->fsgid;
1790 inode->i_mode = mode;
1791 inode->i_ino = objectid;
1792 inode->i_blocks = 0;
1793 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
1794 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1795 struct btrfs_inode_item);
1796 fill_inode_item(path->nodes[0], inode_item, inode);
1797
1798 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
1799 struct btrfs_inode_ref);
1800 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
1801 ptr = (unsigned long)(ref + 1);
1802 write_extent_buffer(path->nodes[0], name, ptr, name_len);
1803
1804 btrfs_mark_buffer_dirty(path->nodes[0]);
1805 btrfs_free_path(path);
1806
1807 location = &BTRFS_I(inode)->location;
1808 location->objectid = objectid;
1809 location->offset = 0;
1810 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
1811
1812 insert_inode_hash(inode);
1813 return inode;
1814 fail:
1815 btrfs_free_path(path);
1816 return ERR_PTR(ret);
1817 }
1818
1819 static inline u8 btrfs_inode_type(struct inode *inode)
1820 {
1821 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
1822 }
1823
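/*
 * link an inode into a directory: insert the dir item (and optionally
 * an inode ref back to the parent), then bump the parent directory's
 * size and timestamps and write the parent inode back out.
 */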
1824 static int btrfs_add_link(struct btrfs_trans_handle *trans,
1825 struct dentry *dentry, struct inode *inode,
1826 int add_backref)
1827 {
1828 int ret;
1829 struct btrfs_key key;
1830 struct btrfs_root *root = BTRFS_I(dentry->d_parent->d_inode)->root;
1831 struct inode *parent_inode;
1832
1833 key.objectid = inode->i_ino;
1834 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
1835 key.offset = 0;
1836
1837 ret = btrfs_insert_dir_item(trans, root,
1838 dentry->d_name.name, dentry->d_name.len,
1839 dentry->d_parent->d_inode->i_ino,
1840 &key, btrfs_inode_type(inode));
1841 if (ret == 0) {
1842 if (add_backref) {
1843 ret = btrfs_insert_inode_ref(trans, root,
1844 dentry->d_name.name,
1845 dentry->d_name.len,
1846 inode->i_ino,
1847 dentry->d_parent->d_inode->i_ino);
1848 }
1849 parent_inode = dentry->d_parent->d_inode;
1850 parent_inode->i_size += dentry->d_name.len * 2;
1851 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
1852 ret = btrfs_update_inode(trans, root,
1853 dentry->d_parent->d_inode);
1854 }
1855 return ret;
1856 }
1857
1858 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
1859 struct dentry *dentry, struct inode *inode,
1860 int backref)
1861 {
1862 int err = btrfs_add_link(trans, dentry, inode, backref);
1863 if (!err) {
1864 d_instantiate(dentry, inode);
1865 return 0;
1866 }
1867 if (err > 0)
1868 err = -EEXIST;
1869 return err;
1870 }
1871
1872 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
1873 int mode, dev_t rdev)
1874 {
1875 struct btrfs_trans_handle *trans;
1876 struct btrfs_root *root = BTRFS_I(dir)->root;
1877 struct inode *inode = NULL;
1878 int err;
1879 int drop_inode = 0;
1880 u64 objectid;
1881 unsigned long nr = 0;
1882
1883 if (!new_valid_dev(rdev))
1884 return -EINVAL;
1885
1886 mutex_lock(&root->fs_info->fs_mutex);
1887 err = btrfs_check_free_space(root, 1, 0);
1888 if (err)
1889 goto fail;
1890
1891 trans = btrfs_start_transaction(root, 1);
1892 btrfs_set_trans_block_group(trans, dir);
1893
1894 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
1895 if (err) {
1896 err = -ENOSPC;
1897 goto out_unlock;
1898 }
1899
1900 inode = btrfs_new_inode(trans, root, dentry->d_name.name,
1901 dentry->d_name.len,
1902 dentry->d_parent->d_inode->i_ino, objectid,
1903 BTRFS_I(dir)->block_group, mode);
1904 err = PTR_ERR(inode);
1905 if (IS_ERR(inode))
1906 goto out_unlock;
1907
1908 btrfs_set_trans_block_group(trans, inode);
1909 err = btrfs_add_nondir(trans, dentry, inode, 0);
1910 if (err)
1911 drop_inode = 1;
1912 else {
1913 inode->i_op = &btrfs_special_inode_operations;
1914 init_special_inode(inode, inode->i_mode, rdev);
1915 btrfs_update_inode(trans, root, inode);
1916 }
1917 dir->i_sb->s_dirt = 1;
1918 btrfs_update_inode_block_group(trans, inode);
1919 btrfs_update_inode_block_group(trans, dir);
1920 out_unlock:
1921 nr = trans->blocks_used;
1922 btrfs_end_transaction(trans, root);
1923 fail:
1924 mutex_unlock(&root->fs_info->fs_mutex);
1925
1926 if (drop_inode) {
1927 inode_dec_link_count(inode);
1928 iput(inode);
1929 }
1930 btrfs_btree_balance_dirty(root, nr);
1931 btrfs_throttle(root);
1932 return err;
1933 }
1934
1935 static int btrfs_create(struct inode *dir, struct dentry *dentry,
1936 int mode, struct nameidata *nd)
1937 {
1938 struct btrfs_trans_handle *trans;
1939 struct btrfs_root *root = BTRFS_I(dir)->root;
1940 struct inode *inode = NULL;
1941 int err;
1942 int drop_inode = 0;
1943 unsigned long nr = 0;
1944 u64 objectid;
1945
1946 mutex_lock(&root->fs_info->fs_mutex);
1947 err = btrfs_check_free_space(root, 1, 0);
1948 if (err)
1949 goto fail;
1950 trans = btrfs_start_transaction(root, 1);
1951 btrfs_set_trans_block_group(trans, dir);
1952
1953 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
1954 if (err) {
1955 err = -ENOSPC;
1956 goto out_unlock;
1957 }
1958
1959 inode = btrfs_new_inode(trans, root, dentry->d_name.name,
1960 dentry->d_name.len,
1961 dentry->d_parent->d_inode->i_ino,
1962 objectid, BTRFS_I(dir)->block_group, mode);
1963 err = PTR_ERR(inode);
1964 if (IS_ERR(inode))
1965 goto out_unlock;
1966
1967 btrfs_set_trans_block_group(trans, inode);
1968 err = btrfs_add_nondir(trans, dentry, inode, 0);
1969 if (err)
1970 drop_inode = 1;
1971 else {
1972 inode->i_mapping->a_ops = &btrfs_aops;
1973 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
1974 inode->i_fop = &btrfs_file_operations;
1975 inode->i_op = &btrfs_file_inode_operations;
1976 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
1977 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
1978 inode->i_mapping, GFP_NOFS);
1979 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
1980 inode->i_mapping, GFP_NOFS);
1981 BTRFS_I(inode)->delalloc_bytes = 0;
1982 atomic_set(&BTRFS_I(inode)->ordered_writeback, 0);
1983 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
1984 }
1985 dir->i_sb->s_dirt = 1;
1986 btrfs_update_inode_block_group(trans, inode);
1987 btrfs_update_inode_block_group(trans, dir);
1988 out_unlock:
1989 nr = trans->blocks_used;
1990 btrfs_end_transaction(trans, root);
1991 fail:
1992 mutex_unlock(&root->fs_info->fs_mutex);
1993
1994 if (drop_inode) {
1995 inode_dec_link_count(inode);
1996 iput(inode);
1997 }
1998 btrfs_btree_balance_dirty(root, nr);
1999 btrfs_throttle(root);
2000 return err;
2001 }
2002
2003 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
2004 struct dentry *dentry)
2005 {
2006 struct btrfs_trans_handle *trans;
2007 struct btrfs_root *root = BTRFS_I(dir)->root;
2008 struct inode *inode = old_dentry->d_inode;
2009 unsigned long nr = 0;
2010 int err;
2011 int drop_inode = 0;
2012
2013 if (inode->i_nlink == 0)
2014 return -ENOENT;
2015
2016 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
2017 inode->i_nlink++;
2018 #else
2019 inc_nlink(inode);
2020 #endif
2021 mutex_lock(&root->fs_info->fs_mutex);
2022 err = btrfs_check_free_space(root, 1, 0);
2023 if (err)
2024 goto fail;
2025 trans = btrfs_start_transaction(root, 1);
2026
2027 btrfs_set_trans_block_group(trans, dir);
2028 atomic_inc(&inode->i_count);
2029 err = btrfs_add_nondir(trans, dentry, inode, 1);
2030
2031 if (err)
2032 drop_inode = 1;
2033
2034 dir->i_sb->s_dirt = 1;
2035 btrfs_update_inode_block_group(trans, dir);
2036 err = btrfs_update_inode(trans, root, inode);
2037
2038 if (err)
2039 drop_inode = 1;
2040
2041 nr = trans->blocks_used;
2042 btrfs_end_transaction(trans, root);
2043 fail:
2044 mutex_unlock(&root->fs_info->fs_mutex);
2045
2046 if (drop_inode) {
2047 inode_dec_link_count(inode);
2048 iput(inode);
2049 }
2050 btrfs_btree_balance_dirty(root, nr);
2051 btrfs_throttle(root);
2052 return err;
2053 }
2054
2055 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
2056 {
2057 struct inode *inode = NULL;
2058 struct btrfs_trans_handle *trans;
2059 struct btrfs_root *root = BTRFS_I(dir)->root;
2060 int err = 0;
2061 int drop_on_err = 0;
2062 u64 objectid = 0;
2063 unsigned long nr = 1;
2064
2065 mutex_lock(&root->fs_info->fs_mutex);
2066 err = btrfs_check_free_space(root, 1, 0);
2067 if (err)
2068 goto out_unlock;
2069
2070 trans = btrfs_start_transaction(root, 1);
2071 if (IS_ERR(trans)) {
2072 err = PTR_ERR(trans);
2073 goto out_unlock;
2074 }
2075
2076 btrfs_set_trans_block_group(trans, dir);
2077
2078 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
2079 if (err) {
2080 err = -ENOSPC;
2081 goto out_unlock;
2082 }
2083
2084 inode = btrfs_new_inode(trans, root, dentry->d_name.name,
2085 dentry->d_name.len,
2086 dentry->d_parent->d_inode->i_ino, objectid,
2087 BTRFS_I(dir)->block_group, S_IFDIR | mode);
2088 if (IS_ERR(inode)) {
2089 err = PTR_ERR(inode);
2090 goto out_fail;
2091 }
2092
2093 drop_on_err = 1;
2094 inode->i_op = &btrfs_dir_inode_operations;
2095 inode->i_fop = &btrfs_dir_file_operations;
2096 btrfs_set_trans_block_group(trans, inode);
2097
2098 inode->i_size = 0;
2099 err = btrfs_update_inode(trans, root, inode);
2100 if (err)
2101 goto out_fail;
2102
2103 err = btrfs_add_link(trans, dentry, inode, 0);
2104 if (err)
2105 goto out_fail;
2106
2107 d_instantiate(dentry, inode);
2108 drop_on_err = 0;
2109 dir->i_sb->s_dirt = 1;
2110 btrfs_update_inode_block_group(trans, inode);
2111 btrfs_update_inode_block_group(trans, dir);
2112
2113 out_fail:
2114 nr = trans->blocks_used;
2115 btrfs_end_transaction(trans, root);
2116
2117 out_unlock:
2118 mutex_unlock(&root->fs_info->fs_mutex);
2119 if (drop_on_err)
2120 iput(inode);
2121 btrfs_btree_balance_dirty(root, nr);
2122 btrfs_throttle(root);
2123 return err;
2124 }
2125
2126 static int merge_extent_mapping(struct extent_map_tree *em_tree,
2127 struct extent_map *existing,
2128 struct extent_map *em)
2129 {
2130 u64 start_diff;
2131 u64 new_end;
2132 int ret = 0;
2133 int real_blocks = existing->block_start < EXTENT_MAP_LAST_BYTE;
2134
2135 if (real_blocks && em->block_start >= EXTENT_MAP_LAST_BYTE)
2136 goto invalid;
2137
2138 if (!real_blocks && em->block_start != existing->block_start)
2139 goto invalid;
2140
2141 new_end = max(existing->start + existing->len, em->start + em->len);
2142
2143 if (existing->start >= em->start) {
2144 if (em->start + em->len < existing->start)
2145 goto invalid;
2146
2147 start_diff = existing->start - em->start;
2148 if (real_blocks && em->block_start + start_diff !=
2149 existing->block_start)
2150 goto invalid;
2151
2152 em->len = new_end - em->start;
2153
2154 remove_extent_mapping(em_tree, existing);
2155 /* drop the reference the tree was holding on the old extent */
2156 free_extent_map(existing);
2157 ret = add_extent_mapping(em_tree, em);
2158
2159 } else if (em->start > existing->start) {
2160
2161 if (existing->start + existing->len < em->start)
2162 goto invalid;
2163
2164 start_diff = em->start - existing->start;
2165 if (real_blocks && existing->block_start + start_diff !=
2166 em->block_start)
2167 goto invalid;
2168
2169 remove_extent_mapping(em_tree, existing);
2170 em->block_start = existing->block_start;
2171 em->start = existing->start;
2172 em->len = new_end - existing->start;
2173 free_extent_map(existing);
2174
2175 ret = add_extent_mapping(em_tree, em);
2176 } else {
2177 goto invalid;
2178 }
2179 return ret;
2180
2181 invalid:
2182 printk("invalid extent map merge [%Lu %Lu %Lu] [%Lu %Lu %Lu]\n",
2183 existing->start, existing->len, existing->block_start,
2184 em->start, em->len, em->block_start);
2185 return -EIO;
2186 }
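/*
 * Worked example for the merge above (illustrative numbers only, not
 * taken from real metadata): suppose the tree already holds
 * existing = {start 4096, len 8192, block_start 12288} and we try to
 * insert em = {start 0, len 8192, block_start 8192}.  The ranges
 * overlap and the disk blocks line up (8192 + 4096 == 12288), so em is
 * widened to {start 0, len 12288, block_start 8192} and replaces the
 * existing entry.  If the block offsets do not line up, or the ranges
 * do not touch at all, the merge is refused and -EIO is returned.
 */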
2187
2188 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
2189 size_t pg_offset, u64 start, u64 len,
2190 int create)
2191 {
2192 int ret;
2193 int err = 0;
2194 u64 bytenr;
2195 u64 extent_start = 0;
2196 u64 extent_end = 0;
2197 u64 objectid = inode->i_ino;
2198 u32 found_type;
2199 struct btrfs_path *path;
2200 struct btrfs_root *root = BTRFS_I(inode)->root;
2201 struct btrfs_file_extent_item *item;
2202 struct extent_buffer *leaf;
2203 struct btrfs_key found_key;
2204 struct extent_map *em = NULL;
2205 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2206 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2207 struct btrfs_trans_handle *trans = NULL;
2208
2209 path = btrfs_alloc_path();
2210 BUG_ON(!path);
2211 mutex_lock(&root->fs_info->fs_mutex);
2212
2213 again:
2214 spin_lock(&em_tree->lock);
2215 em = lookup_extent_mapping(em_tree, start, len);
2216 if (em)
2217 em->bdev = root->fs_info->fs_devices->latest_bdev;
2218 spin_unlock(&em_tree->lock);
2219
2220 if (em) {
2221 if (em->start > start || em->start + em->len <= start)
2222 free_extent_map(em);
2223 else if (em->block_start == EXTENT_MAP_INLINE && page)
2224 free_extent_map(em);
2225 else
2226 goto out;
2227 }
2228 em = alloc_extent_map(GFP_NOFS);
2229 if (!em) {
2230 err = -ENOMEM;
2231 goto out;
2232 }
2233
2234 em->start = EXTENT_MAP_HOLE;
2235 em->len = (u64)-1;
2236 em->bdev = root->fs_info->fs_devices->latest_bdev;
2237 ret = btrfs_lookup_file_extent(trans, root, path,
2238 objectid, start, trans != NULL);
2239 if (ret < 0) {
2240 err = ret;
2241 goto out;
2242 }
2243
2244 if (ret != 0) {
2245 if (path->slots[0] == 0)
2246 goto not_found;
2247 path->slots[0]--;
2248 }
2249
2250 leaf = path->nodes[0];
2251 item = btrfs_item_ptr(leaf, path->slots[0],
2252 struct btrfs_file_extent_item);
2253 /* are we inside the extent that was found? */
2254 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2255 found_type = btrfs_key_type(&found_key);
2256 if (found_key.objectid != objectid ||
2257 found_type != BTRFS_EXTENT_DATA_KEY) {
2258 goto not_found;
2259 }
2260
2261 found_type = btrfs_file_extent_type(leaf, item);
2262 extent_start = found_key.offset;
2263 if (found_type == BTRFS_FILE_EXTENT_REG) {
2264 extent_end = extent_start +
2265 btrfs_file_extent_num_bytes(leaf, item);
2266 err = 0;
2267 if (start < extent_start || start >= extent_end) {
2268 em->start = start;
2269 if (start < extent_start) {
2270 if (start + len <= extent_start)
2271 goto not_found;
2272 em->len = extent_end - extent_start;
2273 } else {
2274 em->len = len;
2275 }
2276 goto not_found_em;
2277 }
2278 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
2279 if (bytenr == 0) {
2280 em->start = extent_start;
2281 em->len = extent_end - extent_start;
2282 em->block_start = EXTENT_MAP_HOLE;
2283 goto insert;
2284 }
2285 bytenr += btrfs_file_extent_offset(leaf, item);
2286 em->block_start = bytenr;
2287 em->start = extent_start;
2288 em->len = extent_end - extent_start;
2289 goto insert;
2290 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
2291 u64 page_start;
2292 unsigned long ptr;
2293 char *map;
2294 size_t size;
2295 size_t extent_offset;
2296 size_t copy_size;
2297
2298 size = btrfs_file_extent_inline_len(leaf, btrfs_item_nr(leaf,
2299 path->slots[0]));
2300 extent_end = (extent_start + size + root->sectorsize - 1) &
2301 ~((u64)root->sectorsize - 1);
2302 if (start < extent_start || start >= extent_end) {
2303 em->start = start;
2304 if (start < extent_start) {
2305 if (start + len <= extent_start)
2306 goto not_found;
2307 em->len = extent_end - extent_start;
2308 } else {
2309 em->len = len;
2310 }
2311 goto not_found_em;
2312 }
2313 em->block_start = EXTENT_MAP_INLINE;
2314
2315 if (!page) {
2316 em->start = extent_start;
2317 em->len = size;
2318 goto out;
2319 }
2320
2321 page_start = page_offset(page) + pg_offset;
2322 extent_offset = page_start - extent_start;
2323 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
2324 size - extent_offset);
2325 em->start = extent_start + extent_offset;
2326 em->len = (copy_size + root->sectorsize - 1) &
2327 ~((u64)root->sectorsize - 1);
2328 map = kmap(page);
2329 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
2330 if (create == 0 && !PageUptodate(page)) {
2331 read_extent_buffer(leaf, map + pg_offset, ptr,
2332 copy_size);
2333 flush_dcache_page(page);
2334 } else if (create && PageUptodate(page)) {
2335 if (!trans) {
2336 kunmap(page);
2337 free_extent_map(em);
2338 em = NULL;
2339 btrfs_release_path(root, path);
2340 trans = btrfs_start_transaction(root, 1);
2341 goto again;
2342 }
2343 write_extent_buffer(leaf, map + pg_offset, ptr,
2344 copy_size);
2345 btrfs_mark_buffer_dirty(leaf);
2346 }
2347 kunmap(page);
2348 set_extent_uptodate(io_tree, em->start,
2349 extent_map_end(em) - 1, GFP_NOFS);
2350 goto insert;
2351 } else {
2352 printk("unknown found_type %d\n", found_type);
2353 WARN_ON(1);
2354 }
2355 not_found:
2356 em->start = start;
2357 em->len = len;
2358 not_found_em:
2359 em->block_start = EXTENT_MAP_HOLE;
2360 insert:
2361 btrfs_release_path(root, path);
2362 if (em->start > start || extent_map_end(em) <= start) {
2363 printk("bad extent! em: [%Lu %Lu] passed [%Lu %Lu]\n", em->start, em->len, start, len);
2364 err = -EIO;
2365 goto out;
2366 }
2367
2368 err = 0;
2369 spin_lock(&em_tree->lock);
2370 ret = add_extent_mapping(em_tree, em);
2371 /* it is possible that someone inserted the extent into the tree
2372 * while we had the lock dropped. It is also possible that
2373 * an overlapping map exists in the tree
2374 */
2375 if (ret == -EEXIST) {
2376 struct extent_map *existing;
2377 existing = lookup_extent_mapping(em_tree, start, len);
2378 if (existing && (existing->start > start ||
2379 existing->start + existing->len <= start)) {
2380 free_extent_map(existing);
2381 existing = NULL;
2382 }
2383 if (!existing) {
2384 existing = lookup_extent_mapping(em_tree, em->start,
2385 em->len);
2386 if (existing) {
2387 err = merge_extent_mapping(em_tree, existing,
2388 em);
2389 free_extent_map(existing);
2390 if (err) {
2391 free_extent_map(em);
2392 em = NULL;
2393 }
2394 } else {
2395 err = -EIO;
2396 printk("failed to insert %Lu %Lu\n",
2397 start, len);
2398 free_extent_map(em);
2399 em = NULL;
2400 }
2401 } else {
2402 free_extent_map(em);
2403 em = existing;
2404 }
2405 }
2406 spin_unlock(&em_tree->lock);
2407 out:
2408 btrfs_free_path(path);
2409 if (trans) {
2410 ret = btrfs_end_transaction(trans, root);
2411 if (!err)
2412 err = ret;
2413 }
2414 mutex_unlock(&root->fs_info->fs_mutex);
2415 if (err) {
2416 free_extent_map(em);
2417 WARN_ON(1);
2418 return ERR_PTR(err);
2419 }
2420 return em;
2421 }
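/*
 * Note on btrfs_get_extent() above: the returned extent_map carries a
 * reference and callers are expected to drop it with free_extent_map()
 * when done (the #if 0'd btrfs_get_block() below shows the pattern).
 * On a cache miss the mapping is rebuilt from the file extent item in
 * the btree and re-inserted; -EEXIST from add_extent_mapping() means
 * someone raced us while the tree lock was dropped, in which case the
 * overlapping map already in the tree is either adopted as-is or
 * merged with ours via merge_extent_mapping().
 */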
2422
2423 #if 0 /* waiting for O_DIRECT reads */
2424 static int btrfs_get_block(struct inode *inode, sector_t iblock,
2425 struct buffer_head *bh_result, int create)
2426 {
2427 struct extent_map *em;
2428 u64 start = (u64)iblock << inode->i_blkbits;
2429 struct btrfs_multi_bio *multi = NULL;
2430 struct btrfs_root *root = BTRFS_I(inode)->root;
2431 u64 len;
2432 u64 logical;
2433 u64 map_length;
2434 int ret = 0;
2435
2436 em = btrfs_get_extent(inode, NULL, 0, start, bh_result->b_size, 0);
2437
2438 if (!em || IS_ERR(em))
2439 goto out;
2440
2441 if (em->start > start || em->start + em->len <= start) {
2442 goto out;
2443 }
2444
2445 if (em->block_start == EXTENT_MAP_INLINE) {
2446 ret = -EINVAL;
2447 goto out;
2448 }
2449
2450 len = em->start + em->len - start;
2451 len = min_t(u64, len, INT_LIMIT(typeof(bh_result->b_size)));
2452
2453 if (em->block_start == EXTENT_MAP_HOLE ||
2454 em->block_start == EXTENT_MAP_DELALLOC) {
2455 bh_result->b_size = len;
2456 goto out;
2457 }
2458
2459 logical = start - em->start;
2460 logical = em->block_start + logical;
2461
2462 map_length = len;
2463 ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
2464 logical, &map_length, &multi, 0);
2465 BUG_ON(ret);
2466 bh_result->b_blocknr = multi->stripes[0].physical >> inode->i_blkbits;
2467 bh_result->b_size = min(map_length, len);
2468
2469 bh_result->b_bdev = multi->stripes[0].dev->bdev;
2470 set_buffer_mapped(bh_result);
2471 kfree(multi);
2472 out:
2473 free_extent_map(em);
2474 return ret;
2475 }
2476 #endif
2477
2478 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
2479 const struct iovec *iov, loff_t offset,
2480 unsigned long nr_segs)
2481 {
2482 return -EINVAL;
2483 #if 0
2484 struct file *file = iocb->ki_filp;
2485 struct inode *inode = file->f_mapping->host;
2486
2487 if (rw == WRITE)
2488 return -EINVAL;
2489
2490 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
2491 offset, nr_segs, btrfs_get_block, NULL);
2492 #endif
2493 }
2494
2495 static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock)
2496 {
2497 return extent_bmap(mapping, iblock, btrfs_get_extent);
2498 }
2499
2500 int btrfs_readpage(struct file *file, struct page *page)
2501 {
2502 struct extent_io_tree *tree;
2503 tree = &BTRFS_I(page->mapping->host)->io_tree;
2504 return extent_read_full_page(tree, page, btrfs_get_extent);
2505 }
2506
2507 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
2508 {
2509 struct extent_io_tree *tree;
2510
2511
2512 if (current->flags & PF_MEMALLOC) {
2513 redirty_page_for_writepage(wbc, page);
2514 unlock_page(page);
2515 return 0;
2516 }
2517 tree = &BTRFS_I(page->mapping->host)->io_tree;
2518 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
2519 }
2520
2521 static int btrfs_writepages(struct address_space *mapping,
2522 struct writeback_control *wbc)
2523 {
2524 struct extent_io_tree *tree;
2525 tree = &BTRFS_I(mapping->host)->io_tree;
2526 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
2527 }
2528
2529 static int
2530 btrfs_readpages(struct file *file, struct address_space *mapping,
2531 struct list_head *pages, unsigned nr_pages)
2532 {
2533 struct extent_io_tree *tree;
2534 tree = &BTRFS_I(mapping->host)->io_tree;
2535 return extent_readpages(tree, mapping, pages, nr_pages,
2536 btrfs_get_extent);
2537 }
2538
2539 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
2540 {
2541 struct extent_io_tree *tree;
2542 struct extent_map_tree *map;
2543 int ret;
2544
2545 tree = &BTRFS_I(page->mapping->host)->io_tree;
2546 map = &BTRFS_I(page->mapping->host)->extent_tree;
2547 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
2548 if (ret == 1) {
2549 invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
2550 ClearPagePrivate(page);
2551 set_page_private(page, 0);
2552 page_cache_release(page);
2553 }
2554 return ret;
2555 }
2556
2557 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
2558 {
2559 struct extent_io_tree *tree;
2560
2561 tree = &BTRFS_I(page->mapping->host)->io_tree;
2562 extent_invalidatepage(tree, page, offset);
2563 btrfs_releasepage(page, GFP_NOFS);
2564 if (PagePrivate(page)) {
2565 invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
2566 ClearPagePrivate(page);
2567 set_page_private(page, 0);
2568 page_cache_release(page);
2569 }
2570 }
2571
2572 /*
2573 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
2574 * called from a page fault handler when a page is first dirtied. Hence we must
2575 * be careful to check for EOF conditions here. We set the page up correctly
2576 * for a written page which means we get ENOSPC checking when writing into
2577 * holes and correct delalloc and unwritten extent mapping on filesystems that
2578 * support these features.
2579 *
2580 * We are not allowed to take the i_mutex here so we have to play games to
2581 * protect against truncate races as the page could now be beyond EOF. Because
2582 * vmtruncate() writes the inode size before removing pages, once we have the
2583 * page lock we can determine safely if the page is beyond EOF. If it is not
2584 * beyond EOF, then the page is guaranteed safe against truncation until we
2585 * unlock the page.
2586 */
2587 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
2588 {
2589 struct inode *inode = fdentry(vma->vm_file)->d_inode;
2590 struct btrfs_root *root = BTRFS_I(inode)->root;
2591 unsigned long end;
2592 loff_t size;
2593 int ret;
2594 u64 page_start;
2595
2596 mutex_lock(&root->fs_info->fs_mutex);
2597 ret = btrfs_check_free_space(root, PAGE_CACHE_SIZE, 0);
2598 mutex_unlock(&root->fs_info->fs_mutex);
2599 if (ret)
2600 goto out;
2601
2602 ret = -EINVAL;
2603
2604 lock_page(page);
2605 wait_on_page_writeback(page);
2606 size = i_size_read(inode);
2607 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2608
2609 if ((page->mapping != inode->i_mapping) ||
2610 (page_start > size)) {
2611 /* page got truncated out from underneath us */
2612 goto out_unlock;
2613 }
2614
2615 /* page is wholly or partially inside EOF */
2616 if (page_start + PAGE_CACHE_SIZE > size)
2617 end = size & ~PAGE_CACHE_MASK;
2618 else
2619 end = PAGE_CACHE_SIZE;
2620
2621 ret = btrfs_cow_one_page(inode, page, end);
2622
2623 out_unlock:
2624 unlock_page(page);
2625 out:
2626 return ret;
2627 }
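/*
 * Worked example for the EOF clamp above (illustrative numbers only):
 * with 4096 byte pages and i_size == 10240, the page at index 2 spans
 * file offsets 8192..12287, so page_start + PAGE_CACHE_SIZE > size and
 * end = size & ~PAGE_CACHE_MASK == 2048 is handed to
 * btrfs_cow_one_page() instead of a full page.
 */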
2628
2629 static void btrfs_truncate(struct inode *inode)
2630 {
2631 struct btrfs_root *root = BTRFS_I(inode)->root;
2632 int ret;
2633 struct btrfs_trans_handle *trans;
2634 unsigned long nr;
2635
2636 if (!S_ISREG(inode->i_mode))
2637 return;
2638 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
2639 return;
2640
2641 btrfs_truncate_page(inode->i_mapping, inode->i_size);
2642
2643 mutex_lock(&root->fs_info->fs_mutex);
2644 trans = btrfs_start_transaction(root, 1);
2645 btrfs_set_trans_block_group(trans, inode);
2646
2647 /* FIXME, add redo link to tree so we don't leak on crash */
2648 ret = btrfs_truncate_in_trans(trans, root, inode,
2649 BTRFS_EXTENT_DATA_KEY);
2650 btrfs_update_inode(trans, root, inode);
2651 nr = trans->blocks_used;
2652
2653 ret = btrfs_end_transaction(trans, root);
2654 BUG_ON(ret);
2655 mutex_unlock(&root->fs_info->fs_mutex);
2656 btrfs_btree_balance_dirty(root, nr);
2657 btrfs_throttle(root);
2658 }
2659
2660 static int noinline create_subvol(struct btrfs_root *root, char *name,
2661 int namelen)
2662 {
2663 struct btrfs_trans_handle *trans;
2664 struct btrfs_key key;
2665 struct btrfs_root_item root_item;
2666 struct btrfs_inode_item *inode_item;
2667 struct extent_buffer *leaf;
2668 struct btrfs_root *new_root = root;
2669 struct inode *inode;
2670 struct inode *dir;
2671 int ret;
2672 int err;
2673 u64 objectid;
2674 u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
2675 unsigned long nr = 1;
2676
2677 mutex_lock(&root->fs_info->fs_mutex);
2678 ret = btrfs_check_free_space(root, 1, 0);
2679 if (ret)
2680 goto fail_commit;
2681
2682 trans = btrfs_start_transaction(root, 1);
2683 BUG_ON(!trans);
2684
2685 ret = btrfs_find_free_objectid(trans, root->fs_info->tree_root,
2686 0, &objectid);
2687 if (ret)
2688 goto fail;
2689
2690 leaf = __btrfs_alloc_free_block(trans, root, root->leafsize,
2691 objectid, trans->transid, 0, 0,
2692 0, 0);
2693 if (IS_ERR(leaf)) {
2694 ret = PTR_ERR(leaf); goto fail; }
2695
2696 btrfs_set_header_nritems(leaf, 0);
2697 btrfs_set_header_level(leaf, 0);
2698 btrfs_set_header_bytenr(leaf, leaf->start);
2699 btrfs_set_header_generation(leaf, trans->transid);
2700 btrfs_set_header_owner(leaf, objectid);
2701
2702 write_extent_buffer(leaf, root->fs_info->fsid,
2703 (unsigned long)btrfs_header_fsid(leaf),
2704 BTRFS_FSID_SIZE);
2705 btrfs_mark_buffer_dirty(leaf);
2706
2707 inode_item = &root_item.inode;
2708 memset(inode_item, 0, sizeof(*inode_item));
2709 inode_item->generation = cpu_to_le64(1);
2710 inode_item->size = cpu_to_le64(3);
2711 inode_item->nlink = cpu_to_le32(1);
2712 inode_item->nblocks = cpu_to_le64(1);
2713 inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
2714
2715 btrfs_set_root_bytenr(&root_item, leaf->start);
2716 btrfs_set_root_level(&root_item, 0);
2717 btrfs_set_root_refs(&root_item, 1);
2718 btrfs_set_root_used(&root_item, 0);
2719
2720 memset(&root_item.drop_progress, 0, sizeof(root_item.drop_progress));
2721 root_item.drop_level = 0;
2722
2723 free_extent_buffer(leaf);
2724 leaf = NULL;
2725
2726 btrfs_set_root_dirid(&root_item, new_dirid);
2727
2728 key.objectid = objectid;
2729 key.offset = 1;
2730 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
2731 ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
2732 &root_item);
2733 if (ret)
2734 goto fail;
2735
2736 /*
2737 * insert the directory item
2738 */
2739 key.offset = (u64)-1;
2740 dir = root->fs_info->sb->s_root->d_inode;
2741 ret = btrfs_insert_dir_item(trans, root->fs_info->tree_root,
2742 name, namelen, dir->i_ino, &key,
2743 BTRFS_FT_DIR);
2744 if (ret)
2745 goto fail;
2746
2747 ret = btrfs_insert_inode_ref(trans, root->fs_info->tree_root,
2748 name, namelen, objectid,
2749 root->fs_info->sb->s_root->d_inode->i_ino);
2750 if (ret)
2751 goto fail;
2752
2753 ret = btrfs_commit_transaction(trans, root);
2754 if (ret)
2755 goto fail_commit;
2756
2757 new_root = btrfs_read_fs_root(root->fs_info, &key, name, namelen);
2758 BUG_ON(!new_root);
2759
2760 trans = btrfs_start_transaction(new_root, 1);
2761 BUG_ON(!trans);
2762
2763 inode = btrfs_new_inode(trans, new_root, "..", 2, new_dirid,
2764 new_dirid,
2765 BTRFS_I(dir)->block_group, S_IFDIR | 0700);
2766 if (IS_ERR(inode)) {
2767 ret = PTR_ERR(inode); goto fail; }
2768 inode->i_op = &btrfs_dir_inode_operations;
2769 inode->i_fop = &btrfs_dir_file_operations;
2770 new_root->inode = inode;
2771
2772 ret = btrfs_insert_inode_ref(trans, new_root, "..", 2, new_dirid,
2773 new_dirid);
2774 inode->i_nlink = 1;
2775 inode->i_size = 0;
2776 ret = btrfs_update_inode(trans, new_root, inode);
2777 if (ret)
2778 goto fail;
2779 fail:
2780 nr = trans->blocks_used;
2781 err = btrfs_commit_transaction(trans, new_root);
2782 if (err && !ret)
2783 ret = err;
2784 fail_commit:
2785 mutex_unlock(&root->fs_info->fs_mutex);
2786 btrfs_btree_balance_dirty(root, nr);
2787 btrfs_throttle(root);
2788 return ret;
2789 }
2790
2791 static int create_snapshot(struct btrfs_root *root, char *name, int namelen)
2792 {
2793 struct btrfs_pending_snapshot *pending_snapshot;
2794 struct btrfs_trans_handle *trans;
2795 int ret;
2796 int err;
2797 unsigned long nr = 0;
2798
2799 if (!root->ref_cows)
2800 return -EINVAL;
2801
2802 mutex_lock(&root->fs_info->fs_mutex);
2803 ret = btrfs_check_free_space(root, 1, 0);
2804 if (ret)
2805 goto fail_unlock;
2806
2807 pending_snapshot = kmalloc(sizeof(*pending_snapshot), GFP_NOFS);
2808 if (!pending_snapshot) {
2809 ret = -ENOMEM;
2810 goto fail_unlock;
2811 }
2812 pending_snapshot->name = kmalloc(namelen + 1, GFP_NOFS);
2813 if (!pending_snapshot->name) {
2814 ret = -ENOMEM;
2815 kfree(pending_snapshot);
2816 goto fail_unlock;
2817 }
2818 memcpy(pending_snapshot->name, name, namelen);
2819 pending_snapshot->name[namelen] = '\0';
2820 trans = btrfs_start_transaction(root, 1);
2821 BUG_ON(!trans);
2822 pending_snapshot->root = root;
2823 list_add(&pending_snapshot->list,
2824 &trans->transaction->pending_snapshots);
2825 ret = btrfs_update_inode(trans, root, root->inode);
2826 err = btrfs_commit_transaction(trans, root);
2827
2828 fail_unlock:
2829 mutex_unlock(&root->fs_info->fs_mutex);
2830 btrfs_btree_balance_dirty(root, nr);
2831 btrfs_throttle(root);
2832 return ret;
2833 }
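/*
 * Note on create_snapshot() above: the snapshot is not taken here, the
 * request is only queued on trans->transaction->pending_snapshots.
 * The actual copy of the root happens while btrfs_commit_transaction()
 * runs, so the snapshot reflects exactly the state of that commit.
 */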
2834
2835 unsigned long btrfs_force_ra(struct address_space *mapping,
2836 struct file_ra_state *ra, struct file *file,
2837 pgoff_t offset, pgoff_t last_index)
2838 {
2839 pgoff_t req_size = last_index - offset + 1;
2840
2841 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
2842 offset = page_cache_readahead(mapping, ra, file, offset, req_size);
2843 return offset;
2844 #else
2845 page_cache_sync_readahead(mapping, ra, file, offset, req_size);
2846 return offset + req_size;
2847 #endif
2848 }
2849
2850 int btrfs_defrag_file(struct file *file) {
2851 struct inode *inode = fdentry(file)->d_inode;
2852 struct btrfs_root *root = BTRFS_I(inode)->root;
2853 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2854 struct page *page;
2855 unsigned long last_index;
2856 unsigned long ra_pages = root->fs_info->bdi.ra_pages;
2857 unsigned long total_read = 0;
2858 u64 page_start;
2859 u64 page_end;
2860 unsigned long i;
2861 int ret;
2862
2863 mutex_lock(&root->fs_info->fs_mutex);
2864 ret = btrfs_check_free_space(root, inode->i_size, 0);
2865 mutex_unlock(&root->fs_info->fs_mutex);
2866 if (ret)
2867 return -ENOSPC;
2868
2869 mutex_lock(&inode->i_mutex);
2870 last_index = inode->i_size >> PAGE_CACHE_SHIFT;
2871 for (i = 0; i <= last_index; i++) {
2872 if (total_read % ra_pages == 0) {
2873 btrfs_force_ra(inode->i_mapping, &file->f_ra, file, i,
2874 min(last_index, i + ra_pages - 1));
2875 }
2876 total_read++;
2877 page = grab_cache_page(inode->i_mapping, i);
2878 if (!page)
2879 goto out_unlock;
2880 if (!PageUptodate(page)) {
2881 btrfs_readpage(NULL, page);
2882 lock_page(page);
2883 if (!PageUptodate(page)) {
2884 unlock_page(page);
2885 page_cache_release(page);
2886 goto out_unlock;
2887 }
2888 }
2889
2890 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
2891 ClearPageDirty(page);
2892 #else
2893 cancel_dirty_page(page, PAGE_CACHE_SIZE);
2894 #endif
2895 wait_on_page_writeback(page);
2896 set_page_extent_mapped(page);
2897
2898 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2899 page_end = page_start + PAGE_CACHE_SIZE - 1;
2900
2901 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2902 set_extent_delalloc(io_tree, page_start,
2903 page_end, GFP_NOFS);
2904
2905 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2906 set_page_dirty(page);
2907 unlock_page(page);
2908 page_cache_release(page);
2909 balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1);
2910 }
2911
2912 out_unlock:
2913 mutex_unlock(&inode->i_mutex);
2914 return 0;
2915 }
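/*
 * Note on btrfs_defrag_file() above: defrag works by pulling every
 * page of the file into the page cache (with readahead), marking each
 * page range delalloc and the page dirty, so the next writeback pass
 * reallocates and rewrites the data, ideally as larger contiguous
 * extents.  A failure from grab_cache_page() or readpage just stops
 * the loop early; the call still returns 0 in that case.
 */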
2916
2917 static int btrfs_ioctl_resize(struct btrfs_root *root, void __user *arg)
2918 {
2919 u64 new_size;
2920 u64 old_size;
2921 u64 devid = 1;
2922 struct btrfs_ioctl_vol_args *vol_args;
2923 struct btrfs_trans_handle *trans;
2924 struct btrfs_device *device = NULL;
2925 char *sizestr;
2926 char *devstr = NULL;
2927 int ret = 0;
2928 int namelen;
2929 int mod = 0;
2930
2931 vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
2932
2933 if (!vol_args)
2934 return -ENOMEM;
2935
2936 if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
2937 ret = -EFAULT;
2938 goto out;
2939 }
2940 namelen = strlen(vol_args->name);
2941 if (namelen > BTRFS_VOL_NAME_MAX) {
2942 ret = -EINVAL;
2943 goto out;
2944 }
2945
2946 mutex_lock(&root->fs_info->fs_mutex);
2947 sizestr = vol_args->name;
2948 devstr = strchr(sizestr, ':');
2949 if (devstr) {
2950 char *end;
2951 sizestr = devstr + 1;
2952 *devstr = '\0';
2953 devstr = vol_args->name;
2954 devid = simple_strtoull(devstr, &end, 10);
2955 printk("resizing devid %Lu\n", devid);
2956 }
2957 device = btrfs_find_device(root, devid, NULL);
2958 if (!device) {
2959 printk("resizer unable to find device %Lu\n", devid);
2960 ret = -EINVAL;
2961 goto out_unlock;
2962 }
2963 if (!strcmp(sizestr, "max"))
2964 new_size = device->bdev->bd_inode->i_size;
2965 else {
2966 if (sizestr[0] == '-') {
2967 mod = -1;
2968 sizestr++;
2969 } else if (sizestr[0] == '+') {
2970 mod = 1;
2971 sizestr++;
2972 }
2973 new_size = btrfs_parse_size(sizestr);
2974 if (new_size == 0) {
2975 ret = -EINVAL;
2976 goto out_unlock;
2977 }
2978 }
2979
2980 old_size = device->total_bytes;
2981
2982 if (mod < 0) {
2983 if (new_size > old_size) {
2984 ret = -EINVAL;
2985 goto out_unlock;
2986 }
2987 new_size = old_size - new_size;
2988 } else if (mod > 0) {
2989 new_size = old_size + new_size;
2990 }
2991
2992 if (new_size < 256 * 1024 * 1024) {
2993 ret = -EINVAL;
2994 goto out_unlock;
2995 }
2996 if (new_size > device->bdev->bd_inode->i_size) {
2997 ret = -EFBIG;
2998 goto out_unlock;
2999 }
3000
3001 do_div(new_size, root->sectorsize);
3002 new_size *= root->sectorsize;
3003
3004 printk("new size for %s is %llu\n", device->name, (unsigned long long)new_size);
3005
3006 if (new_size > old_size) {
3007 trans = btrfs_start_transaction(root, 1);
3008 ret = btrfs_grow_device(trans, device, new_size);
3009 btrfs_commit_transaction(trans, root);
3010 } else {
3011 ret = btrfs_shrink_device(device, new_size);
3012 }
3013
3014 out_unlock:
3015 mutex_unlock(&root->fs_info->fs_mutex);
3016 out:
3017 kfree(vol_args);
3018 return ret;
3019 }
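/*
 * Sketch of how userspace drives the resize ioctl above (illustrative
 * only, the descriptor and numbers are made up).  vol_args->name holds
 * an optional "devid:" prefix followed by "max", an absolute byte
 * count, or a +/- delta:
 *
 *	struct btrfs_ioctl_vol_args args;
 *	strcpy(args.name, "1:+1073741824");
 *	ioctl(fs_fd, BTRFS_IOC_RESIZE, &args);
 *
 * grows device 1 by 1GB.  The result is rounded down to a sectorsize
 * multiple; a new size below 256MB gets -EINVAL and one beyond the end
 * of the block device gets -EFBIG.
 */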
3020
3021 static int noinline btrfs_ioctl_snap_create(struct btrfs_root *root,
3022 void __user *arg)
3023 {
3024 struct btrfs_ioctl_vol_args *vol_args;
3025 struct btrfs_dir_item *di;
3026 struct btrfs_path *path;
3027 u64 root_dirid;
3028 int namelen;
3029 int ret;
3030
3031 vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
3032
3033 if (!vol_args)
3034 return -ENOMEM;
3035
3036 if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
3037 ret = -EFAULT;
3038 goto out;
3039 }
3040
3041 namelen = strlen(vol_args->name);
3042 if (namelen > BTRFS_VOL_NAME_MAX) {
3043 ret = -EINVAL;
3044 goto out;
3045 }
3046 if (strchr(vol_args->name, '/')) {
3047 ret = -EINVAL;
3048 goto out;
3049 }
3050
3051 path = btrfs_alloc_path();
3052 if (!path) {
3053 ret = -ENOMEM;
3054 goto out;
3055 }
3056
3057 root_dirid = root->fs_info->sb->s_root->d_inode->i_ino;
3058 mutex_lock(&root->fs_info->fs_mutex);
3059 di = btrfs_lookup_dir_item(NULL, root->fs_info->tree_root,
3060 path, root_dirid,
3061 vol_args->name, namelen, 0);
3062 mutex_unlock(&root->fs_info->fs_mutex);
3063 btrfs_free_path(path);
3064
3065 if (di && !IS_ERR(di)) {
3066 ret = -EEXIST;
3067 goto out;
3068 }
3069
3070 if (IS_ERR(di)) {
3071 ret = PTR_ERR(di);
3072 goto out;
3073 }
3074
3075 if (root == root->fs_info->tree_root)
3076 ret = create_subvol(root, vol_args->name, namelen);
3077 else
3078 ret = create_snapshot(root, vol_args->name, namelen);
3079 out:
3080 kfree(vol_args);
3081 return ret;
3082 }
3083
3084 static int btrfs_ioctl_defrag(struct file *file)
3085 {
3086 struct inode *inode = fdentry(file)->d_inode;
3087 struct btrfs_root *root = BTRFS_I(inode)->root;
3088
3089 switch (inode->i_mode & S_IFMT) {
3090 case S_IFDIR:
3091 mutex_lock(&root->fs_info->fs_mutex);
3092 btrfs_defrag_root(root, 0);
3093 btrfs_defrag_root(root->fs_info->extent_root, 0);
3094 mutex_unlock(&root->fs_info->fs_mutex);
3095 break;
3096 case S_IFREG:
3097 btrfs_defrag_file(file);
3098 break;
3099 }
3100
3101 return 0;
3102 }
3103
3104 long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
3105 {
3106 struct btrfs_ioctl_vol_args *vol_args;
3107 int ret;
3108
3109 vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
3110
3111 if (!vol_args)
3112 return -ENOMEM;
3113
3114 if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
3115 ret = -EFAULT;
3116 goto out;
3117 }
3118 ret = btrfs_init_new_device(root, vol_args->name);
3119
3120 out:
3121 kfree(vol_args);
3122 return ret;
3123 }
3124
3125 long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg)
3126 {
3127 struct btrfs_ioctl_vol_args *vol_args;
3128 int ret;
3129
3130 vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
3131
3132 if (!vol_args)
3133 return -ENOMEM;
3134
3135 if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
3136 ret = -EFAULT;
3137 goto out;
3138 }
3139 ret = btrfs_rm_device(root, vol_args->name);
3140
3141 out:
3142 kfree(vol_args);
3143 return ret;
3144 }
3145
3146 int dup_item_to_inode(struct btrfs_trans_handle *trans,
3147 struct btrfs_root *root,
3148 struct btrfs_path *path,
3149 struct extent_buffer *leaf,
3150 int slot,
3151 struct btrfs_key *key,
3152 u64 destino)
3153 {
3154 char *dup;
3155 int len = btrfs_item_size_nr(leaf, slot);
3156 struct btrfs_key ckey = *key;
3157 int ret = 0;
3158
3159 dup = kmalloc(len, GFP_NOFS);
3160 if (!dup)
3161 return -ENOMEM;
3162
3163 read_extent_buffer(leaf, dup, btrfs_item_ptr_offset(leaf, slot), len);
3164 btrfs_release_path(root, path);
3165
3166 ckey.objectid = destino;
3167 ret = btrfs_insert_item(trans, root, &ckey, dup, len);
3168 kfree(dup);
3169 return ret;
3170 }
3171
3172 long btrfs_ioctl_clone(struct file *file, unsigned long src_fd)
3173 {
3174 struct inode *inode = fdentry(file)->d_inode;
3175 struct btrfs_root *root = BTRFS_I(inode)->root;
3176 struct file *src_file;
3177 struct inode *src;
3178 struct btrfs_trans_handle *trans;
3179 int ret;
3180 u64 pos;
3181 struct btrfs_path *path;
3182 struct btrfs_key key;
3183 struct extent_buffer *leaf;
3184 u32 nritems;
3185 int slot;
3186
3187 src_file = fget(src_fd);
3188 if (!src_file)
3189 return -EBADF;
3190 src = src_file->f_dentry->d_inode;
3191
3192 ret = -EXDEV;
3193 if (src->i_sb != inode->i_sb)
3194 goto out_fput;
3195
3196 if (inode < src) {
3197 mutex_lock(&inode->i_mutex);
3198 mutex_lock(&src->i_mutex);
3199 } else {
3200 mutex_lock(&src->i_mutex);
3201 mutex_lock(&inode->i_mutex);
3202 }
3203
3204 ret = -ENOTEMPTY;
3205 if (inode->i_size)
3206 goto out_unlock;
3207
3208 /* do any pending delalloc/csum calc on src, one way or
3209 another, and lock file content */
3210 while (1) {
3211 filemap_write_and_wait(src->i_mapping);
3212 lock_extent(&BTRFS_I(src)->io_tree, 0, (u64)-1, GFP_NOFS);
3213 if (BTRFS_I(src)->delalloc_bytes == 0)
3214 break;
3215 unlock_extent(&BTRFS_I(src)->io_tree, 0, (u64)-1, GFP_NOFS);
3216 }
3217
3218 mutex_lock(&root->fs_info->fs_mutex);
3219 trans = btrfs_start_transaction(root, 0);
3220 path = btrfs_alloc_path();
3221 if (!path) {
3222 ret = -ENOMEM;
3223 goto out;
3224 }
3225 key.offset = 0;
3226 key.type = BTRFS_EXTENT_DATA_KEY;
3227 key.objectid = src->i_ino;
3228 pos = 0;
3229 path->reada = 2;
3230
3231 while (1) {
3232 /*
3233 * note the key will change type as we walk through the
3234 * tree.
3235 */
3236 ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
3237 if (ret < 0)
3238 goto out;
3239
3240 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
3241 ret = btrfs_next_leaf(root, path);
3242 if (ret < 0)
3243 goto out;
3244 if (ret > 0)
3245 break;
3246 }
3247 leaf = path->nodes[0];
3248 slot = path->slots[0];
3249 btrfs_item_key_to_cpu(leaf, &key, slot);
3250 nritems = btrfs_header_nritems(leaf);
3251
3252 if (btrfs_key_type(&key) > BTRFS_CSUM_ITEM_KEY ||
3253 key.objectid != src->i_ino)
3254 break;
3255
3256 if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
3257 struct btrfs_file_extent_item *extent;
3258 int found_type;
3259 pos = key.offset;
3260 extent = btrfs_item_ptr(leaf, slot,
3261 struct btrfs_file_extent_item);
3262 found_type = btrfs_file_extent_type(leaf, extent);
3263 if (found_type == BTRFS_FILE_EXTENT_REG) {
3264 u64 len = btrfs_file_extent_num_bytes(leaf,
3265 extent);
3266 u64 ds = btrfs_file_extent_disk_bytenr(leaf,
3267 extent);
3268 u64 dl = btrfs_file_extent_disk_num_bytes(leaf,
3269 extent);
3270 u64 off = btrfs_file_extent_offset(leaf,
3271 extent);
3272 btrfs_insert_file_extent(trans, root,
3273 inode->i_ino, pos,
3274 ds, dl, len, off);
3275 /* ds == 0 means there's a hole */
3276 if (ds != 0) {
3277 btrfs_inc_extent_ref(trans, root,
3278 ds, dl,
3279 root->root_key.objectid,
3280 trans->transid,
3281 inode->i_ino, pos);
3282 }
3283 pos = key.offset + len;
3284 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
3285 ret = dup_item_to_inode(trans, root, path,
3286 leaf, slot, &key,
3287 inode->i_ino);
3288 if (ret)
3289 goto out;
3290 pos = key.offset + btrfs_item_size_nr(leaf,
3291 slot);
3292 }
3293 } else if (btrfs_key_type(&key) == BTRFS_CSUM_ITEM_KEY) {
3294 ret = dup_item_to_inode(trans, root, path, leaf,
3295 slot, &key, inode->i_ino);
3296
3297 if (ret)
3298 goto out;
3299 }
3300 key.offset++;
3301 btrfs_release_path(root, path);
3302 }
3303
3304 ret = 0;
3305 out:
3306 btrfs_free_path(path);
3307
3308 inode->i_blocks = src->i_blocks;
3309 i_size_write(inode, src->i_size);
3310 btrfs_update_inode(trans, root, inode);
3311
3312 unlock_extent(&BTRFS_I(src)->io_tree, 0, (u64)-1, GFP_NOFS);
3313
3314 btrfs_end_transaction(trans, root);
3315 mutex_unlock(&root->fs_info->fs_mutex);
3316
3317 out_unlock:
3318 mutex_unlock(&src->i_mutex);
3319 mutex_unlock(&inode->i_mutex);
3320 out_fput:
3321 fput(src_file);
3322 return ret;
3323 }
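/*
 * Sketch of how userspace drives the clone ioctl above (illustrative
 * only, the descriptors and names are made up).  The destination must
 * be an empty file on the same filesystem as the source, and the
 * source file descriptor is passed directly as the ioctl argument:
 *
 *	int src_fd = open("src", O_RDONLY);
 *	int dst_fd = open("dst", O_WRONLY | O_CREAT, 0644);
 *	ioctl(dst_fd, BTRFS_IOC_CLONE, src_fd);
 *
 * The source's extent items are copied and their reference counts
 * bumped, so no file data is duplicated on disk.
 */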
3324
3325 long btrfs_ioctl(struct file *file, unsigned int
3326 cmd, unsigned long arg)
3327 {
3328 struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
3329
3330 switch (cmd) {
3331 case BTRFS_IOC_SNAP_CREATE:
3332 return btrfs_ioctl_snap_create(root, (void __user *)arg);
3333 case BTRFS_IOC_DEFRAG:
3334 return btrfs_ioctl_defrag(file);
3335 case BTRFS_IOC_RESIZE:
3336 return btrfs_ioctl_resize(root, (void __user *)arg);
3337 case BTRFS_IOC_ADD_DEV:
3338 return btrfs_ioctl_add_dev(root, (void __user *)arg);
3339 case BTRFS_IOC_RM_DEV:
3340 return btrfs_ioctl_rm_dev(root, (void __user *)arg);
3341 case BTRFS_IOC_BALANCE:
3342 return btrfs_balance(root->fs_info->dev_root);
3343 case BTRFS_IOC_CLONE:
3344 return btrfs_ioctl_clone(file, arg);
3345 }
3346
3347 return -ENOTTY;
3348 }
3349
3350 /*
3351 * Called inside transaction, so use GFP_NOFS
3352 */
3353 struct inode *btrfs_alloc_inode(struct super_block *sb)
3354 {
3355 struct btrfs_inode *ei;
3356
3357 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
3358 if (!ei)
3359 return NULL;
3360 ei->last_trans = 0;
3361 ei->ordered_trans = 0;
3362 return &ei->vfs_inode;
3363 }
3364
3365 void btrfs_destroy_inode(struct inode *inode)
3366 {
3367 WARN_ON(!list_empty(&inode->i_dentry));
3368 WARN_ON(inode->i_data.nrpages);
3369
3370 btrfs_drop_extent_cache(inode, 0, (u64)-1);
3371 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
3372 }
3373
3374 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
3375 static void init_once(struct kmem_cache * cachep, void *foo)
3376 #else
3377 static void init_once(void * foo, struct kmem_cache * cachep,
3378 unsigned long flags)
3379 #endif
3380 {
3381 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
3382
3383 inode_init_once(&ei->vfs_inode);
3384 }
3385
3386 void btrfs_destroy_cachep(void)
3387 {
3388 if (btrfs_inode_cachep)
3389 kmem_cache_destroy(btrfs_inode_cachep);
3390 if (btrfs_trans_handle_cachep)
3391 kmem_cache_destroy(btrfs_trans_handle_cachep);
3392 if (btrfs_transaction_cachep)
3393 kmem_cache_destroy(btrfs_transaction_cachep);
3394 if (btrfs_bit_radix_cachep)
3395 kmem_cache_destroy(btrfs_bit_radix_cachep);
3396 if (btrfs_path_cachep)
3397 kmem_cache_destroy(btrfs_path_cachep);
3398 }
3399
3400 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
3401 unsigned long extra_flags,
3402 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
3403 void (*ctor)(struct kmem_cache *, void *)
3404 #else
3405 void (*ctor)(void *, struct kmem_cache *,
3406 unsigned long)
3407 #endif
3408 )
3409 {
3410 return kmem_cache_create(name, size, 0, (SLAB_RECLAIM_ACCOUNT |
3411 SLAB_MEM_SPREAD | extra_flags), ctor
3412 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
3413 ,NULL
3414 #endif
3415 );
3416 }
3417
3418 int btrfs_init_cachep(void)
3419 {
3420 btrfs_inode_cachep = btrfs_cache_create("btrfs_inode_cache",
3421 sizeof(struct btrfs_inode),
3422 0, init_once);
3423 if (!btrfs_inode_cachep)
3424 goto fail;
3425 btrfs_trans_handle_cachep =
3426 btrfs_cache_create("btrfs_trans_handle_cache",
3427 sizeof(struct btrfs_trans_handle),
3428 0, NULL);
3429 if (!btrfs_trans_handle_cachep)
3430 goto fail;
3431 btrfs_transaction_cachep = btrfs_cache_create("btrfs_transaction_cache",
3432 sizeof(struct btrfs_transaction),
3433 0, NULL);
3434 if (!btrfs_transaction_cachep)
3435 goto fail;
3436 btrfs_path_cachep = btrfs_cache_create("btrfs_path_cache",
3437 sizeof(struct btrfs_path),
3438 0, NULL);
3439 if (!btrfs_path_cachep)
3440 goto fail;
3441 btrfs_bit_radix_cachep = btrfs_cache_create("btrfs_radix", 256,
3442 SLAB_DESTROY_BY_RCU, NULL);
3443 if (!btrfs_bit_radix_cachep)
3444 goto fail;
3445 return 0;
3446 fail:
3447 btrfs_destroy_cachep();
3448 return -ENOMEM;
3449 }
3450
3451 static int btrfs_getattr(struct vfsmount *mnt,
3452 struct dentry *dentry, struct kstat *stat)
3453 {
3454 struct inode *inode = dentry->d_inode;
3455 generic_fillattr(inode, stat);
3456 stat->blksize = PAGE_CACHE_SIZE;
3457 stat->blocks = inode->i_blocks + (BTRFS_I(inode)->delalloc_bytes >> 9);
3458 return 0;
3459 }
3460
3461 static int btrfs_rename(struct inode * old_dir, struct dentry *old_dentry,
3462 struct inode * new_dir,struct dentry *new_dentry)
3463 {
3464 struct btrfs_trans_handle *trans;
3465 struct btrfs_root *root = BTRFS_I(old_dir)->root;
3466 struct inode *new_inode = new_dentry->d_inode;
3467 struct inode *old_inode = old_dentry->d_inode;
3468 struct timespec ctime = CURRENT_TIME;
3469 struct btrfs_path *path;
3470 int ret;
3471
3472 if (S_ISDIR(old_inode->i_mode) && new_inode &&
3473 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
3474 return -ENOTEMPTY;
3475 }
3476
3477 mutex_lock(&root->fs_info->fs_mutex);
3478 ret = btrfs_check_free_space(root, 1, 0);
3479 if (ret)
3480 goto out_unlock;
3481
3482 trans = btrfs_start_transaction(root, 1);
3483
3484 btrfs_set_trans_block_group(trans, new_dir);
3485 path = btrfs_alloc_path();
3486 if (!path) {
3487 ret = -ENOMEM;
3488 goto out_fail;
3489 }
3490
3491 old_dentry->d_inode->i_nlink++;
3492 old_dir->i_ctime = old_dir->i_mtime = ctime;
3493 new_dir->i_ctime = new_dir->i_mtime = ctime;
3494 old_inode->i_ctime = ctime;
3495
3496 ret = btrfs_unlink_trans(trans, root, old_dir, old_dentry);
3497 if (ret)
3498 goto out_fail;
3499
3500 if (new_inode) {
3501 new_inode->i_ctime = CURRENT_TIME;
3502 ret = btrfs_unlink_trans(trans, root, new_dir, new_dentry);
3503 if (ret)
3504 goto out_fail;
3505 }
3506 ret = btrfs_add_link(trans, new_dentry, old_inode, 1);
3507 if (ret)
3508 goto out_fail;
3509
3510 out_fail:
3511 btrfs_free_path(path);
3512 btrfs_end_transaction(trans, root);
3513 out_unlock:
3514 mutex_unlock(&root->fs_info->fs_mutex);
3515 return ret;
3516 }
3517
3518 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
3519 const char *symname)
3520 {
3521 struct btrfs_trans_handle *trans;
3522 struct btrfs_root *root = BTRFS_I(dir)->root;
3523 struct btrfs_path *path;
3524 struct btrfs_key key;
3525 struct inode *inode = NULL;
3526 int err;
3527 int drop_inode = 0;
3528 u64 objectid;
3529 int name_len;
3530 int datasize;
3531 unsigned long ptr;
3532 struct btrfs_file_extent_item *ei;
3533 struct extent_buffer *leaf;
3534 unsigned long nr = 0;
3535
3536 name_len = strlen(symname) + 1;
3537 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
3538 return -ENAMETOOLONG;
3539
3540 mutex_lock(&root->fs_info->fs_mutex);
3541 err = btrfs_check_free_space(root, 1, 0);
3542 if (err)
3543 goto out_fail;
3544
3545 trans = btrfs_start_transaction(root, 1);
3546 btrfs_set_trans_block_group(trans, dir);
3547
3548 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3549 if (err) {
3550 err = -ENOSPC;
3551 goto out_unlock;
3552 }
3553
3554 inode = btrfs_new_inode(trans, root, dentry->d_name.name,
3555 dentry->d_name.len,
3556 dentry->d_parent->d_inode->i_ino, objectid,
3557 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO);
3558 err = PTR_ERR(inode);
3559 if (IS_ERR(inode))
3560 goto out_unlock;
3561
3562 btrfs_set_trans_block_group(trans, inode);
3563 err = btrfs_add_nondir(trans, dentry, inode, 0);
3564 if (err)
3565 drop_inode = 1;
3566 else {
3567 inode->i_mapping->a_ops = &btrfs_aops;
3568 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3569 inode->i_fop = &btrfs_file_operations;
3570 inode->i_op = &btrfs_file_inode_operations;
3571 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
3572 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
3573 inode->i_mapping, GFP_NOFS);
3574 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
3575 inode->i_mapping, GFP_NOFS);
3576 BTRFS_I(inode)->delalloc_bytes = 0;
3577 atomic_set(&BTRFS_I(inode)->ordered_writeback, 0);
3578 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3579 }
3580 dir->i_sb->s_dirt = 1;
3581 btrfs_update_inode_block_group(trans, inode);
3582 btrfs_update_inode_block_group(trans, dir);
3583 if (drop_inode)
3584 goto out_unlock;
3585
3586 path = btrfs_alloc_path();
3587 BUG_ON(!path);
3588 key.objectid = inode->i_ino;
3589 key.offset = 0;
3590 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
3591 datasize = btrfs_file_extent_calc_inline_size(name_len);
3592 err = btrfs_insert_empty_item(trans, root, path, &key,
3593 datasize);
3594 if (err) {
3595 drop_inode = 1;
3596 goto out_unlock;
3597 }
3598 leaf = path->nodes[0];
3599 ei = btrfs_item_ptr(leaf, path->slots[0],
3600 struct btrfs_file_extent_item);
3601 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
3602 btrfs_set_file_extent_type(leaf, ei,
3603 BTRFS_FILE_EXTENT_INLINE);
3604 ptr = btrfs_file_extent_inline_start(ei);
3605 write_extent_buffer(leaf, symname, ptr, name_len);
3606 btrfs_mark_buffer_dirty(leaf);
3607 btrfs_free_path(path);
3608
3609 inode->i_op = &btrfs_symlink_inode_operations;
3610 inode->i_mapping->a_ops = &btrfs_symlink_aops;
3611 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3612 inode->i_size = name_len - 1;
3613 err = btrfs_update_inode(trans, root, inode);
3614 if (err)
3615 drop_inode = 1;
3616
3617 out_unlock:
3618 nr = trans->blocks_used;
3619 btrfs_end_transaction(trans, root);
3620 out_fail:
3621 mutex_unlock(&root->fs_info->fs_mutex);
3622 if (drop_inode) {
3623 inode_dec_link_count(inode);
3624 iput(inode);
3625 }
3626 btrfs_btree_balance_dirty(root, nr);
3627 btrfs_throttle(root);
3628 return err;
3629 }
3630
3631 static int btrfs_permission(struct inode *inode, int mask,
3632 struct nameidata *nd)
3633 {
3634 if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE))
3635 return -EACCES;
3636 return generic_permission(inode, mask, NULL);
3637 }
3638
3639 static struct inode_operations btrfs_dir_inode_operations = {
3640 .lookup = btrfs_lookup,
3641 .create = btrfs_create,
3642 .unlink = btrfs_unlink,
3643 .link = btrfs_link,
3644 .mkdir = btrfs_mkdir,
3645 .rmdir = btrfs_rmdir,
3646 .rename = btrfs_rename,
3647 .symlink = btrfs_symlink,
3648 .setattr = btrfs_setattr,
3649 .mknod = btrfs_mknod,
3650 .setxattr = generic_setxattr,
3651 .getxattr = generic_getxattr,
3652 .listxattr = btrfs_listxattr,
3653 .removexattr = generic_removexattr,
3654 .permission = btrfs_permission,
3655 };
3656 static struct inode_operations btrfs_dir_ro_inode_operations = {
3657 .lookup = btrfs_lookup,
3658 .permission = btrfs_permission,
3659 };
3660 static struct file_operations btrfs_dir_file_operations = {
3661 .llseek = generic_file_llseek,
3662 .read = generic_read_dir,
3663 .readdir = btrfs_readdir,
3664 .unlocked_ioctl = btrfs_ioctl,
3665 #ifdef CONFIG_COMPAT
3666 .compat_ioctl = btrfs_ioctl,
3667 #endif
3668 };
3669
3670 static struct extent_io_ops btrfs_extent_io_ops = {
3671 .fill_delalloc = run_delalloc_range,
3672 .submit_bio_hook = btrfs_submit_bio_hook,
3673 .merge_bio_hook = btrfs_merge_bio_hook,
3674 .readpage_io_hook = btrfs_readpage_io_hook,
3675 .readpage_end_io_hook = btrfs_readpage_end_io_hook,
3676 .readpage_io_failed_hook = btrfs_io_failed_hook,
3677 .set_bit_hook = btrfs_set_bit_hook,
3678 .clear_bit_hook = btrfs_clear_bit_hook,
3679 };
3680
3681 static struct address_space_operations btrfs_aops = {
3682 .readpage = btrfs_readpage,
3683 .writepage = btrfs_writepage,
3684 .writepages = btrfs_writepages,
3685 .readpages = btrfs_readpages,
3686 .sync_page = block_sync_page,
3687 .bmap = btrfs_bmap,
3688 .direct_IO = btrfs_direct_IO,
3689 .invalidatepage = btrfs_invalidatepage,
3690 .releasepage = btrfs_releasepage,
3691 .set_page_dirty = __set_page_dirty_nobuffers,
3692 };
3693
3694 static struct address_space_operations btrfs_symlink_aops = {
3695 .readpage = btrfs_readpage,
3696 .writepage = btrfs_writepage,
3697 .invalidatepage = btrfs_invalidatepage,
3698 .releasepage = btrfs_releasepage,
3699 };
3700
3701 static struct inode_operations btrfs_file_inode_operations = {
3702 .truncate = btrfs_truncate,
3703 .getattr = btrfs_getattr,
3704 .setattr = btrfs_setattr,
3705 .setxattr = generic_setxattr,
3706 .getxattr = generic_getxattr,
3707 .listxattr = btrfs_listxattr,
3708 .removexattr = generic_removexattr,
3709 .permission = btrfs_permission,
3710 };
3711 static struct inode_operations btrfs_special_inode_operations = {
3712 .getattr = btrfs_getattr,
3713 .setattr = btrfs_setattr,
3714 .permission = btrfs_permission,
3715 };
3716 static struct inode_operations btrfs_symlink_inode_operations = {
3717 .readlink = generic_readlink,
3718 .follow_link = page_follow_link_light,
3719 .put_link = page_put_link,
3720 .permission = btrfs_permission,
3721 };