fs/btrfs/ioctl.c
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
23 #include <linux/fs.h>
24 #include <linux/fsnotify.h>
25 #include <linux/pagemap.h>
26 #include <linux/highmem.h>
27 #include <linux/time.h>
28 #include <linux/init.h>
29 #include <linux/string.h>
30 #include <linux/backing-dev.h>
31 #include <linux/mount.h>
32 #include <linux/mpage.h>
33 #include <linux/namei.h>
34 #include <linux/swap.h>
35 #include <linux/writeback.h>
36 #include <linux/compat.h>
37 #include <linux/bit_spinlock.h>
38 #include <linux/security.h>
39 #include <linux/xattr.h>
40 #include <linux/vmalloc.h>
41 #include <linux/slab.h>
42 #include <linux/blkdev.h>
43 #include <linux/uuid.h>
44 #include <linux/btrfs.h>
45 #include <linux/uaccess.h>
46 #include "ctree.h"
47 #include "disk-io.h"
48 #include "transaction.h"
49 #include "btrfs_inode.h"
50 #include "print-tree.h"
51 #include "volumes.h"
52 #include "locking.h"
53 #include "inode-map.h"
54 #include "backref.h"
55 #include "rcu-string.h"
56 #include "send.h"
57 #include "dev-replace.h"
58 #include "props.h"
59 #include "sysfs.h"
60 #include "qgroup.h"
61 #include "tree-log.h"
62 #include "compression.h"
63
64 #ifdef CONFIG_64BIT
65 /* If we have a 32-bit userspace and 64-bit kernel, then the UAPI
66 * structures are incorrect, as the timespec structure from userspace
67 * is 4 bytes too small. We define these alternatives here to teach
68 * the kernel about the 32-bit struct packing.
69 */
70 struct btrfs_ioctl_timespec_32 {
71 __u64 sec;
72 __u32 nsec;
73 } __attribute__ ((__packed__));
74
75 struct btrfs_ioctl_received_subvol_args_32 {
76 char uuid[BTRFS_UUID_SIZE]; /* in */
77 __u64 stransid; /* in */
78 __u64 rtransid; /* out */
79 struct btrfs_ioctl_timespec_32 stime; /* in */
80 struct btrfs_ioctl_timespec_32 rtime; /* out */
81 __u64 flags; /* in */
82 __u64 reserved[16]; /* in */
83 } __attribute__ ((__packed__));
84
85 #define BTRFS_IOC_SET_RECEIVED_SUBVOL_32 _IOWR(BTRFS_IOCTL_MAGIC, 37, \
86 struct btrfs_ioctl_received_subvol_args_32)
87 #endif
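/*
 * Note (editorial): because _IOWR() encodes the size of the argument struct,
 * the packed 32-bit layout above yields an ioctl number distinct from
 * BTRFS_IOC_SET_RECEIVED_SUBVOL, so the compat case can be routed to its own
 * handler later in this file.
 */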
88
89
90 static int btrfs_clone(struct inode *src, struct inode *inode,
91 u64 off, u64 olen, u64 olen_aligned, u64 destoff,
92 int no_time_update);
93
94 /* Mask out flags that are inappropriate for the given type of inode. */
95 static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
96 {
97 if (S_ISDIR(mode))
98 return flags;
99 else if (S_ISREG(mode))
100 return flags & ~FS_DIRSYNC_FL;
101 else
102 return flags & (FS_NODUMP_FL | FS_NOATIME_FL);
103 }
104
105 /*
106 * Export inode flags to the format expected by the FS_IOC_GETFLAGS ioctl.
107 */
108 static unsigned int btrfs_flags_to_ioctl(unsigned int flags)
109 {
110 unsigned int iflags = 0;
111
112 if (flags & BTRFS_INODE_SYNC)
113 iflags |= FS_SYNC_FL;
114 if (flags & BTRFS_INODE_IMMUTABLE)
115 iflags |= FS_IMMUTABLE_FL;
116 if (flags & BTRFS_INODE_APPEND)
117 iflags |= FS_APPEND_FL;
118 if (flags & BTRFS_INODE_NODUMP)
119 iflags |= FS_NODUMP_FL;
120 if (flags & BTRFS_INODE_NOATIME)
121 iflags |= FS_NOATIME_FL;
122 if (flags & BTRFS_INODE_DIRSYNC)
123 iflags |= FS_DIRSYNC_FL;
124 if (flags & BTRFS_INODE_NODATACOW)
125 iflags |= FS_NOCOW_FL;
126
127 if (flags & BTRFS_INODE_NOCOMPRESS)
128 iflags |= FS_NOCOMP_FL;
129 else if (flags & BTRFS_INODE_COMPRESS)
130 iflags |= FS_COMPR_FL;
131
132 return iflags;
133 }
134
135 /*
136 * Update inode->i_flags based on the btrfs internal flags.
137 */
138 void btrfs_update_iflags(struct inode *inode)
139 {
140 struct btrfs_inode *ip = BTRFS_I(inode);
141 unsigned int new_fl = 0;
142
143 if (ip->flags & BTRFS_INODE_SYNC)
144 new_fl |= S_SYNC;
145 if (ip->flags & BTRFS_INODE_IMMUTABLE)
146 new_fl |= S_IMMUTABLE;
147 if (ip->flags & BTRFS_INODE_APPEND)
148 new_fl |= S_APPEND;
149 if (ip->flags & BTRFS_INODE_NOATIME)
150 new_fl |= S_NOATIME;
151 if (ip->flags & BTRFS_INODE_DIRSYNC)
152 new_fl |= S_DIRSYNC;
153
154 set_mask_bits(&inode->i_flags,
155 S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC,
156 new_fl);
157 }
158
159 /*
160 * Inherit flags from the parent inode.
161 *
162 * Currently only the compression flags and the cow flags are inherited.
163 */
164 void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
165 {
166 unsigned int flags;
167
168 if (!dir)
169 return;
170
171 flags = BTRFS_I(dir)->flags;
172
173 if (flags & BTRFS_INODE_NOCOMPRESS) {
174 BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
175 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
176 } else if (flags & BTRFS_INODE_COMPRESS) {
177 BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
178 BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
179 }
180
181 if (flags & BTRFS_INODE_NODATACOW) {
182 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
183 if (S_ISREG(inode->i_mode))
184 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
185 }
186
187 btrfs_update_iflags(inode);
188 }
189
190 static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
191 {
192 struct btrfs_inode *ip = BTRFS_I(file_inode(file));
193 unsigned int flags = btrfs_flags_to_ioctl(ip->flags);
194
195 if (copy_to_user(arg, &flags, sizeof(flags)))
196 return -EFAULT;
197 return 0;
198 }
199
200 static int check_flags(unsigned int flags)
201 {
202 if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
203 FS_NOATIME_FL | FS_NODUMP_FL | \
204 FS_SYNC_FL | FS_DIRSYNC_FL | \
205 FS_NOCOMP_FL | FS_COMPR_FL |
206 FS_NOCOW_FL))
207 return -EOPNOTSUPP;
208
209 if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
210 return -EINVAL;
211
212 return 0;
213 }
214
215 static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
216 {
217 struct inode *inode = file_inode(file);
218 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
219 struct btrfs_inode *ip = BTRFS_I(inode);
220 struct btrfs_root *root = ip->root;
221 struct btrfs_trans_handle *trans;
222 unsigned int flags, oldflags;
223 int ret;
224 u64 ip_oldflags;
225 unsigned int i_oldflags;
226 umode_t mode;
227
228 if (!inode_owner_or_capable(inode))
229 return -EPERM;
230
231 if (btrfs_root_readonly(root))
232 return -EROFS;
233
234 if (copy_from_user(&flags, arg, sizeof(flags)))
235 return -EFAULT;
236
237 ret = check_flags(flags);
238 if (ret)
239 return ret;
240
241 ret = mnt_want_write_file(file);
242 if (ret)
243 return ret;
244
245 inode_lock(inode);
246
247 ip_oldflags = ip->flags;
248 i_oldflags = inode->i_flags;
249 mode = inode->i_mode;
250
251 flags = btrfs_mask_flags(inode->i_mode, flags);
252 oldflags = btrfs_flags_to_ioctl(ip->flags);
253 if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
254 if (!capable(CAP_LINUX_IMMUTABLE)) {
255 ret = -EPERM;
256 goto out_unlock;
257 }
258 }
259
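/* translate the FS_*_FL bits requested by userspace into btrfs inode flags */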
260 if (flags & FS_SYNC_FL)
261 ip->flags |= BTRFS_INODE_SYNC;
262 else
263 ip->flags &= ~BTRFS_INODE_SYNC;
264 if (flags & FS_IMMUTABLE_FL)
265 ip->flags |= BTRFS_INODE_IMMUTABLE;
266 else
267 ip->flags &= ~BTRFS_INODE_IMMUTABLE;
268 if (flags & FS_APPEND_FL)
269 ip->flags |= BTRFS_INODE_APPEND;
270 else
271 ip->flags &= ~BTRFS_INODE_APPEND;
272 if (flags & FS_NODUMP_FL)
273 ip->flags |= BTRFS_INODE_NODUMP;
274 else
275 ip->flags &= ~BTRFS_INODE_NODUMP;
276 if (flags & FS_NOATIME_FL)
277 ip->flags |= BTRFS_INODE_NOATIME;
278 else
279 ip->flags &= ~BTRFS_INODE_NOATIME;
280 if (flags & FS_DIRSYNC_FL)
281 ip->flags |= BTRFS_INODE_DIRSYNC;
282 else
283 ip->flags &= ~BTRFS_INODE_DIRSYNC;
284 if (flags & FS_NOCOW_FL) {
285 if (S_ISREG(mode)) {
286 /*
287 * It's safe to turn csums off here, no extents exist.
288 * Otherwise we want the flag to reflect the real COW
289 * status of the file and will not set it.
290 */
291 if (inode->i_size == 0)
292 ip->flags |= BTRFS_INODE_NODATACOW
293 | BTRFS_INODE_NODATASUM;
294 } else {
295 ip->flags |= BTRFS_INODE_NODATACOW;
296 }
297 } else {
298 /*
299 * Revert under the same assumptions as above
300 */
301 if (S_ISREG(mode)) {
302 if (inode->i_size == 0)
303 ip->flags &= ~(BTRFS_INODE_NODATACOW
304 | BTRFS_INODE_NODATASUM);
305 } else {
306 ip->flags &= ~BTRFS_INODE_NODATACOW;
307 }
308 }
309
310 /*
311 * The COMPRESS flag can only be changed by users, while the NOCOMPRESS
312 * flag may be changed automatically if compression code won't make
313 * things smaller.
314 */
315 if (flags & FS_NOCOMP_FL) {
316 ip->flags &= ~BTRFS_INODE_COMPRESS;
317 ip->flags |= BTRFS_INODE_NOCOMPRESS;
318
319 ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
320 if (ret && ret != -ENODATA)
321 goto out_drop;
322 } else if (flags & FS_COMPR_FL) {
323 const char *comp;
324
325 ip->flags |= BTRFS_INODE_COMPRESS;
326 ip->flags &= ~BTRFS_INODE_NOCOMPRESS;
327
328 if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
329 comp = "lzo";
330 else
331 comp = "zlib";
332 ret = btrfs_set_prop(inode, "btrfs.compression",
333 comp, strlen(comp), 0);
334 if (ret)
335 goto out_drop;
336
337 } else {
338 ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
339 if (ret && ret != -ENODATA)
340 goto out_drop;
341 ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
342 }
343
344 trans = btrfs_start_transaction(root, 1);
345 if (IS_ERR(trans)) {
346 ret = PTR_ERR(trans);
347 goto out_drop;
348 }
349
350 btrfs_update_iflags(inode);
351 inode_inc_iversion(inode);
352 inode->i_ctime = current_time(inode);
353 ret = btrfs_update_inode(trans, root, inode);
354
355 btrfs_end_transaction(trans);
356 out_drop:
357 if (ret) {
358 ip->flags = ip_oldflags;
359 inode->i_flags = i_oldflags;
360 }
361
362 out_unlock:
363 inode_unlock(inode);
364 mnt_drop_write_file(file);
365 return ret;
366 }
367
368 static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
369 {
370 struct inode *inode = file_inode(file);
371
372 return put_user(inode->i_generation, arg);
373 }
374
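/*
 * FITRIM handler. Userspace typically reaches this via the generic ioctl,
 * e.g. (sketch):
 *   struct fstrim_range r = { .start = 0, .len = ULLONG_MAX, .minlen = 0 };
 *   ioctl(fd, FITRIM, &r);
 */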
375 static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
376 {
377 struct inode *inode = file_inode(file);
378 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
379 struct btrfs_device *device;
380 struct request_queue *q;
381 struct fstrim_range range;
382 u64 minlen = ULLONG_MAX;
383 u64 num_devices = 0;
384 u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
385 int ret;
386
387 if (!capable(CAP_SYS_ADMIN))
388 return -EPERM;
389
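/*
 * Count the discard-capable devices and find the smallest discard
 * granularity among them.
 */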
390 rcu_read_lock();
391 list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
392 dev_list) {
393 if (!device->bdev)
394 continue;
395 q = bdev_get_queue(device->bdev);
396 if (blk_queue_discard(q)) {
397 num_devices++;
398 minlen = min_t(u64, q->limits.discard_granularity,
399 minlen);
400 }
401 }
402 rcu_read_unlock();
403
404 if (!num_devices)
405 return -EOPNOTSUPP;
406 if (copy_from_user(&range, arg, sizeof(range)))
407 return -EFAULT;
408 if (range.start > total_bytes ||
409 range.len < fs_info->sb->s_blocksize)
410 return -EINVAL;
411
412 range.len = min(range.len, total_bytes - range.start);
413 range.minlen = max(range.minlen, minlen);
414 ret = btrfs_trim_fs(fs_info, &range);
415 if (ret < 0)
416 return ret;
417
418 if (copy_to_user(arg, &range, sizeof(range)))
419 return -EFAULT;
420
421 return 0;
422 }
423
424 int btrfs_is_empty_uuid(u8 *uuid)
425 {
426 int i;
427
428 for (i = 0; i < BTRFS_UUID_SIZE; i++) {
429 if (uuid[i])
430 return 0;
431 }
432 return 1;
433 }
434
435 static noinline int create_subvol(struct inode *dir,
436 struct dentry *dentry,
437 const char *name, int namelen,
438 u64 *async_transid,
439 struct btrfs_qgroup_inherit *inherit)
440 {
441 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
442 struct btrfs_trans_handle *trans;
443 struct btrfs_key key;
444 struct btrfs_root_item *root_item;
445 struct btrfs_inode_item *inode_item;
446 struct extent_buffer *leaf;
447 struct btrfs_root *root = BTRFS_I(dir)->root;
448 struct btrfs_root *new_root;
449 struct btrfs_block_rsv block_rsv;
450 struct timespec cur_time = current_time(dir);
451 struct inode *inode;
452 int ret;
453 int err;
454 u64 objectid;
455 u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
456 u64 index = 0;
457 u64 qgroup_reserved;
458 uuid_le new_uuid;
459
460 root_item = kzalloc(sizeof(*root_item), GFP_KERNEL);
461 if (!root_item)
462 return -ENOMEM;
463
464 ret = btrfs_find_free_objectid(fs_info->tree_root, &objectid);
465 if (ret)
466 goto fail_free;
467
468 /*
469 * Don't create a subvolume whose level is not zero. Otherwise qgroup will
470 * be screwed up since it assumes a subvolume qgroup's level to be 0.
471 */
472 if (btrfs_qgroup_level(objectid)) {
473 ret = -ENOSPC;
474 goto fail_free;
475 }
476
477 btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
478 /*
479 * This is the same reservation as for snapshot creation; see the comment
480 * in create_snapshot().
481 */
482 ret = btrfs_subvolume_reserve_metadata(root, &block_rsv,
483 8, &qgroup_reserved, false);
484 if (ret)
485 goto fail_free;
486
487 trans = btrfs_start_transaction(root, 0);
488 if (IS_ERR(trans)) {
489 ret = PTR_ERR(trans);
490 btrfs_subvolume_release_metadata(fs_info, &block_rsv);
491 goto fail_free;
492 }
493 trans->block_rsv = &block_rsv;
494 trans->bytes_reserved = block_rsv.size;
495
496 ret = btrfs_qgroup_inherit(trans, fs_info, 0, objectid, inherit);
497 if (ret)
498 goto fail;
499
500 leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
501 if (IS_ERR(leaf)) {
502 ret = PTR_ERR(leaf);
503 goto fail;
504 }
505
506 memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
507 btrfs_set_header_bytenr(leaf, leaf->start);
508 btrfs_set_header_generation(leaf, trans->transid);
509 btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
510 btrfs_set_header_owner(leaf, objectid);
511
512 write_extent_buffer_fsid(leaf, fs_info->fsid);
513 write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
514 btrfs_mark_buffer_dirty(leaf);
515
516 inode_item = &root_item->inode;
517 btrfs_set_stack_inode_generation(inode_item, 1);
518 btrfs_set_stack_inode_size(inode_item, 3);
519 btrfs_set_stack_inode_nlink(inode_item, 1);
520 btrfs_set_stack_inode_nbytes(inode_item,
521 fs_info->nodesize);
522 btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
523
524 btrfs_set_root_flags(root_item, 0);
525 btrfs_set_root_limit(root_item, 0);
526 btrfs_set_stack_inode_flags(inode_item, BTRFS_INODE_ROOT_ITEM_INIT);
527
528 btrfs_set_root_bytenr(root_item, leaf->start);
529 btrfs_set_root_generation(root_item, trans->transid);
530 btrfs_set_root_level(root_item, 0);
531 btrfs_set_root_refs(root_item, 1);
532 btrfs_set_root_used(root_item, leaf->len);
533 btrfs_set_root_last_snapshot(root_item, 0);
534
535 btrfs_set_root_generation_v2(root_item,
536 btrfs_root_generation(root_item));
537 uuid_le_gen(&new_uuid);
538 memcpy(root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
539 btrfs_set_stack_timespec_sec(&root_item->otime, cur_time.tv_sec);
540 btrfs_set_stack_timespec_nsec(&root_item->otime, cur_time.tv_nsec);
541 root_item->ctime = root_item->otime;
542 btrfs_set_root_ctransid(root_item, trans->transid);
543 btrfs_set_root_otransid(root_item, trans->transid);
544
545 btrfs_tree_unlock(leaf);
546 free_extent_buffer(leaf);
547 leaf = NULL;
548
549 btrfs_set_root_dirid(root_item, new_dirid);
550
551 key.objectid = objectid;
552 key.offset = 0;
553 key.type = BTRFS_ROOT_ITEM_KEY;
554 ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
555 root_item);
556 if (ret)
557 goto fail;
558
559 key.offset = (u64)-1;
560 new_root = btrfs_read_fs_root_no_name(fs_info, &key);
561 if (IS_ERR(new_root)) {
562 ret = PTR_ERR(new_root);
563 btrfs_abort_transaction(trans, ret);
564 goto fail;
565 }
566
567 btrfs_record_root_in_trans(trans, new_root);
568
569 ret = btrfs_create_subvol_root(trans, new_root, root, new_dirid);
570 if (ret) {
571 /* We potentially lose an unused inode item here */
572 btrfs_abort_transaction(trans, ret);
573 goto fail;
574 }
575
576 mutex_lock(&new_root->objectid_mutex);
577 new_root->highest_objectid = new_dirid;
578 mutex_unlock(&new_root->objectid_mutex);
579
580 /*
581 * insert the directory item
582 */
583 ret = btrfs_set_inode_index(BTRFS_I(dir), &index);
584 if (ret) {
585 btrfs_abort_transaction(trans, ret);
586 goto fail;
587 }
588
589 ret = btrfs_insert_dir_item(trans, root,
590 name, namelen, BTRFS_I(dir), &key,
591 BTRFS_FT_DIR, index);
592 if (ret) {
593 btrfs_abort_transaction(trans, ret);
594 goto fail;
595 }
596
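/*
 * A directory's i_size in btrfs accounts each entry twice (once for the
 * DIR_ITEM and once for the DIR_INDEX item), hence namelen * 2.
 */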
597 btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2);
598 ret = btrfs_update_inode(trans, root, dir);
599 BUG_ON(ret);
600
601 ret = btrfs_add_root_ref(trans, fs_info,
602 objectid, root->root_key.objectid,
603 btrfs_ino(BTRFS_I(dir)), index, name, namelen);
604 BUG_ON(ret);
605
606 ret = btrfs_uuid_tree_add(trans, fs_info, root_item->uuid,
607 BTRFS_UUID_KEY_SUBVOL, objectid);
608 if (ret)
609 btrfs_abort_transaction(trans, ret);
610
611 fail:
612 kfree(root_item);
613 trans->block_rsv = NULL;
614 trans->bytes_reserved = 0;
615 btrfs_subvolume_release_metadata(fs_info, &block_rsv);
616
617 if (async_transid) {
618 *async_transid = trans->transid;
619 err = btrfs_commit_transaction_async(trans, 1);
620 if (err)
621 err = btrfs_commit_transaction(trans);
622 } else {
623 err = btrfs_commit_transaction(trans);
624 }
625 if (err && !ret)
626 ret = err;
627
628 if (!ret) {
629 inode = btrfs_lookup_dentry(dir, dentry);
630 if (IS_ERR(inode))
631 return PTR_ERR(inode);
632 d_instantiate(dentry, inode);
633 }
634 return ret;
635
636 fail_free:
637 kfree(root_item);
638 return ret;
639 }
640
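/*
 * Wait until every task that has bumped root->subv_writers has finished, so
 * the snapshot about to be taken does not race with writes already in flight.
 */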
641 static void btrfs_wait_for_no_snapshoting_writes(struct btrfs_root *root)
642 {
643 s64 writers;
644 DEFINE_WAIT(wait);
645
646 do {
647 prepare_to_wait(&root->subv_writers->wait, &wait,
648 TASK_UNINTERRUPTIBLE);
649
650 writers = percpu_counter_sum(&root->subv_writers->counter);
651 if (writers)
652 schedule();
653
654 finish_wait(&root->subv_writers->wait, &wait);
655 } while (writers);
656 }
657
658 static int create_snapshot(struct btrfs_root *root, struct inode *dir,
659 struct dentry *dentry,
660 u64 *async_transid, bool readonly,
661 struct btrfs_qgroup_inherit *inherit)
662 {
663 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
664 struct inode *inode;
665 struct btrfs_pending_snapshot *pending_snapshot;
666 struct btrfs_trans_handle *trans;
667 int ret;
668
669 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
670 return -EINVAL;
671
672 pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_KERNEL);
673 if (!pending_snapshot)
674 return -ENOMEM;
675
676 pending_snapshot->root_item = kzalloc(sizeof(struct btrfs_root_item),
677 GFP_KERNEL);
678 pending_snapshot->path = btrfs_alloc_path();
679 if (!pending_snapshot->root_item || !pending_snapshot->path) {
680 ret = -ENOMEM;
681 goto free_pending;
682 }
683
684 atomic_inc(&root->will_be_snapshoted);
685 smp_mb__after_atomic();
686 btrfs_wait_for_no_snapshoting_writes(root);
687
688 ret = btrfs_start_delalloc_inodes(root, 0);
689 if (ret)
690 goto dec_and_free;
691
692 btrfs_wait_ordered_extents(root, -1, 0, (u64)-1);
693
694 btrfs_init_block_rsv(&pending_snapshot->block_rsv,
695 BTRFS_BLOCK_RSV_TEMP);
696 /*
697 * 1 - parent dir inode
698 * 2 - dir entries
699 * 1 - root item
700 * 2 - root ref/backref
701 * 1 - root of snapshot
702 * 1 - UUID item
703 */
704 ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
705 &pending_snapshot->block_rsv, 8,
706 &pending_snapshot->qgroup_reserved,
707 false);
708 if (ret)
709 goto dec_and_free;
710
711 pending_snapshot->dentry = dentry;
712 pending_snapshot->root = root;
713 pending_snapshot->readonly = readonly;
714 pending_snapshot->dir = dir;
715 pending_snapshot->inherit = inherit;
716
717 trans = btrfs_start_transaction(root, 0);
718 if (IS_ERR(trans)) {
719 ret = PTR_ERR(trans);
720 goto fail;
721 }
722
723 spin_lock(&fs_info->trans_lock);
724 list_add(&pending_snapshot->list,
725 &trans->transaction->pending_snapshots);
726 spin_unlock(&fs_info->trans_lock);
727 if (async_transid) {
728 *async_transid = trans->transid;
729 ret = btrfs_commit_transaction_async(trans, 1);
730 if (ret)
731 ret = btrfs_commit_transaction(trans);
732 } else {
733 ret = btrfs_commit_transaction(trans);
734 }
735 if (ret)
736 goto fail;
737
738 ret = pending_snapshot->error;
739 if (ret)
740 goto fail;
741
742 ret = btrfs_orphan_cleanup(pending_snapshot->snap);
743 if (ret)
744 goto fail;
745
746 inode = btrfs_lookup_dentry(d_inode(dentry->d_parent), dentry);
747 if (IS_ERR(inode)) {
748 ret = PTR_ERR(inode);
749 goto fail;
750 }
751
752 d_instantiate(dentry, inode);
753 ret = 0;
754 fail:
755 btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv);
756 dec_and_free:
757 if (atomic_dec_and_test(&root->will_be_snapshoted))
758 wake_up_atomic_t(&root->will_be_snapshoted);
759 free_pending:
760 kfree(pending_snapshot->root_item);
761 btrfs_free_path(pending_snapshot->path);
762 kfree(pending_snapshot);
763
764 return ret;
765 }
766
767 /* copy of may_delete() in fs/namei.c
768 * Check whether we can remove a link victim from directory dir, check
769 * whether the type of victim is right.
770 * 1. We can't do it if dir is read-only (done in permission())
771 * 2. We should have write and exec permissions on dir
772 * 3. We can't remove anything from append-only dir
773 * 4. We can't do anything with immutable dir (done in permission())
774 * 5. If the sticky bit on dir is set we should either
775 * a. be owner of dir, or
776 * b. be owner of victim, or
777 * c. have CAP_FOWNER capability
778 * 6. If the victim is append-only or immutable we can't do anything with
779 * links pointing to it.
780 * 7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
781 * 8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
782 * 9. We can't remove a root or mountpoint.
783 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
784 * nfs_async_unlink().
785 */
786
787 static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
788 {
789 int error;
790
791 if (d_really_is_negative(victim))
792 return -ENOENT;
793
794 BUG_ON(d_inode(victim->d_parent) != dir);
795 audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
796
797 error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
798 if (error)
799 return error;
800 if (IS_APPEND(dir))
801 return -EPERM;
802 if (check_sticky(dir, d_inode(victim)) || IS_APPEND(d_inode(victim)) ||
803 IS_IMMUTABLE(d_inode(victim)) || IS_SWAPFILE(d_inode(victim)))
804 return -EPERM;
805 if (isdir) {
806 if (!d_is_dir(victim))
807 return -ENOTDIR;
808 if (IS_ROOT(victim))
809 return -EBUSY;
810 } else if (d_is_dir(victim))
811 return -EISDIR;
812 if (IS_DEADDIR(dir))
813 return -ENOENT;
814 if (victim->d_flags & DCACHE_NFSFS_RENAMED)
815 return -EBUSY;
816 return 0;
817 }
818
819 /* copy of may_create() in fs/namei.c */
820 static inline int btrfs_may_create(struct inode *dir, struct dentry *child)
821 {
822 if (d_really_is_positive(child))
823 return -EEXIST;
824 if (IS_DEADDIR(dir))
825 return -ENOENT;
826 return inode_permission(dir, MAY_WRITE | MAY_EXEC);
827 }
828
829 /*
830 * Create a new subvolume below @parent. This is largely modeled after
831 * sys_mkdirat and vfs_mkdir, but we only do a single component lookup
832 * inside this filesystem so it's quite a bit simpler.
833 */
834 static noinline int btrfs_mksubvol(const struct path *parent,
835 const char *name, int namelen,
836 struct btrfs_root *snap_src,
837 u64 *async_transid, bool readonly,
838 struct btrfs_qgroup_inherit *inherit)
839 {
840 struct inode *dir = d_inode(parent->dentry);
841 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
842 struct dentry *dentry;
843 int error;
844
845 error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
846 if (error == -EINTR)
847 return error;
848
849 dentry = lookup_one_len(name, parent->dentry, namelen);
850 error = PTR_ERR(dentry);
851 if (IS_ERR(dentry))
852 goto out_unlock;
853
854 error = btrfs_may_create(dir, dentry);
855 if (error)
856 goto out_dput;
857
858 /*
859 * even if this name doesn't exist, we may get hash collisions.
860 * check for them now when we can safely fail
861 */
862 error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
863 dir->i_ino, name,
864 namelen);
865 if (error)
866 goto out_dput;
867
868 down_read(&fs_info->subvol_sem);
869
870 if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
871 goto out_up_read;
872
873 if (snap_src) {
874 error = create_snapshot(snap_src, dir, dentry,
875 async_transid, readonly, inherit);
876 } else {
877 error = create_subvol(dir, dentry, name, namelen,
878 async_transid, inherit);
879 }
880 if (!error)
881 fsnotify_mkdir(dir, dentry);
882 out_up_read:
883 up_read(&fs_info->subvol_sem);
884 out_dput:
885 dput(dentry);
886 out_unlock:
887 inode_unlock(dir);
888 return error;
889 }
890
891 /*
892 * When we're defragging a range, we don't want to kick it off again
893 * if it is really just waiting for delalloc to send it down.
894 * If we find a nice big extent or delalloc range for the bytes in the
895 * file you want to defrag, we return 0 to let you know to skip this
896 * part of the file.
897 */
898 static int check_defrag_in_cache(struct inode *inode, u64 offset, u32 thresh)
899 {
900 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
901 struct extent_map *em = NULL;
902 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
903 u64 end;
904
905 read_lock(&em_tree->lock);
906 em = lookup_extent_mapping(em_tree, offset, PAGE_SIZE);
907 read_unlock(&em_tree->lock);
908
909 if (em) {
910 end = extent_map_end(em);
911 free_extent_map(em);
912 if (end - offset > thresh)
913 return 0;
914 }
915 /* if we already have a nice delalloc here, just stop */
916 thresh /= 2;
917 end = count_range_bits(io_tree, &offset, offset + thresh,
918 thresh, EXTENT_DELALLOC, 1);
919 if (end >= thresh)
920 return 0;
921 return 1;
922 }
923
924 /*
925 * helper function to walk through a file and find extents
926 * newer than a specific transid, and smaller than thresh.
927 *
928 * This is used by the defragging code to find new and small
929 * extents
930 */
931 static int find_new_extents(struct btrfs_root *root,
932 struct inode *inode, u64 newer_than,
933 u64 *off, u32 thresh)
934 {
935 struct btrfs_path *path;
936 struct btrfs_key min_key;
937 struct extent_buffer *leaf;
938 struct btrfs_file_extent_item *extent;
939 int type;
940 int ret;
941 u64 ino = btrfs_ino(BTRFS_I(inode));
942
943 path = btrfs_alloc_path();
944 if (!path)
945 return -ENOMEM;
946
947 min_key.objectid = ino;
948 min_key.type = BTRFS_EXTENT_DATA_KEY;
949 min_key.offset = *off;
950
951 while (1) {
952 ret = btrfs_search_forward(root, &min_key, path, newer_than);
953 if (ret != 0)
954 goto none;
955 process_slot:
956 if (min_key.objectid != ino)
957 goto none;
958 if (min_key.type != BTRFS_EXTENT_DATA_KEY)
959 goto none;
960
961 leaf = path->nodes[0];
962 extent = btrfs_item_ptr(leaf, path->slots[0],
963 struct btrfs_file_extent_item);
964
965 type = btrfs_file_extent_type(leaf, extent);
966 if (type == BTRFS_FILE_EXTENT_REG &&
967 btrfs_file_extent_num_bytes(leaf, extent) < thresh &&
968 check_defrag_in_cache(inode, min_key.offset, thresh)) {
969 *off = min_key.offset;
970 btrfs_free_path(path);
971 return 0;
972 }
973
974 path->slots[0]++;
975 if (path->slots[0] < btrfs_header_nritems(leaf)) {
976 btrfs_item_key_to_cpu(leaf, &min_key, path->slots[0]);
977 goto process_slot;
978 }
979
980 if (min_key.offset == (u64)-1)
981 goto none;
982
983 min_key.offset++;
984 btrfs_release_path(path);
985 }
986 none:
987 btrfs_free_path(path);
988 return -ENOENT;
989 }
990
991 static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
992 {
993 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
994 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
995 struct extent_map *em;
996 u64 len = PAGE_SIZE;
997
998 /*
999 * hopefully we have this extent in the tree already, try without
1000 * the full extent lock
1001 */
1002 read_lock(&em_tree->lock);
1003 em = lookup_extent_mapping(em_tree, start, len);
1004 read_unlock(&em_tree->lock);
1005
1006 if (!em) {
1007 struct extent_state *cached = NULL;
1008 u64 end = start + len - 1;
1009
1010 /* get the big lock and read metadata off disk */
1011 lock_extent_bits(io_tree, start, end, &cached);
1012 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0);
1013 unlock_extent_cached(io_tree, start, end, &cached, GFP_NOFS);
1014
1015 if (IS_ERR(em))
1016 return NULL;
1017 }
1018
1019 return em;
1020 }
1021
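/*
 * Check whether the extent following @em makes it worth defragging: not if
 * @em is the last extent, the next mapping is a hole or inline extent, or
 * both extents are already larger than 128k and physically contiguous.
 */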
1022 static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
1023 {
1024 struct extent_map *next;
1025 bool ret = true;
1026
1027 /* this is the last extent */
1028 if (em->start + em->len >= i_size_read(inode))
1029 return false;
1030
1031 next = defrag_lookup_extent(inode, em->start + em->len);
1032 if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
1033 ret = false;
1034 else if ((em->block_start + em->block_len == next->block_start) &&
1035 (em->block_len > SZ_128K && next->block_len > SZ_128K))
1036 ret = false;
1037
1038 free_extent_map(next);
1039 return ret;
1040 }
1041
1042 static int should_defrag_range(struct inode *inode, u64 start, u32 thresh,
1043 u64 *last_len, u64 *skip, u64 *defrag_end,
1044 int compress)
1045 {
1046 struct extent_map *em;
1047 int ret = 1;
1048 bool next_mergeable = true;
1049 bool prev_mergeable = true;
1050
1051 /*
1052 * make sure that once we start defragging an extent, we keep on
1053 * defragging it
1054 */
1055 if (start < *defrag_end)
1056 return 1;
1057
1058 *skip = 0;
1059
1060 em = defrag_lookup_extent(inode, start);
1061 if (!em)
1062 return 0;
1063
1064 /* this will cover holes, and inline extents */
1065 if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
1066 ret = 0;
1067 goto out;
1068 }
1069
1070 if (!*defrag_end)
1071 prev_mergeable = false;
1072
1073 next_mergeable = defrag_check_next_extent(inode, em);
1074 /*
1075 * we hit a real extent, if it is big or the next extent is not a
1076 * real extent, don't bother defragging it
1077 */
1078 if (!compress && (*last_len == 0 || *last_len >= thresh) &&
1079 (em->len >= thresh || (!next_mergeable && !prev_mergeable)))
1080 ret = 0;
1081 out:
1082 /*
1083 * last_len ends up being a counter of how many bytes we've defragged.
1084 * every time we choose not to defrag an extent, we reset *last_len
1085 * so that the next tiny extent will force a defrag.
1086 *
1087 * The end result of this is that tiny extents before a single big
1088 * extent will force at least part of that big extent to be defragged.
1089 */
1090 if (ret) {
1091 *defrag_end = extent_map_end(em);
1092 } else {
1093 *last_len = 0;
1094 *skip = extent_map_end(em);
1095 *defrag_end = 0;
1096 }
1097
1098 free_extent_map(em);
1099 return ret;
1100 }
1101
1102 /*
1103 * it doesn't do much good to defrag one or two pages
1104 * at a time. This pulls in a nice chunk of pages
1105 * to COW and defrag.
1106 *
1107 * It also makes sure the delalloc code has enough
1108 * dirty data to avoid making new small extents as part
1109 * of the defrag
1110 *
1111 * It's a good idea to start RA on this range
1112 * before calling this.
1113 */
1114 static int cluster_pages_for_defrag(struct inode *inode,
1115 struct page **pages,
1116 unsigned long start_index,
1117 unsigned long num_pages)
1118 {
1119 unsigned long file_end;
1120 u64 isize = i_size_read(inode);
1121 u64 page_start;
1122 u64 page_end;
1123 u64 page_cnt;
1124 int ret;
1125 int i;
1126 int i_done;
1127 struct btrfs_ordered_extent *ordered;
1128 struct extent_state *cached_state = NULL;
1129 struct extent_io_tree *tree;
1130 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
1131
1132 file_end = (isize - 1) >> PAGE_SHIFT;
1133 if (!isize || start_index > file_end)
1134 return 0;
1135
1136 page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
1137
1138 ret = btrfs_delalloc_reserve_space(inode,
1139 start_index << PAGE_SHIFT,
1140 page_cnt << PAGE_SHIFT);
1141 if (ret)
1142 return ret;
1143 i_done = 0;
1144 tree = &BTRFS_I(inode)->io_tree;
1145
1146 /* step one, lock all the pages */
1147 for (i = 0; i < page_cnt; i++) {
1148 struct page *page;
1149 again:
1150 page = find_or_create_page(inode->i_mapping,
1151 start_index + i, mask);
1152 if (!page)
1153 break;
1154
1155 page_start = page_offset(page);
1156 page_end = page_start + PAGE_SIZE - 1;
1157 while (1) {
1158 lock_extent_bits(tree, page_start, page_end,
1159 &cached_state);
1160 ordered = btrfs_lookup_ordered_extent(inode,
1161 page_start);
1162 unlock_extent_cached(tree, page_start, page_end,
1163 &cached_state, GFP_NOFS);
1164 if (!ordered)
1165 break;
1166
1167 unlock_page(page);
1168 btrfs_start_ordered_extent(inode, ordered, 1);
1169 btrfs_put_ordered_extent(ordered);
1170 lock_page(page);
1171 /*
1172 * we unlocked the page above, so we need to check if
1173 * it was released or not.
1174 */
1175 if (page->mapping != inode->i_mapping) {
1176 unlock_page(page);
1177 put_page(page);
1178 goto again;
1179 }
1180 }
1181
1182 if (!PageUptodate(page)) {
1183 btrfs_readpage(NULL, page);
1184 lock_page(page);
1185 if (!PageUptodate(page)) {
1186 unlock_page(page);
1187 put_page(page);
1188 ret = -EIO;
1189 break;
1190 }
1191 }
1192
1193 if (page->mapping != inode->i_mapping) {
1194 unlock_page(page);
1195 put_page(page);
1196 goto again;
1197 }
1198
1199 pages[i] = page;
1200 i_done++;
1201 }
1202 if (!i_done || ret)
1203 goto out;
1204
1205 if (!(inode->i_sb->s_flags & MS_ACTIVE))
1206 goto out;
1207
1208 /*
1209 * so now we have a nice long stream of locked
1210 * and up to date pages, lets wait on them
1211 */
1212 for (i = 0; i < i_done; i++)
1213 wait_on_page_writeback(pages[i]);
1214
1215 page_start = page_offset(pages[0]);
1216 page_end = page_offset(pages[i_done - 1]) + PAGE_SIZE;
1217
1218 lock_extent_bits(&BTRFS_I(inode)->io_tree,
1219 page_start, page_end - 1, &cached_state);
1220 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
1221 page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
1222 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0,
1223 &cached_state, GFP_NOFS);
1224
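/*
 * We reserved delalloc space for page_cnt pages but only locked i_done of
 * them: account an extra outstanding extent and return the reservation for
 * the pages we did not touch.
 */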
1225 if (i_done != page_cnt) {
1226 spin_lock(&BTRFS_I(inode)->lock);
1227 BTRFS_I(inode)->outstanding_extents++;
1228 spin_unlock(&BTRFS_I(inode)->lock);
1229 btrfs_delalloc_release_space(inode,
1230 start_index << PAGE_SHIFT,
1231 (page_cnt - i_done) << PAGE_SHIFT);
1232 }
1233
1234
1235 set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1,
1236 &cached_state);
1237
1238 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1239 page_start, page_end - 1, &cached_state,
1240 GFP_NOFS);
1241
1242 for (i = 0; i < i_done; i++) {
1243 clear_page_dirty_for_io(pages[i]);
1244 ClearPageChecked(pages[i]);
1245 set_page_extent_mapped(pages[i]);
1246 set_page_dirty(pages[i]);
1247 unlock_page(pages[i]);
1248 put_page(pages[i]);
1249 }
1250 return i_done;
1251 out:
1252 for (i = 0; i < i_done; i++) {
1253 unlock_page(pages[i]);
1254 put_page(pages[i]);
1255 }
1256 btrfs_delalloc_release_space(inode,
1257 start_index << PAGE_SHIFT,
1258 page_cnt << PAGE_SHIFT);
1259 return ret;
1260
1261 }
1262
1263 int btrfs_defrag_file(struct inode *inode, struct file *file,
1264 struct btrfs_ioctl_defrag_range_args *range,
1265 u64 newer_than, unsigned long max_to_defrag)
1266 {
1267 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1268 struct btrfs_root *root = BTRFS_I(inode)->root;
1269 struct file_ra_state *ra = NULL;
1270 unsigned long last_index;
1271 u64 isize = i_size_read(inode);
1272 u64 last_len = 0;
1273 u64 skip = 0;
1274 u64 defrag_end = 0;
1275 u64 newer_off = range->start;
1276 unsigned long i;
1277 unsigned long ra_index = 0;
1278 int ret;
1279 int defrag_count = 0;
1280 int compress_type = BTRFS_COMPRESS_ZLIB;
1281 u32 extent_thresh = range->extent_thresh;
1282 unsigned long max_cluster = SZ_256K >> PAGE_SHIFT;
1283 unsigned long cluster = max_cluster;
1284 u64 new_align = ~((u64)SZ_128K - 1);
1285 struct page **pages = NULL;
1286
1287 if (isize == 0)
1288 return 0;
1289
1290 if (range->start >= isize)
1291 return -EINVAL;
1292
1293 if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) {
1294 if (range->compress_type > BTRFS_COMPRESS_TYPES)
1295 return -EINVAL;
1296 if (range->compress_type)
1297 compress_type = range->compress_type;
1298 }
1299
1300 if (extent_thresh == 0)
1301 extent_thresh = SZ_256K;
1302
1303 /*
1304 * if we were not given a file, allocate a readahead
1305 * context
1306 */
1307 if (!file) {
1308 ra = kzalloc(sizeof(*ra), GFP_NOFS);
1309 if (!ra)
1310 return -ENOMEM;
1311 file_ra_state_init(ra, inode->i_mapping);
1312 } else {
1313 ra = &file->f_ra;
1314 }
1315
1316 pages = kmalloc_array(max_cluster, sizeof(struct page *),
1317 GFP_NOFS);
1318 if (!pages) {
1319 ret = -ENOMEM;
1320 goto out_ra;
1321 }
1322
1323 /* find the last page to defrag */
1324 if (range->start + range->len > range->start) {
1325 last_index = min_t(u64, isize - 1,
1326 range->start + range->len - 1) >> PAGE_SHIFT;
1327 } else {
1328 last_index = (isize - 1) >> PAGE_SHIFT;
1329 }
1330
1331 if (newer_than) {
1332 ret = find_new_extents(root, inode, newer_than,
1333 &newer_off, SZ_64K);
1334 if (!ret) {
1335 range->start = newer_off;
1336 /*
1337 * we always align our defrag to help keep
1338 * the extents in the file evenly spaced
1339 */
1340 i = (newer_off & new_align) >> PAGE_SHIFT;
1341 } else
1342 goto out_ra;
1343 } else {
1344 i = range->start >> PAGE_SHIFT;
1345 }
1346 if (!max_to_defrag)
1347 max_to_defrag = last_index - i + 1;
1348
1349 /*
1350 * make writeback start from i, so the defrag range can be
1351 * written sequentially.
1352 */
1353 if (i < inode->i_mapping->writeback_index)
1354 inode->i_mapping->writeback_index = i;
1355
1356 while (i <= last_index && defrag_count < max_to_defrag &&
1357 (i < DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE))) {
1358 /*
1359 * make sure we stop running if someone unmounts
1360 * the FS
1361 */
1362 if (!(inode->i_sb->s_flags & MS_ACTIVE))
1363 break;
1364
1365 if (btrfs_defrag_cancelled(fs_info)) {
1366 btrfs_debug(fs_info, "defrag_file cancelled");
1367 ret = -EAGAIN;
1368 break;
1369 }
1370
1371 if (!should_defrag_range(inode, (u64)i << PAGE_SHIFT,
1372 extent_thresh, &last_len, &skip,
1373 &defrag_end, range->flags &
1374 BTRFS_DEFRAG_RANGE_COMPRESS)) {
1375 unsigned long next;
1376 /*
1377 * the should_defrag function tells us how much to skip;
1378 * bump our counter by the suggested amount
1379 */
1380 next = DIV_ROUND_UP(skip, PAGE_SIZE);
1381 i = max(i + 1, next);
1382 continue;
1383 }
1384
1385 if (!newer_than) {
1386 cluster = (PAGE_ALIGN(defrag_end) >>
1387 PAGE_SHIFT) - i;
1388 cluster = min(cluster, max_cluster);
1389 } else {
1390 cluster = max_cluster;
1391 }
1392
1393 if (i + cluster > ra_index) {
1394 ra_index = max(i, ra_index);
1395 btrfs_force_ra(inode->i_mapping, ra, file, ra_index,
1396 cluster);
1397 ra_index += cluster;
1398 }
1399
1400 inode_lock(inode);
1401 if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)
1402 BTRFS_I(inode)->force_compress = compress_type;
1403 ret = cluster_pages_for_defrag(inode, pages, i, cluster);
1404 if (ret < 0) {
1405 inode_unlock(inode);
1406 goto out_ra;
1407 }
1408
1409 defrag_count += ret;
1410 balance_dirty_pages_ratelimited(inode->i_mapping);
1411 inode_unlock(inode);
1412
1413 if (newer_than) {
1414 if (newer_off == (u64)-1)
1415 break;
1416
1417 if (ret > 0)
1418 i += ret;
1419
1420 newer_off = max(newer_off + 1,
1421 (u64)i << PAGE_SHIFT);
1422
1423 ret = find_new_extents(root, inode, newer_than,
1424 &newer_off, SZ_64K);
1425 if (!ret) {
1426 range->start = newer_off;
1427 i = (newer_off & new_align) >> PAGE_SHIFT;
1428 } else {
1429 break;
1430 }
1431 } else {
1432 if (ret > 0) {
1433 i += ret;
1434 last_len += ret << PAGE_SHIFT;
1435 } else {
1436 i++;
1437 last_len = 0;
1438 }
1439 }
1440 }
1441
1442 if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) {
1443 filemap_flush(inode->i_mapping);
1444 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1445 &BTRFS_I(inode)->runtime_flags))
1446 filemap_flush(inode->i_mapping);
1447 }
1448
1449 if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
1450 /* the filemap_flush will queue IO into the worker threads, but
1451 * we have to make sure the IO is actually started and that
1452 * ordered extents get created before we return
1453 */
1454 atomic_inc(&fs_info->async_submit_draining);
1455 while (atomic_read(&fs_info->nr_async_submits) ||
1456 atomic_read(&fs_info->async_delalloc_pages)) {
1457 wait_event(fs_info->async_submit_wait,
1458 (atomic_read(&fs_info->nr_async_submits) == 0 &&
1459 atomic_read(&fs_info->async_delalloc_pages) == 0));
1460 }
1461 atomic_dec(&fs_info->async_submit_draining);
1462 }
1463
1464 if (range->compress_type == BTRFS_COMPRESS_LZO) {
1465 btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
1466 }
1467
1468 ret = defrag_count;
1469
1470 out_ra:
1471 if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) {
1472 inode_lock(inode);
1473 BTRFS_I(inode)->force_compress = BTRFS_COMPRESS_NONE;
1474 inode_unlock(inode);
1475 }
1476 if (!file)
1477 kfree(ra);
1478 kfree(pages);
1479 return ret;
1480 }
1481
1482 static noinline int btrfs_ioctl_resize(struct file *file,
1483 void __user *arg)
1484 {
1485 struct inode *inode = file_inode(file);
1486 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1487 u64 new_size;
1488 u64 old_size;
1489 u64 devid = 1;
1490 struct btrfs_root *root = BTRFS_I(inode)->root;
1491 struct btrfs_ioctl_vol_args *vol_args;
1492 struct btrfs_trans_handle *trans;
1493 struct btrfs_device *device = NULL;
1494 char *sizestr;
1495 char *retptr;
1496 char *devstr = NULL;
1497 int ret = 0;
1498 int mod = 0;
1499
1500 if (!capable(CAP_SYS_ADMIN))
1501 return -EPERM;
1502
1503 ret = mnt_want_write_file(file);
1504 if (ret)
1505 return ret;
1506
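/*
 * Only one exclusive device operation (add/remove/replace/resize/balance)
 * may run at a time.
 */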
1507 if (atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
1508 mnt_drop_write_file(file);
1509 return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
1510 }
1511
1512 mutex_lock(&fs_info->volume_mutex);
1513 vol_args = memdup_user(arg, sizeof(*vol_args));
1514 if (IS_ERR(vol_args)) {
1515 ret = PTR_ERR(vol_args);
1516 goto out;
1517 }
1518
1519 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
1520
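/* vol_args->name holds either "<size>" or "<devid>:<size>" */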
1521 sizestr = vol_args->name;
1522 devstr = strchr(sizestr, ':');
1523 if (devstr) {
1524 sizestr = devstr + 1;
1525 *devstr = '\0';
1526 devstr = vol_args->name;
1527 ret = kstrtoull(devstr, 10, &devid);
1528 if (ret)
1529 goto out_free;
1530 if (!devid) {
1531 ret = -EINVAL;
1532 goto out_free;
1533 }
1534 btrfs_info(fs_info, "resizing devid %llu", devid);
1535 }
1536
1537 device = btrfs_find_device(fs_info, devid, NULL, NULL);
1538 if (!device) {
1539 btrfs_info(fs_info, "resizer unable to find device %llu",
1540 devid);
1541 ret = -ENODEV;
1542 goto out_free;
1543 }
1544
1545 if (!device->writeable) {
1546 btrfs_info(fs_info,
1547 "resizer unable to apply on readonly device %llu",
1548 devid);
1549 ret = -EPERM;
1550 goto out_free;
1551 }
1552
1553 if (!strcmp(sizestr, "max"))
1554 new_size = device->bdev->bd_inode->i_size;
1555 else {
1556 if (sizestr[0] == '-') {
1557 mod = -1;
1558 sizestr++;
1559 } else if (sizestr[0] == '+') {
1560 mod = 1;
1561 sizestr++;
1562 }
1563 new_size = memparse(sizestr, &retptr);
1564 if (*retptr != '\0' || new_size == 0) {
1565 ret = -EINVAL;
1566 goto out_free;
1567 }
1568 }
1569
1570 if (device->is_tgtdev_for_dev_replace) {
1571 ret = -EPERM;
1572 goto out_free;
1573 }
1574
1575 old_size = btrfs_device_get_total_bytes(device);
1576
1577 if (mod < 0) {
1578 if (new_size > old_size) {
1579 ret = -EINVAL;
1580 goto out_free;
1581 }
1582 new_size = old_size - new_size;
1583 } else if (mod > 0) {
1584 if (new_size > ULLONG_MAX - old_size) {
1585 ret = -ERANGE;
1586 goto out_free;
1587 }
1588 new_size = old_size + new_size;
1589 }
1590
1591 if (new_size < SZ_256M) {
1592 ret = -EINVAL;
1593 goto out_free;
1594 }
1595 if (new_size > device->bdev->bd_inode->i_size) {
1596 ret = -EFBIG;
1597 goto out_free;
1598 }
1599
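/* round the new size down to a multiple of the sector size */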
1600 new_size = div_u64(new_size, fs_info->sectorsize);
1601 new_size *= fs_info->sectorsize;
1602
1603 btrfs_info_in_rcu(fs_info, "new size for %s is %llu",
1604 rcu_str_deref(device->name), new_size);
1605
1606 if (new_size > old_size) {
1607 trans = btrfs_start_transaction(root, 0);
1608 if (IS_ERR(trans)) {
1609 ret = PTR_ERR(trans);
1610 goto out_free;
1611 }
1612 ret = btrfs_grow_device(trans, device, new_size);
1613 btrfs_commit_transaction(trans);
1614 } else if (new_size < old_size) {
1615 ret = btrfs_shrink_device(device, new_size);
1616 } /* equal, nothing need to do */
1617
1618 out_free:
1619 kfree(vol_args);
1620 out:
1621 mutex_unlock(&fs_info->volume_mutex);
1622 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
1623 mnt_drop_write_file(file);
1624 return ret;
1625 }
1626
1627 static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
1628 const char *name, unsigned long fd, int subvol,
1629 u64 *transid, bool readonly,
1630 struct btrfs_qgroup_inherit *inherit)
1631 {
1632 int namelen;
1633 int ret = 0;
1634
1635 if (!S_ISDIR(file_inode(file)->i_mode))
1636 return -ENOTDIR;
1637
1638 ret = mnt_want_write_file(file);
1639 if (ret)
1640 goto out;
1641
1642 namelen = strlen(name);
1643 if (strchr(name, '/')) {
1644 ret = -EINVAL;
1645 goto out_drop_write;
1646 }
1647
1648 if (name[0] == '.' &&
1649 (namelen == 1 || (name[1] == '.' && namelen == 2))) {
1650 ret = -EEXIST;
1651 goto out_drop_write;
1652 }
1653
1654 if (subvol) {
1655 ret = btrfs_mksubvol(&file->f_path, name, namelen,
1656 NULL, transid, readonly, inherit);
1657 } else {
1658 struct fd src = fdget(fd);
1659 struct inode *src_inode;
1660 if (!src.file) {
1661 ret = -EINVAL;
1662 goto out_drop_write;
1663 }
1664
1665 src_inode = file_inode(src.file);
1666 if (src_inode->i_sb != file_inode(file)->i_sb) {
1667 btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
1668 "Snapshot src from another FS");
1669 ret = -EXDEV;
1670 } else if (!inode_owner_or_capable(src_inode)) {
1671 /*
1672 * Subvolume creation is not restricted, but snapshots
1673 * are limited to one's own subvolumes only
1674 */
1675 ret = -EPERM;
1676 } else {
1677 ret = btrfs_mksubvol(&file->f_path, name, namelen,
1678 BTRFS_I(src_inode)->root,
1679 transid, readonly, inherit);
1680 }
1681 fdput(src);
1682 }
1683 out_drop_write:
1684 mnt_drop_write_file(file);
1685 out:
1686 return ret;
1687 }
1688
1689 static noinline int btrfs_ioctl_snap_create(struct file *file,
1690 void __user *arg, int subvol)
1691 {
1692 struct btrfs_ioctl_vol_args *vol_args;
1693 int ret;
1694
1695 if (!S_ISDIR(file_inode(file)->i_mode))
1696 return -ENOTDIR;
1697
1698 vol_args = memdup_user(arg, sizeof(*vol_args));
1699 if (IS_ERR(vol_args))
1700 return PTR_ERR(vol_args);
1701 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
1702
1703 ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
1704 vol_args->fd, subvol,
1705 NULL, false, NULL);
1706
1707 kfree(vol_args);
1708 return ret;
1709 }
1710
1711 static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
1712 void __user *arg, int subvol)
1713 {
1714 struct btrfs_ioctl_vol_args_v2 *vol_args;
1715 int ret;
1716 u64 transid = 0;
1717 u64 *ptr = NULL;
1718 bool readonly = false;
1719 struct btrfs_qgroup_inherit *inherit = NULL;
1720
1721 if (!S_ISDIR(file_inode(file)->i_mode))
1722 return -ENOTDIR;
1723
1724 vol_args = memdup_user(arg, sizeof(*vol_args));
1725 if (IS_ERR(vol_args))
1726 return PTR_ERR(vol_args);
1727 vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
1728
1729 if (vol_args->flags &
1730 ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY |
1731 BTRFS_SUBVOL_QGROUP_INHERIT)) {
1732 ret = -EOPNOTSUPP;
1733 goto free_args;
1734 }
1735
1736 if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC)
1737 ptr = &transid;
1738 if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
1739 readonly = true;
1740 if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
1741 if (vol_args->size > PAGE_SIZE) {
1742 ret = -EINVAL;
1743 goto free_args;
1744 }
1745 inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
1746 if (IS_ERR(inherit)) {
1747 ret = PTR_ERR(inherit);
1748 goto free_args;
1749 }
1750 }
1751
1752 ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
1753 vol_args->fd, subvol, ptr,
1754 readonly, inherit);
1755 if (ret)
1756 goto free_inherit;
1757
1758 if (ptr && copy_to_user(arg +
1759 offsetof(struct btrfs_ioctl_vol_args_v2,
1760 transid),
1761 ptr, sizeof(*ptr)))
1762 ret = -EFAULT;
1763
1764 free_inherit:
1765 kfree(inherit);
1766 free_args:
1767 kfree(vol_args);
1768 return ret;
1769 }
1770
1771 static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
1772 void __user *arg)
1773 {
1774 struct inode *inode = file_inode(file);
1775 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1776 struct btrfs_root *root = BTRFS_I(inode)->root;
1777 int ret = 0;
1778 u64 flags = 0;
1779
1780 if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID)
1781 return -EINVAL;
1782
1783 down_read(&fs_info->subvol_sem);
1784 if (btrfs_root_readonly(root))
1785 flags |= BTRFS_SUBVOL_RDONLY;
1786 up_read(&fs_info->subvol_sem);
1787
1788 if (copy_to_user(arg, &flags, sizeof(flags)))
1789 ret = -EFAULT;
1790
1791 return ret;
1792 }
1793
1794 static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
1795 void __user *arg)
1796 {
1797 struct inode *inode = file_inode(file);
1798 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1799 struct btrfs_root *root = BTRFS_I(inode)->root;
1800 struct btrfs_trans_handle *trans;
1801 u64 root_flags;
1802 u64 flags;
1803 int ret = 0;
1804
1805 if (!inode_owner_or_capable(inode))
1806 return -EPERM;
1807
1808 ret = mnt_want_write_file(file);
1809 if (ret)
1810 goto out;
1811
1812 if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
1813 ret = -EINVAL;
1814 goto out_drop_write;
1815 }
1816
1817 if (copy_from_user(&flags, arg, sizeof(flags))) {
1818 ret = -EFAULT;
1819 goto out_drop_write;
1820 }
1821
1822 if (flags & BTRFS_SUBVOL_CREATE_ASYNC) {
1823 ret = -EINVAL;
1824 goto out_drop_write;
1825 }
1826
1827 if (flags & ~BTRFS_SUBVOL_RDONLY) {
1828 ret = -EOPNOTSUPP;
1829 goto out_drop_write;
1830 }
1831
1832 down_write(&fs_info->subvol_sem);
1833
1834 /* nothing to do */
1835 if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root))
1836 goto out_drop_sem;
1837
1838 root_flags = btrfs_root_flags(&root->root_item);
1839 if (flags & BTRFS_SUBVOL_RDONLY) {
1840 btrfs_set_root_flags(&root->root_item,
1841 root_flags | BTRFS_ROOT_SUBVOL_RDONLY);
1842 } else {
1843 /*
1844 * Block RO -> RW transition if this subvolume is involved in
1845 * send
1846 */
1847 spin_lock(&root->root_item_lock);
1848 if (root->send_in_progress == 0) {
1849 btrfs_set_root_flags(&root->root_item,
1850 root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY);
1851 spin_unlock(&root->root_item_lock);
1852 } else {
1853 spin_unlock(&root->root_item_lock);
1854 btrfs_warn(fs_info,
1855 "Attempt to set subvolume %llu read-write during send",
1856 root->root_key.objectid);
1857 ret = -EPERM;
1858 goto out_drop_sem;
1859 }
1860 }
1861
1862 trans = btrfs_start_transaction(root, 1);
1863 if (IS_ERR(trans)) {
1864 ret = PTR_ERR(trans);
1865 goto out_reset;
1866 }
1867
1868 ret = btrfs_update_root(trans, fs_info->tree_root,
1869 &root->root_key, &root->root_item);
1870
1871 btrfs_commit_transaction(trans);
1872 out_reset:
1873 if (ret)
1874 btrfs_set_root_flags(&root->root_item, root_flags);
1875 out_drop_sem:
1876 up_write(&fs_info->subvol_sem);
1877 out_drop_write:
1878 mnt_drop_write_file(file);
1879 out:
1880 return ret;
1881 }
1882
1883 /*
1884 * helper to check if the subvolume references other subvolumes
1885 */
1886 static noinline int may_destroy_subvol(struct btrfs_root *root)
1887 {
1888 struct btrfs_fs_info *fs_info = root->fs_info;
1889 struct btrfs_path *path;
1890 struct btrfs_dir_item *di;
1891 struct btrfs_key key;
1892 u64 dir_id;
1893 int ret;
1894
1895 path = btrfs_alloc_path();
1896 if (!path)
1897 return -ENOMEM;
1898
1899 /* Make sure this root isn't set as the default subvol */
1900 dir_id = btrfs_super_root_dir(fs_info->super_copy);
1901 di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
1902 dir_id, "default", 7, 0);
1903 if (di && !IS_ERR(di)) {
1904 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
1905 if (key.objectid == root->root_key.objectid) {
1906 ret = -EPERM;
1907 btrfs_err(fs_info,
1908 "deleting default subvolume %llu is not allowed",
1909 key.objectid);
1910 goto out;
1911 }
1912 btrfs_release_path(path);
1913 }
1914
1915 key.objectid = root->root_key.objectid;
1916 key.type = BTRFS_ROOT_REF_KEY;
1917 key.offset = (u64)-1;
1918
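/*
 * Look for the highest possible ROOT_REF of this subvolume; if the slot just
 * before the insertion point still belongs to it, the subvolume has child
 * subvolumes and cannot be removed (-ENOTEMPTY below).
 */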
1919 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
1920 if (ret < 0)
1921 goto out;
1922 BUG_ON(ret == 0);
1923
1924 ret = 0;
1925 if (path->slots[0] > 0) {
1926 path->slots[0]--;
1927 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1928 if (key.objectid == root->root_key.objectid &&
1929 key.type == BTRFS_ROOT_REF_KEY)
1930 ret = -ENOTEMPTY;
1931 }
1932 out:
1933 btrfs_free_path(path);
1934 return ret;
1935 }
1936
1937 static noinline int key_in_sk(struct btrfs_key *key,
1938 struct btrfs_ioctl_search_key *sk)
1939 {
1940 struct btrfs_key test;
1941 int ret;
1942
1943 test.objectid = sk->min_objectid;
1944 test.type = sk->min_type;
1945 test.offset = sk->min_offset;
1946
1947 ret = btrfs_comp_cpu_keys(key, &test);
1948 if (ret < 0)
1949 return 0;
1950
1951 test.objectid = sk->max_objectid;
1952 test.type = sk->max_type;
1953 test.offset = sk->max_offset;
1954
1955 ret = btrfs_comp_cpu_keys(key, &test);
1956 if (ret > 0)
1957 return 0;
1958 return 1;
1959 }
1960
1961 static noinline int copy_to_sk(struct btrfs_path *path,
1962 struct btrfs_key *key,
1963 struct btrfs_ioctl_search_key *sk,
1964 size_t *buf_size,
1965 char __user *ubuf,
1966 unsigned long *sk_offset,
1967 int *num_found)
1968 {
1969 u64 found_transid;
1970 struct extent_buffer *leaf;
1971 struct btrfs_ioctl_search_header sh;
1972 struct btrfs_key test;
1973 unsigned long item_off;
1974 unsigned long item_len;
1975 int nritems;
1976 int i;
1977 int slot;
1978 int ret = 0;
1979
1980 leaf = path->nodes[0];
1981 slot = path->slots[0];
1982 nritems = btrfs_header_nritems(leaf);
1983
1984 if (btrfs_header_generation(leaf) > sk->max_transid) {
1985 i = nritems;
1986 goto advance_key;
1987 }
1988 found_transid = btrfs_header_generation(leaf);
1989
1990 for (i = slot; i < nritems; i++) {
1991 item_off = btrfs_item_ptr_offset(leaf, i);
1992 item_len = btrfs_item_size_nr(leaf, i);
1993
1994 btrfs_item_key_to_cpu(leaf, key, i);
1995 if (!key_in_sk(key, sk))
1996 continue;
1997
1998 if (sizeof(sh) + item_len > *buf_size) {
1999 if (*num_found) {
2000 ret = 1;
2001 goto out;
2002 }
2003
2004 /*
2005 * return one empty item back for v1, which does not
2006 * handle -EOVERFLOW
2007 */
2008
2009 *buf_size = sizeof(sh) + item_len;
2010 item_len = 0;
2011 ret = -EOVERFLOW;
2012 }
2013
2014 if (sizeof(sh) + item_len + *sk_offset > *buf_size) {
2015 ret = 1;
2016 goto out;
2017 }
2018
2019 sh.objectid = key->objectid;
2020 sh.offset = key->offset;
2021 sh.type = key->type;
2022 sh.len = item_len;
2023 sh.transid = found_transid;
2024
2025 /* copy search result header */
2026 if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) {
2027 ret = -EFAULT;
2028 goto out;
2029 }
2030
2031 *sk_offset += sizeof(sh);
2032
2033 if (item_len) {
2034 char __user *up = ubuf + *sk_offset;
2035 /* copy the item */
2036 if (read_extent_buffer_to_user(leaf, up,
2037 item_off, item_len)) {
2038 ret = -EFAULT;
2039 goto out;
2040 }
2041
2042 *sk_offset += item_len;
2043 }
2044 (*num_found)++;
2045
2046 if (ret) /* -EOVERFLOW from above */
2047 goto out;
2048
2049 if (*num_found >= sk->nr_items) {
2050 ret = 1;
2051 goto out;
2052 }
2053 }
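/* advance the key past the current item, or stop once the max key is reached */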
2054 advance_key:
2055 ret = 0;
2056 test.objectid = sk->max_objectid;
2057 test.type = sk->max_type;
2058 test.offset = sk->max_offset;
2059 if (btrfs_comp_cpu_keys(key, &test) >= 0)
2060 ret = 1;
2061 else if (key->offset < (u64)-1)
2062 key->offset++;
2063 else if (key->type < (u8)-1) {
2064 key->offset = 0;
2065 key->type++;
2066 } else if (key->objectid < (u64)-1) {
2067 key->offset = 0;
2068 key->type = 0;
2069 key->objectid++;
2070 } else
2071 ret = 1;
2072 out:
2073 /*
2074 * 0: all items from this leaf copied, continue with next
2075 * 1: * more items can be copied, but the remaining buffer is too small
2076 * * all items were found
2077 * Either way, it will stop the loop which iterates to the next
2078 * leaf
2079 * -EOVERFLOW: item was too large for buffer
2080 * -EFAULT: could not copy extent buffer back to userspace
2081 */
2082 return ret;
2083 }
2084
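/*
 * Core of the TREE_SEARCH ioctls: resolve sk->tree_id (0 means the tree of
 * @inode), then repeatedly call btrfs_search_forward() starting at the
 * minimum key from @sk and copy matching items into @ubuf via copy_to_sk()
 * until the buffer is exhausted or sk->nr_items results have been returned.
 * sk->nr_items is updated to the number of items actually copied.
 */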
2085 static noinline int search_ioctl(struct inode *inode,
2086 struct btrfs_ioctl_search_key *sk,
2087 size_t *buf_size,
2088 char __user *ubuf)
2089 {
2090 struct btrfs_fs_info *info = btrfs_sb(inode->i_sb);
2091 struct btrfs_root *root;
2092 struct btrfs_key key;
2093 struct btrfs_path *path;
2094 int ret;
2095 int num_found = 0;
2096 unsigned long sk_offset = 0;
2097
2098 if (*buf_size < sizeof(struct btrfs_ioctl_search_header)) {
2099 *buf_size = sizeof(struct btrfs_ioctl_search_header);
2100 return -EOVERFLOW;
2101 }
2102
2103 path = btrfs_alloc_path();
2104 if (!path)
2105 return -ENOMEM;
2106
2107 if (sk->tree_id == 0) {
2108 /* search the root of the inode that was passed */
2109 root = BTRFS_I(inode)->root;
2110 } else {
2111 key.objectid = sk->tree_id;
2112 key.type = BTRFS_ROOT_ITEM_KEY;
2113 key.offset = (u64)-1;
2114 root = btrfs_read_fs_root_no_name(info, &key);
2115 if (IS_ERR(root)) {
2116 btrfs_free_path(path);
2117 return -ENOENT;
2118 }
2119 }
2120
2121 key.objectid = sk->min_objectid;
2122 key.type = sk->min_type;
2123 key.offset = sk->min_offset;
2124
2125 while (1) {
2126 ret = btrfs_search_forward(root, &key, path, sk->min_transid);
2127 if (ret != 0) {
2128 if (ret > 0)
2129 ret = 0;
2130 goto err;
2131 }
2132 ret = copy_to_sk(path, &key, sk, buf_size, ubuf,
2133 &sk_offset, &num_found);
2134 btrfs_release_path(path);
2135 if (ret)
2136 break;
2137
2138 }
2139 if (ret > 0)
2140 ret = 0;
2141 err:
2142 sk->nr_items = num_found;
2143 btrfs_free_path(path);
2144 return ret;
2145 }
2146
2147 static noinline int btrfs_ioctl_tree_search(struct file *file,
2148 void __user *argp)
2149 {
2150 struct btrfs_ioctl_search_args __user *uargs;
2151 struct btrfs_ioctl_search_key sk;
2152 struct inode *inode;
2153 int ret;
2154 size_t buf_size;
2155
2156 if (!capable(CAP_SYS_ADMIN))
2157 return -EPERM;
2158
2159 uargs = (struct btrfs_ioctl_search_args __user *)argp;
2160
2161 if (copy_from_user(&sk, &uargs->key, sizeof(sk)))
2162 return -EFAULT;
2163
2164 buf_size = sizeof(uargs->buf);
2165
2166 inode = file_inode(file);
2167 ret = search_ioctl(inode, &sk, &buf_size, uargs->buf);
2168
2169 /*
2170 * In the original implementation an overflow is handled by returning a
2171 * search header with a len of zero, so reset ret.
2172 */
2173 if (ret == -EOVERFLOW)
2174 ret = 0;
2175
2176 if (ret == 0 && copy_to_user(&uargs->key, &sk, sizeof(sk)))
2177 ret = -EFAULT;
2178 return ret;
2179 }
2180
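/*
 * V2 of the tree search ioctl lets the caller choose the size of the result
 * buffer (capped at 16MiB below) instead of the fixed size buffer of v1.
 * A minimal userspace sketch (illustrative only, error handling omitted):
 *
 *	struct btrfs_ioctl_search_args_v2 *args;
 *	size_t buf_size = 64 * 1024;
 *
 *	args = calloc(1, sizeof(*args) + buf_size);
 *	args->buf_size = buf_size;
 *	args->key.tree_id = 0;			// 0 means: tree of the passed fd
 *	args->key.max_objectid = (__u64)-1;
 *	args->key.max_offset = (__u64)-1;
 *	args->key.max_transid = (__u64)-1;
 *	args->key.max_type = (__u32)-1;
 *	args->key.nr_items = (__u32)-1;		// as many items as fit
 *	ret = ioctl(fd, BTRFS_IOC_TREE_SEARCH_V2, args);
 *
 * On success args->key.nr_items holds the number of results; the buffer then
 * contains, for each result, a struct btrfs_ioctl_search_header followed by
 * the raw item data.
 */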
2181 static noinline int btrfs_ioctl_tree_search_v2(struct file *file,
2182 void __user *argp)
2183 {
2184 struct btrfs_ioctl_search_args_v2 __user *uarg;
2185 struct btrfs_ioctl_search_args_v2 args;
2186 struct inode *inode;
2187 int ret;
2188 size_t buf_size;
2189 const size_t buf_limit = SZ_16M;
2190
2191 if (!capable(CAP_SYS_ADMIN))
2192 return -EPERM;
2193
2194 /* copy search header and buffer size */
2195 uarg = (struct btrfs_ioctl_search_args_v2 __user *)argp;
2196 if (copy_from_user(&args, uarg, sizeof(args)))
2197 return -EFAULT;
2198
2199 buf_size = args.buf_size;
2200
2201 if (buf_size < sizeof(struct btrfs_ioctl_search_header))
2202 return -EOVERFLOW;
2203
2204 /* limit result size to 16MB */
2205 if (buf_size > buf_limit)
2206 buf_size = buf_limit;
2207
2208 inode = file_inode(file);
2209 ret = search_ioctl(inode, &args.key, &buf_size,
2210 (char __user *)(&uarg->buf[0]));
2211 if (ret == 0 && copy_to_user(&uarg->key, &args.key, sizeof(args.key)))
2212 ret = -EFAULT;
2213 else if (ret == -EOVERFLOW &&
2214 copy_to_user(&uarg->buf_size, &buf_size, sizeof(buf_size)))
2215 ret = -EFAULT;
2216
2217 return ret;
2218 }
2219
2220 /*
2221 * Search INODE_REFs to identify the path name of the 'dirid' directory
2222 * in a 'tree_id' tree, and set the path name in 'name'.
2223 */
2224 static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
2225 u64 tree_id, u64 dirid, char *name)
2226 {
2227 struct btrfs_root *root;
2228 struct btrfs_key key;
2229 char *ptr;
2230 int ret = -1;
2231 int slot;
2232 int len;
2233 int total_len = 0;
2234 struct btrfs_inode_ref *iref;
2235 struct extent_buffer *l;
2236 struct btrfs_path *path;
2237
2238 if (dirid == BTRFS_FIRST_FREE_OBJECTID) {
2239 name[0]='\0';
2240 return 0;
2241 }
2242
2243 path = btrfs_alloc_path();
2244 if (!path)
2245 return -ENOMEM;
2246
2247 ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX];
2248
2249 key.objectid = tree_id;
2250 key.type = BTRFS_ROOT_ITEM_KEY;
2251 key.offset = (u64)-1;
2252 root = btrfs_read_fs_root_no_name(info, &key);
2253 if (IS_ERR(root)) {
2254 btrfs_err(info, "could not find root %llu", tree_id);
2255 ret = -ENOENT;
2256 goto out;
2257 }
2258
2259 key.objectid = dirid;
2260 key.type = BTRFS_INODE_REF_KEY;
2261 key.offset = (u64)-1;
2262
2263 while (1) {
2264 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2265 if (ret < 0)
2266 goto out;
2267 else if (ret > 0) {
2268 ret = btrfs_previous_item(root, path, dirid,
2269 BTRFS_INODE_REF_KEY);
2270 if (ret < 0)
2271 goto out;
2272 else if (ret > 0) {
2273 ret = -ENOENT;
2274 goto out;
2275 }
2276 }
2277
2278 l = path->nodes[0];
2279 slot = path->slots[0];
2280 btrfs_item_key_to_cpu(l, &key, slot);
2281
2282 iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref);
2283 len = btrfs_inode_ref_name_len(l, iref);
2284 ptr -= len + 1;
2285 total_len += len + 1;
2286 if (ptr < name) {
2287 ret = -ENAMETOOLONG;
2288 goto out;
2289 }
2290
2291 *(ptr + len) = '/';
2292 read_extent_buffer(l, ptr, (unsigned long)(iref + 1), len);
2293
2294 if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
2295 break;
2296
2297 btrfs_release_path(path);
2298 key.objectid = key.offset;
2299 key.offset = (u64)-1;
2300 dirid = key.objectid;
2301 }
2302 memmove(name, ptr, total_len);
2303 name[total_len] = '\0';
2304 ret = 0;
2305 out:
2306 btrfs_free_path(path);
2307 return ret;
2308 }
2309
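/*
 * INO_LOOKUP resolves the path of a directory inode relative to the root of
 * the subvolume given by treeid, and reports the containing subvolume id when
 * treeid is 0. Resolving an arbitrary path requires CAP_SYS_ADMIN; the
 * unprivileged form only reports the subvolume id. Illustrative userspace
 * sketch (error handling omitted):
 *
 *	struct btrfs_ioctl_ino_lookup_args args = {
 *		.treeid = 0,				// fill in from the fd
 *		.objectid = BTRFS_FIRST_FREE_OBJECTID,	// root dir of the subvol
 *	};
 *	ret = ioctl(fd, BTRFS_IOC_INO_LOOKUP, &args);
 *
 * On success args.treeid holds the id of the subvolume containing the fd and
 * args.name the path (empty for the subvolume root itself).
 */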
2310 static noinline int btrfs_ioctl_ino_lookup(struct file *file,
2311 void __user *argp)
2312 {
2313 struct btrfs_ioctl_ino_lookup_args *args;
2314 struct inode *inode;
2315 int ret = 0;
2316
2317 args = memdup_user(argp, sizeof(*args));
2318 if (IS_ERR(args))
2319 return PTR_ERR(args);
2320
2321 inode = file_inode(file);
2322
2323 /*
2324 * Unprivileged query to obtain the containing subvolume root id. The
2325 * path is reset so it's consistent with btrfs_search_path_in_tree.
2326 */
2327 if (args->treeid == 0)
2328 args->treeid = BTRFS_I(inode)->root->root_key.objectid;
2329
2330 if (args->objectid == BTRFS_FIRST_FREE_OBJECTID) {
2331 args->name[0] = 0;
2332 goto out;
2333 }
2334
2335 if (!capable(CAP_SYS_ADMIN)) {
2336 ret = -EPERM;
2337 goto out;
2338 }
2339
2340 ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info,
2341 args->treeid, args->objectid,
2342 args->name);
2343
2344 out:
2345 if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
2346 ret = -EFAULT;
2347
2348 kfree(args);
2349 return ret;
2350 }
2351
2352 static noinline int btrfs_ioctl_snap_destroy(struct file *file,
2353 void __user *arg)
2354 {
2355 struct dentry *parent = file->f_path.dentry;
2356 struct btrfs_fs_info *fs_info = btrfs_sb(parent->d_sb);
2357 struct dentry *dentry;
2358 struct inode *dir = d_inode(parent);
2359 struct inode *inode;
2360 struct btrfs_root *root = BTRFS_I(dir)->root;
2361 struct btrfs_root *dest = NULL;
2362 struct btrfs_ioctl_vol_args *vol_args;
2363 struct btrfs_trans_handle *trans;
2364 struct btrfs_block_rsv block_rsv;
2365 u64 root_flags;
2366 u64 qgroup_reserved;
2367 int namelen;
2368 int ret;
2369 int err = 0;
2370
2371 if (!S_ISDIR(dir->i_mode))
2372 return -ENOTDIR;
2373
2374 vol_args = memdup_user(arg, sizeof(*vol_args));
2375 if (IS_ERR(vol_args))
2376 return PTR_ERR(vol_args);
2377
2378 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
2379 namelen = strlen(vol_args->name);
2380 if (strchr(vol_args->name, '/') ||
2381 strncmp(vol_args->name, "..", namelen) == 0) {
2382 err = -EINVAL;
2383 goto out;
2384 }
2385
2386 err = mnt_want_write_file(file);
2387 if (err)
2388 goto out;
2389
2390
2391 err = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
2392 if (err == -EINTR)
2393 goto out_drop_write;
2394 dentry = lookup_one_len(vol_args->name, parent, namelen);
2395 if (IS_ERR(dentry)) {
2396 err = PTR_ERR(dentry);
2397 goto out_unlock_dir;
2398 }
2399
2400 if (d_really_is_negative(dentry)) {
2401 err = -ENOENT;
2402 goto out_dput;
2403 }
2404
2405 inode = d_inode(dentry);
2406 dest = BTRFS_I(inode)->root;
2407 if (!capable(CAP_SYS_ADMIN)) {
2408 /*
2409 * Regular user. Only allow this with a special mount
2410 * option, when the user has write+exec access to the
2411 * subvol root, and when rmdir(2) would have been
2412 * allowed.
2413 *
2414 * Note that this is _not_ a check that the subvol is
2415 * empty or doesn't contain data that we wouldn't
2416 * otherwise be able to delete.
2417 *
2418 * Users who want to delete empty subvols should try
2419 * rmdir(2).
2420 */
2421 err = -EPERM;
2422 if (!btrfs_test_opt(fs_info, USER_SUBVOL_RM_ALLOWED))
2423 goto out_dput;
2424
2425 /*
2426 * Do not allow deletion if the parent dir is the same
2427 * as the dir to be deleted. That means the ioctl
2428 * must be called on the dentry referencing the root
2429 * of the subvol, not a random directory contained
2430 * within it.
2431 */
2432 err = -EINVAL;
2433 if (root == dest)
2434 goto out_dput;
2435
2436 err = inode_permission(inode, MAY_WRITE | MAY_EXEC);
2437 if (err)
2438 goto out_dput;
2439 }
2440
2441 /* check if subvolume may be deleted by a user */
2442 err = btrfs_may_delete(dir, dentry, 1);
2443 if (err)
2444 goto out_dput;
2445
2446 if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
2447 err = -EINVAL;
2448 goto out_dput;
2449 }
2450
2451 inode_lock(inode);
2452
2453 /*
2454 * Don't allow deleting a subvolume while send is in progress. This is
2455 * inside the i_mutex so the error handling that has to drop the bit
2456 * again is not run concurrently.
2457 */
2458 spin_lock(&dest->root_item_lock);
2459 root_flags = btrfs_root_flags(&dest->root_item);
2460 if (dest->send_in_progress == 0) {
2461 btrfs_set_root_flags(&dest->root_item,
2462 root_flags | BTRFS_ROOT_SUBVOL_DEAD);
2463 spin_unlock(&dest->root_item_lock);
2464 } else {
2465 spin_unlock(&dest->root_item_lock);
2466 btrfs_warn(fs_info,
2467 "Attempt to delete subvolume %llu during send",
2468 dest->root_key.objectid);
2469 err = -EPERM;
2470 goto out_unlock_inode;
2471 }
2472
2473 down_write(&fs_info->subvol_sem);
2474
2475 err = may_destroy_subvol(dest);
2476 if (err)
2477 goto out_up_write;
2478
2479 btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
2480 /*
2481 * One for dir inode, two for dir entries, two for root
2482 * ref/backref.
2483 */
2484 err = btrfs_subvolume_reserve_metadata(root, &block_rsv,
2485 5, &qgroup_reserved, true);
2486 if (err)
2487 goto out_up_write;
2488
2489 trans = btrfs_start_transaction(root, 0);
2490 if (IS_ERR(trans)) {
2491 err = PTR_ERR(trans);
2492 goto out_release;
2493 }
2494 trans->block_rsv = &block_rsv;
2495 trans->bytes_reserved = block_rsv.size;
2496
2497 btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));
2498
2499 ret = btrfs_unlink_subvol(trans, root, dir,
2500 dest->root_key.objectid,
2501 dentry->d_name.name,
2502 dentry->d_name.len);
2503 if (ret) {
2504 err = ret;
2505 btrfs_abort_transaction(trans, ret);
2506 goto out_end_trans;
2507 }
2508
2509 btrfs_record_root_in_trans(trans, dest);
2510
2511 memset(&dest->root_item.drop_progress, 0,
2512 sizeof(dest->root_item.drop_progress));
2513 dest->root_item.drop_level = 0;
2514 btrfs_set_root_refs(&dest->root_item, 0);
2515
2516 if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
2517 ret = btrfs_insert_orphan_item(trans,
2518 fs_info->tree_root,
2519 dest->root_key.objectid);
2520 if (ret) {
2521 btrfs_abort_transaction(trans, ret);
2522 err = ret;
2523 goto out_end_trans;
2524 }
2525 }
2526
2527 ret = btrfs_uuid_tree_rem(trans, fs_info, dest->root_item.uuid,
2528 BTRFS_UUID_KEY_SUBVOL,
2529 dest->root_key.objectid);
2530 if (ret && ret != -ENOENT) {
2531 btrfs_abort_transaction(trans, ret);
2532 err = ret;
2533 goto out_end_trans;
2534 }
2535 if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
2536 ret = btrfs_uuid_tree_rem(trans, fs_info,
2537 dest->root_item.received_uuid,
2538 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
2539 dest->root_key.objectid);
2540 if (ret && ret != -ENOENT) {
2541 btrfs_abort_transaction(trans, ret);
2542 err = ret;
2543 goto out_end_trans;
2544 }
2545 }
2546
2547 out_end_trans:
2548 trans->block_rsv = NULL;
2549 trans->bytes_reserved = 0;
2550 ret = btrfs_end_transaction(trans);
2551 if (ret && !err)
2552 err = ret;
2553 inode->i_flags |= S_DEAD;
2554 out_release:
2555 btrfs_subvolume_release_metadata(fs_info, &block_rsv);
2556 out_up_write:
2557 up_write(&fs_info->subvol_sem);
2558 if (err) {
2559 spin_lock(&dest->root_item_lock);
2560 root_flags = btrfs_root_flags(&dest->root_item);
2561 btrfs_set_root_flags(&dest->root_item,
2562 root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
2563 spin_unlock(&dest->root_item_lock);
2564 }
2565 out_unlock_inode:
2566 inode_unlock(inode);
2567 if (!err) {
2568 d_invalidate(dentry);
2569 btrfs_invalidate_inodes(dest);
2570 d_delete(dentry);
2571 ASSERT(dest->send_in_progress == 0);
2572
2573 /* the last ref */
2574 if (dest->ino_cache_inode) {
2575 iput(dest->ino_cache_inode);
2576 dest->ino_cache_inode = NULL;
2577 }
2578 }
2579 out_dput:
2580 dput(dentry);
2581 out_unlock_dir:
2582 inode_unlock(dir);
2583 out_drop_write:
2584 mnt_drop_write_file(file);
2585 out:
2586 kfree(vol_args);
2587 return err;
2588 }
2589
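/*
 * Defrag ioctl: on a directory (CAP_SYS_ADMIN only) defragment the metadata
 * b-tree of the containing root; on a regular file opened for writing
 * defragment the range described by the optional
 * btrfs_ioctl_defrag_range_args argument, or the whole file when no argument
 * is given.
 */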
2590 static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
2591 {
2592 struct inode *inode = file_inode(file);
2593 struct btrfs_root *root = BTRFS_I(inode)->root;
2594 struct btrfs_ioctl_defrag_range_args *range;
2595 int ret;
2596
2597 ret = mnt_want_write_file(file);
2598 if (ret)
2599 return ret;
2600
2601 if (btrfs_root_readonly(root)) {
2602 ret = -EROFS;
2603 goto out;
2604 }
2605
2606 switch (inode->i_mode & S_IFMT) {
2607 case S_IFDIR:
2608 if (!capable(CAP_SYS_ADMIN)) {
2609 ret = -EPERM;
2610 goto out;
2611 }
2612 ret = btrfs_defrag_root(root);
2613 break;
2614 case S_IFREG:
2615 if (!(file->f_mode & FMODE_WRITE)) {
2616 ret = -EINVAL;
2617 goto out;
2618 }
2619
2620 range = kzalloc(sizeof(*range), GFP_KERNEL);
2621 if (!range) {
2622 ret = -ENOMEM;
2623 goto out;
2624 }
2625
2626 if (argp) {
2627 if (copy_from_user(range, argp,
2628 sizeof(*range))) {
2629 ret = -EFAULT;
2630 kfree(range);
2631 goto out;
2632 }
2633 /* compression requires us to start the IO */
2634 if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
2635 range->flags |= BTRFS_DEFRAG_RANGE_START_IO;
2636 range->extent_thresh = (u32)-1;
2637 }
2638 } else {
2639 /* the rest are all set to zero by kzalloc */
2640 range->len = (u64)-1;
2641 }
2642 ret = btrfs_defrag_file(file_inode(file), file,
2643 range, 0, 0);
2644 if (ret > 0)
2645 ret = 0;
2646 kfree(range);
2647 break;
2648 default:
2649 ret = -EINVAL;
2650 }
2651 out:
2652 mnt_drop_write_file(file);
2653 return ret;
2654 }
2655
2656 static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
2657 {
2658 struct btrfs_ioctl_vol_args *vol_args;
2659 int ret;
2660
2661 if (!capable(CAP_SYS_ADMIN))
2662 return -EPERM;
2663
2664 if (atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1))
2665 return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
2666
2667 mutex_lock(&fs_info->volume_mutex);
2668 vol_args = memdup_user(arg, sizeof(*vol_args));
2669 if (IS_ERR(vol_args)) {
2670 ret = PTR_ERR(vol_args);
2671 goto out;
2672 }
2673
2674 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
2675 ret = btrfs_init_new_device(fs_info, vol_args->name);
2676
2677 if (!ret)
2678 btrfs_info(fs_info, "disk added %s", vol_args->name);
2679
2680 kfree(vol_args);
2681 out:
2682 mutex_unlock(&fs_info->volume_mutex);
2683 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
2684 return ret;
2685 }
2686
2687 static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
2688 {
2689 struct inode *inode = file_inode(file);
2690 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2691 struct btrfs_ioctl_vol_args_v2 *vol_args;
2692 int ret;
2693
2694 if (!capable(CAP_SYS_ADMIN))
2695 return -EPERM;
2696
2697 ret = mnt_want_write_file(file);
2698 if (ret)
2699 return ret;
2700
2701 vol_args = memdup_user(arg, sizeof(*vol_args));
2702 if (IS_ERR(vol_args)) {
2703 ret = PTR_ERR(vol_args);
2704 goto err_drop;
2705 }
2706
2707 /* Check for compatibility: reject unknown flags */
2708 if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED) {
2709 	ret = -EOPNOTSUPP;
	goto out;
}
2710
2711 if (atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
2712 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
2713 goto out;
2714 }
2715
2716 mutex_lock(&fs_info->volume_mutex);
2717 if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) {
2718 ret = btrfs_rm_device(fs_info, NULL, vol_args->devid);
2719 } else {
2720 vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
2721 ret = btrfs_rm_device(fs_info, vol_args->name, 0);
2722 }
2723 mutex_unlock(&fs_info->volume_mutex);
2724 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
2725
2726 if (!ret) {
2727 if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID)
2728 btrfs_info(fs_info, "device deleted: id %llu",
2729 vol_args->devid);
2730 else
2731 btrfs_info(fs_info, "device deleted: %s",
2732 vol_args->name);
2733 }
2734 out:
2735 kfree(vol_args);
2736 err_drop:
2737 mnt_drop_write_file(file);
2738 return ret;
2739 }
2740
2741 static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
2742 {
2743 struct inode *inode = file_inode(file);
2744 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2745 struct btrfs_ioctl_vol_args *vol_args;
2746 int ret;
2747
2748 if (!capable(CAP_SYS_ADMIN))
2749 return -EPERM;
2750
2751 ret = mnt_want_write_file(file);
2752 if (ret)
2753 return ret;
2754
2755 if (atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
2756 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
2757 goto out_drop_write;
2758 }
2759
2760 vol_args = memdup_user(arg, sizeof(*vol_args));
2761 if (IS_ERR(vol_args)) {
2762 ret = PTR_ERR(vol_args);
2763 goto out;
2764 }
2765
2766 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
2767 mutex_lock(&fs_info->volume_mutex);
2768 ret = btrfs_rm_device(fs_info, vol_args->name, 0);
2769 mutex_unlock(&fs_info->volume_mutex);
2770
2771 if (!ret)
2772 btrfs_info(fs_info, "disk deleted %s", vol_args->name);
2773 kfree(vol_args);
2774 out:
2775 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
2776 out_drop_write:
2777 mnt_drop_write_file(file);
2778
2779 return ret;
2780 }
2781
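/*
 * Report basic filesystem information to userspace: number of devices,
 * highest device id, filesystem UUID, and the node/sector/clone-alignment
 * sizes taken from the superblock.
 */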
2782 static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
2783 void __user *arg)
2784 {
2785 struct btrfs_ioctl_fs_info_args *fi_args;
2786 struct btrfs_device *device;
2787 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2788 int ret = 0;
2789
2790 fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL);
2791 if (!fi_args)
2792 return -ENOMEM;
2793
2794 mutex_lock(&fs_devices->device_list_mutex);
2795 fi_args->num_devices = fs_devices->num_devices;
2796 memcpy(&fi_args->fsid, fs_info->fsid, sizeof(fi_args->fsid));
2797
2798 list_for_each_entry(device, &fs_devices->devices, dev_list) {
2799 if (device->devid > fi_args->max_id)
2800 fi_args->max_id = device->devid;
2801 }
2802 mutex_unlock(&fs_devices->device_list_mutex);
2803
2804 fi_args->nodesize = fs_info->super_copy->nodesize;
2805 fi_args->sectorsize = fs_info->super_copy->sectorsize;
2806 fi_args->clone_alignment = fs_info->super_copy->sectorsize;
2807
2808 if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
2809 ret = -EFAULT;
2810
2811 kfree(fi_args);
2812 return ret;
2813 }
2814
2815 static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
2816 void __user *arg)
2817 {
2818 struct btrfs_ioctl_dev_info_args *di_args;
2819 struct btrfs_device *dev;
2820 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2821 int ret = 0;
2822 char *s_uuid = NULL;
2823
2824 di_args = memdup_user(arg, sizeof(*di_args));
2825 if (IS_ERR(di_args))
2826 return PTR_ERR(di_args);
2827
2828 if (!btrfs_is_empty_uuid(di_args->uuid))
2829 s_uuid = di_args->uuid;
2830
2831 mutex_lock(&fs_devices->device_list_mutex);
2832 dev = btrfs_find_device(fs_info, di_args->devid, s_uuid, NULL);
2833
2834 if (!dev) {
2835 ret = -ENODEV;
2836 goto out;
2837 }
2838
2839 di_args->devid = dev->devid;
2840 di_args->bytes_used = btrfs_device_get_bytes_used(dev);
2841 di_args->total_bytes = btrfs_device_get_total_bytes(dev);
2842 memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
2843 if (dev->name) {
2844 struct rcu_string *name;
2845
2846 rcu_read_lock();
2847 name = rcu_dereference(dev->name);
2848 strncpy(di_args->path, name->str, sizeof(di_args->path));
2849 rcu_read_unlock();
2850 di_args->path[sizeof(di_args->path) - 1] = 0;
2851 } else {
2852 di_args->path[0] = '\0';
2853 }
2854
2855 out:
2856 mutex_unlock(&fs_devices->device_list_mutex);
2857 if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
2858 ret = -EFAULT;
2859
2860 kfree(di_args);
2861 return ret;
2862 }
2863
2864 static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
2865 {
2866 struct page *page;
2867
2868 page = grab_cache_page(inode->i_mapping, index);
2869 if (!page)
2870 return ERR_PTR(-ENOMEM);
2871
2872 if (!PageUptodate(page)) {
2873 int ret;
2874
2875 ret = btrfs_readpage(NULL, page);
2876 if (ret)
2877 return ERR_PTR(ret);
2878 lock_page(page);
2879 if (!PageUptodate(page)) {
2880 unlock_page(page);
2881 put_page(page);
2882 return ERR_PTR(-EIO);
2883 }
2884 if (page->mapping != inode->i_mapping) {
2885 unlock_page(page);
2886 put_page(page);
2887 return ERR_PTR(-EAGAIN);
2888 }
2889 }
2890
2891 return page;
2892 }
2893
2894 static int gather_extent_pages(struct inode *inode, struct page **pages,
2895 int num_pages, u64 off)
2896 {
2897 int i;
2898 pgoff_t index = off >> PAGE_SHIFT;
2899
2900 for (i = 0; i < num_pages; i++) {
2901 again:
2902 pages[i] = extent_same_get_page(inode, index + i);
2903 if (IS_ERR(pages[i])) {
2904 int err = PTR_ERR(pages[i]);
2905
2906 if (err == -EAGAIN)
2907 goto again;
2908 pages[i] = NULL;
2909 return err;
2910 }
2911 }
2912 return 0;
2913 }
2914
2915 static int lock_extent_range(struct inode *inode, u64 off, u64 len,
2916 bool retry_range_locking)
2917 {
2918 /*
2919 * Do any pending delalloc/csum calculations on inode, one way or
2920 * another, and lock file content.
2921 * The locking order is:
2922 *
2923 * 1) pages
2924 * 2) range in the inode's io tree
2925 */
2926 while (1) {
2927 struct btrfs_ordered_extent *ordered;
2928 lock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
2929 ordered = btrfs_lookup_first_ordered_extent(inode,
2930 off + len - 1);
2931 if ((!ordered ||
2932 ordered->file_offset + ordered->len <= off ||
2933 ordered->file_offset >= off + len) &&
2934 !test_range_bit(&BTRFS_I(inode)->io_tree, off,
2935 off + len - 1, EXTENT_DELALLOC, 0, NULL)) {
2936 if (ordered)
2937 btrfs_put_ordered_extent(ordered);
2938 break;
2939 }
2940 unlock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
2941 if (ordered)
2942 btrfs_put_ordered_extent(ordered);
2943 if (!retry_range_locking)
2944 return -EAGAIN;
2945 btrfs_wait_ordered_range(inode, off, len);
2946 }
2947 return 0;
2948 }
2949
2950 static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
2951 {
2952 inode_unlock(inode1);
2953 inode_unlock(inode2);
2954 }
2955
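/*
 * Lock two inodes in a fixed order (the one with the higher address first,
 * as the "parent") so that concurrent clone/dedupe calls on the same pair of
 * inodes cannot deadlock against each other.
 */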
2956 static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2)
2957 {
2958 if (inode1 < inode2)
2959 swap(inode1, inode2);
2960
2961 inode_lock_nested(inode1, I_MUTEX_PARENT);
2962 inode_lock_nested(inode2, I_MUTEX_CHILD);
2963 }
2964
2965 static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
2966 struct inode *inode2, u64 loff2, u64 len)
2967 {
2968 unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
2969 unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
2970 }
2971
2972 static int btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
2973 struct inode *inode2, u64 loff2, u64 len,
2974 bool retry_range_locking)
2975 {
2976 int ret;
2977
2978 if (inode1 < inode2) {
2979 swap(inode1, inode2);
2980 swap(loff1, loff2);
2981 }
2982 ret = lock_extent_range(inode1, loff1, len, retry_range_locking);
2983 if (ret)
2984 return ret;
2985 ret = lock_extent_range(inode2, loff2, len, retry_range_locking);
2986 if (ret)
2987 unlock_extent(&BTRFS_I(inode1)->io_tree, loff1,
2988 loff1 + len - 1);
2989 return ret;
2990 }
2991
2992 struct cmp_pages {
2993 int num_pages;
2994 struct page **src_pages;
2995 struct page **dst_pages;
2996 };
2997
2998 static void btrfs_cmp_data_free(struct cmp_pages *cmp)
2999 {
3000 int i;
3001 struct page *pg;
3002
3003 for (i = 0; i < cmp->num_pages; i++) {
3004 pg = cmp->src_pages[i];
3005 if (pg) {
3006 unlock_page(pg);
3007 put_page(pg);
3008 }
3009 pg = cmp->dst_pages[i];
3010 if (pg) {
3011 unlock_page(pg);
3012 put_page(pg);
3013 }
3014 }
3015 kfree(cmp->src_pages);
3016 kfree(cmp->dst_pages);
3017 }
3018
3019 static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
3020 struct inode *dst, u64 dst_loff,
3021 u64 len, struct cmp_pages *cmp)
3022 {
3023 int ret;
3024 int num_pages = PAGE_ALIGN(len) >> PAGE_SHIFT;
3025 struct page **src_pgarr, **dst_pgarr;
3026
3027 /*
3028 * We must gather up all the pages before we initiate our
3029 * extent locking. We use an array for the page pointers. Size
3030 * of the array is bounded by len, which is in turn bounded by
3031 * BTRFS_MAX_DEDUPE_LEN.
3032 */
3033 src_pgarr = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
3034 dst_pgarr = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
3035 if (!src_pgarr || !dst_pgarr) {
3036 kfree(src_pgarr);
3037 kfree(dst_pgarr);
3038 return -ENOMEM;
3039 }
3040 cmp->num_pages = num_pages;
3041 cmp->src_pages = src_pgarr;
3042 cmp->dst_pages = dst_pgarr;
3043
3044 /*
3045 * If deduping ranges in the same inode, locking rules make it mandatory
3046 * to always lock pages in ascending order to avoid deadlocks with
3047 * concurrent tasks (such as starting writeback/delalloc).
3048 */
3049 if (src == dst && dst_loff < loff) {
3050 swap(src_pgarr, dst_pgarr);
3051 swap(loff, dst_loff);
3052 }
3053
3054 ret = gather_extent_pages(src, src_pgarr, cmp->num_pages, loff);
3055 if (ret)
3056 goto out;
3057
3058 ret = gather_extent_pages(dst, dst_pgarr, cmp->num_pages, dst_loff);
3059
3060 out:
3061 if (ret)
3062 btrfs_cmp_data_free(cmp);
3063 return ret;
3064 }
3065
3066 static int btrfs_cmp_data(u64 len, struct cmp_pages *cmp)
3067 {
3068 int ret = 0;
3069 int i;
3070 struct page *src_page, *dst_page;
3071 unsigned int cmp_len = PAGE_SIZE;
3072 void *addr, *dst_addr;
3073
3074 i = 0;
3075 while (len) {
3076 if (len < PAGE_SIZE)
3077 cmp_len = len;
3078
3079 BUG_ON(i >= cmp->num_pages);
3080
3081 src_page = cmp->src_pages[i];
3082 dst_page = cmp->dst_pages[i];
3083 ASSERT(PageLocked(src_page));
3084 ASSERT(PageLocked(dst_page));
3085
3086 addr = kmap_atomic(src_page);
3087 dst_addr = kmap_atomic(dst_page);
3088
3089 flush_dcache_page(src_page);
3090 flush_dcache_page(dst_page);
3091
3092 if (memcmp(addr, dst_addr, cmp_len))
3093 ret = -EBADE;
3094
3095 kunmap_atomic(addr);
3096 kunmap_atomic(dst_addr);
3097
3098 if (ret)
3099 break;
3100
3101 len -= cmp_len;
3102 i++;
3103 }
3104
3105 return ret;
3106 }
3107
3108 static int extent_same_check_offsets(struct inode *inode, u64 off, u64 *plen,
3109 u64 olen)
3110 {
3111 u64 len = *plen;
3112 u64 bs = BTRFS_I(inode)->root->fs_info->sb->s_blocksize;
3113
3114 if (off + olen > inode->i_size || off + olen < off)
3115 return -EINVAL;
3116
3117 /* if we extend to eof, continue to block boundary */
3118 if (off + len == inode->i_size)
3119 *plen = len = ALIGN(inode->i_size, bs) - off;
3120
3121 /* Check that we are block aligned - btrfs_clone() requires this */
3122 if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs))
3123 return -EINVAL;
3124
3125 return 0;
3126 }
3127
3128 static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
3129 struct inode *dst, u64 dst_loff)
3130 {
3131 int ret;
3132 u64 len = olen;
3133 struct cmp_pages cmp;
3134 bool same_inode = (src == dst);
3135 u64 same_lock_start = 0;
3136 u64 same_lock_len = 0;
3137
3138 if (len == 0)
3139 return 0;
3140
3141 if (same_inode)
3142 inode_lock(src);
3143 else
3144 btrfs_double_inode_lock(src, dst);
3145
3146 ret = extent_same_check_offsets(src, loff, &len, olen);
3147 if (ret)
3148 goto out_unlock;
3149
3150 ret = extent_same_check_offsets(dst, dst_loff, &len, olen);
3151 if (ret)
3152 goto out_unlock;
3153
3154 if (same_inode) {
3155 /*
3156 * Single inode case wants the same checks, except we
3157 * don't want our length pushed out past i_size as
3158 * comparing that data range makes no sense.
3159 *
3160 * extent_same_check_offsets() will do this for an
3161 * unaligned length at i_size, so catch it here and
3162 * reject the request.
3163 *
3164 * This effectively means we require aligned extents
3165 * for the single-inode case, whereas the other cases
3166 * allow an unaligned length so long as it ends at
3167 * i_size.
3168 */
3169 if (len != olen) {
3170 ret = -EINVAL;
3171 goto out_unlock;
3172 }
3173
3174 /* Check for overlapping ranges */
3175 if (dst_loff + len > loff && dst_loff < loff + len) {
3176 ret = -EINVAL;
3177 goto out_unlock;
3178 }
3179
3180 same_lock_start = min_t(u64, loff, dst_loff);
3181 same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
3182 }
3183
3184 /* don't make the dst file partly checksummed */
3185 if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
3186 (BTRFS_I(dst)->flags & BTRFS_INODE_NODATASUM)) {
3187 ret = -EINVAL;
3188 goto out_unlock;
3189 }
3190
3191 again:
3192 ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, &cmp);
3193 if (ret)
3194 goto out_unlock;
3195
3196 if (same_inode)
3197 ret = lock_extent_range(src, same_lock_start, same_lock_len,
3198 false);
3199 else
3200 ret = btrfs_double_extent_lock(src, loff, dst, dst_loff, len,
3201 false);
3202 /*
3203 * If one of the inodes has dirty pages in the respective range or
3204 * ordered extents, we need to flush delalloc and wait for all ordered
3205 * extents in the range. We must unlock the pages and the ranges in the
3206 * io trees to avoid deadlocks when flushing delalloc (requires locking
3207 * pages) and when waiting for ordered extents to complete (they require
3208 * range locking).
3209 */
3210 if (ret == -EAGAIN) {
3211 /*
3212 * Ranges in the io trees already unlocked. Now unlock all
3213 * pages before waiting for all IO to complete.
3214 */
3215 btrfs_cmp_data_free(&cmp);
3216 if (same_inode) {
3217 btrfs_wait_ordered_range(src, same_lock_start,
3218 same_lock_len);
3219 } else {
3220 btrfs_wait_ordered_range(src, loff, len);
3221 btrfs_wait_ordered_range(dst, dst_loff, len);
3222 }
3223 goto again;
3224 }
3225 ASSERT(ret == 0);
3226 if (WARN_ON(ret)) {
3227 /* ranges in the io trees already unlocked */
3228 btrfs_cmp_data_free(&cmp);
3229 return ret;
3230 }
3231
3232 /* pass original length for comparison so we stay within i_size */
3233 ret = btrfs_cmp_data(olen, &cmp);
3234 if (ret == 0)
3235 ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1);
3236
3237 if (same_inode)
3238 unlock_extent(&BTRFS_I(src)->io_tree, same_lock_start,
3239 same_lock_start + same_lock_len - 1);
3240 else
3241 btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
3242
3243 btrfs_cmp_data_free(&cmp);
3244 out_unlock:
3245 if (same_inode)
3246 inode_unlock(src);
3247 else
3248 btrfs_double_inode_unlock(src, dst);
3249
3250 return ret;
3251 }
3252
3253 #define BTRFS_MAX_DEDUPE_LEN SZ_16M
3254
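/*
 * Backend of the generic FIDEDUPERANGE ioctl for btrfs; ranges longer than
 * BTRFS_MAX_DEDUPE_LEN are clamped. Illustrative userspace sketch (error
 * handling omitted):
 *
 *	struct file_dedupe_range *range;
 *
 *	range = calloc(1, sizeof(*range) + sizeof(range->info[0]));
 *	range->src_offset = 0;
 *	range->src_length = 1 << 20;
 *	range->dest_count = 1;
 *	range->info[0].dest_fd = dst_fd;
 *	range->info[0].dest_offset = 0;
 *	ret = ioctl(src_fd, FIDEDUPERANGE, range);
 *
 * On success info[0].status is FILE_DEDUPE_RANGE_SAME and
 * info[0].bytes_deduped reports how much was actually deduplicated.
 */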
3255 ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen,
3256 struct file *dst_file, u64 dst_loff)
3257 {
3258 struct inode *src = file_inode(src_file);
3259 struct inode *dst = file_inode(dst_file);
3260 u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
3261 ssize_t res;
3262
3263 if (olen > BTRFS_MAX_DEDUPE_LEN)
3264 olen = BTRFS_MAX_DEDUPE_LEN;
3265
3266 if (WARN_ON_ONCE(bs < PAGE_SIZE)) {
3267 /*
3268 * Btrfs does not support blocksize < page_size. As a
3269 * result, btrfs_cmp_data() won't correctly handle
3270 * this situation without an update.
3271 */
3272 return -EINVAL;
3273 }
3274
3275 res = btrfs_extent_same(src, loff, olen, dst, dst_loff);
3276 if (res)
3277 return res;
3278 return olen;
3279 }
3280
3281 static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
3282 struct inode *inode,
3283 u64 endoff,
3284 const u64 destoff,
3285 const u64 olen,
3286 int no_time_update)
3287 {
3288 struct btrfs_root *root = BTRFS_I(inode)->root;
3289 int ret;
3290
3291 inode_inc_iversion(inode);
3292 if (!no_time_update)
3293 inode->i_mtime = inode->i_ctime = current_time(inode);
3294 /*
3295 * We round up to the block size at eof when determining which
3296 * extents to clone above, but shouldn't round up the file size.
3297 */
3298 if (endoff > destoff + olen)
3299 endoff = destoff + olen;
3300 if (endoff > inode->i_size)
3301 btrfs_i_size_write(BTRFS_I(inode), endoff);
3302
3303 ret = btrfs_update_inode(trans, root, inode);
3304 if (ret) {
3305 btrfs_abort_transaction(trans, ret);
3306 btrfs_end_transaction(trans);
3307 goto out;
3308 }
3309 ret = btrfs_end_transaction(trans);
3310 out:
3311 return ret;
3312 }
3313
3314 static void clone_update_extent_map(struct btrfs_inode *inode,
3315 const struct btrfs_trans_handle *trans,
3316 const struct btrfs_path *path,
3317 const u64 hole_offset,
3318 const u64 hole_len)
3319 {
3320 struct extent_map_tree *em_tree = &inode->extent_tree;
3321 struct extent_map *em;
3322 int ret;
3323
3324 em = alloc_extent_map();
3325 if (!em) {
3326 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
3327 return;
3328 }
3329
3330 if (path) {
3331 struct btrfs_file_extent_item *fi;
3332
3333 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
3334 struct btrfs_file_extent_item);
3335 btrfs_extent_item_to_extent_map(inode, path, fi, false, em);
3336 em->generation = -1;
3337 if (btrfs_file_extent_type(path->nodes[0], fi) ==
3338 BTRFS_FILE_EXTENT_INLINE)
3339 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3340 &inode->runtime_flags);
3341 } else {
3342 em->start = hole_offset;
3343 em->len = hole_len;
3344 em->ram_bytes = em->len;
3345 em->orig_start = hole_offset;
3346 em->block_start = EXTENT_MAP_HOLE;
3347 em->block_len = 0;
3348 em->orig_block_len = 0;
3349 em->compress_type = BTRFS_COMPRESS_NONE;
3350 em->generation = trans->transid;
3351 }
3352
3353 while (1) {
3354 write_lock(&em_tree->lock);
3355 ret = add_extent_mapping(em_tree, em, 1);
3356 write_unlock(&em_tree->lock);
3357 if (ret != -EEXIST) {
3358 free_extent_map(em);
3359 break;
3360 }
3361 btrfs_drop_extent_cache(inode, em->start,
3362 em->start + em->len - 1, 0);
3363 }
3364
3365 if (ret)
3366 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
3367 }
3368
3369 /*
3370 * Make sure we do not end up inserting an inline extent into a file that
3371 * already has other (non-inline) extents. If a file has an inline extent it
3372 * cannot have any other extents and the (single) inline extent must start at
3373 * file offset 0. Failing to respect these rules will lead to file corruption,
3374 * resulting in EIO errors on read/write operations, hitting BUG_ON's in mm, etc.
3375 *
3376 * We can have extents that have been already written to disk or we can have
3377 * dirty ranges still in delalloc, in which case the extent maps and items are
3378 * created only when we run delalloc, and the delalloc ranges might fall outside
3379 * the range we are currently locking in the inode's io tree. So we check the
3380 * inode's i_size because of that (i_size updates are done while holding the
3381 * i_mutex, which we are holding here).
3382 * We also check to see if the inode has a size not greater than "datal" but has
3383 * extents beyond it, due to a fallocate with FALLOC_FL_KEEP_SIZE (and we are
3384 * protected against such concurrent fallocate calls by the i_mutex).
3385 *
3386 * If the file has no extents but a size greater than datal, do not allow the
3387 * copy because we would need to turn the inline extent into a non-inline one (even
3388 * with NO_HOLES enabled). If we find our destination inode only has one inline
3389 * extent, just overwrite it with the source inline extent if its size is less
3390 * than the source extent's size, or we could copy the source inline extent's
3391 * data into the destination inode's inline extent if the latter is greater than
3392 * the former.
3393 */
3394 static int clone_copy_inline_extent(struct inode *dst,
3395 struct btrfs_trans_handle *trans,
3396 struct btrfs_path *path,
3397 struct btrfs_key *new_key,
3398 const u64 drop_start,
3399 const u64 datal,
3400 const u64 skip,
3401 const u64 size,
3402 char *inline_data)
3403 {
3404 struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
3405 struct btrfs_root *root = BTRFS_I(dst)->root;
3406 const u64 aligned_end = ALIGN(new_key->offset + datal,
3407 fs_info->sectorsize);
3408 int ret;
3409 struct btrfs_key key;
3410
3411 if (new_key->offset > 0)
3412 return -EOPNOTSUPP;
3413
3414 key.objectid = btrfs_ino(BTRFS_I(dst));
3415 key.type = BTRFS_EXTENT_DATA_KEY;
3416 key.offset = 0;
3417 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3418 if (ret < 0) {
3419 return ret;
3420 } else if (ret > 0) {
3421 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
3422 ret = btrfs_next_leaf(root, path);
3423 if (ret < 0)
3424 return ret;
3425 else if (ret > 0)
3426 goto copy_inline_extent;
3427 }
3428 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3429 if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
3430 key.type == BTRFS_EXTENT_DATA_KEY) {
3431 ASSERT(key.offset > 0);
3432 return -EOPNOTSUPP;
3433 }
3434 } else if (i_size_read(dst) <= datal) {
3435 struct btrfs_file_extent_item *ei;
3436 u64 ext_len;
3437
3438 /*
3439 * If the file size is <= datal, make sure there are no other
3440 * extents following (can happen due to a fallocate call with
3441 * the flag FALLOC_FL_KEEP_SIZE).
3442 */
3443 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3444 struct btrfs_file_extent_item);
3445 /*
3446 * If it's an inline extent, it can not have other extents
3447 * following it.
3448 */
3449 if (btrfs_file_extent_type(path->nodes[0], ei) ==
3450 BTRFS_FILE_EXTENT_INLINE)
3451 goto copy_inline_extent;
3452
3453 ext_len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
3454 if (ext_len > aligned_end)
3455 return -EOPNOTSUPP;
3456
3457 ret = btrfs_next_item(root, path);
3458 if (ret < 0) {
3459 return ret;
3460 } else if (ret == 0) {
3461 btrfs_item_key_to_cpu(path->nodes[0], &key,
3462 path->slots[0]);
3463 if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
3464 key.type == BTRFS_EXTENT_DATA_KEY)
3465 return -EOPNOTSUPP;
3466 }
3467 }
3468
3469 copy_inline_extent:
3470 /*
3471 * We have no extent items, or we have an extent at offset 0 which may
3472 * or may not be inlined. All these cases are dealt the same way.
3473 */
3474 if (i_size_read(dst) > datal) {
3475 /*
3476 * If the destination inode has an inline extent...
3477 * This would require copying the data from the source inline
3478 * extent into the beginning of the destination's inline extent.
3479 * But this is really complex, both extents can be compressed
3480 * or just one of them, which would require decompressing and
3481 * re-compressing data (which could increase the new compressed
3482 * size, not allowing the compressed data to fit anymore in an
3483 * inline extent).
3484 * So just don't support this case for now (it should be rare,
3485 * we are not really saving space when cloning inline extents).
3486 */
3487 return -EOPNOTSUPP;
3488 }
3489
3490 btrfs_release_path(path);
3491 ret = btrfs_drop_extents(trans, root, dst, drop_start, aligned_end, 1);
3492 if (ret)
3493 return ret;
3494 ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
3495 if (ret)
3496 return ret;
3497
3498 if (skip) {
3499 const u32 start = btrfs_file_extent_calc_inline_size(0);
3500
3501 memmove(inline_data + start, inline_data + start + skip, datal);
3502 }
3503
3504 write_extent_buffer(path->nodes[0], inline_data,
3505 btrfs_item_ptr_offset(path->nodes[0],
3506 path->slots[0]),
3507 size);
3508 inode_add_bytes(dst, datal);
3509
3510 return 0;
3511 }
3512
3513 /**
3514 * btrfs_clone() - clone a range from one inode to another
3515 *
3516 * @src: Inode to clone from
3517 * @inode: Inode to clone to
3518 * @off: Offset within source to start clone from
3519 * @olen: Original length, passed by user, of range to clone
3520 * @olen_aligned: Block-aligned value of olen
3521 * @destoff: Offset within @inode to start clone
3522 * @no_time_update: Whether to update mtime/ctime on the target inode
3523 */
3524 static int btrfs_clone(struct inode *src, struct inode *inode,
3525 const u64 off, const u64 olen, const u64 olen_aligned,
3526 const u64 destoff, int no_time_update)
3527 {
3528 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3529 struct btrfs_root *root = BTRFS_I(inode)->root;
3530 struct btrfs_path *path = NULL;
3531 struct extent_buffer *leaf;
3532 struct btrfs_trans_handle *trans;
3533 char *buf = NULL;
3534 struct btrfs_key key;
3535 u32 nritems;
3536 int slot;
3537 int ret;
3538 const u64 len = olen_aligned;
3539 u64 last_dest_end = destoff;
3540
3541 ret = -ENOMEM;
3542 buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
3543 if (!buf)
3544 return ret;
3545
3546 path = btrfs_alloc_path();
3547 if (!path) {
3548 kvfree(buf);
3549 return ret;
3550 }
3551
3552 path->reada = READA_FORWARD;
3553 /* clone data */
3554 key.objectid = btrfs_ino(BTRFS_I(src));
3555 key.type = BTRFS_EXTENT_DATA_KEY;
3556 key.offset = off;
3557
3558 while (1) {
3559 u64 next_key_min_offset = key.offset + 1;
3560
3561 /*
3562 * note the key will change type as we walk through the
3563 * tree.
3564 */
3565 path->leave_spinning = 1;
3566 ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
3567 0, 0);
3568 if (ret < 0)
3569 goto out;
3570 /*
3571 * On the first search, if no extent item that starts at offset off was
3572 * found but the previous item is an extent item, it's possible
3573 * it might overlap our target range, therefore process it.
3574 */
3575 if (key.offset == off && ret > 0 && path->slots[0] > 0) {
3576 btrfs_item_key_to_cpu(path->nodes[0], &key,
3577 path->slots[0] - 1);
3578 if (key.type == BTRFS_EXTENT_DATA_KEY)
3579 path->slots[0]--;
3580 }
3581
3582 nritems = btrfs_header_nritems(path->nodes[0]);
3583 process_slot:
3584 if (path->slots[0] >= nritems) {
3585 ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
3586 if (ret < 0)
3587 goto out;
3588 if (ret > 0)
3589 break;
3590 nritems = btrfs_header_nritems(path->nodes[0]);
3591 }
3592 leaf = path->nodes[0];
3593 slot = path->slots[0];
3594
3595 btrfs_item_key_to_cpu(leaf, &key, slot);
3596 if (key.type > BTRFS_EXTENT_DATA_KEY ||
3597 key.objectid != btrfs_ino(BTRFS_I(src)))
3598 break;
3599
3600 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3601 struct btrfs_file_extent_item *extent;
3602 int type;
3603 u32 size;
3604 struct btrfs_key new_key;
3605 u64 disko = 0, diskl = 0;
3606 u64 datao = 0, datal = 0;
3607 u8 comp;
3608 u64 drop_start;
3609
3610 extent = btrfs_item_ptr(leaf, slot,
3611 struct btrfs_file_extent_item);
3612 comp = btrfs_file_extent_compression(leaf, extent);
3613 type = btrfs_file_extent_type(leaf, extent);
3614 if (type == BTRFS_FILE_EXTENT_REG ||
3615 type == BTRFS_FILE_EXTENT_PREALLOC) {
3616 disko = btrfs_file_extent_disk_bytenr(leaf,
3617 extent);
3618 diskl = btrfs_file_extent_disk_num_bytes(leaf,
3619 extent);
3620 datao = btrfs_file_extent_offset(leaf, extent);
3621 datal = btrfs_file_extent_num_bytes(leaf,
3622 extent);
3623 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
3624 /* take upper bound, may be compressed */
3625 datal = btrfs_file_extent_ram_bytes(leaf,
3626 extent);
3627 }
3628
3629 /*
3630 * The first search might have left us at an extent
3631 * item that ends before our target range's start, which can
3632 * happen if we have holes and NO_HOLES feature enabled.
3633 */
3634 if (key.offset + datal <= off) {
3635 path->slots[0]++;
3636 goto process_slot;
3637 } else if (key.offset >= off + len) {
3638 break;
3639 }
3640 next_key_min_offset = key.offset + datal;
3641 size = btrfs_item_size_nr(leaf, slot);
3642 read_extent_buffer(leaf, buf,
3643 btrfs_item_ptr_offset(leaf, slot),
3644 size);
3645
3646 btrfs_release_path(path);
3647 path->leave_spinning = 0;
3648
3649 memcpy(&new_key, &key, sizeof(new_key));
3650 new_key.objectid = btrfs_ino(BTRFS_I(inode));
3651 if (off <= key.offset)
3652 new_key.offset = key.offset + destoff - off;
3653 else
3654 new_key.offset = destoff;
3655
3656 /*
3657 * Deal with a hole that doesn't have an extent item
3658 * that represents it (NO_HOLES feature enabled).
3659 * This hole is either in the middle of the cloning
3660 * range or at the beginning (fully overlaps it or
3661 * partially overlaps it).
3662 */
3663 if (new_key.offset != last_dest_end)
3664 drop_start = last_dest_end;
3665 else
3666 drop_start = new_key.offset;
3667
3668 /*
3669 * 1 - adjusting old extent (we may have to split it)
3670 * 1 - add new extent
3671 * 1 - inode update
3672 */
3673 trans = btrfs_start_transaction(root, 3);
3674 if (IS_ERR(trans)) {
3675 ret = PTR_ERR(trans);
3676 goto out;
3677 }
3678
3679 if (type == BTRFS_FILE_EXTENT_REG ||
3680 type == BTRFS_FILE_EXTENT_PREALLOC) {
3681 /*
3682 * a | --- range to clone ---| b
3683 * | ------------- extent ------------- |
3684 */
3685
3686 /* subtract range b */
3687 if (key.offset + datal > off + len)
3688 datal = off + len - key.offset;
3689
3690 /* subtract range a */
3691 if (off > key.offset) {
3692 datao += off - key.offset;
3693 datal -= off - key.offset;
3694 }
3695
3696 ret = btrfs_drop_extents(trans, root, inode,
3697 drop_start,
3698 new_key.offset + datal,
3699 1);
3700 if (ret) {
3701 if (ret != -EOPNOTSUPP)
3702 btrfs_abort_transaction(trans,
3703 ret);
3704 btrfs_end_transaction(trans);
3705 goto out;
3706 }
3707
3708 ret = btrfs_insert_empty_item(trans, root, path,
3709 &new_key, size);
3710 if (ret) {
3711 btrfs_abort_transaction(trans, ret);
3712 btrfs_end_transaction(trans);
3713 goto out;
3714 }
3715
3716 leaf = path->nodes[0];
3717 slot = path->slots[0];
3718 write_extent_buffer(leaf, buf,
3719 btrfs_item_ptr_offset(leaf, slot),
3720 size);
3721
3722 extent = btrfs_item_ptr(leaf, slot,
3723 struct btrfs_file_extent_item);
3724
3725 /* disko == 0 means it's a hole */
3726 if (!disko)
3727 datao = 0;
3728
3729 btrfs_set_file_extent_offset(leaf, extent,
3730 datao);
3731 btrfs_set_file_extent_num_bytes(leaf, extent,
3732 datal);
3733
3734 if (disko) {
3735 inode_add_bytes(inode, datal);
3736 ret = btrfs_inc_extent_ref(trans,
3737 fs_info,
3738 disko, diskl, 0,
3739 root->root_key.objectid,
3740 btrfs_ino(BTRFS_I(inode)),
3741 new_key.offset - datao);
3742 if (ret) {
3743 btrfs_abort_transaction(trans,
3744 ret);
3745 btrfs_end_transaction(trans);
3746 goto out;
3747
3748 }
3749 }
3750 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
3751 u64 skip = 0;
3752 u64 trim = 0;
3753
3754 if (off > key.offset) {
3755 skip = off - key.offset;
3756 new_key.offset += skip;
3757 }
3758
3759 if (key.offset + datal > off + len)
3760 trim = key.offset + datal - (off + len);
3761
3762 if (comp && (skip || trim)) {
3763 ret = -EINVAL;
3764 btrfs_end_transaction(trans);
3765 goto out;
3766 }
3767 size -= skip + trim;
3768 datal -= skip + trim;
3769
3770 ret = clone_copy_inline_extent(inode,
3771 trans, path,
3772 &new_key,
3773 drop_start,
3774 datal,
3775 skip, size, buf);
3776 if (ret) {
3777 if (ret != -EOPNOTSUPP)
3778 btrfs_abort_transaction(trans,
3779 ret);
3780 btrfs_end_transaction(trans);
3781 goto out;
3782 }
3783 leaf = path->nodes[0];
3784 slot = path->slots[0];
3785 }
3786
3787 /* If we have an implicit hole (NO_HOLES feature). */
3788 if (drop_start < new_key.offset)
3789 clone_update_extent_map(BTRFS_I(inode), trans,
3790 NULL, drop_start,
3791 new_key.offset - drop_start);
3792
3793 clone_update_extent_map(BTRFS_I(inode), trans,
3794 path, 0, 0);
3795
3796 btrfs_mark_buffer_dirty(leaf);
3797 btrfs_release_path(path);
3798
3799 last_dest_end = ALIGN(new_key.offset + datal,
3800 fs_info->sectorsize);
3801 ret = clone_finish_inode_update(trans, inode,
3802 last_dest_end,
3803 destoff, olen,
3804 no_time_update);
3805 if (ret)
3806 goto out;
3807 if (new_key.offset + datal >= destoff + len)
3808 break;
3809 }
3810 btrfs_release_path(path);
3811 key.offset = next_key_min_offset;
3812
3813 if (fatal_signal_pending(current)) {
3814 ret = -EINTR;
3815 goto out;
3816 }
3817 }
3818 ret = 0;
3819
3820 if (last_dest_end < destoff + len) {
3821 /*
3822 * We have an implicit hole (NO_HOLES feature is enabled) that
3823 * fully or partially overlaps our cloning range at its end.
3824 */
3825 btrfs_release_path(path);
3826
3827 /*
3828 * 1 - remove extent(s)
3829 * 1 - inode update
3830 */
3831 trans = btrfs_start_transaction(root, 2);
3832 if (IS_ERR(trans)) {
3833 ret = PTR_ERR(trans);
3834 goto out;
3835 }
3836 ret = btrfs_drop_extents(trans, root, inode,
3837 last_dest_end, destoff + len, 1);
3838 if (ret) {
3839 if (ret != -EOPNOTSUPP)
3840 btrfs_abort_transaction(trans, ret);
3841 btrfs_end_transaction(trans);
3842 goto out;
3843 }
3844 clone_update_extent_map(BTRFS_I(inode), trans, NULL,
3845 last_dest_end,
3846 destoff + len - last_dest_end);
3847 ret = clone_finish_inode_update(trans, inode, destoff + len,
3848 destoff, olen, no_time_update);
3849 }
3850
3851 out:
3852 btrfs_free_path(path);
3853 kvfree(buf);
3854 return ret;
3855 }
3856
3857 static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
3858 u64 off, u64 olen, u64 destoff)
3859 {
3860 struct inode *inode = file_inode(file);
3861 struct inode *src = file_inode(file_src);
3862 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3863 struct btrfs_root *root = BTRFS_I(inode)->root;
3864 int ret;
3865 u64 len = olen;
3866 u64 bs = fs_info->sb->s_blocksize;
3867 int same_inode = src == inode;
3868
3869 /*
3870 * TODO:
3871 * - split compressed inline extents. annoying: we need to
3872 * decompress into destination's address_space (the file offset
3873 * may change, so source mapping won't do), then recompress (or
3874 * otherwise reinsert) a subrange.
3875 *
3876 * - split destination inode's inline extents. The inline extents can
3877 * be either compressed or non-compressed.
3878 */
3879
3880 if (btrfs_root_readonly(root))
3881 return -EROFS;
3882
3883 if (file_src->f_path.mnt != file->f_path.mnt ||
3884 src->i_sb != inode->i_sb)
3885 return -EXDEV;
3886
3887 /* don't make the dst file partly checksummed */
3888 if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
3889 (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
3890 return -EINVAL;
3891
3892 if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
3893 return -EISDIR;
3894
3895 if (!same_inode) {
3896 btrfs_double_inode_lock(src, inode);
3897 } else {
3898 inode_lock(src);
3899 }
3900
3901 /* determine range to clone */
3902 ret = -EINVAL;
3903 if (off + len > src->i_size || off + len < off)
3904 goto out_unlock;
3905 if (len == 0)
3906 olen = len = src->i_size - off;
3907 /* if we extend to eof, continue to block boundary */
3908 if (off + len == src->i_size)
3909 len = ALIGN(src->i_size, bs) - off;
3910
3911 if (len == 0) {
3912 ret = 0;
3913 goto out_unlock;
3914 }
3915
3916 /* verify the end result is block aligned */
3917 if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
3918 !IS_ALIGNED(destoff, bs))
3919 goto out_unlock;
3920
3921 /* reject overlapping ranges within the same file */
3922 if (same_inode) {
3923 if (destoff + len > off && destoff < off + len)
3924 goto out_unlock;
3925 }
3926
3927 if (destoff > inode->i_size) {
3928 ret = btrfs_cont_expand(inode, inode->i_size, destoff);
3929 if (ret)
3930 goto out_unlock;
3931 }
3932
3933 /*
3934 * Lock the target range too. Right after we replace the file extent
3935 * items in the fs tree (which now point to the cloned data), we might
3936 * have a worker replace them with extent items relative to a write
3937 * operation that was issued before this clone operation (see
3938 * inode.c:btrfs_finish_ordered_io).
3939 */
3940 if (same_inode) {
3941 u64 lock_start = min_t(u64, off, destoff);
3942 u64 lock_len = max_t(u64, off, destoff) + len - lock_start;
3943
3944 ret = lock_extent_range(src, lock_start, lock_len, true);
3945 } else {
3946 ret = btrfs_double_extent_lock(src, off, inode, destoff, len,
3947 true);
3948 }
3949 ASSERT(ret == 0);
3950 if (WARN_ON(ret)) {
3951 /* ranges in the io trees already unlocked */
3952 goto out_unlock;
3953 }
3954
3955 ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
3956
3957 if (same_inode) {
3958 u64 lock_start = min_t(u64, off, destoff);
3959 u64 lock_end = max_t(u64, off, destoff) + len - 1;
3960
3961 unlock_extent(&BTRFS_I(src)->io_tree, lock_start, lock_end);
3962 } else {
3963 btrfs_double_extent_unlock(src, off, inode, destoff, len);
3964 }
3965 /*
3966 * Truncate page cache pages so that future reads will see the cloned
3967 * data immediately and not the previous data.
3968 */
3969 truncate_inode_pages_range(&inode->i_data,
3970 round_down(destoff, PAGE_SIZE),
3971 round_up(destoff + len, PAGE_SIZE) - 1);
3972 out_unlock:
3973 if (!same_inode)
3974 btrfs_double_inode_unlock(src, inode);
3975 else
3976 inode_unlock(src);
3977 return ret;
3978 }
3979
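/*
 * Entry point for the generic FICLONE/FICLONERANGE ioctls (and the older
 * BTRFS_IOC_CLONE*), which reflink a range of one file into another.
 * Illustrative userspace sketch (error handling omitted):
 *
 *	struct file_clone_range fcr = {
 *		.src_fd = src_fd,
 *		.src_offset = 0,
 *		.src_length = 0,	// 0 means: to the end of the source
 *		.dest_offset = 0,
 *	};
 *	ret = ioctl(dst_fd, FICLONERANGE, &fcr);
 */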
3980 int btrfs_clone_file_range(struct file *src_file, loff_t off,
3981 struct file *dst_file, loff_t destoff, u64 len)
3982 {
3983 return btrfs_clone_files(dst_file, src_file, off, len, destoff);
3984 }
3985
3986 /*
3987 * there are many ways the trans_start and trans_end ioctls can lead
3988 * to deadlocks. They should only be used by applications that
3989 * basically own the machine, and have a very in-depth understanding
3990 * of all the possible deadlocks and enospc problems.
3991 */
3992 static long btrfs_ioctl_trans_start(struct file *file)
3993 {
3994 struct inode *inode = file_inode(file);
3995 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3996 struct btrfs_root *root = BTRFS_I(inode)->root;
3997 struct btrfs_trans_handle *trans;
3998 int ret;
3999
4000 ret = -EPERM;
4001 if (!capable(CAP_SYS_ADMIN))
4002 goto out;
4003
4004 ret = -EINPROGRESS;
4005 if (file->private_data)
4006 goto out;
4007
4008 ret = -EROFS;
4009 if (btrfs_root_readonly(root))
4010 goto out;
4011
4012 ret = mnt_want_write_file(file);
4013 if (ret)
4014 goto out;
4015
4016 atomic_inc(&fs_info->open_ioctl_trans);
4017
4018 ret = -ENOMEM;
4019 trans = btrfs_start_ioctl_transaction(root);
4020 if (IS_ERR(trans))
4021 goto out_drop;
4022
4023 file->private_data = trans;
4024 return 0;
4025
4026 out_drop:
4027 atomic_dec(&fs_info->open_ioctl_trans);
4028 mnt_drop_write_file(file);
4029 out:
4030 return ret;
4031 }
4032
4033 static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
4034 {
4035 struct inode *inode = file_inode(file);
4036 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4037 struct btrfs_root *root = BTRFS_I(inode)->root;
4038 struct btrfs_root *new_root;
4039 struct btrfs_dir_item *di;
4040 struct btrfs_trans_handle *trans;
4041 struct btrfs_path *path;
4042 struct btrfs_key location;
4043 struct btrfs_disk_key disk_key;
4044 u64 objectid = 0;
4045 u64 dir_id;
4046 int ret;
4047
4048 if (!capable(CAP_SYS_ADMIN))
4049 return -EPERM;
4050
4051 ret = mnt_want_write_file(file);
4052 if (ret)
4053 return ret;
4054
4055 if (copy_from_user(&objectid, argp, sizeof(objectid))) {
4056 ret = -EFAULT;
4057 goto out;
4058 }
4059
4060 if (!objectid)
4061 objectid = BTRFS_FS_TREE_OBJECTID;
4062
4063 location.objectid = objectid;
4064 location.type = BTRFS_ROOT_ITEM_KEY;
4065 location.offset = (u64)-1;
4066
4067 new_root = btrfs_read_fs_root_no_name(fs_info, &location);
4068 if (IS_ERR(new_root)) {
4069 ret = PTR_ERR(new_root);
4070 goto out;
4071 }
4072
4073 path = btrfs_alloc_path();
4074 if (!path) {
4075 ret = -ENOMEM;
4076 goto out;
4077 }
4078 path->leave_spinning = 1;
4079
4080 trans = btrfs_start_transaction(root, 1);
4081 if (IS_ERR(trans)) {
4082 btrfs_free_path(path);
4083 ret = PTR_ERR(trans);
4084 goto out;
4085 }
4086
4087 dir_id = btrfs_super_root_dir(fs_info->super_copy);
4088 di = btrfs_lookup_dir_item(trans, fs_info->tree_root, path,
4089 dir_id, "default", 7, 1);
4090 if (IS_ERR_OR_NULL(di)) {
4091 btrfs_free_path(path);
4092 btrfs_end_transaction(trans);
4093 btrfs_err(fs_info,
4094 "Umm, you don't have the default diritem, this isn't going to work");
4095 ret = -ENOENT;
4096 goto out;
4097 }
4098
4099 btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
4100 btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
4101 btrfs_mark_buffer_dirty(path->nodes[0]);
4102 btrfs_free_path(path);
4103
4104 btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL);
4105 btrfs_end_transaction(trans);
4106 out:
4107 mnt_drop_write_file(file);
4108 return ret;
4109 }
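
/*
 * Illustrative userspace sketch (not part of the original file; the id 257
 * is just an example value): the argument is the root objectid of the
 * subvolume to make the default, and 0 falls back to the top-level FS tree:
 *
 *	__u64 objectid = 257;
 *	ioctl(fd, BTRFS_IOC_DEFAULT_SUBVOL, &objectid);
 */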
4110
4111 void btrfs_get_block_group_info(struct list_head *groups_list,
4112 struct btrfs_ioctl_space_info *space)
4113 {
4114 struct btrfs_block_group_cache *block_group;
4115
4116 space->total_bytes = 0;
4117 space->used_bytes = 0;
4118 space->flags = 0;
4119 list_for_each_entry(block_group, groups_list, list) {
4120 space->flags = block_group->flags;
4121 space->total_bytes += block_group->key.offset;
4122 space->used_bytes +=
4123 btrfs_block_group_used(&block_group->item);
4124 }
4125 }
4126
4127 static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
4128 void __user *arg)
4129 {
4130 struct btrfs_ioctl_space_args space_args;
4131 struct btrfs_ioctl_space_info space;
4132 struct btrfs_ioctl_space_info *dest;
4133 struct btrfs_ioctl_space_info *dest_orig;
4134 struct btrfs_ioctl_space_info __user *user_dest;
4135 struct btrfs_space_info *info;
4136 u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
4137 BTRFS_BLOCK_GROUP_SYSTEM,
4138 BTRFS_BLOCK_GROUP_METADATA,
4139 BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
4140 int num_types = 4;
4141 int alloc_size;
4142 int ret = 0;
4143 u64 slot_count = 0;
4144 int i, c;
4145
4146 if (copy_from_user(&space_args,
4147 (struct btrfs_ioctl_space_args __user *)arg,
4148 sizeof(space_args)))
4149 return -EFAULT;
4150
4151 for (i = 0; i < num_types; i++) {
4152 struct btrfs_space_info *tmp;
4153
4154 info = NULL;
4155 rcu_read_lock();
4156 list_for_each_entry_rcu(tmp, &fs_info->space_info,
4157 list) {
4158 if (tmp->flags == types[i]) {
4159 info = tmp;
4160 break;
4161 }
4162 }
4163 rcu_read_unlock();
4164
4165 if (!info)
4166 continue;
4167
4168 down_read(&info->groups_sem);
4169 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
4170 if (!list_empty(&info->block_groups[c]))
4171 slot_count++;
4172 }
4173 up_read(&info->groups_sem);
4174 }
4175
4176 /*
4177 * Global block reserve, exported as a space_info
4178 */
4179 slot_count++;
4180
4181 /* space_slots == 0 means they are asking for a count */
4182 if (space_args.space_slots == 0) {
4183 space_args.total_spaces = slot_count;
4184 goto out;
4185 }
4186
4187 slot_count = min_t(u64, space_args.space_slots, slot_count);
4188
4189 alloc_size = sizeof(*dest) * slot_count;
4190
4191 /* We generally have at most 6 or so space infos, one for each RAID
4192 * level, so a whole page should be more than enough for everyone.
4193 */
4194 if (alloc_size > PAGE_SIZE)
4195 return -ENOMEM;
4196
4197 space_args.total_spaces = 0;
4198 dest = kmalloc(alloc_size, GFP_KERNEL);
4199 if (!dest)
4200 return -ENOMEM;
4201 dest_orig = dest;
4202
4203 /* now we have a buffer to copy into */
4204 for (i = 0; i < num_types; i++) {
4205 struct btrfs_space_info *tmp;
4206
4207 if (!slot_count)
4208 break;
4209
4210 info = NULL;
4211 rcu_read_lock();
4212 list_for_each_entry_rcu(tmp, &fs_info->space_info,
4213 list) {
4214 if (tmp->flags == types[i]) {
4215 info = tmp;
4216 break;
4217 }
4218 }
4219 rcu_read_unlock();
4220
4221 if (!info)
4222 continue;
4223 down_read(&info->groups_sem);
4224 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
4225 if (!list_empty(&info->block_groups[c])) {
4226 btrfs_get_block_group_info(
4227 &info->block_groups[c], &space);
4228 memcpy(dest, &space, sizeof(space));
4229 dest++;
4230 space_args.total_spaces++;
4231 slot_count--;
4232 }
4233 if (!slot_count)
4234 break;
4235 }
4236 up_read(&info->groups_sem);
4237 }
4238
4239 /*
4240 * Add global block reserve
4241 */
4242 if (slot_count) {
4243 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4244
4245 spin_lock(&block_rsv->lock);
4246 space.total_bytes = block_rsv->size;
4247 space.used_bytes = block_rsv->size - block_rsv->reserved;
4248 spin_unlock(&block_rsv->lock);
4249 space.flags = BTRFS_SPACE_INFO_GLOBAL_RSV;
4250 memcpy(dest, &space, sizeof(space));
4251 space_args.total_spaces++;
4252 }
4253
4254 user_dest = (struct btrfs_ioctl_space_info __user *)
4255 (arg + sizeof(struct btrfs_ioctl_space_args));
4256
4257 if (copy_to_user(user_dest, dest_orig, alloc_size))
4258 ret = -EFAULT;
4259
4260 kfree(dest_orig);
4261 out:
4262 if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args)))
4263 ret = -EFAULT;
4264
4265 return ret;
4266 }
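
/*
 * Illustrative userspace sketch (not part of the original file): the
 * two-call protocol described above - probe with space_slots == 0 to learn
 * how many slots exist, then call again with a buffer large enough for that
 * many btrfs_ioctl_space_info entries appended to the args struct:
 *
 *	struct btrfs_ioctl_space_args probe = { .space_slots = 0 };
 *	struct btrfs_ioctl_space_args *args;
 *
 *	ioctl(fd, BTRFS_IOC_SPACE_INFO, &probe);
 *	args = calloc(1, sizeof(*args) + probe.total_spaces *
 *		      sizeof(struct btrfs_ioctl_space_info));
 *	args->space_slots = probe.total_spaces;
 *	ioctl(fd, BTRFS_IOC_SPACE_INFO, args);
 *
 * On return args->spaces[0 .. args->total_spaces - 1] hold the flags,
 * total_bytes and used_bytes for each reported space type.
 */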
4267
4268 /*
4269 * There are many ways the trans_start and trans_end ioctls can lead
4270 * to deadlocks. They should only be used by applications that
4271 * basically own the machine and have a very in-depth understanding
4272 * of all the possible deadlocks and ENOSPC problems.
4273 */
4274 long btrfs_ioctl_trans_end(struct file *file)
4275 {
4276 struct inode *inode = file_inode(file);
4277 struct btrfs_root *root = BTRFS_I(inode)->root;
4278 struct btrfs_trans_handle *trans;
4279
4280 trans = file->private_data;
4281 if (!trans)
4282 return -EINVAL;
4283 file->private_data = NULL;
4284
4285 btrfs_end_transaction(trans);
4286
4287 atomic_dec(&root->fs_info->open_ioctl_trans);
4288
4289 mnt_drop_write_file(file);
4290 return 0;
4291 }
4292
4293 static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
4294 void __user *argp)
4295 {
4296 struct btrfs_trans_handle *trans;
4297 u64 transid;
4298 int ret;
4299
4300 trans = btrfs_attach_transaction_barrier(root);
4301 if (IS_ERR(trans)) {
4302 if (PTR_ERR(trans) != -ENOENT)
4303 return PTR_ERR(trans);
4304
4305 /* No running transaction, don't bother */
4306 transid = root->fs_info->last_trans_committed;
4307 goto out;
4308 }
4309 transid = trans->transid;
4310 ret = btrfs_commit_transaction_async(trans, 0);
4311 if (ret) {
4312 btrfs_end_transaction(trans);
4313 return ret;
4314 }
4315 out:
4316 if (argp)
4317 if (copy_to_user(argp, &transid, sizeof(transid)))
4318 return -EFAULT;
4319 return 0;
4320 }
4321
4322 static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info,
4323 void __user *argp)
4324 {
4325 u64 transid;
4326
4327 if (argp) {
4328 if (copy_from_user(&transid, argp, sizeof(transid)))
4329 return -EFAULT;
4330 } else {
4331 transid = 0; /* current trans */
4332 }
4333 return btrfs_wait_for_commit(fs_info, transid);
4334 }
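
/*
 * Illustrative userspace pairing (not part of the original file):
 * START_SYNC kicks off an async commit and reports its transid; WAIT_SYNC
 * then blocks until that transid (or the current transaction, when the
 * argument is NULL/0) has fully committed:
 *
 *	__u64 transid;
 *
 *	ioctl(fd, BTRFS_IOC_START_SYNC, &transid);
 *	... other work ...
 *	ioctl(fd, BTRFS_IOC_WAIT_SYNC, &transid);
 */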
4335
4336 static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
4337 {
4338 struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
4339 struct btrfs_ioctl_scrub_args *sa;
4340 int ret;
4341
4342 if (!capable(CAP_SYS_ADMIN))
4343 return -EPERM;
4344
4345 sa = memdup_user(arg, sizeof(*sa));
4346 if (IS_ERR(sa))
4347 return PTR_ERR(sa);
4348
4349 if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
4350 ret = mnt_want_write_file(file);
4351 if (ret)
4352 goto out;
4353 }
4354
4355 ret = btrfs_scrub_dev(fs_info, sa->devid, sa->start, sa->end,
4356 &sa->progress, sa->flags & BTRFS_SCRUB_READONLY,
4357 0);
4358
4359 if (copy_to_user(arg, sa, sizeof(*sa)))
4360 ret = -EFAULT;
4361
4362 if (!(sa->flags & BTRFS_SCRUB_READONLY))
4363 mnt_drop_write_file(file);
4364 out:
4365 kfree(sa);
4366 return ret;
4367 }
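
/*
 * Illustrative userspace sketch (not part of the original file; devid 1 is
 * just an example): a read-only scrub over the whole byte range of one
 * device, with progress copied back into the same struct on return:
 *
 *	struct btrfs_ioctl_scrub_args sa = {
 *		.devid = 1,
 *		.start = 0,
 *		.end   = (__u64)-1,
 *		.flags = BTRFS_SCRUB_READONLY,
 *	};
 *
 *	ioctl(fd, BTRFS_IOC_SCRUB, &sa);
 */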
4368
4369 static long btrfs_ioctl_scrub_cancel(struct btrfs_fs_info *fs_info)
4370 {
4371 if (!capable(CAP_SYS_ADMIN))
4372 return -EPERM;
4373
4374 return btrfs_scrub_cancel(fs_info);
4375 }
4376
4377 static long btrfs_ioctl_scrub_progress(struct btrfs_fs_info *fs_info,
4378 void __user *arg)
4379 {
4380 struct btrfs_ioctl_scrub_args *sa;
4381 int ret;
4382
4383 if (!capable(CAP_SYS_ADMIN))
4384 return -EPERM;
4385
4386 sa = memdup_user(arg, sizeof(*sa));
4387 if (IS_ERR(sa))
4388 return PTR_ERR(sa);
4389
4390 ret = btrfs_scrub_progress(fs_info, sa->devid, &sa->progress);
4391
4392 if (copy_to_user(arg, sa, sizeof(*sa)))
4393 ret = -EFAULT;
4394
4395 kfree(sa);
4396 return ret;
4397 }
4398
4399 static long btrfs_ioctl_get_dev_stats(struct btrfs_fs_info *fs_info,
4400 void __user *arg)
4401 {
4402 struct btrfs_ioctl_get_dev_stats *sa;
4403 int ret;
4404
4405 sa = memdup_user(arg, sizeof(*sa));
4406 if (IS_ERR(sa))
4407 return PTR_ERR(sa);
4408
4409 if ((sa->flags & BTRFS_DEV_STATS_RESET) && !capable(CAP_SYS_ADMIN)) {
4410 kfree(sa);
4411 return -EPERM;
4412 }
4413
4414 ret = btrfs_get_dev_stats(fs_info, sa);
4415
4416 if (copy_to_user(arg, sa, sizeof(*sa)))
4417 ret = -EFAULT;
4418
4419 kfree(sa);
4420 return ret;
4421 }
4422
4423 static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info,
4424 void __user *arg)
4425 {
4426 struct btrfs_ioctl_dev_replace_args *p;
4427 int ret;
4428
4429 if (!capable(CAP_SYS_ADMIN))
4430 return -EPERM;
4431
4432 p = memdup_user(arg, sizeof(*p));
4433 if (IS_ERR(p))
4434 return PTR_ERR(p);
4435
4436 switch (p->cmd) {
4437 case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
4438 if (fs_info->sb->s_flags & MS_RDONLY) {
4439 ret = -EROFS;
4440 goto out;
4441 }
4442 if (atomic_xchg(
4443 &fs_info->mutually_exclusive_operation_running, 1)) {
4444 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
4445 } else {
4446 ret = btrfs_dev_replace_by_ioctl(fs_info, p);
4447 atomic_set(
4448 &fs_info->mutually_exclusive_operation_running, 0);
4449 }
4450 break;
4451 case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS:
4452 btrfs_dev_replace_status(fs_info, p);
4453 ret = 0;
4454 break;
4455 case BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL:
4456 ret = btrfs_dev_replace_cancel(fs_info, p);
4457 break;
4458 default:
4459 ret = -EINVAL;
4460 break;
4461 }
4462
4463 if (copy_to_user(arg, p, sizeof(*p)))
4464 ret = -EFAULT;
4465 out:
4466 kfree(p);
4467 return ret;
4468 }
4469
4470 static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
4471 {
4472 int ret = 0;
4473 int i;
4474 u64 rel_ptr;
4475 int size;
4476 struct btrfs_ioctl_ino_path_args *ipa = NULL;
4477 struct inode_fs_paths *ipath = NULL;
4478 struct btrfs_path *path;
4479
4480 if (!capable(CAP_DAC_READ_SEARCH))
4481 return -EPERM;
4482
4483 path = btrfs_alloc_path();
4484 if (!path) {
4485 ret = -ENOMEM;
4486 goto out;
4487 }
4488
4489 ipa = memdup_user(arg, sizeof(*ipa));
4490 if (IS_ERR(ipa)) {
4491 ret = PTR_ERR(ipa);
4492 ipa = NULL;
4493 goto out;
4494 }
4495
4496 size = min_t(u32, ipa->size, 4096);
4497 ipath = init_ipath(size, root, path);
4498 if (IS_ERR(ipath)) {
4499 ret = PTR_ERR(ipath);
4500 ipath = NULL;
4501 goto out;
4502 }
4503
4504 ret = paths_from_inode(ipa->inum, ipath);
4505 if (ret < 0)
4506 goto out;
4507
4508 for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
4509 rel_ptr = ipath->fspath->val[i] -
4510 (u64)(unsigned long)ipath->fspath->val;
4511 ipath->fspath->val[i] = rel_ptr;
4512 }
4513
4514 ret = copy_to_user((void *)(unsigned long)ipa->fspath,
4515 (void *)(unsigned long)ipath->fspath, size);
4516 if (ret) {
4517 ret = -EFAULT;
4518 goto out;
4519 }
4520
4521 out:
4522 btrfs_free_path(path);
4523 free_ipath(ipath);
4524 kfree(ipa);
4525
4526 return ret;
4527 }
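
/*
 * Illustrative userspace view (not part of the original file): the loop
 * above rewrites each fspath->val[i] from a kernel pointer into an offset
 * relative to the start of the val array, so a caller that provided the
 * buffer in ipa->fspath can walk the returned paths roughly like this:
 *
 *	struct btrfs_data_container *fspath = buf;  (buf is the ipa->fspath buffer)
 *	int i;
 *
 *	for (i = 0; i < fspath->elem_cnt; i++)
 *		printf("%s\n", (char *)fspath->val + fspath->val[i]);
 */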
4528
4529 static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
4530 {
4531 struct btrfs_data_container *inodes = ctx;
4532 const size_t c = 3 * sizeof(u64);
4533
4534 if (inodes->bytes_left >= c) {
4535 inodes->bytes_left -= c;
4536 inodes->val[inodes->elem_cnt] = inum;
4537 inodes->val[inodes->elem_cnt + 1] = offset;
4538 inodes->val[inodes->elem_cnt + 2] = root;
4539 inodes->elem_cnt += 3;
4540 } else {
4541 inodes->bytes_missing += c - inodes->bytes_left;
4542 inodes->bytes_left = 0;
4543 inodes->elem_missed += 3;
4544 }
4545
4546 return 0;
4547 }
4548
4549 static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
4550 void __user *arg)
4551 {
4552 int ret = 0;
4553 int size;
4554 struct btrfs_ioctl_logical_ino_args *loi;
4555 struct btrfs_data_container *inodes = NULL;
4556 struct btrfs_path *path = NULL;
4557
4558 if (!capable(CAP_SYS_ADMIN))
4559 return -EPERM;
4560
4561 loi = memdup_user(arg, sizeof(*loi));
4562 if (IS_ERR(loi))
4563 return PTR_ERR(loi);
4564
4565 path = btrfs_alloc_path();
4566 if (!path) {
4567 ret = -ENOMEM;
4568 goto out;
4569 }
4570
4571 size = min_t(u32, loi->size, SZ_64K);
4572 inodes = init_data_container(size);
4573 if (IS_ERR(inodes)) {
4574 ret = PTR_ERR(inodes);
4575 inodes = NULL;
4576 goto out;
4577 }
4578
4579 ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
4580 build_ino_list, inodes);
4581 if (ret == -EINVAL)
4582 ret = -ENOENT;
4583 if (ret < 0)
4584 goto out;
4585
4586 ret = copy_to_user((void *)(unsigned long)loi->inodes,
4587 (void *)(unsigned long)inodes, size);
4588 if (ret)
4589 ret = -EFAULT;
4590
4591 out:
4592 btrfs_free_path(path);
4593 vfree(inodes);
4594 kfree(loi);
4595
4596 return ret;
4597 }
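
/*
 * Illustrative userspace view (not part of the original file): the
 * container copied back through loi->inodes packs (inum, offset, root)
 * triples into val[], as built by build_ino_list() above, so a caller
 * walks it three u64s at a time:
 *
 *	struct btrfs_data_container *inodes = buf;  (buf is the loi->inodes buffer)
 *	int i;
 *
 *	for (i = 0; i < inodes->elem_cnt; i += 3) {
 *		__u64 inum   = inodes->val[i];
 *		__u64 offset = inodes->val[i + 1];
 *		__u64 root   = inodes->val[i + 2];
 *		...
 *	}
 */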
4598
4599 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
4600 struct btrfs_ioctl_balance_args *bargs)
4601 {
4602 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4603
4604 bargs->flags = bctl->flags;
4605
4606 if (atomic_read(&fs_info->balance_running))
4607 bargs->state |= BTRFS_BALANCE_STATE_RUNNING;
4608 if (atomic_read(&fs_info->balance_pause_req))
4609 bargs->state |= BTRFS_BALANCE_STATE_PAUSE_REQ;
4610 if (atomic_read(&fs_info->balance_cancel_req))
4611 bargs->state |= BTRFS_BALANCE_STATE_CANCEL_REQ;
4612
4613 memcpy(&bargs->data, &bctl->data, sizeof(bargs->data));
4614 memcpy(&bargs->meta, &bctl->meta, sizeof(bargs->meta));
4615 memcpy(&bargs->sys, &bctl->sys, sizeof(bargs->sys));
4616
4617 if (lock) {
4618 spin_lock(&fs_info->balance_lock);
4619 memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
4620 spin_unlock(&fs_info->balance_lock);
4621 } else {
4622 memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
4623 }
4624 }
4625
4626 static long btrfs_ioctl_balance(struct file *file, void __user *arg)
4627 {
4628 struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
4629 struct btrfs_fs_info *fs_info = root->fs_info;
4630 struct btrfs_ioctl_balance_args *bargs;
4631 struct btrfs_balance_control *bctl;
4632 bool need_unlock; /* for mut. excl. ops lock */
4633 int ret;
4634
4635 if (!capable(CAP_SYS_ADMIN))
4636 return -EPERM;
4637
4638 ret = mnt_want_write_file(file);
4639 if (ret)
4640 return ret;
4641
4642 again:
4643 if (!atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
4644 mutex_lock(&fs_info->volume_mutex);
4645 mutex_lock(&fs_info->balance_mutex);
4646 need_unlock = true;
4647 goto locked;
4648 }
4649
4650 /*
4651 * The mutually exclusive ops lock is held. Three possibilities:
4652 * (1) some other op is running
4653 * (2) balance is running
4654 * (3) balance is paused -- special case (think resume)
4655 */
4656 mutex_lock(&fs_info->balance_mutex);
4657 if (fs_info->balance_ctl) {
4658 /* this is either (2) or (3) */
4659 if (!atomic_read(&fs_info->balance_running)) {
4660 mutex_unlock(&fs_info->balance_mutex);
4661 if (!mutex_trylock(&fs_info->volume_mutex))
4662 goto again;
4663 mutex_lock(&fs_info->balance_mutex);
4664
4665 if (fs_info->balance_ctl &&
4666 !atomic_read(&fs_info->balance_running)) {
4667 /* this is (3) */
4668 need_unlock = false;
4669 goto locked;
4670 }
4671
4672 mutex_unlock(&fs_info->balance_mutex);
4673 mutex_unlock(&fs_info->volume_mutex);
4674 goto again;
4675 } else {
4676 /* this is (2) */
4677 mutex_unlock(&fs_info->balance_mutex);
4678 ret = -EINPROGRESS;
4679 goto out;
4680 }
4681 } else {
4682 /* this is (1) */
4683 mutex_unlock(&fs_info->balance_mutex);
4684 ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
4685 goto out;
4686 }
4687
4688 locked:
4689 BUG_ON(!atomic_read(&fs_info->mutually_exclusive_operation_running));
4690
4691 if (arg) {
4692 bargs = memdup_user(arg, sizeof(*bargs));
4693 if (IS_ERR(bargs)) {
4694 ret = PTR_ERR(bargs);
4695 goto out_unlock;
4696 }
4697
4698 if (bargs->flags & BTRFS_BALANCE_RESUME) {
4699 if (!fs_info->balance_ctl) {
4700 ret = -ENOTCONN;
4701 goto out_bargs;
4702 }
4703
4704 bctl = fs_info->balance_ctl;
4705 spin_lock(&fs_info->balance_lock);
4706 bctl->flags |= BTRFS_BALANCE_RESUME;
4707 spin_unlock(&fs_info->balance_lock);
4708
4709 goto do_balance;
4710 }
4711 } else {
4712 bargs = NULL;
4713 }
4714
4715 if (fs_info->balance_ctl) {
4716 ret = -EINPROGRESS;
4717 goto out_bargs;
4718 }
4719
4720 bctl = kzalloc(sizeof(*bctl), GFP_KERNEL);
4721 if (!bctl) {
4722 ret = -ENOMEM;
4723 goto out_bargs;
4724 }
4725
4726 bctl->fs_info = fs_info;
4727 if (arg) {
4728 memcpy(&bctl->data, &bargs->data, sizeof(bctl->data));
4729 memcpy(&bctl->meta, &bargs->meta, sizeof(bctl->meta));
4730 memcpy(&bctl->sys, &bargs->sys, sizeof(bctl->sys));
4731
4732 bctl->flags = bargs->flags;
4733 } else {
4734 /* balance everything - no filters */
4735 bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
4736 }
4737
4738 if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
4739 ret = -EINVAL;
4740 goto out_bctl;
4741 }
4742
4743 do_balance:
4744 /*
4745 * Ownership of bctl and mutually_exclusive_operation_running
4746 * goes to btrfs_balance. bctl is freed in __cancel_balance,
4747 * or, if restriper was paused all the way until unmount, in
4748 * free_fs_info. mutually_exclusive_operation_running is
4749 * cleared in __cancel_balance.
4750 */
4751 need_unlock = false;
4752
4753 ret = btrfs_balance(bctl, bargs);
4754 bctl = NULL;
4755
4756 if (arg) {
4757 if (copy_to_user(arg, bargs, sizeof(*bargs)))
4758 ret = -EFAULT;
4759 }
4760
4761 out_bctl:
4762 kfree(bctl);
4763 out_bargs:
4764 kfree(bargs);
4765 out_unlock:
4766 mutex_unlock(&fs_info->balance_mutex);
4767 mutex_unlock(&fs_info->volume_mutex);
4768 if (need_unlock)
4769 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
4770 out:
4771 mnt_drop_write_file(file);
4772 return ret;
4773 }
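
/*
 * Illustrative userspace sketch (not part of the original file): resuming a
 * previously paused balance only needs the RESUME flag; the filters are
 * taken from the balance control still held in fs_info:
 *
 *	struct btrfs_ioctl_balance_args bargs = {
 *		.flags = BTRFS_BALANCE_RESUME,
 *	};
 *
 *	ioctl(fd, BTRFS_IOC_BALANCE_V2, &bargs);
 */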
4774
4775 static long btrfs_ioctl_balance_ctl(struct btrfs_fs_info *fs_info, int cmd)
4776 {
4777 if (!capable(CAP_SYS_ADMIN))
4778 return -EPERM;
4779
4780 switch (cmd) {
4781 case BTRFS_BALANCE_CTL_PAUSE:
4782 return btrfs_pause_balance(fs_info);
4783 case BTRFS_BALANCE_CTL_CANCEL:
4784 return btrfs_cancel_balance(fs_info);
4785 }
4786
4787 return -EINVAL;
4788 }
4789
4790 static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info,
4791 void __user *arg)
4792 {
4793 struct btrfs_ioctl_balance_args *bargs;
4794 int ret = 0;
4795
4796 if (!capable(CAP_SYS_ADMIN))
4797 return -EPERM;
4798
4799 mutex_lock(&fs_info->balance_mutex);
4800 if (!fs_info->balance_ctl) {
4801 ret = -ENOTCONN;
4802 goto out;
4803 }
4804
4805 bargs = kzalloc(sizeof(*bargs), GFP_KERNEL);
4806 if (!bargs) {
4807 ret = -ENOMEM;
4808 goto out;
4809 }
4810
4811 update_ioctl_balance_args(fs_info, 1, bargs);
4812
4813 if (copy_to_user(arg, bargs, sizeof(*bargs)))
4814 ret = -EFAULT;
4815
4816 kfree(bargs);
4817 out:
4818 mutex_unlock(&fs_info->balance_mutex);
4819 return ret;
4820 }
4821
4822 static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
4823 {
4824 struct inode *inode = file_inode(file);
4825 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4826 struct btrfs_ioctl_quota_ctl_args *sa;
4827 struct btrfs_trans_handle *trans = NULL;
4828 int ret;
4829 int err;
4830
4831 if (!capable(CAP_SYS_ADMIN))
4832 return -EPERM;
4833
4834 ret = mnt_want_write_file(file);
4835 if (ret)
4836 return ret;
4837
4838 sa = memdup_user(arg, sizeof(*sa));
4839 if (IS_ERR(sa)) {
4840 ret = PTR_ERR(sa);
4841 goto drop_write;
4842 }
4843
4844 down_write(&fs_info->subvol_sem);
4845 trans = btrfs_start_transaction(fs_info->tree_root, 2);
4846 if (IS_ERR(trans)) {
4847 ret = PTR_ERR(trans);
4848 goto out;
4849 }
4850
4851 switch (sa->cmd) {
4852 case BTRFS_QUOTA_CTL_ENABLE:
4853 ret = btrfs_quota_enable(trans, fs_info);
4854 break;
4855 case BTRFS_QUOTA_CTL_DISABLE:
4856 ret = btrfs_quota_disable(trans, fs_info);
4857 break;
4858 default:
4859 ret = -EINVAL;
4860 break;
4861 }
4862
4863 err = btrfs_commit_transaction(trans);
4864 if (err && !ret)
4865 ret = err;
4866 out:
4867 kfree(sa);
4868 up_write(&fs_info->subvol_sem);
4869 drop_write:
4870 mnt_drop_write_file(file);
4871 return ret;
4872 }
4873
4874 static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
4875 {
4876 struct inode *inode = file_inode(file);
4877 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4878 struct btrfs_root *root = BTRFS_I(inode)->root;
4879 struct btrfs_ioctl_qgroup_assign_args *sa;
4880 struct btrfs_trans_handle *trans;
4881 int ret;
4882 int err;
4883
4884 if (!capable(CAP_SYS_ADMIN))
4885 return -EPERM;
4886
4887 ret = mnt_want_write_file(file);
4888 if (ret)
4889 return ret;
4890
4891 sa = memdup_user(arg, sizeof(*sa));
4892 if (IS_ERR(sa)) {
4893 ret = PTR_ERR(sa);
4894 goto drop_write;
4895 }
4896
4897 trans = btrfs_join_transaction(root);
4898 if (IS_ERR(trans)) {
4899 ret = PTR_ERR(trans);
4900 goto out;
4901 }
4902
4903 /* FIXME: check if the IDs really exist */
4904 if (sa->assign) {
4905 ret = btrfs_add_qgroup_relation(trans, fs_info,
4906 sa->src, sa->dst);
4907 } else {
4908 ret = btrfs_del_qgroup_relation(trans, fs_info,
4909 sa->src, sa->dst);
4910 }
4911
4912 /* update qgroup status and info */
4913 err = btrfs_run_qgroups(trans, fs_info);
4914 if (err < 0)
4915 btrfs_handle_fs_error(fs_info, err,
4916 "failed to update qgroup status and info");
4917 err = btrfs_end_transaction(trans);
4918 if (err && !ret)
4919 ret = err;
4920
4921 out:
4922 kfree(sa);
4923 drop_write:
4924 mnt_drop_write_file(file);
4925 return ret;
4926 }
4927
4928 static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
4929 {
4930 struct inode *inode = file_inode(file);
4931 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4932 struct btrfs_root *root = BTRFS_I(inode)->root;
4933 struct btrfs_ioctl_qgroup_create_args *sa;
4934 struct btrfs_trans_handle *trans;
4935 int ret;
4936 int err;
4937
4938 if (!capable(CAP_SYS_ADMIN))
4939 return -EPERM;
4940
4941 ret = mnt_want_write_file(file);
4942 if (ret)
4943 return ret;
4944
4945 sa = memdup_user(arg, sizeof(*sa));
4946 if (IS_ERR(sa)) {
4947 ret = PTR_ERR(sa);
4948 goto drop_write;
4949 }
4950
4951 if (!sa->qgroupid) {
4952 ret = -EINVAL;
4953 goto out;
4954 }
4955
4956 trans = btrfs_join_transaction(root);
4957 if (IS_ERR(trans)) {
4958 ret = PTR_ERR(trans);
4959 goto out;
4960 }
4961
4962 /* FIXME: check if the IDs really exist */
4963 if (sa->create) {
4964 ret = btrfs_create_qgroup(trans, fs_info, sa->qgroupid);
4965 } else {
4966 ret = btrfs_remove_qgroup(trans, fs_info, sa->qgroupid);
4967 }
4968
4969 err = btrfs_end_transaction(trans);
4970 if (err && !ret)
4971 ret = err;
4972
4973 out:
4974 kfree(sa);
4975 drop_write:
4976 mnt_drop_write_file(file);
4977 return ret;
4978 }
4979
4980 static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
4981 {
4982 struct inode *inode = file_inode(file);
4983 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4984 struct btrfs_root *root = BTRFS_I(inode)->root;
4985 struct btrfs_ioctl_qgroup_limit_args *sa;
4986 struct btrfs_trans_handle *trans;
4987 int ret;
4988 int err;
4989 u64 qgroupid;
4990
4991 if (!capable(CAP_SYS_ADMIN))
4992 return -EPERM;
4993
4994 ret = mnt_want_write_file(file);
4995 if (ret)
4996 return ret;
4997
4998 sa = memdup_user(arg, sizeof(*sa));
4999 if (IS_ERR(sa)) {
5000 ret = PTR_ERR(sa);
5001 goto drop_write;
5002 }
5003
5004 trans = btrfs_join_transaction(root);
5005 if (IS_ERR(trans)) {
5006 ret = PTR_ERR(trans);
5007 goto out;
5008 }
5009
5010 qgroupid = sa->qgroupid;
5011 if (!qgroupid) {
5012 /* take the current subvol as qgroup */
5013 qgroupid = root->root_key.objectid;
5014 }
5015
5016 /* FIXME: check if the IDs really exist */
5017 ret = btrfs_limit_qgroup(trans, fs_info, qgroupid, &sa->lim);
5018
5019 err = btrfs_end_transaction(trans);
5020 if (err && !ret)
5021 ret = err;
5022
5023 out:
5024 kfree(sa);
5025 drop_write:
5026 mnt_drop_write_file(file);
5027 return ret;
5028 }
5029
5030 static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
5031 {
5032 struct inode *inode = file_inode(file);
5033 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5034 struct btrfs_ioctl_quota_rescan_args *qsa;
5035 int ret;
5036
5037 if (!capable(CAP_SYS_ADMIN))
5038 return -EPERM;
5039
5040 ret = mnt_want_write_file(file);
5041 if (ret)
5042 return ret;
5043
5044 qsa = memdup_user(arg, sizeof(*qsa));
5045 if (IS_ERR(qsa)) {
5046 ret = PTR_ERR(qsa);
5047 goto drop_write;
5048 }
5049
5050 if (qsa->flags) {
5051 ret = -EINVAL;
5052 goto out;
5053 }
5054
5055 ret = btrfs_qgroup_rescan(fs_info);
5056
5057 out:
5058 kfree(qsa);
5059 drop_write:
5060 mnt_drop_write_file(file);
5061 return ret;
5062 }
5063
5064 static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
5065 {
5066 struct inode *inode = file_inode(file);
5067 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5068 struct btrfs_ioctl_quota_rescan_args *qsa;
5069 int ret = 0;
5070
5071 if (!capable(CAP_SYS_ADMIN))
5072 return -EPERM;
5073
5074 qsa = kzalloc(sizeof(*qsa), GFP_KERNEL);
5075 if (!qsa)
5076 return -ENOMEM;
5077
5078 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
5079 qsa->flags = 1;
5080 qsa->progress = fs_info->qgroup_rescan_progress.objectid;
5081 }
5082
5083 if (copy_to_user(arg, qsa, sizeof(*qsa)))
5084 ret = -EFAULT;
5085
5086 kfree(qsa);
5087 return ret;
5088 }
5089
5090 static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
5091 {
5092 struct inode *inode = file_inode(file);
5093 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5094
5095 if (!capable(CAP_SYS_ADMIN))
5096 return -EPERM;
5097
5098 return btrfs_qgroup_wait_for_completion(fs_info, true);
5099 }
5100
5101 static long _btrfs_ioctl_set_received_subvol(struct file *file,
5102 struct btrfs_ioctl_received_subvol_args *sa)
5103 {
5104 struct inode *inode = file_inode(file);
5105 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5106 struct btrfs_root *root = BTRFS_I(inode)->root;
5107 struct btrfs_root_item *root_item = &root->root_item;
5108 struct btrfs_trans_handle *trans;
5109 struct timespec ct = current_time(inode);
5110 int ret = 0;
5111 int received_uuid_changed;
5112
5113 if (!inode_owner_or_capable(inode))
5114 return -EPERM;
5115
5116 ret = mnt_want_write_file(file);
5117 if (ret < 0)
5118 return ret;
5119
5120 down_write(&fs_info->subvol_sem);
5121
5122 if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
5123 ret = -EINVAL;
5124 goto out;
5125 }
5126
5127 if (btrfs_root_readonly(root)) {
5128 ret = -EROFS;
5129 goto out;
5130 }
5131
5132 /*
5133 * 1 - root item
5134 * 2 - uuid items (received uuid + subvol uuid)
5135 */
5136 trans = btrfs_start_transaction(root, 3);
5137 if (IS_ERR(trans)) {
5138 ret = PTR_ERR(trans);
5139 trans = NULL;
5140 goto out;
5141 }
5142
5143 sa->rtransid = trans->transid;
5144 sa->rtime.sec = ct.tv_sec;
5145 sa->rtime.nsec = ct.tv_nsec;
5146
5147 received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid,
5148 BTRFS_UUID_SIZE);
5149 if (received_uuid_changed &&
5150 !btrfs_is_empty_uuid(root_item->received_uuid))
5151 btrfs_uuid_tree_rem(trans, fs_info, root_item->received_uuid,
5152 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
5153 root->root_key.objectid);
5154 memcpy(root_item->received_uuid, sa->uuid, BTRFS_UUID_SIZE);
5155 btrfs_set_root_stransid(root_item, sa->stransid);
5156 btrfs_set_root_rtransid(root_item, sa->rtransid);
5157 btrfs_set_stack_timespec_sec(&root_item->stime, sa->stime.sec);
5158 btrfs_set_stack_timespec_nsec(&root_item->stime, sa->stime.nsec);
5159 btrfs_set_stack_timespec_sec(&root_item->rtime, sa->rtime.sec);
5160 btrfs_set_stack_timespec_nsec(&root_item->rtime, sa->rtime.nsec);
5161
5162 ret = btrfs_update_root(trans, fs_info->tree_root,
5163 &root->root_key, &root->root_item);
5164 if (ret < 0) {
5165 btrfs_end_transaction(trans);
5166 goto out;
5167 }
5168 if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) {
5169 ret = btrfs_uuid_tree_add(trans, fs_info, sa->uuid,
5170 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
5171 root->root_key.objectid);
5172 if (ret < 0 && ret != -EEXIST) {
5173 btrfs_abort_transaction(trans, ret);
5174 goto out;
5175 }
5176 }
5177 ret = btrfs_commit_transaction(trans);
5178 if (ret < 0) {
5179 btrfs_abort_transaction(trans, ret);
5180 goto out;
5181 }
5182
5183 out:
5184 up_write(&fs_info->subvol_sem);
5185 mnt_drop_write_file(file);
5186 return ret;
5187 }
5188
5189 #ifdef CONFIG_64BIT
5190 static long btrfs_ioctl_set_received_subvol_32(struct file *file,
5191 void __user *arg)
5192 {
5193 struct btrfs_ioctl_received_subvol_args_32 *args32 = NULL;
5194 struct btrfs_ioctl_received_subvol_args *args64 = NULL;
5195 int ret = 0;
5196
5197 args32 = memdup_user(arg, sizeof(*args32));
5198 if (IS_ERR(args32))
5199 return PTR_ERR(args32);
5200
5201 args64 = kmalloc(sizeof(*args64), GFP_KERNEL);
5202 if (!args64) {
5203 ret = -ENOMEM;
5204 goto out;
5205 }
5206
5207 memcpy(args64->uuid, args32->uuid, BTRFS_UUID_SIZE);
5208 args64->stransid = args32->stransid;
5209 args64->rtransid = args32->rtransid;
5210 args64->stime.sec = args32->stime.sec;
5211 args64->stime.nsec = args32->stime.nsec;
5212 args64->rtime.sec = args32->rtime.sec;
5213 args64->rtime.nsec = args32->rtime.nsec;
5214 args64->flags = args32->flags;
5215
5216 ret = _btrfs_ioctl_set_received_subvol(file, args64);
5217 if (ret)
5218 goto out;
5219
5220 memcpy(args32->uuid, args64->uuid, BTRFS_UUID_SIZE);
5221 args32->stransid = args64->stransid;
5222 args32->rtransid = args64->rtransid;
5223 args32->stime.sec = args64->stime.sec;
5224 args32->stime.nsec = args64->stime.nsec;
5225 args32->rtime.sec = args64->rtime.sec;
5226 args32->rtime.nsec = args64->rtime.nsec;
5227 args32->flags = args64->flags;
5228
5229 ret = copy_to_user(arg, args32, sizeof(*args32));
5230 if (ret)
5231 ret = -EFAULT;
5232
5233 out:
5234 kfree(args32);
5235 kfree(args64);
5236 return ret;
5237 }
5238 #endif
5239
5240 static long btrfs_ioctl_set_received_subvol(struct file *file,
5241 void __user *arg)
5242 {
5243 struct btrfs_ioctl_received_subvol_args *sa = NULL;
5244 int ret = 0;
5245
5246 sa = memdup_user(arg, sizeof(*sa));
5247 if (IS_ERR(sa))
5248 return PTR_ERR(sa);
5249
5250 ret = _btrfs_ioctl_set_received_subvol(file, sa);
5251
5252 if (ret)
5253 goto out;
5254
5255 ret = copy_to_user(arg, sa, sizeof(*sa));
5256 if (ret)
5257 ret = -EFAULT;
5258
5259 out:
5260 kfree(sa);
5261 return ret;
5262 }
5263
5264 static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg)
5265 {
5266 struct inode *inode = file_inode(file);
5267 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5268 size_t len;
5269 int ret;
5270 char label[BTRFS_LABEL_SIZE];
5271
5272 spin_lock(&fs_info->super_lock);
5273 memcpy(label, fs_info->super_copy->label, BTRFS_LABEL_SIZE);
5274 spin_unlock(&fs_info->super_lock);
5275
5276 len = strnlen(label, BTRFS_LABEL_SIZE);
5277
5278 if (len == BTRFS_LABEL_SIZE) {
5279 btrfs_warn(fs_info,
5280 "label is too long, return the first %zu bytes",
5281 --len);
5282 }
5283
5284 ret = copy_to_user(arg, label, len);
5285
5286 return ret ? -EFAULT : 0;
5287 }
5288
5289 static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
5290 {
5291 struct inode *inode = file_inode(file);
5292 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5293 struct btrfs_root *root = BTRFS_I(inode)->root;
5294 struct btrfs_super_block *super_block = fs_info->super_copy;
5295 struct btrfs_trans_handle *trans;
5296 char label[BTRFS_LABEL_SIZE];
5297 int ret;
5298
5299 if (!capable(CAP_SYS_ADMIN))
5300 return -EPERM;
5301
5302 if (copy_from_user(label, arg, sizeof(label)))
5303 return -EFAULT;
5304
5305 if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
5306 btrfs_err(fs_info,
5307 "unable to set label with more than %d bytes",
5308 BTRFS_LABEL_SIZE - 1);
5309 return -EINVAL;
5310 }
5311
5312 ret = mnt_want_write_file(file);
5313 if (ret)
5314 return ret;
5315
5316 trans = btrfs_start_transaction(root, 0);
5317 if (IS_ERR(trans)) {
5318 ret = PTR_ERR(trans);
5319 goto out_unlock;
5320 }
5321
5322 spin_lock(&fs_info->super_lock);
5323 strcpy(super_block->label, label);
5324 spin_unlock(&fs_info->super_lock);
5325 ret = btrfs_commit_transaction(trans);
5326
5327 out_unlock:
5328 mnt_drop_write_file(file);
5329 return ret;
5330 }
5331
5332 #define INIT_FEATURE_FLAGS(suffix) \
5333 { .compat_flags = BTRFS_FEATURE_COMPAT_##suffix, \
5334 .compat_ro_flags = BTRFS_FEATURE_COMPAT_RO_##suffix, \
5335 .incompat_flags = BTRFS_FEATURE_INCOMPAT_##suffix }
5336
5337 int btrfs_ioctl_get_supported_features(void __user *arg)
5338 {
5339 static const struct btrfs_ioctl_feature_flags features[3] = {
5340 INIT_FEATURE_FLAGS(SUPP),
5341 INIT_FEATURE_FLAGS(SAFE_SET),
5342 INIT_FEATURE_FLAGS(SAFE_CLEAR)
5343 };
5344
5345 if (copy_to_user(arg, &features, sizeof(features)))
5346 return -EFAULT;
5347
5348 return 0;
5349 }
5350
5351 static int btrfs_ioctl_get_features(struct file *file, void __user *arg)
5352 {
5353 struct inode *inode = file_inode(file);
5354 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5355 struct btrfs_super_block *super_block = fs_info->super_copy;
5356 struct btrfs_ioctl_feature_flags features;
5357
5358 features.compat_flags = btrfs_super_compat_flags(super_block);
5359 features.compat_ro_flags = btrfs_super_compat_ro_flags(super_block);
5360 features.incompat_flags = btrfs_super_incompat_flags(super_block);
5361
5362 if (copy_to_user(arg, &features, sizeof(features)))
5363 return -EFAULT;
5364
5365 return 0;
5366 }
5367
5368 static int check_feature_bits(struct btrfs_fs_info *fs_info,
5369 enum btrfs_feature_set set,
5370 u64 change_mask, u64 flags, u64 supported_flags,
5371 u64 safe_set, u64 safe_clear)
5372 {
5373 const char *type = btrfs_feature_set_names[set];
5374 char *names;
5375 u64 disallowed, unsupported;
5376 u64 set_mask = flags & change_mask;
5377 u64 clear_mask = ~flags & change_mask;
5378
5379 unsupported = set_mask & ~supported_flags;
5380 if (unsupported) {
5381 names = btrfs_printable_features(set, unsupported);
5382 if (names) {
5383 btrfs_warn(fs_info,
5384 "this kernel does not support the %s feature bit%s",
5385 names, strchr(names, ',') ? "s" : "");
5386 kfree(names);
5387 } else
5388 btrfs_warn(fs_info,
5389 "this kernel does not support %s bits 0x%llx",
5390 type, unsupported);
5391 return -EOPNOTSUPP;
5392 }
5393
5394 disallowed = set_mask & ~safe_set;
5395 if (disallowed) {
5396 names = btrfs_printable_features(set, disallowed);
5397 if (names) {
5398 btrfs_warn(fs_info,
5399 "can't set the %s feature bit%s while mounted",
5400 names, strchr(names, ',') ? "s" : "");
5401 kfree(names);
5402 } else
5403 btrfs_warn(fs_info,
5404 "can't set %s bits 0x%llx while mounted",
5405 type, disallowed);
5406 return -EPERM;
5407 }
5408
5409 disallowed = clear_mask & ~safe_clear;
5410 if (disallowed) {
5411 names = btrfs_printable_features(set, disallowed);
5412 if (names) {
5413 btrfs_warn(fs_info,
5414 "can't clear the %s feature bit%s while mounted",
5415 names, strchr(names, ',') ? "s" : "");
5416 kfree(names);
5417 } else
5418 btrfs_warn(fs_info,
5419 "can't clear %s bits 0x%llx while mounted",
5420 type, disallowed);
5421 return -EPERM;
5422 }
5423
5424 return 0;
5425 }
5426
5427 #define check_feature(fs_info, change_mask, flags, mask_base) \
5428 check_feature_bits(fs_info, FEAT_##mask_base, change_mask, flags, \
5429 BTRFS_FEATURE_ ## mask_base ## _SUPP, \
5430 BTRFS_FEATURE_ ## mask_base ## _SAFE_SET, \
5431 BTRFS_FEATURE_ ## mask_base ## _SAFE_CLEAR)
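
/*
 * For example, check_feature(fs_info, change_mask, flags, INCOMPAT) expands
 * to a check_feature_bits() call against BTRFS_FEATURE_INCOMPAT_SUPP,
 * BTRFS_FEATURE_INCOMPAT_SAFE_SET and BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR.
 */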
5432
5433 static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
5434 {
5435 struct inode *inode = file_inode(file);
5436 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5437 struct btrfs_root *root = BTRFS_I(inode)->root;
5438 struct btrfs_super_block *super_block = fs_info->super_copy;
5439 struct btrfs_ioctl_feature_flags flags[2];
5440 struct btrfs_trans_handle *trans;
5441 u64 newflags;
5442 int ret;
5443
5444 if (!capable(CAP_SYS_ADMIN))
5445 return -EPERM;
5446
5447 if (copy_from_user(flags, arg, sizeof(flags)))
5448 return -EFAULT;
5449
5450 /* Nothing to do */
5451 if (!flags[0].compat_flags && !flags[0].compat_ro_flags &&
5452 !flags[0].incompat_flags)
5453 return 0;
5454
5455 ret = check_feature(fs_info, flags[0].compat_flags,
5456 flags[1].compat_flags, COMPAT);
5457 if (ret)
5458 return ret;
5459
5460 ret = check_feature(fs_info, flags[0].compat_ro_flags,
5461 flags[1].compat_ro_flags, COMPAT_RO);
5462 if (ret)
5463 return ret;
5464
5465 ret = check_feature(fs_info, flags[0].incompat_flags,
5466 flags[1].incompat_flags, INCOMPAT);
5467 if (ret)
5468 return ret;
5469
5470 ret = mnt_want_write_file(file);
5471 if (ret)
5472 return ret;
5473
5474 trans = btrfs_start_transaction(root, 0);
5475 if (IS_ERR(trans)) {
5476 ret = PTR_ERR(trans);
5477 goto out_drop_write;
5478 }
5479
5480 spin_lock(&fs_info->super_lock);
5481 newflags = btrfs_super_compat_flags(super_block);
5482 newflags |= flags[0].compat_flags & flags[1].compat_flags;
5483 newflags &= ~(flags[0].compat_flags & ~flags[1].compat_flags);
5484 btrfs_set_super_compat_flags(super_block, newflags);
5485
5486 newflags = btrfs_super_compat_ro_flags(super_block);
5487 newflags |= flags[0].compat_ro_flags & flags[1].compat_ro_flags;
5488 newflags &= ~(flags[0].compat_ro_flags & ~flags[1].compat_ro_flags);
5489 btrfs_set_super_compat_ro_flags(super_block, newflags);
5490
5491 newflags = btrfs_super_incompat_flags(super_block);
5492 newflags |= flags[0].incompat_flags & flags[1].incompat_flags;
5493 newflags &= ~(flags[0].incompat_flags & ~flags[1].incompat_flags);
5494 btrfs_set_super_incompat_flags(super_block, newflags);
5495 spin_unlock(&fs_info->super_lock);
5496
5497 ret = btrfs_commit_transaction(trans);
5498 out_drop_write:
5499 mnt_drop_write_file(file);
5500
5501 return ret;
5502 }
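
/*
 * Illustrative userspace sketch (not part of the original file): flags[0]
 * selects which feature bits to touch and flags[1] gives their new values,
 * so setting a single incompat bit that the kernel allows to be flipped
 * online (EXTENDED_IREF is used here purely as an example) looks like:
 *
 *	struct btrfs_ioctl_feature_flags flags[2] = {
 *		{ .incompat_flags = BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF },
 *		{ .incompat_flags = BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF },
 *	};
 *
 *	ioctl(fd, BTRFS_IOC_SET_FEATURES, flags);
 */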
5503
5504 long btrfs_ioctl(struct file *file, unsigned int
5505 cmd, unsigned long arg)
5506 {
5507 struct inode *inode = file_inode(file);
5508 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5509 struct btrfs_root *root = BTRFS_I(inode)->root;
5510 void __user *argp = (void __user *)arg;
5511
5512 switch (cmd) {
5513 case FS_IOC_GETFLAGS:
5514 return btrfs_ioctl_getflags(file, argp);
5515 case FS_IOC_SETFLAGS:
5516 return btrfs_ioctl_setflags(file, argp);
5517 case FS_IOC_GETVERSION:
5518 return btrfs_ioctl_getversion(file, argp);
5519 case FITRIM:
5520 return btrfs_ioctl_fitrim(file, argp);
5521 case BTRFS_IOC_SNAP_CREATE:
5522 return btrfs_ioctl_snap_create(file, argp, 0);
5523 case BTRFS_IOC_SNAP_CREATE_V2:
5524 return btrfs_ioctl_snap_create_v2(file, argp, 0);
5525 case BTRFS_IOC_SUBVOL_CREATE:
5526 return btrfs_ioctl_snap_create(file, argp, 1);
5527 case BTRFS_IOC_SUBVOL_CREATE_V2:
5528 return btrfs_ioctl_snap_create_v2(file, argp, 1);
5529 case BTRFS_IOC_SNAP_DESTROY:
5530 return btrfs_ioctl_snap_destroy(file, argp);
5531 case BTRFS_IOC_SUBVOL_GETFLAGS:
5532 return btrfs_ioctl_subvol_getflags(file, argp);
5533 case BTRFS_IOC_SUBVOL_SETFLAGS:
5534 return btrfs_ioctl_subvol_setflags(file, argp);
5535 case BTRFS_IOC_DEFAULT_SUBVOL:
5536 return btrfs_ioctl_default_subvol(file, argp);
5537 case BTRFS_IOC_DEFRAG:
5538 return btrfs_ioctl_defrag(file, NULL);
5539 case BTRFS_IOC_DEFRAG_RANGE:
5540 return btrfs_ioctl_defrag(file, argp);
5541 case BTRFS_IOC_RESIZE:
5542 return btrfs_ioctl_resize(file, argp);
5543 case BTRFS_IOC_ADD_DEV:
5544 return btrfs_ioctl_add_dev(fs_info, argp);
5545 case BTRFS_IOC_RM_DEV:
5546 return btrfs_ioctl_rm_dev(file, argp);
5547 case BTRFS_IOC_RM_DEV_V2:
5548 return btrfs_ioctl_rm_dev_v2(file, argp);
5549 case BTRFS_IOC_FS_INFO:
5550 return btrfs_ioctl_fs_info(fs_info, argp);
5551 case BTRFS_IOC_DEV_INFO:
5552 return btrfs_ioctl_dev_info(fs_info, argp);
5553 case BTRFS_IOC_BALANCE:
5554 return btrfs_ioctl_balance(file, NULL);
5555 case BTRFS_IOC_TRANS_START:
5556 return btrfs_ioctl_trans_start(file);
5557 case BTRFS_IOC_TRANS_END:
5558 return btrfs_ioctl_trans_end(file);
5559 case BTRFS_IOC_TREE_SEARCH:
5560 return btrfs_ioctl_tree_search(file, argp);
5561 case BTRFS_IOC_TREE_SEARCH_V2:
5562 return btrfs_ioctl_tree_search_v2(file, argp);
5563 case BTRFS_IOC_INO_LOOKUP:
5564 return btrfs_ioctl_ino_lookup(file, argp);
5565 case BTRFS_IOC_INO_PATHS:
5566 return btrfs_ioctl_ino_to_path(root, argp);
5567 case BTRFS_IOC_LOGICAL_INO:
5568 return btrfs_ioctl_logical_to_ino(fs_info, argp);
5569 case BTRFS_IOC_SPACE_INFO:
5570 return btrfs_ioctl_space_info(fs_info, argp);
5571 case BTRFS_IOC_SYNC: {
5572 int ret;
5573
5574 ret = btrfs_start_delalloc_roots(fs_info, 0, -1);
5575 if (ret)
5576 return ret;
5577 ret = btrfs_sync_fs(inode->i_sb, 1);
5578 /*
5579 * The transaction thread may want to do more work,
5580 * namely poking the cleaner kthread, which will start
5581 * processing uncleaned subvols.
5582 */
5583 wake_up_process(fs_info->transaction_kthread);
5584 return ret;
5585 }
5586 case BTRFS_IOC_START_SYNC:
5587 return btrfs_ioctl_start_sync(root, argp);
5588 case BTRFS_IOC_WAIT_SYNC:
5589 return btrfs_ioctl_wait_sync(fs_info, argp);
5590 case BTRFS_IOC_SCRUB:
5591 return btrfs_ioctl_scrub(file, argp);
5592 case BTRFS_IOC_SCRUB_CANCEL:
5593 return btrfs_ioctl_scrub_cancel(fs_info);
5594 case BTRFS_IOC_SCRUB_PROGRESS:
5595 return btrfs_ioctl_scrub_progress(fs_info, argp);
5596 case BTRFS_IOC_BALANCE_V2:
5597 return btrfs_ioctl_balance(file, argp);
5598 case BTRFS_IOC_BALANCE_CTL:
5599 return btrfs_ioctl_balance_ctl(fs_info, arg);
5600 case BTRFS_IOC_BALANCE_PROGRESS:
5601 return btrfs_ioctl_balance_progress(fs_info, argp);
5602 case BTRFS_IOC_SET_RECEIVED_SUBVOL:
5603 return btrfs_ioctl_set_received_subvol(file, argp);
5604 #ifdef CONFIG_64BIT
5605 case BTRFS_IOC_SET_RECEIVED_SUBVOL_32:
5606 return btrfs_ioctl_set_received_subvol_32(file, argp);
5607 #endif
5608 case BTRFS_IOC_SEND:
5609 return btrfs_ioctl_send(file, argp);
5610 case BTRFS_IOC_GET_DEV_STATS:
5611 return btrfs_ioctl_get_dev_stats(fs_info, argp);
5612 case BTRFS_IOC_QUOTA_CTL:
5613 return btrfs_ioctl_quota_ctl(file, argp);
5614 case BTRFS_IOC_QGROUP_ASSIGN:
5615 return btrfs_ioctl_qgroup_assign(file, argp);
5616 case BTRFS_IOC_QGROUP_CREATE:
5617 return btrfs_ioctl_qgroup_create(file, argp);
5618 case BTRFS_IOC_QGROUP_LIMIT:
5619 return btrfs_ioctl_qgroup_limit(file, argp);
5620 case BTRFS_IOC_QUOTA_RESCAN:
5621 return btrfs_ioctl_quota_rescan(file, argp);
5622 case BTRFS_IOC_QUOTA_RESCAN_STATUS:
5623 return btrfs_ioctl_quota_rescan_status(file, argp);
5624 case BTRFS_IOC_QUOTA_RESCAN_WAIT:
5625 return btrfs_ioctl_quota_rescan_wait(file, argp);
5626 case BTRFS_IOC_DEV_REPLACE:
5627 return btrfs_ioctl_dev_replace(fs_info, argp);
5628 case BTRFS_IOC_GET_FSLABEL:
5629 return btrfs_ioctl_get_fslabel(file, argp);
5630 case BTRFS_IOC_SET_FSLABEL:
5631 return btrfs_ioctl_set_fslabel(file, argp);
5632 case BTRFS_IOC_GET_SUPPORTED_FEATURES:
5633 return btrfs_ioctl_get_supported_features(argp);
5634 case BTRFS_IOC_GET_FEATURES:
5635 return btrfs_ioctl_get_features(file, argp);
5636 case BTRFS_IOC_SET_FEATURES:
5637 return btrfs_ioctl_set_features(file, argp);
5638 }
5639
5640 return -ENOTTY;
5641 }
5642
5643 #ifdef CONFIG_COMPAT
5644 long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
5645 {
5646 /*
5647 * These all access 32-bit values anyway so no further
5648 * handling is necessary.
5649 */
5650 switch (cmd) {
5651 case FS_IOC32_GETFLAGS:
5652 cmd = FS_IOC_GETFLAGS;
5653 break;
5654 case FS_IOC32_SETFLAGS:
5655 cmd = FS_IOC_SETFLAGS;
5656 break;
5657 case FS_IOC32_GETVERSION:
5658 cmd = FS_IOC_GETVERSION;
5659 break;
5660 }
5661
5662 return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
5663 }
5664 #endif