fs/btrfs/volumes.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"

static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		rcu_string_free(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

void btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, set up a plug here and finish it off before we return.
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;
		if (need_resched())
			cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				if (need_resched())
					cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}

static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

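/*
 * Track a newly scanned device in the global list of known filesystems.
 * If the fsid has not been seen before, a new btrfs_fs_devices is
 * allocated; if the device is already known, only its path and the
 * latest devid/generation bookkeeping are refreshed.
 */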
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		mutex_init(&fs_devices->device_list_mutex);
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		device->dev_stats_valid = 0;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		spin_lock_init(&device->io_lock);

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			kfree(device);
			return -ENOMEM;
		}
		rcu_assign_pointer(device->name, name);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		/* init readahead state */
		spin_lock_init(&device->reada_lock);
		device->reada_curr_zone = NULL;
		atomic_set(&device->reada_in_flight, 0);
		device->reada_next = 0;
		INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
		INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	} else if (!device->name || strcmp(device->name->str, path)) {
		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}

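/*
 * Deep-copy an fs_devices list, duplicating each device's name, devid
 * and uuid.  Used by btrfs_prepare_sprout() so the seed filesystem
 * keeps a list of its own.  Returns ERR_PTR(-ENOMEM) on allocation
 * failure.
 */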
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	mutex_init(&fs_devices->device_list_mutex);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	fs_devices->total_devices = orig->total_devices;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	/* We have held the volume lock, it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
		if (!name) {
			kfree(device);
			goto error;
		}
		rcu_assign_pointer(device->name, name);

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

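/*
 * Drop devices that were scanned but never ended up in the FS metadata,
 * releasing their block devices, and remember the device with the
 * highest generation as the latest one.  Walks seed filesystems too.
 */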
void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *next;
	struct block_device *latest_bdev = NULL;
	u64 latest_devid = 0;
	u64 latest_transid = 0;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata) {
			if (!latest_transid ||
			    device->generation > latest_transid) {
				latest_devid = device->devid;
				latest_transid = device->generation;
				latest_bdev = device->bdev;
			}
			continue;
		}

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		rcu_string_free(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;

	mutex_unlock(&uuid_mutex);
}

static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);

	rcu_string_free(device->name);
	kfree(device);
}

static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}

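/*
 * Close all block devices once the last opener is gone.  Each device
 * struct is swapped for a bare copy (bdev cleared) under RCU, so
 * readers traversing the list never see a half-torn-down device; the
 * old struct is freed via call_rcu()/free_device().
 */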
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		struct btrfs_device *new_device;
		struct rcu_string *name;

		if (device->bdev)
			fs_devices->open_devices--;

		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		if (device->can_discard)
			fs_devices->num_can_discard--;

		new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
		BUG_ON(!new_device); /* -ENOMEM */
		memcpy(new_device, device, sizeof(*new_device));

		/* Safe because we are under uuid_mutex */
		if (device->name) {
			name = rcu_string_strdup(device->name->str, GFP_NOFS);
			BUG_ON(device->name && !name); /* -ENOMEM */
			rcu_assign_pointer(new_device->name, name);
		}
		new_device->bdev = NULL;
		new_device->writeable = 0;
		new_device->in_fs_metadata = 0;
		new_device->can_discard = 0;
		list_replace_rcu(&device->dev_list, &new_device->dev_list);

		call_rcu(&device->rcu, free_device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}

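/*
 * Open every scanned device of a filesystem, validate its superblock
 * against the cached devid/uuid, and track the device with the newest
 * generation as latest_bdev.  Devices that fail to open are skipped;
 * -EINVAL is returned only if none could be opened.
 */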
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		bdev = blkdev_get_by_path(device->name->str, flags, holder);
		if (IS_ERR(bdev)) {
			printk(KERN_INFO "btrfs: open %s failed\n", device->name->str);
			goto error;
		}
		filemap_write_and_wait(bdev->bd_inode->i_mapping);
		invalidate_bdev(bdev);
		set_blocksize(bdev, 4096);

		bh = btrfs_read_dev_super(bdev);
		if (!bh)
			goto error_close;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q)) {
			device->can_discard = 1;
			fs_devices->num_can_discard++;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
error_close:
		blkdev_put(bdev, flags);
error:
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}

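/*
 * Read the superblock of a single device and register the device in
 * the list of known filesystems via device_list_add().
 */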
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;
	u64 total_devices;

	flags |= FMODE_EXCL;
	bdev = blkdev_get_by_path(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	mutex_lock(&uuid_mutex);
	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = btrfs_read_dev_super(bdev);
	if (!bh) {
		ret = -EINVAL;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	total_devices = btrfs_super_num_devices(disk_super);
	if (disk_super->label[0])
		printk(KERN_INFO "device label %s ", disk_super->label);
	else
		printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
	if (!ret && fs_devices_ret)
		(*fs_devices_ret)->total_devices = total_devices;
	brelse(bh);
error_close:
	mutex_unlock(&uuid_mutex);
	blkdev_put(bdev, flags);
error:
	return ret;
}

/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * find_free_dev_extent - find free space in the specified device
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space
 * @len:	the size of the free space that we find, or the size of the
 *		max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find one.  If we
 * don't find suitable free space, it will be used to store the start position
 * of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);

	max_hole_start = search_start;
	max_hole_size = 0;
	hole_size = 0;

	if (search_start >= search_end) {
		ret = -ENOSPC;
		goto error;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size.  Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start)
		hole_size = search_end - search_start;

	if (hole_size > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = hole_size;
	}

	/* See above. */
	if (hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
error:
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

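/*
 * Remove the dev extent item covering @start from the device tree and
 * return the freed bytes to fs_info->free_chunk_space.
 */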
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_error(root->fs_info, ret, "Slot search failed");
		goto out;
	}

	if (device->bytes_used > 0) {
		u64 len = btrfs_dev_extent_length(leaf, extent);
		device->bytes_used -= len;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += len;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Failed to remove dev extent item");
	}
out:
	btrfs_free_path(path);
	return ret;
}

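/*
 * Insert a dev extent item recording that @num_bytes at @start of
 * @device back the chunk at (@chunk_tree, @chunk_objectid,
 * @chunk_offset).
 */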
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

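/*
 * Find the offset just past the highest chunk item for @objectid, i.e.
 * the logical address where the next chunk can be placed.
 */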
static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

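/*
 * Look up the highest existing device item and return its devid + 1 as
 * the id for the next device (1 if there are none yet).
 */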
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

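/*
 * Delete the device item for @device from the chunk tree inside its
 * own transaction.
 */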
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}

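/*
 * Remove a device from the filesystem: relocate everything off it with
 * btrfs_shrink_device(), delete its device item, drop it from the
 * device list and, if it was writeable, wipe the magic from its
 * superblock.  "missing" may be passed as the path to remove a device
 * that is no longer present.
 */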
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_devices *cur_devices;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	int ret = 0;
	bool clear_super = false;

	mutex_lock(&uuid_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		root->fs_info->avail_system_alloc_bits |
		root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->num_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "
		       "on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->num_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			printk(KERN_ERR "btrfs: no missing devices found to "
			       "remove\n");
			goto out;
		}
	} else {
		bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
					  root->fs_info->bdev_holder);
		if (IS_ERR(bdev)) {
			ret = PTR_ERR(bdev);
			goto out;
		}

		set_blocksize(bdev, 4096);
		invalidate_bdev(bdev);
		bh = btrfs_read_dev_super(bdev);
		if (!bh) {
			ret = -EINVAL;
			goto error_close;
		}
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "
		       "device\n");
		ret = -EINVAL;
		goto error_brelse;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices--;
		clear_super = true;
	}

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_undo;

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space = device->total_bytes -
		device->bytes_used;
	spin_unlock(&root->fs_info->free_chunk_lock);

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers.
	 */

	cur_devices = device->fs_devices;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	device->fs_devices->num_devices--;
	device->fs_devices->total_devices--;

	if (device->missing)
		root->fs_info->fs_devices->missing_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev)
		device->fs_devices->open_devices--;

	call_rcu(&device->rcu, free_device);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);

	if (cur_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == cur_devices)
				break;
			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = cur_devices->seed;
		cur_devices->seed = NULL;
		lock_chunks(root);
		__btrfs_close_devices(cur_devices);
		unlock_chunks(root);
		free_fs_devices(cur_devices);
	}

	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (clear_super) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	ret = 0;

error_brelse:
	brelse(bh);
error_close:
	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
	mutex_unlock(&uuid_mutex);
	return ret;
error_undo:
	if (device->writeable) {
		lock_chunks(root);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices++;
	}
	goto error_brelse;
}

/*
 * does all the dirty work required for changing file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!seed_devices)
		return -ENOMEM;

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			     synchronize_rcu);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->total_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}

/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid,
				   (unsigned long)btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid,
				   (unsigned long)btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
		BUG_ON(!device); /* Logic error */

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

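/*
 * Add a new device to a mounted filesystem.  If the current filesystem
 * is a seed, this first sprouts a new writable filesystem on top of it
 * via btrfs_prepare_sprout()/btrfs_finish_sprout().
 */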
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct request_queue *q;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	struct rcu_string *name;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EROFS;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	devices = &root->fs_info->fs_devices->devices;
	/*
	 * we have the volume lock, so we don't need the extra
	 * device list mutex while reading the list here.
	 */
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto error;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_NOFS);
	if (!name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}
	rcu_assign_pointer(device->name, name);

	ret = find_next_devid(root, &device->devid);
	if (ret) {
		rcu_string_free(device->name);
		kfree(device);
		goto error;
	}

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		rcu_string_free(device->name);
		kfree(device);
		ret = PTR_ERR(trans);
		goto error;
	}

	lock_chunks(root);

	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->mode = FMODE_EXCL;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(root);
		BUG_ON(ret); /* -ENOMEM */
	}

	device->fs_devices = root->fs_info->fs_devices;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	root->fs_info->fs_devices->total_devices++;
	if (device->can_discard)
		root->fs_info->fs_devices->num_can_discard++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space += device->total_bytes;
	spin_unlock(&root->fs_info->free_chunk_lock);

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
	btrfs_set_super_total_bytes(root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
	btrfs_set_super_num_devices(root->fs_info->super_copy,
				    total_bytes + 1);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		ret = init_first_rw_device(trans, root, device);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}
		ret = btrfs_finish_sprout(trans, root);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}
	} else {
		ret = btrfs_add_device(trans, root, device);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}
	}

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
	ret = btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		if (ret) /* transaction commit */
			return ret;

		ret = btrfs_relocate_sys_chunks(root);
		if (ret < 0)
			btrfs_error(root->fs_info, ret,
				    "Failed to relocate sys chunks after "
				    "device initialization. This can be fixed "
				    "using the \"btrfs balance\" command.");
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) == -ENOENT)
				return 0;
			return PTR_ERR(trans);
		}
		ret = btrfs_commit_transaction(trans, root);
	}

	return ret;

error_trans:
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	rcu_string_free(device->name);
	kfree(device);
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	return ret;
}

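/*
 * Rewrite the device item in the chunk tree from the in-memory state
 * of @device (sizes, alignment, bytes used).
 */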
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

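/*
 * Grow @device to @new_size and bump the superblock's total_bytes
 * accordingly.  Caller must hold the chunk mutex; btrfs_grow_device()
 * is the locked wrapper.
 */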
static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
			       struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
		return -EACCES;
	if (new_size <= device->total_bytes)
		return -EINVAL;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	device->disk_total_bytes = new_size;
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;

	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}

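/*
 * Delete the chunk item at (@chunk_objectid, @chunk_offset) from the
 * chunk tree.
 */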
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	else if (ret > 0) { /* Logic error or corruption */
		btrfs_error(root->fs_info, -ENOENT,
			    "Failed lookup while freeing chunk.");
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret < 0)
		btrfs_error(root->fs_info, ret,
			    "Failed to delete chunk item.");
out:
	btrfs_free_path(path);
	return ret;
}

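/*
 * Remove the matching chunk from the sys_chunk_array embedded in the
 * superblock copy and shrink the recorded array size.
 */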
1963 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
1964 chunk_offset)
1965 {
1966 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
1967 struct btrfs_disk_key *disk_key;
1968 struct btrfs_chunk *chunk;
1969 u8 *ptr;
1970 int ret = 0;
1971 u32 num_stripes;
1972 u32 array_size;
1973 u32 len = 0;
1974 u32 cur;
1975 struct btrfs_key key;
1976
1977 array_size = btrfs_super_sys_array_size(super_copy);
1978
1979 ptr = super_copy->sys_chunk_array;
1980 cur = 0;
1981
1982 while (cur < array_size) {
1983 disk_key = (struct btrfs_disk_key *)ptr;
1984 btrfs_disk_key_to_cpu(&key, disk_key);
1985
1986 len = sizeof(*disk_key);
1987
1988 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
1989 chunk = (struct btrfs_chunk *)(ptr + len);
1990 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
1991 len += btrfs_chunk_item_size(num_stripes);
1992 } else {
1993 ret = -EIO;
1994 break;
1995 }
1996 if (key.objectid == chunk_objectid &&
1997 key.offset == chunk_offset) {
1998 memmove(ptr, ptr + len, array_size - (cur + len));
1999 array_size -= len;
2000 btrfs_set_super_sys_array_size(super_copy, array_size);
2001 } else {
2002 ptr += len;
2003 cur += len;
2004 }
2005 }
2006 return ret;
2007 }
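/*
 * Illustrative sketch (not part of volumes.c): the superblock's
 * sys_chunk_array walked above is a packed byte buffer of
 * (disk_key, chunk item) records, where each chunk item's size depends
 * on its stripe count:
 *
 *   | disk_key | chunk + stripes | disk_key | chunk + stripes | ...
 *
 * A minimal userspace sketch of the deletion step, with a standalone
 * helper standing in for the kernel code; it performs the same
 * memmove/shrink arithmetic btrfs_del_sys_chunk() uses on a match.
 */
#include <string.h>

static unsigned int del_packed_record(unsigned char *buf,
				      unsigned int array_size,
				      unsigned int cur, unsigned int len)
{
	/* slide the tail down over the record at [cur, cur + len) */
	memmove(buf + cur, buf + cur + len, array_size - (cur + len));
	return array_size - len;	/* new stored array size */
}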
2008
2009 static int btrfs_relocate_chunk(struct btrfs_root *root,
2010 u64 chunk_tree, u64 chunk_objectid,
2011 u64 chunk_offset)
2012 {
2013 struct extent_map_tree *em_tree;
2014 struct btrfs_root *extent_root;
2015 struct btrfs_trans_handle *trans;
2016 struct extent_map *em;
2017 struct map_lookup *map;
2018 int ret;
2019 int i;
2020
2021 root = root->fs_info->chunk_root;
2022 extent_root = root->fs_info->extent_root;
2023 em_tree = &root->fs_info->mapping_tree.map_tree;
2024
2025 ret = btrfs_can_relocate(extent_root, chunk_offset);
2026 if (ret)
2027 return -ENOSPC;
2028
2029 /* step one, relocate all the extents inside this chunk */
2030 ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2031 if (ret)
2032 return ret;
2033
2034 trans = btrfs_start_transaction(root, 0);
2035 BUG_ON(IS_ERR(trans));
2036
2037 lock_chunks(root);
2038
2039 /*
2040 * step two, delete the device extents and the
2041 * chunk tree entries
2042 */
2043 read_lock(&em_tree->lock);
2044 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2045 read_unlock(&em_tree->lock);
2046
2047 BUG_ON(!em || em->start > chunk_offset ||
2048 em->start + em->len < chunk_offset);
2049 map = (struct map_lookup *)em->bdev;
2050
2051 for (i = 0; i < map->num_stripes; i++) {
2052 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
2053 map->stripes[i].physical);
2054 BUG_ON(ret);
2055
2056 if (map->stripes[i].dev) {
2057 ret = btrfs_update_device(trans, map->stripes[i].dev);
2058 BUG_ON(ret);
2059 }
2060 }
2061 ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
2062 chunk_offset);
2063
2064 BUG_ON(ret);
2065
2066 trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2067
2068 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2069 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2070 BUG_ON(ret);
2071 }
2072
2073 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
2074 BUG_ON(ret);
2075
2076 write_lock(&em_tree->lock);
2077 remove_extent_mapping(em_tree, em);
2078 write_unlock(&em_tree->lock);
2079
2080 kfree(map);
2081 em->bdev = NULL;
2082
2083 /* once for the tree */
2084 free_extent_map(em);
2085 /* once for us */
2086 free_extent_map(em);
2087
2088 unlock_chunks(root);
2089 btrfs_end_transaction(trans, root);
2090 return 0;
2091 }
2092
2093 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2094 {
2095 struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2096 struct btrfs_path *path;
2097 struct extent_buffer *leaf;
2098 struct btrfs_chunk *chunk;
2099 struct btrfs_key key;
2100 struct btrfs_key found_key;
2101 u64 chunk_tree = chunk_root->root_key.objectid;
2102 u64 chunk_type;
2103 bool retried = false;
2104 int failed = 0;
2105 int ret;
2106
2107 path = btrfs_alloc_path();
2108 if (!path)
2109 return -ENOMEM;
2110
2111 again:
2112 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2113 key.offset = (u64)-1;
2114 key.type = BTRFS_CHUNK_ITEM_KEY;
2115
2116 while (1) {
2117 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2118 if (ret < 0)
2119 goto error;
2120 BUG_ON(ret == 0); /* Corruption */
2121
2122 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2123 key.type);
2124 if (ret < 0)
2125 goto error;
2126 if (ret > 0)
2127 break;
2128
2129 leaf = path->nodes[0];
2130 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2131
2132 chunk = btrfs_item_ptr(leaf, path->slots[0],
2133 struct btrfs_chunk);
2134 chunk_type = btrfs_chunk_type(leaf, chunk);
2135 btrfs_release_path(path);
2136
2137 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2138 ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2139 found_key.objectid,
2140 found_key.offset);
2141 if (ret == -ENOSPC)
2142 failed++;
2143 else if (ret)
2144 BUG();
2145 }
2146
2147 if (found_key.offset == 0)
2148 break;
2149 key.offset = found_key.offset - 1;
2150 }
2151 ret = 0;
2152 if (failed && !retried) {
2153 failed = 0;
2154 retried = true;
2155 goto again;
2156 } else if (failed && retried) {
2157 WARN_ON(1);
2158 ret = -ENOSPC;
2159 }
2160 error:
2161 btrfs_free_path(path);
2162 return ret;
2163 }
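/*
 * The failed/retried pair above implements a retry-once policy: -ENOSPC
 * relocations are counted rather than aborted, the whole scan is re-run
 * exactly once (earlier relocations may have freed space), and only a
 * second full pass of failures is reported. A minimal standalone sketch
 * of the shape, assuming a hypothetical relocate_all() callback:
 */
#include <errno.h>
#include <stdbool.h>

static int retry_once_sketch(int (*relocate_all)(int *failed))
{
	int failed = 0;
	bool retried = false;
	int ret;
again:
	ret = relocate_all(&failed);	/* counts -ENOSPC chunks in *failed */
	if (ret)
		return ret;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;		/* one full retry pass */
	}
	return failed ? -ENOSPC : 0;
}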
2164
2165 static int insert_balance_item(struct btrfs_root *root,
2166 struct btrfs_balance_control *bctl)
2167 {
2168 struct btrfs_trans_handle *trans;
2169 struct btrfs_balance_item *item;
2170 struct btrfs_disk_balance_args disk_bargs;
2171 struct btrfs_path *path;
2172 struct extent_buffer *leaf;
2173 struct btrfs_key key;
2174 int ret, err;
2175
2176 path = btrfs_alloc_path();
2177 if (!path)
2178 return -ENOMEM;
2179
2180 trans = btrfs_start_transaction(root, 0);
2181 if (IS_ERR(trans)) {
2182 btrfs_free_path(path);
2183 return PTR_ERR(trans);
2184 }
2185
2186 key.objectid = BTRFS_BALANCE_OBJECTID;
2187 key.type = BTRFS_BALANCE_ITEM_KEY;
2188 key.offset = 0;
2189
2190 ret = btrfs_insert_empty_item(trans, root, path, &key,
2191 sizeof(*item));
2192 if (ret)
2193 goto out;
2194
2195 leaf = path->nodes[0];
2196 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2197
2198 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2199
2200 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2201 btrfs_set_balance_data(leaf, item, &disk_bargs);
2202 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2203 btrfs_set_balance_meta(leaf, item, &disk_bargs);
2204 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2205 btrfs_set_balance_sys(leaf, item, &disk_bargs);
2206
2207 btrfs_set_balance_flags(leaf, item, bctl->flags);
2208
2209 btrfs_mark_buffer_dirty(leaf);
2210 out:
2211 btrfs_free_path(path);
2212 err = btrfs_commit_transaction(trans, root);
2213 if (err && !ret)
2214 ret = err;
2215 return ret;
2216 }
2217
2218 static int del_balance_item(struct btrfs_root *root)
2219 {
2220 struct btrfs_trans_handle *trans;
2221 struct btrfs_path *path;
2222 struct btrfs_key key;
2223 int ret, err;
2224
2225 path = btrfs_alloc_path();
2226 if (!path)
2227 return -ENOMEM;
2228
2229 trans = btrfs_start_transaction(root, 0);
2230 if (IS_ERR(trans)) {
2231 btrfs_free_path(path);
2232 return PTR_ERR(trans);
2233 }
2234
2235 key.objectid = BTRFS_BALANCE_OBJECTID;
2236 key.type = BTRFS_BALANCE_ITEM_KEY;
2237 key.offset = 0;
2238
2239 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2240 if (ret < 0)
2241 goto out;
2242 if (ret > 0) {
2243 ret = -ENOENT;
2244 goto out;
2245 }
2246
2247 ret = btrfs_del_item(trans, root, path);
2248 out:
2249 btrfs_free_path(path);
2250 err = btrfs_commit_transaction(trans, root);
2251 if (err && !ret)
2252 ret = err;
2253 return ret;
2254 }
2255
2256 /*
2257 * This is a heuristic used to reduce the number of chunks balanced on
2258 * resume after balance was interrupted.
2259 */
2260 static void update_balance_args(struct btrfs_balance_control *bctl)
2261 {
2262 /*
2263 * Turn on soft mode for chunk types that were being converted.
2264 */
2265 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2266 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2267 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2268 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2269 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2270 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2271
2272 /*
2273 * Turn on usage filter if it is not already used. The idea is
2274 * that chunks that we have already balanced should be
2275 * reasonably full. Don't do it for chunks that are being
2276 * converted - that will keep us from relocating unconverted
2277 * (albeit full) chunks.
2278 */
2279 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2280 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2281 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2282 bctl->data.usage = 90;
2283 }
2284 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2285 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2286 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2287 bctl->sys.usage = 90;
2288 }
2289 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2290 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2291 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2292 bctl->meta.usage = 90;
2293 }
2294 }
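/*
 * Illustrative example of the heuristic above (not from the kernel
 * tree): a balance started as "convert data to raid1" that is
 * interrupted and resumed gains BTRFS_BALANCE_ARGS_SOFT, so chunks that
 * already reached raid1 are skipped, while the non-convert metadata
 * args gain BTRFS_BALANCE_ARGS_USAGE with usage = 90, so metadata
 * chunks the earlier run already compacted (>= 90% full) are not
 * relocated a second time.
 */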
2295
2296 /*
2297 * Should be called with both balance and volume mutexes held to
2298 * serialize other volume operations (add_dev/rm_dev/resize) with
2299 * restriper. Same goes for unset_balance_control.
2300 */
2301 static void set_balance_control(struct btrfs_balance_control *bctl)
2302 {
2303 struct btrfs_fs_info *fs_info = bctl->fs_info;
2304
2305 BUG_ON(fs_info->balance_ctl);
2306
2307 spin_lock(&fs_info->balance_lock);
2308 fs_info->balance_ctl = bctl;
2309 spin_unlock(&fs_info->balance_lock);
2310 }
2311
2312 static void unset_balance_control(struct btrfs_fs_info *fs_info)
2313 {
2314 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2315
2316 BUG_ON(!fs_info->balance_ctl);
2317
2318 spin_lock(&fs_info->balance_lock);
2319 fs_info->balance_ctl = NULL;
2320 spin_unlock(&fs_info->balance_lock);
2321
2322 kfree(bctl);
2323 }
2324
2325 /*
2326 * Balance filters. Return 1 if chunk should be filtered out
2327 * (should not be balanced).
2328 */
2329 static int chunk_profiles_filter(u64 chunk_type,
2330 struct btrfs_balance_args *bargs)
2331 {
2332 chunk_type = chunk_to_extended(chunk_type) &
2333 BTRFS_EXTENDED_PROFILE_MASK;
2334
2335 if (bargs->profiles & chunk_type)
2336 return 0;
2337
2338 return 1;
2339 }
2340
2341 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2342 struct btrfs_balance_args *bargs)
2343 {
2344 struct btrfs_block_group_cache *cache;
2345 u64 chunk_used, user_thresh;
2346 int ret = 1;
2347
2348 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2349 chunk_used = btrfs_block_group_used(&cache->item);
2350
2351 user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
2352 if (chunk_used < user_thresh)
2353 ret = 0;
2354
2355 btrfs_put_block_group(cache);
2356 return ret;
2357 }
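/*
 * Sketch of the threshold arithmetic used above. In this tree,
 * div_factor_fine(num, factor) computes num * factor / 100 (see
 * fs/btrfs/math.h), so with usage = 90 a 1 GiB chunk is balanced only
 * if it holds less than ~921.6 MiB. Standalone illustration:
 */
#include <stdio.h>

int main(void)
{
	unsigned long long chunk_len = 1024ULL * 1024 * 1024; /* cache->key.offset */
	unsigned long long chunk_used = 800ULL * 1024 * 1024;
	int usage = 90;					/* bargs->usage */
	unsigned long long thresh = chunk_len * usage / 100;

	/* mirrors chunk_usage_filter(): 0 == balance it, 1 == skip it */
	printf("filtered out: %d\n", chunk_used < thresh ? 0 : 1);
	return 0;
}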
2358
2359 static int chunk_devid_filter(struct extent_buffer *leaf,
2360 struct btrfs_chunk *chunk,
2361 struct btrfs_balance_args *bargs)
2362 {
2363 struct btrfs_stripe *stripe;
2364 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2365 int i;
2366
2367 for (i = 0; i < num_stripes; i++) {
2368 stripe = btrfs_stripe_nr(chunk, i);
2369 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2370 return 0;
2371 }
2372
2373 return 1;
2374 }
2375
2376 /* [pstart, pend) */
2377 static int chunk_drange_filter(struct extent_buffer *leaf,
2378 struct btrfs_chunk *chunk,
2379 u64 chunk_offset,
2380 struct btrfs_balance_args *bargs)
2381 {
2382 struct btrfs_stripe *stripe;
2383 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2384 u64 stripe_offset;
2385 u64 stripe_length;
2386 int factor;
2387 int i;
2388
2389 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
2390 return 0;
2391
2392 if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
2393 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
2394 factor = 2;
2395 else
2396 factor = 1;
2397 factor = num_stripes / factor;
2398
2399 for (i = 0; i < num_stripes; i++) {
2400 stripe = btrfs_stripe_nr(chunk, i);
2401 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
2402 continue;
2403
2404 stripe_offset = btrfs_stripe_offset(leaf, stripe);
2405 stripe_length = btrfs_chunk_length(leaf, chunk);
2406 do_div(stripe_length, factor);
2407
2408 if (stripe_offset < bargs->pend &&
2409 stripe_offset + stripe_length > bargs->pstart)
2410 return 0;
2411 }
2412
2413 return 1;
2414 }
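/*
 * Worked example for the factor arithmetic above (illustrative): for
 * RAID1 with num_stripes = 2 the data is fully mirrored, so factor
 * becomes 2 / 2 = 1 and each device extent spans the whole chunk
 * length. For RAID0 with num_stripes = 4, factor stays 4 and each
 * device extent spans chunk_length / 4. The final comparison is the
 * standard half-open overlap test for [pstart, pend):
 *
 *     stripe_offset < pend && stripe_offset + stripe_length > pstart
 */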
2415
2416 /* [vstart, vend) */
2417 static int chunk_vrange_filter(struct extent_buffer *leaf,
2418 struct btrfs_chunk *chunk,
2419 u64 chunk_offset,
2420 struct btrfs_balance_args *bargs)
2421 {
2422 if (chunk_offset < bargs->vend &&
2423 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2424 /* at least part of the chunk is inside this vrange */
2425 return 0;
2426
2427 return 1;
2428 }
2429
2430 static int chunk_soft_convert_filter(u64 chunk_type,
2431 struct btrfs_balance_args *bargs)
2432 {
2433 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2434 return 0;
2435
2436 chunk_type = chunk_to_extended(chunk_type) &
2437 BTRFS_EXTENDED_PROFILE_MASK;
2438
2439 if (bargs->target == chunk_type)
2440 return 1;
2441
2442 return 0;
2443 }
2444
2445 static int should_balance_chunk(struct btrfs_root *root,
2446 struct extent_buffer *leaf,
2447 struct btrfs_chunk *chunk, u64 chunk_offset)
2448 {
2449 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
2450 struct btrfs_balance_args *bargs = NULL;
2451 u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2452
2453 /* type filter */
2454 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
2455 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
2456 return 0;
2457 }
2458
2459 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
2460 bargs = &bctl->data;
2461 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2462 bargs = &bctl->sys;
2463 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
2464 bargs = &bctl->meta;
2465
2466 /* profiles filter */
2467 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
2468 chunk_profiles_filter(chunk_type, bargs)) {
2469 return 0;
2470 }
2471
2472 /* usage filter */
2473 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
2474 chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
2475 return 0;
2476 }
2477
2478 /* devid filter */
2479 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
2480 chunk_devid_filter(leaf, chunk, bargs)) {
2481 return 0;
2482 }
2483
2484 /* drange filter, makes sense only with devid filter */
2485 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
2486 chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2487 return 0;
2488 }
2489
2490 /* vrange filter */
2491 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
2492 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2493 return 0;
2494 }
2495
2496 /* soft profile changing mode */
2497 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
2498 chunk_soft_convert_filter(chunk_type, bargs)) {
2499 return 0;
2500 }
2501
2502 return 1;
2503 }
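/*
 * Note on the convention above (illustrative summary): each
 * chunk_*_filter() returns 1 when the chunk should be *excluded*, while
 * should_balance_chunk() returns 1 when the chunk should be *balanced*.
 * A filter only runs when its BTRFS_BALANCE_ARGS_* flag is set, so the
 * function is effectively:
 *
 *     balance = type matches && !(profiles || usage || devid ||
 *                                 drange || vrange || soft filtered it)
 */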
2504
2505 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2506 {
2507 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2508 struct btrfs_root *chunk_root = fs_info->chunk_root;
2509 struct btrfs_root *dev_root = fs_info->dev_root;
2510 struct list_head *devices;
2511 struct btrfs_device *device;
2512 u64 old_size;
2513 u64 size_to_free;
2514 struct btrfs_chunk *chunk;
2515 struct btrfs_path *path;
2516 struct btrfs_key key;
2517 struct btrfs_key found_key;
2518 struct btrfs_trans_handle *trans;
2519 struct extent_buffer *leaf;
2520 int slot;
2521 int ret;
2522 int enospc_errors = 0;
2523 bool counting = true;
2524
2525 /* step one, make some room on all the devices */
2526 devices = &fs_info->fs_devices->devices;
2527 list_for_each_entry(device, devices, dev_list) {
2528 old_size = device->total_bytes;
2529 size_to_free = div_factor(old_size, 1);
2530 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2531 if (!device->writeable ||
2532 device->total_bytes - device->bytes_used > size_to_free)
2533 continue;
2534
2535 ret = btrfs_shrink_device(device, old_size - size_to_free);
2536 if (ret == -ENOSPC)
2537 break;
2538 BUG_ON(ret);
2539
2540 trans = btrfs_start_transaction(dev_root, 0);
2541 BUG_ON(IS_ERR(trans));
2542
2543 ret = btrfs_grow_device(trans, device, old_size);
2544 BUG_ON(ret);
2545
2546 btrfs_end_transaction(trans, dev_root);
2547 }
2548
2549 /* step two, relocate all the chunks */
2550 path = btrfs_alloc_path();
2551 if (!path) {
2552 ret = -ENOMEM;
2553 goto error;
2554 }
2555
2556 /* zero out stat counters */
2557 spin_lock(&fs_info->balance_lock);
2558 memset(&bctl->stat, 0, sizeof(bctl->stat));
2559 spin_unlock(&fs_info->balance_lock);
2560 again:
2561 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2562 key.offset = (u64)-1;
2563 key.type = BTRFS_CHUNK_ITEM_KEY;
2564
2565 while (1) {
2566 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2567 atomic_read(&fs_info->balance_cancel_req)) {
2568 ret = -ECANCELED;
2569 goto error;
2570 }
2571
2572 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2573 if (ret < 0)
2574 goto error;
2575
2576 /*
2577 * this shouldn't happen, it means the last relocate
2578 * failed
2579 */
2580 if (ret == 0)
2581 BUG(); /* FIXME break ? */
2582
2583 ret = btrfs_previous_item(chunk_root, path, 0,
2584 BTRFS_CHUNK_ITEM_KEY);
2585 if (ret) {
2586 ret = 0;
2587 break;
2588 }
2589
2590 leaf = path->nodes[0];
2591 slot = path->slots[0];
2592 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2593
2594 if (found_key.objectid != key.objectid)
2595 break;
2596
2597 /* chunk zero is special */
2598 if (found_key.offset == 0)
2599 break;
2600
2601 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2602
2603 if (!counting) {
2604 spin_lock(&fs_info->balance_lock);
2605 bctl->stat.considered++;
2606 spin_unlock(&fs_info->balance_lock);
2607 }
2608
2609 ret = should_balance_chunk(chunk_root, leaf, chunk,
2610 found_key.offset);
2611 btrfs_release_path(path);
2612 if (!ret)
2613 goto loop;
2614
2615 if (counting) {
2616 spin_lock(&fs_info->balance_lock);
2617 bctl->stat.expected++;
2618 spin_unlock(&fs_info->balance_lock);
2619 goto loop;
2620 }
2621
2622 ret = btrfs_relocate_chunk(chunk_root,
2623 chunk_root->root_key.objectid,
2624 found_key.objectid,
2625 found_key.offset);
2626 if (ret && ret != -ENOSPC)
2627 goto error;
2628 if (ret == -ENOSPC) {
2629 enospc_errors++;
2630 } else {
2631 spin_lock(&fs_info->balance_lock);
2632 bctl->stat.completed++;
2633 spin_unlock(&fs_info->balance_lock);
2634 }
2635 loop:
2636 key.offset = found_key.offset - 1;
2637 }
2638
2639 if (counting) {
2640 btrfs_release_path(path);
2641 counting = false;
2642 goto again;
2643 }
2644 error:
2645 btrfs_free_path(path);
2646 if (enospc_errors) {
2647 printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
2648 enospc_errors);
2649 if (!ret)
2650 ret = -ENOSPC;
2651 }
2652
2653 return ret;
2654 }
2655
2656 /**
2657 * alloc_profile_is_valid - see if a given profile is valid and reduced
2658 * @flags: profile to validate
2659 * @extended: if true @flags is treated as an extended profile
2660 */
2661 static int alloc_profile_is_valid(u64 flags, int extended)
2662 {
2663 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
2664 BTRFS_BLOCK_GROUP_PROFILE_MASK);
2665
2666 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
2667
2668 /* 1) check that all other bits are zeroed */
2669 if (flags & ~mask)
2670 return 0;
2671
2672 /* 2) see if profile is reduced */
2673 if (flags == 0)
2674 return !extended; /* "0" is valid for usual profiles */
2675
2676 /* true if exactly one bit set */
2677 return (flags & (flags - 1)) == 0;
2678 }
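/*
 * The final test above is the classic power-of-two check: clearing the
 * lowest set bit with flags & (flags - 1) leaves zero iff at most one
 * bit was set, i.e. the profile is "reduced" to a single RAID level.
 * Standalone illustration with two arbitrary example profile bits:
 */
#include <assert.h>

int main(void)
{
	unsigned long long a = 1ULL << 3, b = 1ULL << 4;

	assert((a & (a - 1)) == 0);		/* one bit set: reduced */
	assert(((a | b) & ((a | b) - 1)) != 0);	/* two bits: not reduced */
	return 0;
}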
2679
2680 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
2681 {
2682 /* cancel requested || normal exit path */
2683 return atomic_read(&fs_info->balance_cancel_req) ||
2684 (atomic_read(&fs_info->balance_pause_req) == 0 &&
2685 atomic_read(&fs_info->balance_cancel_req) == 0);
2686 }
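/*
 * Truth table for the expression above (illustrative): the only state
 * that keeps the balance control alive is "pause requested and no
 * cancel requested"; both a cancel and a plain run-to-completion exit
 * tear it down.
 *
 *     pause_req  cancel_req  need_close
 *         0          0           1     (normal exit)
 *         1          0           0     (paused, keep ctl for resume)
 *         0          1           1
 *         1          1           1
 */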
2687
2688 static void __cancel_balance(struct btrfs_fs_info *fs_info)
2689 {
2690 int ret;
2691
2692 unset_balance_control(fs_info);
2693 ret = del_balance_item(fs_info->tree_root);
2694 BUG_ON(ret);
2695 }
2696
2697 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
2698 struct btrfs_ioctl_balance_args *bargs);
2699
2700 /*
2701 * Should be called with both balance and volume mutexes held
2702 */
2703 int btrfs_balance(struct btrfs_balance_control *bctl,
2704 struct btrfs_ioctl_balance_args *bargs)
2705 {
2706 struct btrfs_fs_info *fs_info = bctl->fs_info;
2707 u64 allowed;
2708 int mixed = 0;
2709 int ret;
2710
2711 if (btrfs_fs_closing(fs_info) ||
2712 atomic_read(&fs_info->balance_pause_req) ||
2713 atomic_read(&fs_info->balance_cancel_req)) {
2714 ret = -EINVAL;
2715 goto out;
2716 }
2717
2718 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
2719 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
2720 mixed = 1;
2721
2722 /*
2723 * In the case of mixed groups, both data and metadata should be
2724 * picked, and identical options should be given for both of them.
2725 */
2726 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
2727 if (mixed && (bctl->flags & allowed)) {
2728 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
2729 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
2730 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
2731 printk(KERN_ERR "btrfs: with mixed groups data and "
2732 "metadata balance options must be the same\n");
2733 ret = -EINVAL;
2734 goto out;
2735 }
2736 }
2737
2738 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
2739 if (fs_info->fs_devices->num_devices == 1)
2740 allowed |= BTRFS_BLOCK_GROUP_DUP;
2741 else if (fs_info->fs_devices->num_devices < 4)
2742 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
2743 else
2744 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
2745 BTRFS_BLOCK_GROUP_RAID10);
2746
2747 if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2748 (!alloc_profile_is_valid(bctl->data.target, 1) ||
2749 (bctl->data.target & ~allowed))) {
2750 printk(KERN_ERR "btrfs: unable to start balance with target "
2751 "data profile %llu\n",
2752 (unsigned long long)bctl->data.target);
2753 ret = -EINVAL;
2754 goto out;
2755 }
2756 if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2757 (!alloc_profile_is_valid(bctl->meta.target, 1) ||
2758 (bctl->meta.target & ~allowed))) {
2759 printk(KERN_ERR "btrfs: unable to start balance with target "
2760 "metadata profile %llu\n",
2761 (unsigned long long)bctl->meta.target);
2762 ret = -EINVAL;
2763 goto out;
2764 }
2765 if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2766 (!alloc_profile_is_valid(bctl->sys.target, 1) ||
2767 (bctl->sys.target & ~allowed))) {
2768 printk(KERN_ERR "btrfs: unable to start balance with target "
2769 "system profile %llu\n",
2770 (unsigned long long)bctl->sys.target);
2771 ret = -EINVAL;
2772 goto out;
2773 }
2774
2775 /* allow dup'ed data chunks only in mixed mode */
2776 if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2777 (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
2778 printk(KERN_ERR "btrfs: dup for data is not allowed\n");
2779 ret = -EINVAL;
2780 goto out;
2781 }
2782
2783 /* allow to reduce meta or sys integrity only if force set */
2784 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2785 BTRFS_BLOCK_GROUP_RAID10;
2786 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2787 (fs_info->avail_system_alloc_bits & allowed) &&
2788 !(bctl->sys.target & allowed)) ||
2789 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2790 (fs_info->avail_metadata_alloc_bits & allowed) &&
2791 !(bctl->meta.target & allowed))) {
2792 if (bctl->flags & BTRFS_BALANCE_FORCE) {
2793 printk(KERN_INFO "btrfs: force reducing metadata "
2794 "integrity\n");
2795 } else {
2796 printk(KERN_ERR "btrfs: balance will reduce metadata "
2797 "integrity, use force if you want this\n");
2798 ret = -EINVAL;
2799 goto out;
2800 }
2801 }
2802
2803 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
2804 int num_tolerated_disk_barrier_failures;
2805 u64 target = bctl->sys.target;
2806
2807 num_tolerated_disk_barrier_failures =
2808 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
2809 if (num_tolerated_disk_barrier_failures > 0 &&
2810 (target &
2811 (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
2812 BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
2813 num_tolerated_disk_barrier_failures = 0;
2814 else if (num_tolerated_disk_barrier_failures > 1 &&
2815 (target &
2816 (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
2817 num_tolerated_disk_barrier_failures = 1;
2818
2819 fs_info->num_tolerated_disk_barrier_failures =
2820 num_tolerated_disk_barrier_failures;
2821 }
2822
2823 ret = insert_balance_item(fs_info->tree_root, bctl);
2824 if (ret && ret != -EEXIST)
2825 goto out;
2826
2827 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
2828 BUG_ON(ret == -EEXIST);
2829 set_balance_control(bctl);
2830 } else {
2831 BUG_ON(ret != -EEXIST);
2832 spin_lock(&fs_info->balance_lock);
2833 update_balance_args(bctl);
2834 spin_unlock(&fs_info->balance_lock);
2835 }
2836
2837 atomic_inc(&fs_info->balance_running);
2838 mutex_unlock(&fs_info->balance_mutex);
2839
2840 ret = __btrfs_balance(fs_info);
2841
2842 mutex_lock(&fs_info->balance_mutex);
2843 atomic_dec(&fs_info->balance_running);
2844
2845 if (bargs) {
2846 memset(bargs, 0, sizeof(*bargs));
2847 update_ioctl_balance_args(fs_info, 0, bargs);
2848 }
2849
2850 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
2851 balance_need_close(fs_info)) {
2852 __cancel_balance(fs_info);
2853 }
2854
2855 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
2856 fs_info->num_tolerated_disk_barrier_failures =
2857 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
2858 }
2859
2860 wake_up(&fs_info->balance_wait_q);
2861
2862 return ret;
2863 out:
2864 if (bctl->flags & BTRFS_BALANCE_RESUME)
2865 __cancel_balance(fs_info);
2866 else
2867 kfree(bctl);
2868 return ret;
2869 }
2870
2871 static int balance_kthread(void *data)
2872 {
2873 struct btrfs_fs_info *fs_info = data;
2874 int ret = 0;
2875
2876 mutex_lock(&fs_info->volume_mutex);
2877 mutex_lock(&fs_info->balance_mutex);
2878
2879 if (fs_info->balance_ctl) {
2880 printk(KERN_INFO "btrfs: continuing balance\n");
2881 ret = btrfs_balance(fs_info->balance_ctl, NULL);
2882 }
2883
2884 mutex_unlock(&fs_info->balance_mutex);
2885 mutex_unlock(&fs_info->volume_mutex);
2886
2887 return ret;
2888 }
2889
2890 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
2891 {
2892 struct task_struct *tsk;
2893
2894 spin_lock(&fs_info->balance_lock);
2895 if (!fs_info->balance_ctl) {
2896 spin_unlock(&fs_info->balance_lock);
2897 return 0;
2898 }
2899 spin_unlock(&fs_info->balance_lock);
2900
2901 if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
2902 printk(KERN_INFO "btrfs: force skipping balance\n");
2903 return 0;
2904 }
2905
2906 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
2907 if (IS_ERR(tsk))
2908 return PTR_ERR(tsk);
2909
2910 return 0;
2911 }
2912
2913 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
2914 {
2915 struct btrfs_balance_control *bctl;
2916 struct btrfs_balance_item *item;
2917 struct btrfs_disk_balance_args disk_bargs;
2918 struct btrfs_path *path;
2919 struct extent_buffer *leaf;
2920 struct btrfs_key key;
2921 int ret;
2922
2923 path = btrfs_alloc_path();
2924 if (!path)
2925 return -ENOMEM;
2926
2927 key.objectid = BTRFS_BALANCE_OBJECTID;
2928 key.type = BTRFS_BALANCE_ITEM_KEY;
2929 key.offset = 0;
2930
2931 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
2932 if (ret < 0)
2933 goto out;
2934 if (ret > 0) { /* ret = -ENOENT; */
2935 ret = 0;
2936 goto out;
2937 }
2938
2939 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
2940 if (!bctl) {
2941 ret = -ENOMEM;
2942 goto out;
2943 }
2944
2945 leaf = path->nodes[0];
2946 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2947
2948 bctl->fs_info = fs_info;
2949 bctl->flags = btrfs_balance_flags(leaf, item);
2950 bctl->flags |= BTRFS_BALANCE_RESUME;
2951
2952 btrfs_balance_data(leaf, item, &disk_bargs);
2953 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
2954 btrfs_balance_meta(leaf, item, &disk_bargs);
2955 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
2956 btrfs_balance_sys(leaf, item, &disk_bargs);
2957 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
2958
2959 mutex_lock(&fs_info->volume_mutex);
2960 mutex_lock(&fs_info->balance_mutex);
2961
2962 set_balance_control(bctl);
2963
2964 mutex_unlock(&fs_info->balance_mutex);
2965 mutex_unlock(&fs_info->volume_mutex);
2966 out:
2967 btrfs_free_path(path);
2968 return ret;
2969 }
2970
2971 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
2972 {
2973 int ret = 0;
2974
2975 mutex_lock(&fs_info->balance_mutex);
2976 if (!fs_info->balance_ctl) {
2977 mutex_unlock(&fs_info->balance_mutex);
2978 return -ENOTCONN;
2979 }
2980
2981 if (atomic_read(&fs_info->balance_running)) {
2982 atomic_inc(&fs_info->balance_pause_req);
2983 mutex_unlock(&fs_info->balance_mutex);
2984
2985 wait_event(fs_info->balance_wait_q,
2986 atomic_read(&fs_info->balance_running) == 0);
2987
2988 mutex_lock(&fs_info->balance_mutex);
2989 /* we are good with balance_ctl ripped off from under us */
2990 BUG_ON(atomic_read(&fs_info->balance_running));
2991 atomic_dec(&fs_info->balance_pause_req);
2992 } else {
2993 ret = -ENOTCONN;
2994 }
2995
2996 mutex_unlock(&fs_info->balance_mutex);
2997 return ret;
2998 }
2999
3000 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3001 {
3002 mutex_lock(&fs_info->balance_mutex);
3003 if (!fs_info->balance_ctl) {
3004 mutex_unlock(&fs_info->balance_mutex);
3005 return -ENOTCONN;
3006 }
3007
3008 atomic_inc(&fs_info->balance_cancel_req);
3009 /*
3010 * if we are running, just wait and return; the balance item is
3011 * deleted in btrfs_balance in this case
3012 */
3013 if (atomic_read(&fs_info->balance_running)) {
3014 mutex_unlock(&fs_info->balance_mutex);
3015 wait_event(fs_info->balance_wait_q,
3016 atomic_read(&fs_info->balance_running) == 0);
3017 mutex_lock(&fs_info->balance_mutex);
3018 } else {
3019 /* __cancel_balance needs volume_mutex */
3020 mutex_unlock(&fs_info->balance_mutex);
3021 mutex_lock(&fs_info->volume_mutex);
3022 mutex_lock(&fs_info->balance_mutex);
3023
3024 if (fs_info->balance_ctl)
3025 __cancel_balance(fs_info);
3026
3027 mutex_unlock(&fs_info->volume_mutex);
3028 }
3029
3030 BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3031 atomic_dec(&fs_info->balance_cancel_req);
3032 mutex_unlock(&fs_info->balance_mutex);
3033 return 0;
3034 }
3035
3036 /*
3037 * shrinking a device means finding all of the device extents past
3038 * the new size, and then following the back refs to the chunks.
3039 * The chunk relocation code actually frees the device extent.
3040 */
3041 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3042 {
3043 struct btrfs_trans_handle *trans;
3044 struct btrfs_root *root = device->dev_root;
3045 struct btrfs_dev_extent *dev_extent = NULL;
3046 struct btrfs_path *path;
3047 u64 length;
3048 u64 chunk_tree;
3049 u64 chunk_objectid;
3050 u64 chunk_offset;
3051 int ret;
3052 int slot;
3053 int failed = 0;
3054 bool retried = false;
3055 struct extent_buffer *l;
3056 struct btrfs_key key;
3057 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3058 u64 old_total = btrfs_super_total_bytes(super_copy);
3059 u64 old_size = device->total_bytes;
3060 u64 diff = device->total_bytes - new_size;
3061
3062 path = btrfs_alloc_path();
3063 if (!path)
3064 return -ENOMEM;
3065
3066 path->reada = 2;
3067
3068 lock_chunks(root);
3069
3070 device->total_bytes = new_size;
3071 if (device->writeable) {
3072 device->fs_devices->total_rw_bytes -= diff;
3073 spin_lock(&root->fs_info->free_chunk_lock);
3074 root->fs_info->free_chunk_space -= diff;
3075 spin_unlock(&root->fs_info->free_chunk_lock);
3076 }
3077 unlock_chunks(root);
3078
3079 again:
3080 key.objectid = device->devid;
3081 key.offset = (u64)-1;
3082 key.type = BTRFS_DEV_EXTENT_KEY;
3083
3084 do {
3085 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3086 if (ret < 0)
3087 goto done;
3088
3089 ret = btrfs_previous_item(root, path, 0, key.type);
3090 if (ret < 0)
3091 goto done;
3092 if (ret) {
3093 ret = 0;
3094 btrfs_release_path(path);
3095 break;
3096 }
3097
3098 l = path->nodes[0];
3099 slot = path->slots[0];
3100 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3101
3102 if (key.objectid != device->devid) {
3103 btrfs_release_path(path);
3104 break;
3105 }
3106
3107 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3108 length = btrfs_dev_extent_length(l, dev_extent);
3109
3110 if (key.offset + length <= new_size) {
3111 btrfs_release_path(path);
3112 break;
3113 }
3114
3115 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3116 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3117 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3118 btrfs_release_path(path);
3119
3120 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3121 chunk_offset);
3122 if (ret && ret != -ENOSPC)
3123 goto done;
3124 if (ret == -ENOSPC)
3125 failed++;
3126 } while (key.offset-- > 0);
3127
3128 if (failed && !retried) {
3129 failed = 0;
3130 retried = true;
3131 goto again;
3132 } else if (failed && retried) {
3133 ret = -ENOSPC;
3134 lock_chunks(root);
3135
3136 device->total_bytes = old_size;
3137 if (device->writeable)
3138 device->fs_devices->total_rw_bytes += diff;
3139 spin_lock(&root->fs_info->free_chunk_lock);
3140 root->fs_info->free_chunk_space += diff;
3141 spin_unlock(&root->fs_info->free_chunk_lock);
3142 unlock_chunks(root);
3143 goto done;
3144 }
3145
3146 /* Shrinking succeeded, else we would be at "done". */
3147 trans = btrfs_start_transaction(root, 0);
3148 if (IS_ERR(trans)) {
3149 ret = PTR_ERR(trans);
3150 goto done;
3151 }
3152
3153 lock_chunks(root);
3154
3155 device->disk_total_bytes = new_size;
3156 /* Now btrfs_update_device() will change the on-disk size. */
3157 ret = btrfs_update_device(trans, device);
3158 if (ret) {
3159 unlock_chunks(root);
3160 btrfs_end_transaction(trans, root);
3161 goto done;
3162 }
3163 WARN_ON(diff > old_total);
3164 btrfs_set_super_total_bytes(super_copy, old_total - diff);
3165 unlock_chunks(root);
3166 btrfs_end_transaction(trans, root);
3167 done:
3168 btrfs_free_path(path);
3169 return ret;
3170 }
3171
3172 static int btrfs_add_system_chunk(struct btrfs_root *root,
3173 struct btrfs_key *key,
3174 struct btrfs_chunk *chunk, int item_size)
3175 {
3176 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3177 struct btrfs_disk_key disk_key;
3178 u32 array_size;
3179 u8 *ptr;
3180
3181 array_size = btrfs_super_sys_array_size(super_copy);
3182 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3183 return -EFBIG;
3184
3185 ptr = super_copy->sys_chunk_array + array_size;
3186 btrfs_cpu_key_to_disk(&disk_key, key);
3187 memcpy(ptr, &disk_key, sizeof(disk_key));
3188 ptr += sizeof(disk_key);
3189 memcpy(ptr, chunk, item_size);
3190 item_size += sizeof(disk_key);
3191 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
3192 return 0;
3193 }
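/*
 * Append-side counterpart of btrfs_del_sys_chunk(): one (disk_key,
 * chunk item) record is copied to the end of the packed array and the
 * stored size grows by sizeof(disk_key) + item_size, bounded by the
 * fixed superblock buffer (BTRFS_SYSTEM_CHUNK_ARRAY_SIZE bytes). Note
 * that the -EFBIG check counts only item_size and not the
 * sizeof(disk_key) bytes that are also appended; later kernels
 * tightened this check to include the key.
 */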
3194
3195 /*
3196 * sort the devices in descending order by max_avail, total_avail
3197 */
3198 static int btrfs_cmp_device_info(const void *a, const void *b)
3199 {
3200 const struct btrfs_device_info *di_a = a;
3201 const struct btrfs_device_info *di_b = b;
3202
3203 if (di_a->max_avail > di_b->max_avail)
3204 return -1;
3205 if (di_a->max_avail < di_b->max_avail)
3206 return 1;
3207 if (di_a->total_avail > di_b->total_avail)
3208 return -1;
3209 if (di_a->total_avail < di_b->total_avail)
3210 return 1;
3211 return 0;
3212 }
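/*
 * Usage sketch for the comparator above (illustrative: userspace qsort
 * instead of the kernel's sort(), and a pared-down info struct):
 */
#include <stdlib.h>

struct dev_info { unsigned long long max_avail, total_avail; };

static int cmp_desc(const void *a, const void *b)
{
	const struct dev_info *da = a, *db = b;

	if (da->max_avail != db->max_avail)
		return da->max_avail > db->max_avail ? -1 : 1;
	if (da->total_avail != db->total_avail)
		return da->total_avail > db->total_avail ? -1 : 1;
	return 0;
}

/*
 * After qsort(infos, ndevs, sizeof(*infos), cmp_desc), infos[ndevs - 1]
 * holds the smallest max_avail, which is exactly the element
 * __btrfs_alloc_chunk() below uses as the common stripe_size.
 */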
3213
3214 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3215 struct btrfs_root *extent_root,
3216 struct map_lookup **map_ret,
3217 u64 *num_bytes_out, u64 *stripe_size_out,
3218 u64 start, u64 type)
3219 {
3220 struct btrfs_fs_info *info = extent_root->fs_info;
3221 struct btrfs_fs_devices *fs_devices = info->fs_devices;
3222 struct list_head *cur;
3223 struct map_lookup *map = NULL;
3224 struct extent_map_tree *em_tree;
3225 struct extent_map *em;
3226 struct btrfs_device_info *devices_info = NULL;
3227 u64 total_avail;
3228 int num_stripes; /* total number of stripes to allocate */
3229 int sub_stripes; /* sub_stripes info for map */
3230 int dev_stripes; /* stripes per dev */
3231 int devs_max; /* max devs to use */
3232 int devs_min; /* min devs needed */
3233 int devs_increment; /* ndevs has to be a multiple of this */
3234 int ncopies; /* how many copies of the data we have */
3235 int ret;
3236 u64 max_stripe_size;
3237 u64 max_chunk_size;
3238 u64 stripe_size;
3239 u64 num_bytes;
3240 int ndevs;
3241 int i;
3242 int j;
3243
3244 BUG_ON(!alloc_profile_is_valid(type, 0));
3245
3246 if (list_empty(&fs_devices->alloc_list))
3247 return -ENOSPC;
3248
3249 sub_stripes = 1;
3250 dev_stripes = 1;
3251 devs_increment = 1;
3252 ncopies = 1;
3253 devs_max = 0; /* 0 == as many as possible */
3254 devs_min = 1;
3255
3256 /*
3257 * define the properties of each RAID type.
3258 * FIXME: move this to a global table and use it in all RAID
3259 * calculation code
3260 */
3261 if (type & (BTRFS_BLOCK_GROUP_DUP)) {
3262 dev_stripes = 2;
3263 ncopies = 2;
3264 devs_max = 1;
3265 } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
3266 devs_min = 2;
3267 } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
3268 devs_increment = 2;
3269 ncopies = 2;
3270 devs_max = 2;
3271 devs_min = 2;
3272 } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
3273 sub_stripes = 2;
3274 devs_increment = 2;
3275 ncopies = 2;
3276 devs_min = 4;
3277 } else {
3278 devs_max = 1;
3279 }
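/*
 * Sketch of the global table the FIXME above asks for (illustrative
 * only; much later kernels grew a similar btrfs_raid_array). The rows
 * transcribe the if/else chain; devs_max == 0 means "as many as
 * possible":
 */
#if 0	/* not compiled, illustration only */
	static const struct {
		int sub_stripes, dev_stripes, devs_max, devs_min;
		int devs_increment, ncopies;
	} raid_attr_sketch[] = {
		/*          sub  dev  max  min  incr  ncopies */
		/* DUP    */ { 1,  2,   1,   1,   1,   2 },
		/* RAID0  */ { 1,  1,   0,   2,   1,   1 },
		/* RAID1  */ { 1,  1,   2,   2,   2,   2 },
		/* RAID10 */ { 2,  1,   0,   4,   2,   2 },
		/* SINGLE */ { 1,  1,   1,   1,   1,   1 },
	};
#endif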
3280
3281 if (type & BTRFS_BLOCK_GROUP_DATA) {
3282 max_stripe_size = 1024 * 1024 * 1024;
3283 max_chunk_size = 10 * max_stripe_size;
3284 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3285 /* for larger filesystems, use larger metadata chunks */
3286 if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
3287 max_stripe_size = 1024 * 1024 * 1024;
3288 else
3289 max_stripe_size = 256 * 1024 * 1024;
3290 max_chunk_size = max_stripe_size;
3291 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
3292 max_stripe_size = 32 * 1024 * 1024;
3293 max_chunk_size = 2 * max_stripe_size;
3294 } else {
3295 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
3296 type);
3297 BUG_ON(1);
3298 }
3299
3300 /* we don't want a chunk larger than 10% of writeable space */
3301 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
3302 max_chunk_size);
3303
3304 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
3305 GFP_NOFS);
3306 if (!devices_info)
3307 return -ENOMEM;
3308
3309 cur = fs_devices->alloc_list.next;
3310
3311 /*
3312 * in the first pass through the devices list, we gather information
3313 * about the available holes on each device.
3314 */
3315 ndevs = 0;
3316 while (cur != &fs_devices->alloc_list) {
3317 struct btrfs_device *device;
3318 u64 max_avail;
3319 u64 dev_offset;
3320
3321 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
3322
3323 cur = cur->next;
3324
3325 if (!device->writeable) {
3326 WARN(1, KERN_ERR
3327 "btrfs: read-only device in alloc_list\n");
3328 continue;
3329 }
3330
3331 if (!device->in_fs_metadata)
3332 continue;
3333
3334 if (device->total_bytes > device->bytes_used)
3335 total_avail = device->total_bytes - device->bytes_used;
3336 else
3337 total_avail = 0;
3338
3339 /* If there is no space on this device, skip it. */
3340 if (total_avail == 0)
3341 continue;
3342
3343 ret = find_free_dev_extent(device,
3344 max_stripe_size * dev_stripes,
3345 &dev_offset, &max_avail);
3346 if (ret && ret != -ENOSPC)
3347 goto error;
3348
3349 if (ret == 0)
3350 max_avail = max_stripe_size * dev_stripes;
3351
3352 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
3353 continue;
3354
3355 devices_info[ndevs].dev_offset = dev_offset;
3356 devices_info[ndevs].max_avail = max_avail;
3357 devices_info[ndevs].total_avail = total_avail;
3358 devices_info[ndevs].dev = device;
3359 ++ndevs;
3360 }
3361
3362 /*
3363 * now sort the devices by hole size / available space
3364 */
3365 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
3366 btrfs_cmp_device_info, NULL);
3367
3368 /* round down to number of usable stripes */
3369 ndevs -= ndevs % devs_increment;
3370
3371 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
3372 ret = -ENOSPC;
3373 goto error;
3374 }
3375
3376 if (devs_max && ndevs > devs_max)
3377 ndevs = devs_max;
3378 /*
3379 * the primary goal is to maximize the number of stripes, so use as many
3380 * devices as possible, even if the stripes are not maximum sized.
3381 */
3382 stripe_size = devices_info[ndevs-1].max_avail;
3383 num_stripes = ndevs * dev_stripes;
3384
3385 if (stripe_size * ndevs > max_chunk_size * ncopies) {
3386 stripe_size = max_chunk_size * ncopies;
3387 do_div(stripe_size, ndevs);
3388 }
3389
3390 do_div(stripe_size, dev_stripes);
3391
3392 /* align to BTRFS_STRIPE_LEN */
3393 do_div(stripe_size, BTRFS_STRIPE_LEN);
3394 stripe_size *= BTRFS_STRIPE_LEN;
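	/*
	 * Worked example (illustrative): do_div() discards the
	 * remainder, so the divide-then-multiply pair rounds
	 * stripe_size down to a whole multiple of BTRFS_STRIPE_LEN,
	 * e.g. 1000 KiB -> 960 KiB for a 64 KiB stripe.
	 */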
3395
3396 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3397 if (!map) {
3398 ret = -ENOMEM;
3399 goto error;
3400 }
3401 map->num_stripes = num_stripes;
3402
3403 for (i = 0; i < ndevs; ++i) {
3404 for (j = 0; j < dev_stripes; ++j) {
3405 int s = i * dev_stripes + j;
3406 map->stripes[s].dev = devices_info[i].dev;
3407 map->stripes[s].physical = devices_info[i].dev_offset +
3408 j * stripe_size;
3409 }
3410 }
3411 map->sector_size = extent_root->sectorsize;
3412 map->stripe_len = BTRFS_STRIPE_LEN;
3413 map->io_align = BTRFS_STRIPE_LEN;
3414 map->io_width = BTRFS_STRIPE_LEN;
3415 map->type = type;
3416 map->sub_stripes = sub_stripes;
3417
3418 *map_ret = map;
3419 num_bytes = stripe_size * (num_stripes / ncopies);
3420
3421 *stripe_size_out = stripe_size;
3422 *num_bytes_out = num_bytes;
3423
3424 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
3425
3426 em = alloc_extent_map();
3427 if (!em) {
3428 ret = -ENOMEM;
3429 goto error;
3430 }
3431 em->bdev = (struct block_device *)map;
3432 em->start = start;
3433 em->len = num_bytes;
3434 em->block_start = 0;
3435 em->block_len = em->len;
3436
3437 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3438 write_lock(&em_tree->lock);
3439 ret = add_extent_mapping(em_tree, em);
3440 write_unlock(&em_tree->lock);
3441 free_extent_map(em);
3442 if (ret)
3443 goto error;
3444
3445 ret = btrfs_make_block_group(trans, extent_root, 0, type,
3446 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3447 start, num_bytes);
3448 if (ret)
3449 goto error;
3450
3451 for (i = 0; i < map->num_stripes; ++i) {
3452 struct btrfs_device *device;
3453 u64 dev_offset;
3454
3455 device = map->stripes[i].dev;
3456 dev_offset = map->stripes[i].physical;
3457
3458 ret = btrfs_alloc_dev_extent(trans, device,
3459 info->chunk_root->root_key.objectid,
3460 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3461 start, dev_offset, stripe_size);
3462 if (ret) {
3463 btrfs_abort_transaction(trans, extent_root, ret);
3464 goto error;
3465 }
3466 }
3467
3468 kfree(devices_info);
3469 return 0;
3470
3471 error:
3472 kfree(map);
3473 kfree(devices_info);
3474 return ret;
3475 }
3476
3477 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
3478 struct btrfs_root *extent_root,
3479 struct map_lookup *map, u64 chunk_offset,
3480 u64 chunk_size, u64 stripe_size)
3481 {
3482 u64 dev_offset;
3483 struct btrfs_key key;
3484 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3485 struct btrfs_device *device;
3486 struct btrfs_chunk *chunk;
3487 struct btrfs_stripe *stripe;
3488 size_t item_size = btrfs_chunk_item_size(map->num_stripes);
3489 int index = 0;
3490 int ret;
3491
3492 chunk = kzalloc(item_size, GFP_NOFS);
3493 if (!chunk)
3494 return -ENOMEM;
3495
3496 index = 0;
3497 while (index < map->num_stripes) {
3498 device = map->stripes[index].dev;
3499 device->bytes_used += stripe_size;
3500 ret = btrfs_update_device(trans, device);
3501 if (ret)
3502 goto out_free;
3503 index++;
3504 }
3505
3506 spin_lock(&extent_root->fs_info->free_chunk_lock);
3507 extent_root->fs_info->free_chunk_space -= (stripe_size *
3508 map->num_stripes);
3509 spin_unlock(&extent_root->fs_info->free_chunk_lock);
3510
3511 index = 0;
3512 stripe = &chunk->stripe;
3513 while (index < map->num_stripes) {
3514 device = map->stripes[index].dev;
3515 dev_offset = map->stripes[index].physical;
3516
3517 btrfs_set_stack_stripe_devid(stripe, device->devid);
3518 btrfs_set_stack_stripe_offset(stripe, dev_offset);
3519 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
3520 stripe++;
3521 index++;
3522 }
3523
3524 btrfs_set_stack_chunk_length(chunk, chunk_size);
3525 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
3526 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
3527 btrfs_set_stack_chunk_type(chunk, map->type);
3528 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
3529 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
3530 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
3531 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
3532 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
3533
3534 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3535 key.type = BTRFS_CHUNK_ITEM_KEY;
3536 key.offset = chunk_offset;
3537
3538 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
3539
3540 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3541 /*
3542 * TODO: Cleanup of inserted chunk root in case of
3543 * failure.
3544 */
3545 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
3546 item_size);
3547 }
3548
3549 out_free:
3550 kfree(chunk);
3551 return ret;
3552 }
3553
3554 /*
3555 * Chunk allocation falls into two parts. The first part does the work
3556 * that makes the newly allocated chunk usable, but does not do any
3557 * operation that modifies the chunk tree. The second part does the work
3558 * that requires modifying the chunk tree. This division is important for
3559 * the bootstrap process of adding storage to a seed btrfs.
3560 */
3561 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3562 struct btrfs_root *extent_root, u64 type)
3563 {
3564 u64 chunk_offset;
3565 u64 chunk_size;
3566 u64 stripe_size;
3567 struct map_lookup *map;
3568 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3569 int ret;
3570
3571 ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3572 &chunk_offset);
3573 if (ret)
3574 return ret;
3575
3576 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3577 &stripe_size, chunk_offset, type);
3578 if (ret)
3579 return ret;
3580
3581 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3582 chunk_size, stripe_size);
3583 if (ret)
3584 return ret;
3585 return 0;
3586 }
3587
3588 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3589 struct btrfs_root *root,
3590 struct btrfs_device *device)
3591 {
3592 u64 chunk_offset;
3593 u64 sys_chunk_offset;
3594 u64 chunk_size;
3595 u64 sys_chunk_size;
3596 u64 stripe_size;
3597 u64 sys_stripe_size;
3598 u64 alloc_profile;
3599 struct map_lookup *map;
3600 struct map_lookup *sys_map;
3601 struct btrfs_fs_info *fs_info = root->fs_info;
3602 struct btrfs_root *extent_root = fs_info->extent_root;
3603 int ret;
3604
3605 ret = find_next_chunk(fs_info->chunk_root,
3606 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
3607 if (ret)
3608 return ret;
3609
3610 alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
3611 fs_info->avail_metadata_alloc_bits;
3612 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3613
3614 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3615 &stripe_size, chunk_offset, alloc_profile);
3616 if (ret)
3617 return ret;
3618
3619 sys_chunk_offset = chunk_offset + chunk_size;
3620
3621 alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
3622 fs_info->avail_system_alloc_bits;
3623 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3624
3625 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
3626 &sys_chunk_size, &sys_stripe_size,
3627 sys_chunk_offset, alloc_profile);
3628 if (ret) {
3629 btrfs_abort_transaction(trans, root, ret);
3630 goto out;
3631 }
3632
3633 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
3634 if (ret) {
3635 btrfs_abort_transaction(trans, root, ret);
3636 goto out;
3637 }
3638
3639 /*
3640 * Modifying the chunk tree requires allocating new blocks from both
3641 * the system block group and the metadata block group. So we can
3642 * only do operations that require modifying the chunk tree after
3643 * both block groups have been created.
3644 */
3645 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3646 chunk_size, stripe_size);
3647 if (ret) {
3648 btrfs_abort_transaction(trans, root, ret);
3649 goto out;
3650 }
3651
3652 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
3653 sys_chunk_offset, sys_chunk_size,
3654 sys_stripe_size);
3655 if (ret)
3656 btrfs_abort_transaction(trans, root, ret);
3657
3658 out:
3659
3660 return ret;
3661 }
3662
3663 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
3664 {
3665 struct extent_map *em;
3666 struct map_lookup *map;
3667 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3668 int readonly = 0;
3669 int i;
3670
3671 read_lock(&map_tree->map_tree.lock);
3672 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3673 read_unlock(&map_tree->map_tree.lock);
3674 if (!em)
3675 return 1;
3676
3677 if (btrfs_test_opt(root, DEGRADED)) {
3678 free_extent_map(em);
3679 return 0;
3680 }
3681
3682 map = (struct map_lookup *)em->bdev;
3683 for (i = 0; i < map->num_stripes; i++) {
3684 if (!map->stripes[i].dev->writeable) {
3685 readonly = 1;
3686 break;
3687 }
3688 }
3689 free_extent_map(em);
3690 return readonly;
3691 }
3692
3693 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
3694 {
3695 extent_map_tree_init(&tree->map_tree);
3696 }
3697
3698 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
3699 {
3700 struct extent_map *em;
3701
3702 while (1) {
3703 write_lock(&tree->map_tree.lock);
3704 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
3705 if (em)
3706 remove_extent_mapping(&tree->map_tree, em);
3707 write_unlock(&tree->map_tree.lock);
3708 if (!em)
3709 break;
3710 kfree(em->bdev);
3711 /* once for us */
3712 free_extent_map(em);
3713 /* once for the tree */
3714 free_extent_map(em);
3715 }
3716 }
3717
3718 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
3719 {
3720 struct extent_map *em;
3721 struct map_lookup *map;
3722 struct extent_map_tree *em_tree = &map_tree->map_tree;
3723 int ret;
3724
3725 read_lock(&em_tree->lock);
3726 em = lookup_extent_mapping(em_tree, logical, len);
3727 read_unlock(&em_tree->lock);
3728 BUG_ON(!em);
3729
3730 BUG_ON(em->start > logical || em->start + em->len < logical);
3731 map = (struct map_lookup *)em->bdev;
3732 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
3733 ret = map->num_stripes;
3734 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3735 ret = map->sub_stripes;
3736 else
3737 ret = 1;
3738 free_extent_map(em);
3739 return ret;
3740 }
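/*
 * Illustrative mapping for the profile checks above: DUP and RAID1 keep
 * num_stripes identical copies (2 here), RAID10 keeps sub_stripes
 * copies (2), and RAID0/single store exactly one copy, hence the
 * fallback of 1.
 */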
3741
3742 static int find_live_mirror(struct map_lookup *map, int first, int num,
3743 int optimal)
3744 {
3745 int i;
3746 if (map->stripes[optimal].dev->bdev)
3747 return optimal;
3748 for (i = first; i < first + num; i++) {
3749 if (map->stripes[i].dev->bdev)
3750 return i;
3751 }
3752 /* we couldn't find one that doesn't fail. Just return something
3753 * and the io error handling code will clean up eventually
3754 */
3755 return optimal;
3756 }
3757
3758 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3759 u64 logical, u64 *length,
3760 struct btrfs_bio **bbio_ret,
3761 int mirror_num)
3762 {
3763 struct extent_map *em;
3764 struct map_lookup *map;
3765 struct extent_map_tree *em_tree = &map_tree->map_tree;
3766 u64 offset;
3767 u64 stripe_offset;
3768 u64 stripe_end_offset;
3769 u64 stripe_nr;
3770 u64 stripe_nr_orig;
3771 u64 stripe_nr_end;
3772 int stripe_index;
3773 int i;
3774 int ret = 0;
3775 int num_stripes;
3776 int max_errors = 0;
3777 struct btrfs_bio *bbio = NULL;
3778
3779 read_lock(&em_tree->lock);
3780 em = lookup_extent_mapping(em_tree, logical, *length);
3781 read_unlock(&em_tree->lock);
3782
3783 if (!em) {
3784 printk(KERN_CRIT "btrfs: unable to find logical %llu len %llu\n",
3785 (unsigned long long)logical,
3786 (unsigned long long)*length);
3787 BUG();
3788 }
3789
3790 BUG_ON(em->start > logical || em->start + em->len < logical);
3791 map = (struct map_lookup *)em->bdev;
3792 offset = logical - em->start;
3793
3794 if (mirror_num > map->num_stripes)
3795 mirror_num = 0;
3796
3797 stripe_nr = offset;
3798 /*
3799 * stripe_nr counts the total number of stripes we have to stride
3800 * to get to this block
3801 */
3802 do_div(stripe_nr, map->stripe_len);
3803
3804 stripe_offset = stripe_nr * map->stripe_len;
3805 BUG_ON(offset < stripe_offset);
3806
3807 /* stripe_offset is the offset of this block in its stripe */
3808 stripe_offset = offset - stripe_offset;
3809
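	/*
	 * Worked example (illustrative): with stripe_len = 64 KiB and
	 * offset = 200 KiB into the chunk, do_div leaves stripe_nr = 3
	 * (three whole stripes strided over) and stripe_offset =
	 * 200 KiB - 3 * 64 KiB = 8 KiB into the fourth stripe.
	 */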
3810 if (rw & REQ_DISCARD)
3811 *length = min_t(u64, em->len - offset, *length);
3812 else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
3813 /* we limit the length of each bio to what fits in a stripe */
3814 *length = min_t(u64, em->len - offset,
3815 map->stripe_len - stripe_offset);
3816 } else {
3817 *length = em->len - offset;
3818 }
3819
3820 if (!bbio_ret)
3821 goto out;
3822
3823 num_stripes = 1;
3824 stripe_index = 0;
3825 stripe_nr_orig = stripe_nr;
3826 stripe_nr_end = (offset + *length + map->stripe_len - 1) &
3827 (~(map->stripe_len - 1));
3828 do_div(stripe_nr_end, map->stripe_len);
3829 stripe_end_offset = stripe_nr_end * map->stripe_len -
3830 (offset + *length);
3831 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3832 if (rw & REQ_DISCARD)
3833 num_stripes = min_t(u64, map->num_stripes,
3834 stripe_nr_end - stripe_nr_orig);
3835 stripe_index = do_div(stripe_nr, map->num_stripes);
3836 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3837 if (rw & (REQ_WRITE | REQ_DISCARD))
3838 num_stripes = map->num_stripes;
3839 else if (mirror_num)
3840 stripe_index = mirror_num - 1;
3841 else {
3842 stripe_index = find_live_mirror(map, 0,
3843 map->num_stripes,
3844 current->pid % map->num_stripes);
3845 mirror_num = stripe_index + 1;
3846 }
3847
3848 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3849 if (rw & (REQ_WRITE | REQ_DISCARD)) {
3850 num_stripes = map->num_stripes;
3851 } else if (mirror_num) {
3852 stripe_index = mirror_num - 1;
3853 } else {
3854 mirror_num = 1;
3855 }
3856
3857 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3858 int factor = map->num_stripes / map->sub_stripes;
3859
3860 stripe_index = do_div(stripe_nr, factor);
3861 stripe_index *= map->sub_stripes;
3862
3863 if (rw & REQ_WRITE)
3864 num_stripes = map->sub_stripes;
3865 else if (rw & REQ_DISCARD)
3866 num_stripes = min_t(u64, map->sub_stripes *
3867 (stripe_nr_end - stripe_nr_orig),
3868 map->num_stripes);
3869 else if (mirror_num)
3870 stripe_index += mirror_num - 1;
3871 else {
3872 int old_stripe_index = stripe_index;
3873 stripe_index = find_live_mirror(map, stripe_index,
3874 map->sub_stripes, stripe_index +
3875 current->pid % map->sub_stripes);
3876 mirror_num = stripe_index - old_stripe_index + 1;
3877 }
3878 } else {
3879 /*
3880 * after this do_div call, stripe_nr is the number of stripes
3881 * on this device we have to walk to find the data, and
3882 * stripe_index is the number of our device in the stripe array
3883 */
3884 stripe_index = do_div(stripe_nr, map->num_stripes);
3885 mirror_num = stripe_index + 1;
3886 }
3887 BUG_ON(stripe_index >= map->num_stripes);
3888
3889 bbio = kzalloc(btrfs_bio_size(num_stripes), GFP_NOFS);
3890 if (!bbio) {
3891 ret = -ENOMEM;
3892 goto out;
3893 }
3894 atomic_set(&bbio->error, 0);
3895
3896 if (rw & REQ_DISCARD) {
3897 int factor = 0;
3898 int sub_stripes = 0;
3899 u64 stripes_per_dev = 0;
3900 u32 remaining_stripes = 0;
3901 u32 last_stripe = 0;
3902
3903 if (map->type &
3904 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
3905 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3906 sub_stripes = 1;
3907 else
3908 sub_stripes = map->sub_stripes;
3909
3910 factor = map->num_stripes / sub_stripes;
3911 stripes_per_dev = div_u64_rem(stripe_nr_end -
3912 stripe_nr_orig,
3913 factor,
3914 &remaining_stripes);
3915 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
3916 last_stripe *= sub_stripes;
3917 }
3918
3919 for (i = 0; i < num_stripes; i++) {
3920 bbio->stripes[i].physical =
3921 map->stripes[stripe_index].physical +
3922 stripe_offset + stripe_nr * map->stripe_len;
3923 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
3924
3925 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3926 BTRFS_BLOCK_GROUP_RAID10)) {
3927 bbio->stripes[i].length = stripes_per_dev *
3928 map->stripe_len;
3929
3930 if (i / sub_stripes < remaining_stripes)
3931 bbio->stripes[i].length +=
3932 map->stripe_len;
3933
3934 /*
3935 * Special for the first stripe and
3936 * the last stripe:
3937 *
3938 * |-------|...|-------|
3939 * |----------|
3940 * off end_off
3941 */
3942 if (i < sub_stripes)
3943 bbio->stripes[i].length -=
3944 stripe_offset;
3945
3946 if (stripe_index >= last_stripe &&
3947 stripe_index <= (last_stripe +
3948 sub_stripes - 1))
3949 bbio->stripes[i].length -=
3950 stripe_end_offset;
3951
3952 if (i == sub_stripes - 1)
3953 stripe_offset = 0;
3954 } else
3955 bbio->stripes[i].length = *length;
3956
3957 stripe_index++;
3958 if (stripe_index == map->num_stripes) {
3959 /* This could only happen for RAID0/10 */
3960 stripe_index = 0;
3961 stripe_nr++;
3962 }
3963 }
3964 } else {
3965 for (i = 0; i < num_stripes; i++) {
3966 bbio->stripes[i].physical =
3967 map->stripes[stripe_index].physical +
3968 stripe_offset +
3969 stripe_nr * map->stripe_len;
3970 bbio->stripes[i].dev =
3971 map->stripes[stripe_index].dev;
3972 stripe_index++;
3973 }
3974 }
3975
3976 if (rw & REQ_WRITE) {
3977 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
3978 BTRFS_BLOCK_GROUP_RAID10 |
3979 BTRFS_BLOCK_GROUP_DUP)) {
3980 max_errors = 1;
3981 }
3982 }
3983
3984 *bbio_ret = bbio;
3985 bbio->num_stripes = num_stripes;
3986 bbio->max_errors = max_errors;
3987 bbio->mirror_num = mirror_num;
3988 out:
3989 free_extent_map(em);
3990 return ret;
3991 }
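
/*
 * Illustrative sketch, not part of the original file: the RAID10 index
 * computation above with do_div()'s side effects written out as plain
 * arithmetic. The helper name and signature are invented; real kernel
 * code must keep using do_div()/div_u64_rem(), since open-coded 64-bit
 * division is not available on 32-bit targets.
 */
static inline void example_raid10_index(u64 stripe_nr, int num_stripes,
					int sub_stripes, int *stripe_index,
					u64 *stripe_nr_on_dev)
{
	int factor = num_stripes / sub_stripes;	/* number of mirror groups */

	/* first device of the mirror group holding this stripe */
	*stripe_index = (int)(stripe_nr % factor) * sub_stripes;
	/* how many stripes to walk on each device of that group */
	*stripe_nr_on_dev = stripe_nr / factor;
}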
3992
3993 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3994 u64 logical, u64 *length,
3995 struct btrfs_bio **bbio_ret, int mirror_num)
3996 {
3997 return __btrfs_map_block(map_tree, rw, logical, length, bbio_ret,
3998 mirror_num);
3999 }
4000
4001 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
4002 u64 chunk_start, u64 physical, u64 devid,
4003 u64 **logical, int *naddrs, int *stripe_len)
4004 {
4005 struct extent_map_tree *em_tree = &map_tree->map_tree;
4006 struct extent_map *em;
4007 struct map_lookup *map;
4008 u64 *buf;
4009 u64 bytenr;
4010 u64 length;
4011 u64 stripe_nr;
4012 int i, j, nr = 0;
4013
4014 read_lock(&em_tree->lock);
4015 em = lookup_extent_mapping(em_tree, chunk_start, 1);
4016 read_unlock(&em_tree->lock);
4017
4018 BUG_ON(!em || em->start != chunk_start);
4019 map = (struct map_lookup *)em->bdev;
4020
4021 length = em->len;
4022 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4023 do_div(length, map->num_stripes / map->sub_stripes);
4024 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4025 do_div(length, map->num_stripes);
4026
4027 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
4028 BUG_ON(!buf); /* -ENOMEM */
4029
4030 for (i = 0; i < map->num_stripes; i++) {
4031 if (devid && map->stripes[i].dev->devid != devid)
4032 continue;
4033 if (map->stripes[i].physical > physical ||
4034 map->stripes[i].physical + length <= physical)
4035 continue;
4036
4037 stripe_nr = physical - map->stripes[i].physical;
4038 do_div(stripe_nr, map->stripe_len);
4039
4040 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4041 stripe_nr = stripe_nr * map->num_stripes + i;
4042 do_div(stripe_nr, map->sub_stripes);
4043 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4044 stripe_nr = stripe_nr * map->num_stripes + i;
4045 }
4046 bytenr = chunk_start + stripe_nr * map->stripe_len;
4047 WARN_ON(nr >= map->num_stripes);
4048 for (j = 0; j < nr; j++) {
4049 if (buf[j] == bytenr)
4050 break;
4051 }
4052 if (j == nr) {
4053 WARN_ON(nr >= map->num_stripes);
4054 buf[nr++] = bytenr;
4055 }
4056 }
4057
4058 *logical = buf;
4059 *naddrs = nr;
4060 *stripe_len = map->stripe_len;
4061
4062 free_extent_map(em);
4063 return 0;
4064 }
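
/*
 * Worked example for the reverse mapping above (numbers invented):
 * with a RAID0 chunk of num_stripes = 2 and stripe_len = 64K, the
 * byte at offset X past stripes[i].physical maps back to the logical
 * address chunk_start + (X / 64K * 2 + i) * 64K. Only stripe-aligned
 * bytenr values are reported, deduplicated through buf[].
 */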
4065
4066 static void *merge_stripe_index_into_bio_private(void *bi_private,
4067 unsigned int stripe_index)
4068 {
4069 /*
4070 * with single, dup, RAID0, RAID1 and RAID10, stripe_index is
4071 * at most 1.
4072 * The alternative solution (instead of stealing bits from the
4073 * pointer) would be to allocate an intermediate structure
4074 * that contains the old private pointer plus the stripe_index.
4075 */
4076 BUG_ON((((uintptr_t)bi_private) & 3) != 0);
4077 BUG_ON(stripe_index > 3);
4078 return (void *)(((uintptr_t)bi_private) | stripe_index);
4079 }
4080
4081 static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
4082 {
4083 return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3));
4084 }
4085
4086 static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
4087 {
4088 return (unsigned int)((uintptr_t)bi_private) & 3;
4089 }
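
/*
 * Round trip through the three helpers above (values invented for
 * illustration). A kzalloc()ed bbio is at least pointer-aligned, so
 * the two low bits of the pointer are free to carry the stripe index:
 *
 *	void *p = merge_stripe_index_into_bio_private(bbio, 2);
 *	extract_bbio_from_bio_private(p)	  == bbio
 *	extract_stripe_index_from_bio_private(p)  == 2
 */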
4090
4091 static void btrfs_end_bio(struct bio *bio, int err)
4092 {
4093 struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private);
4094 int is_orig_bio = 0;
4095
4096 if (err) {
4097 atomic_inc(&bbio->error);
4098 if (err == -EIO || err == -EREMOTEIO) {
4099 unsigned int stripe_index =
4100 extract_stripe_index_from_bio_private(
4101 bio->bi_private);
4102 struct btrfs_device *dev;
4103
4104 BUG_ON(stripe_index >= bbio->num_stripes);
4105 dev = bbio->stripes[stripe_index].dev;
4106 if (dev->bdev) {
4107 if (bio->bi_rw & WRITE)
4108 btrfs_dev_stat_inc(dev,
4109 BTRFS_DEV_STAT_WRITE_ERRS);
4110 else
4111 btrfs_dev_stat_inc(dev,
4112 BTRFS_DEV_STAT_READ_ERRS);
4113 if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
4114 btrfs_dev_stat_inc(dev,
4115 BTRFS_DEV_STAT_FLUSH_ERRS);
4116 btrfs_dev_stat_print_on_error(dev);
4117 }
4118 }
4119 }
4120
4121 if (bio == bbio->orig_bio)
4122 is_orig_bio = 1;
4123
4124 if (atomic_dec_and_test(&bbio->stripes_pending)) {
4125 if (!is_orig_bio) {
4126 bio_put(bio);
4127 bio = bbio->orig_bio;
4128 }
4129 bio->bi_private = bbio->private;
4130 bio->bi_end_io = bbio->end_io;
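/*
 * bi_bdev is dead at this point, so it is reused to smuggle the
 * mirror number back to the original completion callback, which
 * casts it back to an integer.
 */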
4131 bio->bi_bdev = (struct block_device *)
4132 (unsigned long)bbio->mirror_num;
4133 /* only send an error to the higher layers if it is
4134 * beyond the tolerance of the multi-bio
4135 */
4136 if (atomic_read(&bbio->error) > bbio->max_errors) {
4137 err = -EIO;
4138 } else {
4139 /*
4140 * this bio is actually up to date, we didn't
4141 * go over the max number of errors
4142 */
4143 set_bit(BIO_UPTODATE, &bio->bi_flags);
4144 err = 0;
4145 }
4146 kfree(bbio);
4147
4148 bio_endio(bio, err);
4149 } else if (!is_orig_bio) {
4150 bio_put(bio);
4151 }
4152 }
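
/*
 * Worked example (scenario invented): a write to a RAID1 chunk fans
 * out to two stripes and bbio->max_errors is 1. If exactly one copy
 * fails, the error count does not exceed max_errors and the original
 * bio still completes with BIO_UPTODATE set; only a second failure
 * surfaces -EIO to the upper layers.
 */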
4153
4154 struct async_sched {
4155 struct bio *bio;
4156 int rw;
4157 struct btrfs_fs_info *info;
4158 struct btrfs_work work;
4159 };
4160
4161 /*
4162 * see run_scheduled_bios for a description of why bios are collected for
4163 * async submit.
4164 *
4165 * This will add one bio to the pending list for a device and make sure
4166 * the work struct is scheduled.
4167 */
4168 static noinline void schedule_bio(struct btrfs_root *root,
4169 struct btrfs_device *device,
4170 int rw, struct bio *bio)
4171 {
4172 int should_queue = 1;
4173 struct btrfs_pending_bios *pending_bios;
4174
4175 /* don't bother with additional async steps for reads, right now */
4176 if (!(rw & REQ_WRITE)) {
4177 bio_get(bio);
4178 btrfsic_submit_bio(rw, bio);
4179 bio_put(bio);
4180 return;
4181 }
4182
4183 /*
4184 * nr_async_bios allows us to reliably return congestion to the
4185 * higher layers. Otherwise, the async bio makes it appear we have
4186 * made progress against dirty pages when we've really just put it
4187 * on a queue for later.
4188 */
4189 atomic_inc(&root->fs_info->nr_async_bios);
4190 WARN_ON(bio->bi_next);
4191 bio->bi_next = NULL;
4192 bio->bi_rw |= rw;
4193
4194 spin_lock(&device->io_lock);
4195 if (bio->bi_rw & REQ_SYNC)
4196 pending_bios = &device->pending_sync_bios;
4197 else
4198 pending_bios = &device->pending_bios;
4199
4200 if (pending_bios->tail)
4201 pending_bios->tail->bi_next = bio;
4202
4203 pending_bios->tail = bio;
4204 if (!pending_bios->head)
4205 pending_bios->head = bio;
4206 if (device->running_pending)
4207 should_queue = 0;
4208
4209 spin_unlock(&device->io_lock);
4210
4211 if (should_queue)
4212 btrfs_queue_worker(&root->fs_info->submit_workers,
4213 &device->work);
4214 }
4215
4216 static int bio_size_ok(struct block_device *bdev, struct bio *bio,
4217 sector_t sector)
4218 {
4219 struct bio_vec *prev;
4220 struct request_queue *q = bdev_get_queue(bdev);
4221 unsigned short max_sectors = queue_max_sectors(q);
4222 struct bvec_merge_data bvm = {
4223 .bi_bdev = bdev,
4224 .bi_sector = sector,
4225 .bi_rw = bio->bi_rw,
4226 };
4227
4228 if (bio->bi_vcnt == 0) {
4229 WARN_ON(1);
4230 return 1;
4231 }
4232
4233 prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
4234 if ((bio->bi_size >> 9) > max_sectors)
4235 return 0;
4236
4237 if (!q->merge_bvec_fn)
4238 return 1;
4239
4240 bvm.bi_size = bio->bi_size - prev->bv_len;
4241 if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
4242 return 0;
4243 return 1;
4244 }
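
/*
 * Note on the probe above: bvm re-describes the bio at the stripe's
 * target sector, and the last segment is re-offered to the driver's
 * merge_bvec_fn. If the driver would not accept that segment in full
 * at the new location, the bio has to be split up; see
 * breakup_stripe_bio() below.
 */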
4245
4246 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
4247 struct bio *bio, u64 physical, int dev_nr,
4248 int rw, int async)
4249 {
4250 struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
4251
4252 bio->bi_private = bbio;
4253 bio->bi_private = merge_stripe_index_into_bio_private(
4254 bio->bi_private, (unsigned int)dev_nr);
4255 bio->bi_end_io = btrfs_end_bio;
4256 bio->bi_sector = physical >> 9;
4257 #ifdef DEBUG
4258 {
4259 struct rcu_string *name;
4260
4261 rcu_read_lock();
4262 name = rcu_dereference(dev->name);
4263 pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
4264 "(%s id %llu), size=%u\n", rw,
4265 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
4266 name->str, dev->devid, bio->bi_size);
4267 rcu_read_unlock();
4268 }
4269 #endif
4270 bio->bi_bdev = dev->bdev;
4271 if (async)
4272 schedule_bio(root, dev, rw, bio);
4273 else
4274 btrfsic_submit_bio(rw, bio);
4275 }
4276
4277 static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
4278 struct bio *first_bio, struct btrfs_device *dev,
4279 int dev_nr, int rw, int async)
4280 {
4281 struct bio_vec *bvec = first_bio->bi_io_vec;
4282 struct bio *bio;
4283 int nr_vecs = bio_get_nr_vecs(dev->bdev);
4284 u64 physical = bbio->stripes[dev_nr].physical;
4285
4286 again:
4287 bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS);
4288 if (!bio)
4289 return -ENOMEM;
4290
4291 while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
4292 if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
4293 bvec->bv_offset) < bvec->bv_len) {
4294 u64 len = bio->bi_size;
4295
4296 atomic_inc(&bbio->stripes_pending);
4297 submit_stripe_bio(root, bbio, bio, physical, dev_nr,
4298 rw, async);
4299 physical += len;
4300 goto again;
4301 }
4302 bvec++;
4303 }
4304
4305 submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
4306 return 0;
4307 }
4308
4309 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
4310 {
4311 atomic_inc(&bbio->error);
4312 if (atomic_dec_and_test(&bbio->stripes_pending)) {
4313 bio->bi_private = bbio->private;
4314 bio->bi_end_io = bbio->end_io;
4315 bio->bi_bdev = (struct block_device *)
4316 (unsigned long)bbio->mirror_num;
4317 bio->bi_sector = logical >> 9;
4318 kfree(bbio);
4319 bio_endio(bio, -EIO);
4320 }
4321 }
4322
4323 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
4324 int mirror_num, int async_submit)
4325 {
4326 struct btrfs_mapping_tree *map_tree;
4327 struct btrfs_device *dev;
4328 struct bio *first_bio = bio;
4329 u64 logical = (u64)bio->bi_sector << 9;
4330 u64 length = 0;
4331 u64 map_length;
4332 int ret;
4333 int dev_nr = 0;
4334 int total_devs = 1;
4335 struct btrfs_bio *bbio = NULL;
4336
4337 length = bio->bi_size;
4338 map_tree = &root->fs_info->mapping_tree;
4339 map_length = length;
4340
4341 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
4342 mirror_num);
4343 if (ret) /* -ENOMEM */
4344 return ret;
4345
4346 total_devs = bbio->num_stripes;
4347 if (map_length < length) {
4348 printk(KERN_CRIT "btrfs: mapping failed logical %llu bio len %llu "
4349 "len %llu\n", (unsigned long long)logical,
4350 (unsigned long long)length,
4351 (unsigned long long)map_length);
4352 BUG();
4353 }
4354
4355 bbio->orig_bio = first_bio;
4356 bbio->private = first_bio->bi_private;
4357 bbio->end_io = first_bio->bi_end_io;
4358 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
4359
4360 while (dev_nr < total_devs) {
4361 dev = bbio->stripes[dev_nr].dev;
4362 if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
4363 bbio_error(bbio, first_bio, logical);
4364 dev_nr++;
4365 continue;
4366 }
4367
4368 /*
4369 * Check and see if we're ok with this bio based on its size
4370 * and offset with the given device.
4371 */
4372 if (!bio_size_ok(dev->bdev, first_bio,
4373 bbio->stripes[dev_nr].physical >> 9)) {
4374 ret = breakup_stripe_bio(root, bbio, first_bio, dev,
4375 dev_nr, rw, async_submit);
4376 BUG_ON(ret);
4377 dev_nr++;
4378 continue;
4379 }
4380
4381 if (dev_nr < total_devs - 1) {
4382 bio = bio_clone(first_bio, GFP_NOFS);
4383 BUG_ON(!bio); /* -ENOMEM */
4384 } else {
4385 bio = first_bio;
4386 }
4387
4388 submit_stripe_bio(root, bbio, bio,
4389 bbio->stripes[dev_nr].physical, dev_nr, rw,
4390 async_submit);
4391 dev_nr++;
4392 }
4393 return 0;
4394 }
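
/*
 * Worked example (scenario invented): a WRITE to a RAID1 chunk comes
 * back from btrfs_map_block() with total_devs == 2. The loop clones
 * first_bio for stripe 0, submits the original bio itself for the
 * last stripe, and btrfs_end_bio() completes the caller's bio once
 * stripes_pending drops to zero.
 */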
4395
4396 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
4397 u8 *uuid, u8 *fsid)
4398 {
4399 struct btrfs_device *device;
4400 struct btrfs_fs_devices *cur_devices;
4401
4402 cur_devices = root->fs_info->fs_devices;
4403 while (cur_devices) {
4404 if (!fsid ||
4405 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4406 device = __find_device(&cur_devices->devices,
4407 devid, uuid);
4408 if (device)
4409 return device;
4410 }
4411 cur_devices = cur_devices->seed;
4412 }
4413 return NULL;
4414 }
4415
4416 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
4417 u64 devid, u8 *dev_uuid)
4418 {
4419 struct btrfs_device *device;
4420 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4421
4422 device = kzalloc(sizeof(*device), GFP_NOFS);
4423 if (!device)
4424 return NULL;
4425 list_add(&device->dev_list,
4426 &fs_devices->devices);
4427 device->dev_root = root->fs_info->dev_root;
4428 device->devid = devid;
4429 device->work.func = pending_bios_fn;
4430 device->fs_devices = fs_devices;
4431 device->missing = 1;
4432 fs_devices->num_devices++;
4433 fs_devices->missing_devices++;
4434 spin_lock_init(&device->io_lock);
4435 INIT_LIST_HEAD(&device->dev_alloc_list);
4436 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
4437 return device;
4438 }
4439
4440 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
4441 struct extent_buffer *leaf,
4442 struct btrfs_chunk *chunk)
4443 {
4444 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4445 struct map_lookup *map;
4446 struct extent_map *em;
4447 u64 logical;
4448 u64 length;
4449 u64 devid;
4450 u8 uuid[BTRFS_UUID_SIZE];
4451 int num_stripes;
4452 int ret;
4453 int i;
4454
4455 logical = key->offset;
4456 length = btrfs_chunk_length(leaf, chunk);
4457
4458 read_lock(&map_tree->map_tree.lock);
4459 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
4460 read_unlock(&map_tree->map_tree.lock);
4461
4462 /* already mapped? */
4463 if (em && em->start <= logical && em->start + em->len > logical) {
4464 free_extent_map(em);
4465 return 0;
4466 } else if (em) {
4467 free_extent_map(em);
4468 }
4469
4470 em = alloc_extent_map();
4471 if (!em)
4472 return -ENOMEM;
4473 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
4474 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4475 if (!map) {
4476 free_extent_map(em);
4477 return -ENOMEM;
4478 }
4479
4480 em->bdev = (struct block_device *)map;
4481 em->start = logical;
4482 em->len = length;
4483 em->block_start = 0;
4484 em->block_len = em->len;
4485
4486 map->num_stripes = num_stripes;
4487 map->io_width = btrfs_chunk_io_width(leaf, chunk);
4488 map->io_align = btrfs_chunk_io_align(leaf, chunk);
4489 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
4490 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
4491 map->type = btrfs_chunk_type(leaf, chunk);
4492 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
4493 for (i = 0; i < num_stripes; i++) {
4494 map->stripes[i].physical =
4495 btrfs_stripe_offset_nr(leaf, chunk, i);
4496 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
4497 read_extent_buffer(leaf, uuid, (unsigned long)
4498 btrfs_stripe_dev_uuid_nr(chunk, i),
4499 BTRFS_UUID_SIZE);
4500 map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
4501 NULL);
4502 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
4503 kfree(map);
4504 free_extent_map(em);
4505 return -EIO;
4506 }
4507 if (!map->stripes[i].dev) {
4508 map->stripes[i].dev =
4509 add_missing_dev(root, devid, uuid);
4510 if (!map->stripes[i].dev) {
4511 kfree(map);
4512 free_extent_map(em);
4513 return -EIO;
4514 }
4515 }
4516 map->stripes[i].dev->in_fs_metadata = 1;
4517 }
4518
4519 write_lock(&map_tree->map_tree.lock);
4520 ret = add_extent_mapping(&map_tree->map_tree, em);
4521 write_unlock(&map_tree->map_tree.lock);
4522 BUG_ON(ret); /* Tree corruption */
4523 free_extent_map(em);
4524
4525 return 0;
4526 }
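
/*
 * On-disk layout parsed above: struct btrfs_chunk embeds the first
 * struct btrfs_stripe and the remaining num_stripes - 1 follow it
 * in-line, which is why btrfs_stripe_offset_nr() and
 * btrfs_stripe_devid_nr() take a stripe index instead of a pointer.
 */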
4527
4528 static void fill_device_from_item(struct extent_buffer *leaf,
4529 struct btrfs_dev_item *dev_item,
4530 struct btrfs_device *device)
4531 {
4532 unsigned long ptr;
4533
4534 device->devid = btrfs_device_id(leaf, dev_item);
4535 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
4536 device->total_bytes = device->disk_total_bytes;
4537 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
4538 device->type = btrfs_device_type(leaf, dev_item);
4539 device->io_align = btrfs_device_io_align(leaf, dev_item);
4540 device->io_width = btrfs_device_io_width(leaf, dev_item);
4541 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
4542
4543 ptr = (unsigned long)btrfs_device_uuid(dev_item);
4544 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
4545 }
4546
4547 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
4548 {
4549 struct btrfs_fs_devices *fs_devices;
4550 int ret;
4551
4552 BUG_ON(!mutex_is_locked(&uuid_mutex));
4553
4554 fs_devices = root->fs_info->fs_devices->seed;
4555 while (fs_devices) {
4556 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4557 ret = 0;
4558 goto out;
4559 }
4560 fs_devices = fs_devices->seed;
4561 }
4562
4563 fs_devices = find_fsid(fsid);
4564 if (!fs_devices) {
4565 ret = -ENOENT;
4566 goto out;
4567 }
4568
4569 fs_devices = clone_fs_devices(fs_devices);
4570 if (IS_ERR(fs_devices)) {
4571 ret = PTR_ERR(fs_devices);
4572 goto out;
4573 }
4574
4575 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
4576 root->fs_info->bdev_holder);
4577 if (ret) {
4578 free_fs_devices(fs_devices);
4579 goto out;
4580 }
4581
4582 if (!fs_devices->seeding) {
4583 __btrfs_close_devices(fs_devices);
4584 free_fs_devices(fs_devices);
4585 ret = -EINVAL;
4586 goto out;
4587 }
4588
4589 fs_devices->seed = root->fs_info->fs_devices->seed;
4590 root->fs_info->fs_devices->seed = fs_devices;
4591 out:
4592 return ret;
4593 }
4594
4595 static int read_one_dev(struct btrfs_root *root,
4596 struct extent_buffer *leaf,
4597 struct btrfs_dev_item *dev_item)
4598 {
4599 struct btrfs_device *device;
4600 u64 devid;
4601 int ret;
4602 u8 fs_uuid[BTRFS_UUID_SIZE];
4603 u8 dev_uuid[BTRFS_UUID_SIZE];
4604
4605 devid = btrfs_device_id(leaf, dev_item);
4606 read_extent_buffer(leaf, dev_uuid,
4607 (unsigned long)btrfs_device_uuid(dev_item),
4608 BTRFS_UUID_SIZE);
4609 read_extent_buffer(leaf, fs_uuid,
4610 (unsigned long)btrfs_device_fsid(dev_item),
4611 BTRFS_UUID_SIZE);
4612
4613 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
4614 ret = open_seed_devices(root, fs_uuid);
4615 if (ret && !btrfs_test_opt(root, DEGRADED))
4616 return ret;
4617 }
4618
4619 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
4620 if (!device || !device->bdev) {
4621 if (!btrfs_test_opt(root, DEGRADED))
4622 return -EIO;
4623
4624 if (!device) {
4625 printk(KERN_WARNING "warning devid %llu missing\n",
4626 (unsigned long long)devid);
4627 device = add_missing_dev(root, devid, dev_uuid);
4628 if (!device)
4629 return -ENOMEM;
4630 } else if (!device->missing) {
4631 /*
4632 * this happens when a device that was properly set up
4633 * in the device info lists suddenly goes bad.
4634 * device->bdev is NULL, and so we have to set
4635 * device->missing to one here
4636 */
4637 root->fs_info->fs_devices->missing_devices++;
4638 device->missing = 1;
4639 }
4640 }
4641
4642 if (device->fs_devices != root->fs_info->fs_devices) {
4643 BUG_ON(device->writeable);
4644 if (device->generation !=
4645 btrfs_device_generation(leaf, dev_item))
4646 return -EINVAL;
4647 }
4648
4649 fill_device_from_item(leaf, dev_item, device);
4650 device->dev_root = root->fs_info->dev_root;
4651 device->in_fs_metadata = 1;
4652 if (device->writeable) {
4653 device->fs_devices->total_rw_bytes += device->total_bytes;
4654 spin_lock(&root->fs_info->free_chunk_lock);
4655 root->fs_info->free_chunk_space += device->total_bytes -
4656 device->bytes_used;
4657 spin_unlock(&root->fs_info->free_chunk_lock);
4658 }
4659 ret = 0;
4660 return ret;
4661 }
4662
4663 int btrfs_read_sys_array(struct btrfs_root *root)
4664 {
4665 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4666 struct extent_buffer *sb;
4667 struct btrfs_disk_key *disk_key;
4668 struct btrfs_chunk *chunk;
4669 u8 *ptr;
4670 unsigned long sb_ptr;
4671 int ret = 0;
4672 u32 num_stripes;
4673 u32 array_size;
4674 u32 len = 0;
4675 u32 cur;
4676 struct btrfs_key key;
4677
4678 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
4679 BTRFS_SUPER_INFO_SIZE);
4680 if (!sb)
4681 return -ENOMEM;
4682 btrfs_set_buffer_uptodate(sb);
4683 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
4684 /*
4685 * The sb extent buffer is artificial and just used to read the system array.
4686 * btrfs_set_buffer_uptodate() call does not properly mark all its
4687 * pages up-to-date when the page is larger: extent does not cover the
4688 * whole page and consequently check_page_uptodate does not find all
4689 * the page's extents up-to-date (the hole beyond sb),
4690 * write_extent_buffer then triggers a WARN_ON.
4691 *
4692 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
4693 * but sb spans only this function. Add an explicit SetPageUptodate call
4694 * to silence the warning, e.g. on PowerPC 64.
4695 */
4696 if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
4697 SetPageUptodate(sb->pages[0]);
4698
4699 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
4700 array_size = btrfs_super_sys_array_size(super_copy);
4701
4702 ptr = super_copy->sys_chunk_array;
4703 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
4704 cur = 0;
4705
4706 while (cur < array_size) {
4707 disk_key = (struct btrfs_disk_key *)ptr;
4708 btrfs_disk_key_to_cpu(&key, disk_key);
4709
4710 len = sizeof(*disk_key); ptr += len;
4711 sb_ptr += len;
4712 cur += len;
4713
4714 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
4715 chunk = (struct btrfs_chunk *)sb_ptr;
4716 ret = read_one_chunk(root, &key, sb, chunk);
4717 if (ret)
4718 break;
4719 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
4720 len = btrfs_chunk_item_size(num_stripes);
4721 } else {
4722 ret = -EIO;
4723 break;
4724 }
4725 ptr += len;
4726 sb_ptr += len;
4727 cur += len;
4728 }
4729 free_extent_buffer(sb);
4730 return ret;
4731 }
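
/*
 * Layout walked by the loop above: sys_chunk_array is a packed
 * sequence of (struct btrfs_disk_key, chunk item) pairs, so each
 * iteration advances by sizeof(*disk_key) plus
 * btrfs_chunk_item_size(num_stripes). Anything other than a
 * BTRFS_CHUNK_ITEM_KEY in the array is treated as corruption and
 * fails with -EIO.
 */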
4732
4733 int btrfs_read_chunk_tree(struct btrfs_root *root)
4734 {
4735 struct btrfs_path *path;
4736 struct extent_buffer *leaf;
4737 struct btrfs_key key;
4738 struct btrfs_key found_key;
4739 int ret;
4740 int slot;
4741
4742 root = root->fs_info->chunk_root;
4743
4744 path = btrfs_alloc_path();
4745 if (!path)
4746 return -ENOMEM;
4747
4748 mutex_lock(&uuid_mutex);
4749 lock_chunks(root);
4750
4751 /* first we search for all of the device items, and then we
4752 * read in all of the chunk items. This way we can create chunk
4753 * mappings that reference all of the devices that are found
4754 */
4755 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
4756 key.offset = 0;
4757 key.type = 0;
4758 again:
4759 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4760 if (ret < 0)
4761 goto error;
4762 while (1) {
4763 leaf = path->nodes[0];
4764 slot = path->slots[0];
4765 if (slot >= btrfs_header_nritems(leaf)) {
4766 ret = btrfs_next_leaf(root, path);
4767 if (ret == 0)
4768 continue;
4769 if (ret < 0)
4770 goto error;
4771 break;
4772 }
4773 btrfs_item_key_to_cpu(leaf, &found_key, slot);
4774 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4775 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
4776 break;
4777 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
4778 struct btrfs_dev_item *dev_item;
4779 dev_item = btrfs_item_ptr(leaf, slot,
4780 struct btrfs_dev_item);
4781 ret = read_one_dev(root, leaf, dev_item);
4782 if (ret)
4783 goto error;
4784 }
4785 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
4786 struct btrfs_chunk *chunk;
4787 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
4788 ret = read_one_chunk(root, &found_key, leaf, chunk);
4789 if (ret)
4790 goto error;
4791 }
4792 path->slots[0]++;
4793 }
4794 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4795 key.objectid = 0;
4796 btrfs_release_path(path);
4797 goto again;
4798 }
4799 ret = 0;
4800 error:
4801 unlock_chunks(root);
4802 mutex_unlock(&uuid_mutex);
4803
4804 btrfs_free_path(path);
4805 return ret;
4806 }
4807
4808 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
4809 {
4810 int i;
4811
4812 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4813 btrfs_dev_stat_reset(dev, i);
4814 }
4815
4816 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
4817 {
4818 struct btrfs_key key;
4819 struct btrfs_key found_key;
4820 struct btrfs_root *dev_root = fs_info->dev_root;
4821 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
4822 struct extent_buffer *eb;
4823 int slot;
4824 int ret = 0;
4825 struct btrfs_device *device;
4826 struct btrfs_path *path = NULL;
4827 int i;
4828
4829 path = btrfs_alloc_path();
4830 if (!path) {
4831 ret = -ENOMEM;
4832 goto out;
4833 }
4834
4835 mutex_lock(&fs_devices->device_list_mutex);
4836 list_for_each_entry(device, &fs_devices->devices, dev_list) {
4837 int item_size;
4838 struct btrfs_dev_stats_item *ptr;
4839
4840 key.objectid = 0;
4841 key.type = BTRFS_DEV_STATS_KEY;
4842 key.offset = device->devid;
4843 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
4844 if (ret) {
4845 __btrfs_reset_dev_stats(device);
4846 device->dev_stats_valid = 1;
4847 btrfs_release_path(path);
4848 continue;
4849 }
4850 slot = path->slots[0];
4851 eb = path->nodes[0];
4852 btrfs_item_key_to_cpu(eb, &found_key, slot);
4853 item_size = btrfs_item_size_nr(eb, slot);
4854
4855 ptr = btrfs_item_ptr(eb, slot,
4856 struct btrfs_dev_stats_item);
4857
4858 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
4859 if (item_size >= (1 + i) * sizeof(__le64))
4860 btrfs_dev_stat_set(device, i,
4861 btrfs_dev_stats_value(eb, ptr, i));
4862 else
4863 btrfs_dev_stat_reset(device, i);
4864 }
4865
4866 device->dev_stats_valid = 1;
4867 btrfs_dev_stat_print_on_load(device);
4868 btrfs_release_path(path);
4869 }
4870 mutex_unlock(&fs_devices->device_list_mutex);
4871
4872 out:
4873 btrfs_free_path(path);
4874 return ret < 0 ? ret : 0;
4875 }
4876
4877 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
4878 struct btrfs_root *dev_root,
4879 struct btrfs_device *device)
4880 {
4881 struct btrfs_path *path;
4882 struct btrfs_key key;
4883 struct extent_buffer *eb;
4884 struct btrfs_dev_stats_item *ptr;
4885 int ret;
4886 int i;
4887
4888 key.objectid = 0;
4889 key.type = BTRFS_DEV_STATS_KEY;
4890 key.offset = device->devid;
4891
4892 path = btrfs_alloc_path();
4893 BUG_ON(!path);
4894 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
4895 if (ret < 0) {
4896 printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
4897 ret, rcu_str_deref(device->name));
4898 goto out;
4899 }
4900
4901 if (ret == 0 &&
4902 btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
4903 /* need to delete old one and insert a new one */
4904 ret = btrfs_del_item(trans, dev_root, path);
4905 if (ret != 0) {
4906 printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
4907 rcu_str_deref(device->name), ret);
4908 goto out;
4909 }
4910 ret = 1;
4911 }
4912
4913 if (ret == 1) {
4914 /* need to insert a new item */
4915 btrfs_release_path(path);
4916 ret = btrfs_insert_empty_item(trans, dev_root, path,
4917 &key, sizeof(*ptr));
4918 if (ret < 0) {
4919 printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
4920 rcu_str_deref(device->name), ret);
4921 goto out;
4922 }
4923 }
4924
4925 eb = path->nodes[0];
4926 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
4927 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4928 btrfs_set_dev_stats_value(eb, ptr, i,
4929 btrfs_dev_stat_read(device, i));
4930 btrfs_mark_buffer_dirty(eb);
4931
4932 out:
4933 btrfs_free_path(path);
4934 return ret;
4935 }
4936
4937 /*
4938 * called from commit_transaction. Writes all changed device stats to disk.
4939 */
4940 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
4941 struct btrfs_fs_info *fs_info)
4942 {
4943 struct btrfs_root *dev_root = fs_info->dev_root;
4944 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
4945 struct btrfs_device *device;
4946 int ret = 0;
4947
4948 mutex_lock(&fs_devices->device_list_mutex);
4949 list_for_each_entry(device, &fs_devices->devices, dev_list) {
4950 if (!device->dev_stats_valid || !device->dev_stats_dirty)
4951 continue;
4952
4953 ret = update_dev_stat_item(trans, dev_root, device);
4954 if (!ret)
4955 device->dev_stats_dirty = 0;
4956 }
4957 mutex_unlock(&fs_devices->device_list_mutex);
4958
4959 return ret;
4960 }
4961
4962 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
4963 {
4964 btrfs_dev_stat_inc(dev, index);
4965 btrfs_dev_stat_print_on_error(dev);
4966 }
4967
4968 void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
4969 {
4970 if (!dev->dev_stats_valid)
4971 return;
4972 printk_ratelimited_in_rcu(KERN_ERR
4973 "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
4974 rcu_str_deref(dev->name),
4975 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
4976 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
4977 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
4978 btrfs_dev_stat_read(dev,
4979 BTRFS_DEV_STAT_CORRUPTION_ERRS),
4980 btrfs_dev_stat_read(dev,
4981 BTRFS_DEV_STAT_GENERATION_ERRS));
4982 }
4983
4984 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
4985 {
4986 int i;
4987
4988 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4989 if (btrfs_dev_stat_read(dev, i) != 0)
4990 break;
4991 if (i == BTRFS_DEV_STAT_VALUES_MAX)
4992 return; /* all values == 0, suppress message */
4993
4994 printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
4995 rcu_str_deref(dev->name),
4996 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
4997 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
4998 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
4999 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
5000 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
5001 }
5002
5003 int btrfs_get_dev_stats(struct btrfs_root *root,
5004 struct btrfs_ioctl_get_dev_stats *stats)
5005 {
5006 struct btrfs_device *dev;
5007 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
5008 int i;
5009
5010 mutex_lock(&fs_devices->device_list_mutex);
5011 dev = btrfs_find_device(root, stats->devid, NULL, NULL);
5012 mutex_unlock(&fs_devices->device_list_mutex);
5013
5014 if (!dev) {
5015 printk(KERN_WARNING
5016 "btrfs: get dev_stats failed, device not found\n");
5017 return -ENODEV;
5018 } else if (!dev->dev_stats_valid) {
5019 printk(KERN_WARNING
5020 "btrfs: get dev_stats failed, not yet valid\n");
5021 return -ENODEV;
5022 } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
5023 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
5024 if (stats->nr_items > i)
5025 stats->values[i] =
5026 btrfs_dev_stat_read_and_reset(dev, i);
5027 else
5028 btrfs_dev_stat_reset(dev, i);
5029 }
5030 } else {
5031 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5032 if (stats->nr_items > i)
5033 stats->values[i] = btrfs_dev_stat_read(dev, i);
5034 }
5035 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
5036 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
5037 return 0;
5038 }
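
/*
 * Illustrative userspace sketch (not part of this file, error handling
 * omitted) of the ioctl that ends up in btrfs_get_dev_stats():
 *
 *	struct btrfs_ioctl_get_dev_stats s = {
 *		.devid = 1,
 *		.nr_items = BTRFS_DEV_STAT_VALUES_MAX,
 *	};
 *	int fd = open("/mnt", O_RDONLY);
 *	ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &s);
 *
 * Setting BTRFS_DEV_STATS_RESET in s.flags additionally zeroes the
 * counters after they have been read.
 */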