/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"
#include "dev-replace.h"

static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

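/*
 * uuid_mutex protects the list below: fs_uuids links one
 * btrfs_fs_devices for every filesystem (fsid) this kernel has seen
 * while scanning devices.
 */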
static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		rcu_string_free(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

static void btrfs_kobject_uevent(struct block_device *bdev,
				 enum kobject_action action)
{
	int ret;

	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
	if (ret)
		pr_warn("Sending event '%d' to kobject: '%s' (%p): failed\n",
			action,
			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
			&disk_to_dev(bdev->bd_disk)->kobj);
}

void btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
}

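/*
 * Look up a device by devid in one fs_devices list; when @uuid is
 * non-NULL the device UUID has to match as well.
 */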
static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

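/*
 * Open the block device at @device_path, optionally flush dirty pages,
 * and read the btrfs super block from it.  On failure both *bdev and
 * *bh are cleared and an errno is returned.
 */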
static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		printk(KERN_INFO "btrfs: open %s failed\n", device_path);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, 4096);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (!*bh) {
		ret = -EINVAL;
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}

static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, set up a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		     pending_bios != &device->pending_sync_bios &&
		     device->pending_sync_bios.head) ||
		    (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		     device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;
		if (need_resched())
			cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				if (need_resched())
					cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}

static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

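/*
 * Add a newly scanned device to the in-memory list for its fsid,
 * creating the btrfs_fs_devices entry on first sight.  For an already
 * known devid only the recorded path is refreshed.
 */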
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		mutex_init(&fs_devices->device_list_mutex);
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		device->dev_stats_valid = 0;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		spin_lock_init(&device->io_lock);

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			kfree(device);
			return -ENOMEM;
		}
		rcu_assign_pointer(device->name, name);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		/* init readahead state */
		spin_lock_init(&device->reada_lock);
		device->reada_curr_zone = NULL;
		atomic_set(&device->reada_in_flight, 0);
		device->reada_next = 0;
		INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
		INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	} else if (!device->name || strcmp(device->name->str, path)) {
		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}

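/*
 * Duplicate an fs_devices structure and all of its device entries;
 * used by the seeding code when the fsid of a filesystem changes.
 */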
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	mutex_init(&fs_devices->device_list_mutex);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	fs_devices->total_devices = orig->total_devices;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	/* We hold the volume lock, so it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
		if (!name) {
			kfree(device);
			goto error;
		}
		rcu_assign_pointer(device->name, name);

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

void btrfs_close_extra_devices(struct btrfs_fs_info *fs_info,
			       struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *device, *next;
	struct block_device *latest_bdev = NULL;
	u64 latest_devid = 0;
	u64 latest_transid = 0;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata) {
			if (!device->is_tgtdev_for_dev_replace &&
			    (!latest_transid ||
			     device->generation > latest_transid)) {
				latest_devid = device->devid;
				latest_transid = device->generation;
				latest_bdev = device->bdev;
			}
			continue;
		}

		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || device->is_tgtdev_for_dev_replace) {
				continue;
			}
		}
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			if (!device->is_tgtdev_for_dev_replace)
				fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		rcu_string_free(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;

	mutex_unlock(&uuid_mutex);
}

static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);

	rcu_string_free(device->name);
	kfree(device);
}

static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}

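/*
 * Drop one open reference on @fs_devices.  When the last reference is
 * dropped, every device is closed: each list entry is replaced under
 * RCU by a stripped copy (no bdev) so that concurrent readers keep a
 * consistent view, and the old entry is freed via call_rcu().
 */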
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		struct btrfs_device *new_device;
		struct rcu_string *name;

		if (device->bdev)
			fs_devices->open_devices--;

		if (device->writeable && !device->is_tgtdev_for_dev_replace) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		if (device->can_discard)
			fs_devices->num_can_discard--;

		new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
		BUG_ON(!new_device); /* -ENOMEM */
		memcpy(new_device, device, sizeof(*new_device));

		/* Safe because we are under uuid_mutex */
		if (device->name) {
			name = rcu_string_strdup(device->name->str, GFP_NOFS);
			BUG_ON(device->name && !name); /* -ENOMEM */
			rcu_assign_pointer(new_device->name, name);
		}
		new_device->bdev = NULL;
		new_device->writeable = 0;
		new_device->in_fs_metadata = 0;
		new_device->can_discard = 0;
		list_replace_rcu(&device->dev_list, &new_device->dev_list);

		call_rcu(&device->rcu, free_device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}

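/*
 * Open every device in @fs_devices, verify that each super block still
 * matches the cached devid/uuid, and record the device carrying the
 * highest generation as latest_bdev.  Fails with -EINVAL if no device
 * could be opened.
 */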
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
					    &bdev, &bh);
		if (ret)
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q)) {
			device->can_discard = 1;
			fs_devices->num_can_discard++;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable && !device->is_tgtdev_for_dev_replace) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
		blkdev_put(bdev, flags);
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}

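/*
 * Read the super block from one device, print its identity and
 * register it in the global list via device_list_add().  The device
 * is opened only long enough to read the super block.
 */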
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;
	u64 total_devices;

	flags |= FMODE_EXCL;
	mutex_lock(&uuid_mutex);
	ret = btrfs_get_bdev_and_sb(path, flags, holder, 0, &bdev, &bh);
	if (ret)
		goto error;
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	total_devices = btrfs_super_num_devices(disk_super);
	if (disk_super->label[0]) {
		if (disk_super->label[BTRFS_LABEL_SIZE - 1])
			disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
		printk(KERN_INFO "device label %s ", disk_super->label);
	} else {
		printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
	}
	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
	if (!ret && fs_devices_ret)
		(*fs_devices_ret)->total_devices = total_devices;
	brelse(bh);
	blkdev_put(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}

/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * find_free_dev_extent - find free space in the specified device
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space that we find, or the size of the
 *		max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find one.
 * But if we don't find suitable free space, it will be used to store
 * the start position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the
 * size of the max free space.
 */
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);

	max_hole_start = search_start;
	max_hole_size = 0;
	hole_size = 0;

	if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
		ret = -ENOSPC;
		goto error;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the
			 * start of this free space and the length of this
			 * free space is stored in max_hole_size.  Thus, we
			 * return max_hole_start and max_hole_size and go
			 * back to the caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start)
		hole_size = search_end - search_start;

	if (hole_size > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = hole_size;
	}

	/* See above. */
	if (hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
error:
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

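/*
 * Remove the dev extent item at @start on @device and return the freed
 * bytes to the free_chunk_space counter.
 */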
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_error(root->fs_info, ret, "Slot search failed");
		goto out;
	}

	if (device->bytes_used > 0) {
		u64 len = btrfs_dev_extent_length(leaf, extent);
		device->bytes_used -= len;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += len;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Failed to remove dev extent item");
	}
out:
	btrfs_free_path(path);
	return ret;
}

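/*
 * Insert a dev extent item mapping [@start, @start + @num_bytes) on
 * @device to the chunk at @chunk_offset.
 */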
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	WARN_ON(device->is_tgtdev_for_dev_replace);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

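/*
 * Find the logical offset right after the last chunk with the given
 * objectid; this is where the next chunk can be placed.
 */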
static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

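/*
 * Pick the next unused devid: one past the highest devid recorded in
 * the chunk tree.
 */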
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

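/*
 * Delete the dev item for @device from the chunk tree in its own
 * transaction.
 */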
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}

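/*
 * Remove a device from the filesystem: check the redundancy
 * constraints, migrate all of its chunks away via btrfs_shrink_device,
 * drop it from the metadata and the in-memory lists, and finally wipe
 * the super block magic on the disk.
 */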
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_devices *cur_devices;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	int ret = 0;
	bool clear_super = false;

	mutex_lock(&uuid_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		    root->fs_info->avail_system_alloc_bits |
		    root->fs_info->avail_metadata_alloc_bits;

	num_devices = root->fs_info->fs_devices->num_devices;
	btrfs_dev_replace_lock(&root->fs_info->dev_replace);
	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
		WARN_ON(num_devices < 1);
		num_devices--;
	}
	btrfs_dev_replace_unlock(&root->fs_info->dev_replace);

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "
		       "on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata &&
			    !tmp->is_tgtdev_for_dev_replace &&
			    !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			printk(KERN_ERR "btrfs: no missing devices found to "
			       "remove\n");
			goto out;
		}
	} else {
		ret = btrfs_get_bdev_and_sb(device_path,
					    FMODE_READ | FMODE_EXCL,
					    root->fs_info->bdev_holder, 0,
					    &bdev, &bh);
		if (ret)
			goto out;
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->is_tgtdev_for_dev_replace) {
		pr_err("btrfs: unable to remove the dev_replace target dev\n");
		ret = -EINVAL;
		goto error_brelse;
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "
		       "device\n");
		ret = -EINVAL;
		goto error_brelse;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices--;
		clear_super = true;
	}

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_undo;

	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out. This
	 * could give a filesystem state which requires a degraded mount.
	 */
	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space = device->total_bytes -
		device->bytes_used;
	spin_unlock(&root->fs_info->free_chunk_lock);

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root->fs_info, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers.
	 */

	cur_devices = device->fs_devices;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	device->fs_devices->num_devices--;
	device->fs_devices->total_devices--;

	if (device->missing)
		root->fs_info->fs_devices->missing_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev)
		device->fs_devices->open_devices--;

	call_rcu(&device->rcu, free_device);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);

	if (cur_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == cur_devices)
				break;
			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = cur_devices->seed;
		cur_devices->seed = NULL;
		lock_chunks(root);
		__btrfs_close_devices(cur_devices);
		unlock_chunks(root);
		free_fs_devices(cur_devices);
	}

	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (clear_super && disk_super) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	ret = 0;

	/* Notify udev that device has changed */
	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);

error_brelse:
	brelse(bh);
	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
	mutex_unlock(&uuid_mutex);
	return ret;
error_undo:
	if (device->writeable) {
		lock_chunks(root);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices++;
	}
	goto error_brelse;
}

void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
				 struct btrfs_device *srcdev)
{
	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
	list_del_rcu(&srcdev->dev_list);
	list_del_rcu(&srcdev->dev_alloc_list);
	fs_info->fs_devices->num_devices--;
	if (srcdev->missing) {
		fs_info->fs_devices->missing_devices--;
		fs_info->fs_devices->rw_devices++;
	}
	if (srcdev->can_discard)
		fs_info->fs_devices->num_can_discard--;
	if (srcdev->bdev)
		fs_info->fs_devices->open_devices--;

	call_rcu(&srcdev->rcu, free_device);
}

void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *tgtdev)
{
	struct btrfs_device *next_device;

	WARN_ON(!tgtdev);
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	if (tgtdev->bdev) {
		btrfs_scratch_superblock(tgtdev);
		fs_info->fs_devices->open_devices--;
	}
	fs_info->fs_devices->num_devices--;
	if (tgtdev->can_discard)
		fs_info->fs_devices->num_can_discard++;

	next_device = list_entry(fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (tgtdev->bdev == fs_info->sb->s_bdev)
		fs_info->sb->s_bdev = next_device->bdev;
	if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
	list_del_rcu(&tgtdev->dev_list);

	call_rcu(&tgtdev->rcu, free_device);

	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
}

int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
			      struct btrfs_device **device)
{
	int ret = 0;
	struct btrfs_super_block *disk_super;
	u64 devid;
	u8 *dev_uuid;
	struct block_device *bdev;
	struct buffer_head *bh;

	*device = NULL;
	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
				    root->fs_info->bdev_holder, 0, &bdev, &bh);
	if (ret)
		return ret;
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_uuid = disk_super->dev_item.uuid;
	*device = btrfs_find_device(root->fs_info, devid, dev_uuid,
				    disk_super->fsid);
	brelse(bh);
	if (!*device)
		ret = -ENOENT;
	blkdev_put(bdev, FMODE_READ);
	return ret;
}

int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
					 char *device_path,
					 struct btrfs_device **device)
{
	*device = NULL;
	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held by the caller.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				*device = tmp;
				break;
			}
		}

		if (!*device) {
			pr_err("btrfs: no missing device found\n");
			return -ENOENT;
		}

		return 0;
	} else {
		return btrfs_find_device_by_path(root, device_path, device);
	}
}

/*
 * Does all the dirty work required for changing the file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!seed_devices)
		return -ENOMEM;

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			     synchronize_rcu);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->total_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}

/*
 * Store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid,
				   (unsigned long)btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid,
				   (unsigned long)btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
					   fs_uuid);
		BUG_ON(!device); /* Logic error */

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

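/*
 * Add a new writeable device at @device_path to a mounted filesystem.
 * When the filesystem was mounted from seed devices this also sprouts
 * a new fsid and relocates the system chunks afterwards.
 */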
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct request_queue *q;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	struct rcu_string *name;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EROFS;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	devices = &root->fs_info->fs_devices->devices;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			mutex_unlock(
				&root->fs_info->fs_devices->device_list_mutex);
			goto error;
		}
	}
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_NOFS);
	if (!name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}
	rcu_assign_pointer(device->name, name);

	ret = find_next_devid(root, &device->devid);
	if (ret) {
		rcu_string_free(device->name);
		kfree(device);
		goto error;
	}

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		rcu_string_free(device->name);
		kfree(device);
		ret = PTR_ERR(trans);
		goto error;
	}

	lock_chunks(root);

	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->is_tgtdev_for_dev_replace = 0;
	device->mode = FMODE_EXCL;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(root);
		BUG_ON(ret); /* -ENOMEM */
	}

	device->fs_devices = root->fs_info->fs_devices;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	root->fs_info->fs_devices->total_devices++;
	if (device->can_discard)
		root->fs_info->fs_devices->num_can_discard++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space += device->total_bytes;
	spin_unlock(&root->fs_info->free_chunk_lock);

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
	btrfs_set_super_total_bytes(root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
	btrfs_set_super_num_devices(root->fs_info->super_copy,
				    total_bytes + 1);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		ret = init_first_rw_device(trans, root, device);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}
		ret = btrfs_finish_sprout(trans, root);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}
	} else {
		ret = btrfs_add_device(trans, root, device);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}
	}

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
	ret = btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		if (ret) /* transaction commit */
			return ret;

		ret = btrfs_relocate_sys_chunks(root);
		if (ret < 0)
			btrfs_error(root->fs_info, ret,
				    "Failed to relocate sys chunks after "
				    "device initialization. This can be fixed "
				    "using the \"btrfs balance\" command.");
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) == -ENOENT)
				return 0;
			return PTR_ERR(trans);
		}
		ret = btrfs_commit_transaction(trans, root);
	}

	return ret;

error_trans:
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	rcu_string_free(device->name);
	kfree(device);
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	return ret;
}

2020 int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2021 struct btrfs_device **device_out)
2022 {
2023 struct request_queue *q;
2024 struct btrfs_device *device;
2025 struct block_device *bdev;
2026 struct btrfs_fs_info *fs_info = root->fs_info;
2027 struct list_head *devices;
2028 struct rcu_string *name;
2029 int ret = 0;
2030
2031 *device_out = NULL;
2032 if (fs_info->fs_devices->seeding)
2033 return -EINVAL;
2034
2035 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2036 fs_info->bdev_holder);
2037 if (IS_ERR(bdev))
2038 return PTR_ERR(bdev);
2039
2040 filemap_write_and_wait(bdev->bd_inode->i_mapping);
2041
2042 devices = &fs_info->fs_devices->devices;
2043 list_for_each_entry(device, devices, dev_list) {
2044 if (device->bdev == bdev) {
2045 ret = -EEXIST;
2046 goto error;
2047 }
2048 }
2049
2050 device = kzalloc(sizeof(*device), GFP_NOFS);
2051 if (!device) {
2052 ret = -ENOMEM;
2053 goto error;
2054 }
2055
2056 name = rcu_string_strdup(device_path, GFP_NOFS);
2057 if (!name) {
2058 kfree(device);
2059 ret = -ENOMEM;
2060 goto error;
2061 }
2062 rcu_assign_pointer(device->name, name);
2063
2064 q = bdev_get_queue(bdev);
2065 if (blk_queue_discard(q))
2066 device->can_discard = 1;
2067 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2068 device->writeable = 1;
2069 device->work.func = pending_bios_fn;
2070 generate_random_uuid(device->uuid);
2071 device->devid = BTRFS_DEV_REPLACE_DEVID;
2072 spin_lock_init(&device->io_lock);
2073 device->generation = 0;
2074 device->io_width = root->sectorsize;
2075 device->io_align = root->sectorsize;
2076 device->sector_size = root->sectorsize;
2077 device->total_bytes = i_size_read(bdev->bd_inode);
2078 device->disk_total_bytes = device->total_bytes;
2079 device->dev_root = fs_info->dev_root;
2080 device->bdev = bdev;
2081 device->in_fs_metadata = 1;
2082 device->is_tgtdev_for_dev_replace = 1;
2083 device->mode = FMODE_EXCL;
2084 set_blocksize(device->bdev, 4096);
2085 device->fs_devices = fs_info->fs_devices;
2086 list_add(&device->dev_list, &fs_info->fs_devices->devices);
2087 fs_info->fs_devices->num_devices++;
2088 fs_info->fs_devices->open_devices++;
2089 if (device->can_discard)
2090 fs_info->fs_devices->num_can_discard++;
2091 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2092
2093 *device_out = device;
2094 return ret;
2095
2096 error:
2097 blkdev_put(bdev, FMODE_EXCL);
2098 return ret;
2099 }
2100
2101 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
2102 struct btrfs_device *tgtdev)
2103 {
2104 WARN_ON(fs_info->fs_devices->rw_devices == 0);
2105 tgtdev->io_width = fs_info->dev_root->sectorsize;
2106 tgtdev->io_align = fs_info->dev_root->sectorsize;
2107 tgtdev->sector_size = fs_info->dev_root->sectorsize;
2108 tgtdev->dev_root = fs_info->dev_root;
2109 tgtdev->in_fs_metadata = 1;
2110 }
2111
2112 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2113 struct btrfs_device *device)
2114 {
2115 int ret;
2116 struct btrfs_path *path;
2117 struct btrfs_root *root;
2118 struct btrfs_dev_item *dev_item;
2119 struct extent_buffer *leaf;
2120 struct btrfs_key key;
2121
2122 root = device->dev_root->fs_info->chunk_root;
2123
2124 path = btrfs_alloc_path();
2125 if (!path)
2126 return -ENOMEM;
2127
2128 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2129 key.type = BTRFS_DEV_ITEM_KEY;
2130 key.offset = device->devid;
2131
2132 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2133 if (ret < 0)
2134 goto out;
2135
2136 if (ret > 0) {
2137 ret = -ENOENT;
2138 goto out;
2139 }
2140
2141 leaf = path->nodes[0];
2142 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2143
2144 btrfs_set_device_id(leaf, dev_item, device->devid);
2145 btrfs_set_device_type(leaf, dev_item, device->type);
2146 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2147 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2148 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2149 btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
2150 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
2151 btrfs_mark_buffer_dirty(leaf);
2152
2153 out:
2154 btrfs_free_path(path);
2155 return ret;
2156 }
2157
2158 static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
2159 struct btrfs_device *device, u64 new_size)
2160 {
2161 struct btrfs_super_block *super_copy =
2162 device->dev_root->fs_info->super_copy;
2163 u64 old_total = btrfs_super_total_bytes(super_copy);
2164 u64 diff = new_size - device->total_bytes;
2165
2166 if (!device->writeable)
2167 return -EACCES;
2168 if (new_size <= device->total_bytes ||
2169 device->is_tgtdev_for_dev_replace)
2170 return -EINVAL;
2171
2172 btrfs_set_super_total_bytes(super_copy, old_total + diff);
2173 device->fs_devices->total_rw_bytes += diff;
2174
2175 device->total_bytes = new_size;
2176 device->disk_total_bytes = new_size;
2177 btrfs_clear_space_info_full(device->dev_root->fs_info);
2178
2179 return btrfs_update_device(trans, device);
2180 }
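/*
 * Worked example (illustrative note, not part of the original source):
 * growing a device from 100 GiB to 150 GiB gives diff = 50 GiB, so the
 * helper above adds 50 GiB to the superblock total and to
 * total_rw_bytes, sets the device's total_bytes and disk_total_bytes
 * to the new size, clears any "full" flags on the space infos, and
 * then persists the change with btrfs_update_device().
 */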
2181
2182 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2183 struct btrfs_device *device, u64 new_size)
2184 {
2185 int ret;
2186 lock_chunks(device->dev_root);
2187 ret = __btrfs_grow_device(trans, device, new_size);
2188 unlock_chunks(device->dev_root);
2189 return ret;
2190 }
2191
2192 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2193 struct btrfs_root *root,
2194 u64 chunk_tree, u64 chunk_objectid,
2195 u64 chunk_offset)
2196 {
2197 int ret;
2198 struct btrfs_path *path;
2199 struct btrfs_key key;
2200
2201 root = root->fs_info->chunk_root;
2202 path = btrfs_alloc_path();
2203 if (!path)
2204 return -ENOMEM;
2205
2206 key.objectid = chunk_objectid;
2207 key.offset = chunk_offset;
2208 key.type = BTRFS_CHUNK_ITEM_KEY;
2209
2210 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2211 if (ret < 0)
2212 goto out;
2213 else if (ret > 0) { /* Logic error or corruption */
2214 btrfs_error(root->fs_info, -ENOENT,
2215 "Failed lookup while freeing chunk.");
2216 ret = -ENOENT;
2217 goto out;
2218 }
2219
2220 ret = btrfs_del_item(trans, root, path);
2221 if (ret < 0)
2222 btrfs_error(root->fs_info, ret,
2223 "Failed to delete chunk item.");
2224 out:
2225 btrfs_free_path(path);
2226 return ret;
2227 }
2228
2229 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
2230 u64 chunk_offset)
2231 {
2232 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2233 struct btrfs_disk_key *disk_key;
2234 struct btrfs_chunk *chunk;
2235 u8 *ptr;
2236 int ret = 0;
2237 u32 num_stripes;
2238 u32 array_size;
2239 u32 len = 0;
2240 u32 cur;
2241 struct btrfs_key key;
2242
2243 array_size = btrfs_super_sys_array_size(super_copy);
2244
2245 ptr = super_copy->sys_chunk_array;
2246 cur = 0;
2247
2248 while (cur < array_size) {
2249 disk_key = (struct btrfs_disk_key *)ptr;
2250 btrfs_disk_key_to_cpu(&key, disk_key);
2251
2252 len = sizeof(*disk_key);
2253
2254 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2255 chunk = (struct btrfs_chunk *)(ptr + len);
2256 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2257 len += btrfs_chunk_item_size(num_stripes);
2258 } else {
2259 ret = -EIO;
2260 break;
2261 }
2262 if (key.objectid == chunk_objectid &&
2263 key.offset == chunk_offset) {
2264 memmove(ptr, ptr + len, array_size - (cur + len));
2265 array_size -= len;
2266 btrfs_set_super_sys_array_size(super_copy, array_size);
2267 } else {
2268 ptr += len;
2269 cur += len;
2270 }
2271 }
2272 return ret;
2273 }
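/*
 * Illustrative sketch (not part of the original source): the
 * sys_chunk_array walked above is a packed sequence of
 * (disk key, chunk item) pairs, each chunk item sized by its stripe
 * count:
 *
 *   +----------+--------------------+----------+--------------------+
 *   | disk_key | chunk (n0 stripes) | disk_key | chunk (n1 stripes) |
 *   +----------+--------------------+----------+--------------------+
 *
 * so deleting an entry of total length 'len' at offset 'cur' is just
 *
 *   memmove(ptr, ptr + len, array_size - (cur + len));
 *   array_size -= len;
 *
 * followed by writing the shrunken size back into the superblock.
 */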
2274
2275 static int btrfs_relocate_chunk(struct btrfs_root *root,
2276 u64 chunk_tree, u64 chunk_objectid,
2277 u64 chunk_offset)
2278 {
2279 struct extent_map_tree *em_tree;
2280 struct btrfs_root *extent_root;
2281 struct btrfs_trans_handle *trans;
2282 struct extent_map *em;
2283 struct map_lookup *map;
2284 int ret;
2285 int i;
2286
2287 root = root->fs_info->chunk_root;
2288 extent_root = root->fs_info->extent_root;
2289 em_tree = &root->fs_info->mapping_tree.map_tree;
2290
2291 ret = btrfs_can_relocate(extent_root, chunk_offset);
2292 if (ret)
2293 return -ENOSPC;
2294
2295 /* step one, relocate all the extents inside this chunk */
2296 ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2297 if (ret)
2298 return ret;
2299
2300 trans = btrfs_start_transaction(root, 0);
2301 BUG_ON(IS_ERR(trans));
2302
2303 lock_chunks(root);
2304
2305 /*
2306 * step two, delete the device extents and the
2307 * chunk tree entries
2308 */
2309 read_lock(&em_tree->lock);
2310 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2311 read_unlock(&em_tree->lock);
2312
2313 BUG_ON(!em || em->start > chunk_offset ||
2314 em->start + em->len < chunk_offset);
2315 map = (struct map_lookup *)em->bdev;
2316
2317 for (i = 0; i < map->num_stripes; i++) {
2318 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
2319 map->stripes[i].physical);
2320 BUG_ON(ret);
2321
2322 if (map->stripes[i].dev) {
2323 ret = btrfs_update_device(trans, map->stripes[i].dev);
2324 BUG_ON(ret);
2325 }
2326 }
2327 ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
2328 chunk_offset);
2329
2330 BUG_ON(ret);
2331
2332 trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2333
2334 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2335 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2336 BUG_ON(ret);
2337 }
2338
2339 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
2340 BUG_ON(ret);
2341
2342 write_lock(&em_tree->lock);
2343 remove_extent_mapping(em_tree, em);
2344 write_unlock(&em_tree->lock);
2345
2346 kfree(map);
2347 em->bdev = NULL;
2348
2349 /* once for the tree */
2350 free_extent_map(em);
2351 /* once for us */
2352 free_extent_map(em);
2353
2354 unlock_chunks(root);
2355 btrfs_end_transaction(trans, root);
2356 return 0;
2357 }
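/*
 * Illustrative note (not part of the original source): chunk relocation
 * is therefore two-phase. btrfs_relocate_block_group() first copies the
 * live extents out of the chunk with no transaction held; only then
 * does a short transaction tear down the device extents, the chunk
 * item, the sys_chunk_array entry (for SYSTEM chunks), the block group
 * and the extent mapping.
 */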
2358
2359 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2360 {
2361 struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2362 struct btrfs_path *path;
2363 struct extent_buffer *leaf;
2364 struct btrfs_chunk *chunk;
2365 struct btrfs_key key;
2366 struct btrfs_key found_key;
2367 u64 chunk_tree = chunk_root->root_key.objectid;
2368 u64 chunk_type;
2369 bool retried = false;
2370 int failed = 0;
2371 int ret;
2372
2373 path = btrfs_alloc_path();
2374 if (!path)
2375 return -ENOMEM;
2376
2377 again:
2378 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2379 key.offset = (u64)-1;
2380 key.type = BTRFS_CHUNK_ITEM_KEY;
2381
2382 while (1) {
2383 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2384 if (ret < 0)
2385 goto error;
2386 BUG_ON(ret == 0); /* Corruption */
2387
2388 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2389 key.type);
2390 if (ret < 0)
2391 goto error;
2392 if (ret > 0)
2393 break;
2394
2395 leaf = path->nodes[0];
2396 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2397
2398 chunk = btrfs_item_ptr(leaf, path->slots[0],
2399 struct btrfs_chunk);
2400 chunk_type = btrfs_chunk_type(leaf, chunk);
2401 btrfs_release_path(path);
2402
2403 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2404 ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2405 found_key.objectid,
2406 found_key.offset);
2407 if (ret == -ENOSPC)
2408 failed++;
2409 else if (ret)
2410 BUG();
2411 }
2412
2413 if (found_key.offset == 0)
2414 break;
2415 key.offset = found_key.offset - 1;
2416 }
2417 ret = 0;
2418 if (failed && !retried) {
2419 failed = 0;
2420 retried = true;
2421 goto again;
2422 } else if (failed && retried) {
2423 WARN_ON(1);
2424 ret = -ENOSPC;
2425 }
2426 error:
2427 btrfs_free_path(path);
2428 return ret;
2429 }
2430
2431 static int insert_balance_item(struct btrfs_root *root,
2432 struct btrfs_balance_control *bctl)
2433 {
2434 struct btrfs_trans_handle *trans;
2435 struct btrfs_balance_item *item;
2436 struct btrfs_disk_balance_args disk_bargs;
2437 struct btrfs_path *path;
2438 struct extent_buffer *leaf;
2439 struct btrfs_key key;
2440 int ret, err;
2441
2442 path = btrfs_alloc_path();
2443 if (!path)
2444 return -ENOMEM;
2445
2446 trans = btrfs_start_transaction(root, 0);
2447 if (IS_ERR(trans)) {
2448 btrfs_free_path(path);
2449 return PTR_ERR(trans);
2450 }
2451
2452 key.objectid = BTRFS_BALANCE_OBJECTID;
2453 key.type = BTRFS_BALANCE_ITEM_KEY;
2454 key.offset = 0;
2455
2456 ret = btrfs_insert_empty_item(trans, root, path, &key,
2457 sizeof(*item));
2458 if (ret)
2459 goto out;
2460
2461 leaf = path->nodes[0];
2462 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2463
2464 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2465
2466 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2467 btrfs_set_balance_data(leaf, item, &disk_bargs);
2468 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2469 btrfs_set_balance_meta(leaf, item, &disk_bargs);
2470 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2471 btrfs_set_balance_sys(leaf, item, &disk_bargs);
2472
2473 btrfs_set_balance_flags(leaf, item, bctl->flags);
2474
2475 btrfs_mark_buffer_dirty(leaf);
2476 out:
2477 btrfs_free_path(path);
2478 err = btrfs_commit_transaction(trans, root);
2479 if (err && !ret)
2480 ret = err;
2481 return ret;
2482 }
2483
2484 static int del_balance_item(struct btrfs_root *root)
2485 {
2486 struct btrfs_trans_handle *trans;
2487 struct btrfs_path *path;
2488 struct btrfs_key key;
2489 int ret, err;
2490
2491 path = btrfs_alloc_path();
2492 if (!path)
2493 return -ENOMEM;
2494
2495 trans = btrfs_start_transaction(root, 0);
2496 if (IS_ERR(trans)) {
2497 btrfs_free_path(path);
2498 return PTR_ERR(trans);
2499 }
2500
2501 key.objectid = BTRFS_BALANCE_OBJECTID;
2502 key.type = BTRFS_BALANCE_ITEM_KEY;
2503 key.offset = 0;
2504
2505 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2506 if (ret < 0)
2507 goto out;
2508 if (ret > 0) {
2509 ret = -ENOENT;
2510 goto out;
2511 }
2512
2513 ret = btrfs_del_item(trans, root, path);
2514 out:
2515 btrfs_free_path(path);
2516 err = btrfs_commit_transaction(trans, root);
2517 if (err && !ret)
2518 ret = err;
2519 return ret;
2520 }
2521
2522 /*
2523 * This is a heuristic used to reduce the number of chunks balanced on
2524 * resume after balance was interrupted.
2525 */
2526 static void update_balance_args(struct btrfs_balance_control *bctl)
2527 {
2528 /*
2529 * Turn on soft mode for chunk types that were being converted.
2530 */
2531 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2532 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2533 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2534 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2535 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2536 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2537
2538 /*
2539 * Turn on the usage filter if it is not already in use. The idea is
2540 * that chunks that we have already balanced should be
2541 * reasonably full. Don't do it for chunks that are being
2542 * converted - that will keep us from relocating unconverted
2543 * (albeit full) chunks.
2544 */
2545 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2546 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2547 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2548 bctl->data.usage = 90;
2549 }
2550 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2551 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2552 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2553 bctl->sys.usage = 90;
2554 }
2555 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2556 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2557 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2558 bctl->meta.usage = 90;
2559 }
2560 }
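/*
 * Illustrative example (not part of the original source): for a balance
 * that was started as a conversion, resuming through the helper above
 * leaves e.g.
 *
 *   bctl->data.flags == CONVERT | SOFT
 *
 * so chunks already in the target profile are skipped, while a plain
 * balance with no filters picks up
 *
 *   bctl->data.flags == USAGE,  bctl->data.usage == 90
 *
 * so chunks that were already balanced (and are thus reasonably full)
 * are not relocated a second time.
 */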
2561
2562 /*
2563 * Should be called with both balance and volume mutexes held to
2564 * serialize other volume operations (add_dev/rm_dev/resize) with
2565 * restriper. Same goes for unset_balance_control.
2566 */
2567 static void set_balance_control(struct btrfs_balance_control *bctl)
2568 {
2569 struct btrfs_fs_info *fs_info = bctl->fs_info;
2570
2571 BUG_ON(fs_info->balance_ctl);
2572
2573 spin_lock(&fs_info->balance_lock);
2574 fs_info->balance_ctl = bctl;
2575 spin_unlock(&fs_info->balance_lock);
2576 }
2577
2578 static void unset_balance_control(struct btrfs_fs_info *fs_info)
2579 {
2580 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2581
2582 BUG_ON(!fs_info->balance_ctl);
2583
2584 spin_lock(&fs_info->balance_lock);
2585 fs_info->balance_ctl = NULL;
2586 spin_unlock(&fs_info->balance_lock);
2587
2588 kfree(bctl);
2589 }
2590
2591 /*
2592 * Balance filters. Return 1 if chunk should be filtered out
2593 * (should not be balanced).
2594 */
2595 static int chunk_profiles_filter(u64 chunk_type,
2596 struct btrfs_balance_args *bargs)
2597 {
2598 chunk_type = chunk_to_extended(chunk_type) &
2599 BTRFS_EXTENDED_PROFILE_MASK;
2600
2601 if (bargs->profiles & chunk_type)
2602 return 0;
2603
2604 return 1;
2605 }
2606
2607 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2608 struct btrfs_balance_args *bargs)
2609 {
2610 struct btrfs_block_group_cache *cache;
2611 u64 chunk_used, user_thresh;
2612 int ret = 1;
2613
2614 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2615 chunk_used = btrfs_block_group_used(&cache->item);
2616
2617 user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
2618 if (chunk_used < user_thresh)
2619 ret = 0;
2620
2621 btrfs_put_block_group(cache);
2622 return ret;
2623 }
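/*
 * Worked example (illustrative, not part of the original source):
 * div_factor_fine() computes size * usage / 100, so for a 1 GiB chunk
 * with bargs->usage == 50:
 *
 *   user_thresh = 1073741824 * 50 / 100 = 536870912 bytes
 *
 * A chunk with fewer used bytes than that returns 0 (balance it); a
 * fuller chunk returns 1 (filter it out).
 */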
2624
2625 static int chunk_devid_filter(struct extent_buffer *leaf,
2626 struct btrfs_chunk *chunk,
2627 struct btrfs_balance_args *bargs)
2628 {
2629 struct btrfs_stripe *stripe;
2630 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2631 int i;
2632
2633 for (i = 0; i < num_stripes; i++) {
2634 stripe = btrfs_stripe_nr(chunk, i);
2635 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2636 return 0;
2637 }
2638
2639 return 1;
2640 }
2641
2642 /* [pstart, pend) */
2643 static int chunk_drange_filter(struct extent_buffer *leaf,
2644 struct btrfs_chunk *chunk,
2645 u64 chunk_offset,
2646 struct btrfs_balance_args *bargs)
2647 {
2648 struct btrfs_stripe *stripe;
2649 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2650 u64 stripe_offset;
2651 u64 stripe_length;
2652 int factor;
2653 int i;
2654
2655 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
2656 return 0;
2657
2658 if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
2659 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
2660 factor = 2;
2661 else
2662 factor = 1;
2663 factor = num_stripes / factor;
2664
2665 for (i = 0; i < num_stripes; i++) {
2666 stripe = btrfs_stripe_nr(chunk, i);
2667 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
2668 continue;
2669
2670 stripe_offset = btrfs_stripe_offset(leaf, stripe);
2671 stripe_length = btrfs_chunk_length(leaf, chunk);
2672 do_div(stripe_length, factor);
2673
2674 if (stripe_offset < bargs->pend &&
2675 stripe_offset + stripe_length > bargs->pstart)
2676 return 0;
2677 }
2678
2679 return 1;
2680 }
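/*
 * Worked example (illustrative, not part of the original source): the
 * per-device extent length is the chunk length divided by the number
 * of data stripes. For a 4 GiB RAID0 chunk with 4 stripes:
 *
 *   factor = 4 / 1 = 4,  stripe_length = 4 GiB / 4 = 1 GiB
 *
 * For a 1 GiB RAID1 chunk (2 mirrored stripes):
 *
 *   factor = 2 / 2 = 1,  stripe_length = 1 GiB
 *
 * Each matching stripe is then tested for overlap with [pstart, pend)
 * using the usual half-open interval check.
 */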
2681
2682 /* [vstart, vend) */
2683 static int chunk_vrange_filter(struct extent_buffer *leaf,
2684 struct btrfs_chunk *chunk,
2685 u64 chunk_offset,
2686 struct btrfs_balance_args *bargs)
2687 {
2688 if (chunk_offset < bargs->vend &&
2689 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2690 /* at least part of the chunk is inside this vrange */
2691 return 0;
2692
2693 return 1;
2694 }
2695
2696 static int chunk_soft_convert_filter(u64 chunk_type,
2697 struct btrfs_balance_args *bargs)
2698 {
2699 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2700 return 0;
2701
2702 chunk_type = chunk_to_extended(chunk_type) &
2703 BTRFS_EXTENDED_PROFILE_MASK;
2704
2705 if (bargs->target == chunk_type)
2706 return 1;
2707
2708 return 0;
2709 }
2710
2711 static int should_balance_chunk(struct btrfs_root *root,
2712 struct extent_buffer *leaf,
2713 struct btrfs_chunk *chunk, u64 chunk_offset)
2714 {
2715 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
2716 struct btrfs_balance_args *bargs = NULL;
2717 u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2718
2719 /* type filter */
2720 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
2721 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
2722 return 0;
2723 }
2724
2725 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
2726 bargs = &bctl->data;
2727 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2728 bargs = &bctl->sys;
2729 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
2730 bargs = &bctl->meta;
2731
2732 /* profiles filter */
2733 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
2734 chunk_profiles_filter(chunk_type, bargs)) {
2735 return 0;
2736 }
2737
2738 /* usage filter */
2739 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
2740 chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
2741 return 0;
2742 }
2743
2744 /* devid filter */
2745 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
2746 chunk_devid_filter(leaf, chunk, bargs)) {
2747 return 0;
2748 }
2749
2750 /* drange filter, makes sense only with devid filter */
2751 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
2752 chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2753 return 0;
2754 }
2755
2756 /* vrange filter */
2757 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
2758 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2759 return 0;
2760 }
2761
2762 /* soft profile changing mode */
2763 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
2764 chunk_soft_convert_filter(chunk_type, bargs)) {
2765 return 0;
2766 }
2767
2768 return 1;
2769 }
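/*
 * Illustrative note (not part of the original source): the filters
 * above form an AND chain -- a chunk is relocated only if it survives
 * every filter whose flag is set in its btrfs_balance_args. E.g. with
 * both DEVID and DRANGE set, only chunks that have a stripe on the
 * given device *and* overlap the given physical byte range on it are
 * balanced.
 */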
2770
2771 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2772 {
2773 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2774 struct btrfs_root *chunk_root = fs_info->chunk_root;
2775 struct btrfs_root *dev_root = fs_info->dev_root;
2776 struct list_head *devices;
2777 struct btrfs_device *device;
2778 u64 old_size;
2779 u64 size_to_free;
2780 struct btrfs_chunk *chunk;
2781 struct btrfs_path *path;
2782 struct btrfs_key key;
2783 struct btrfs_key found_key;
2784 struct btrfs_trans_handle *trans;
2785 struct extent_buffer *leaf;
2786 int slot;
2787 int ret;
2788 int enospc_errors = 0;
2789 bool counting = true;
2790
2791 /* step one, make some room on all the devices */
2792 devices = &fs_info->fs_devices->devices;
2793 list_for_each_entry(device, devices, dev_list) {
2794 old_size = device->total_bytes;
2795 size_to_free = div_factor(old_size, 1);
2796 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2797 if (!device->writeable ||
2798 device->total_bytes - device->bytes_used > size_to_free ||
2799 device->is_tgtdev_for_dev_replace)
2800 continue;
2801
2802 ret = btrfs_shrink_device(device, old_size - size_to_free);
2803 if (ret == -ENOSPC)
2804 break;
2805 BUG_ON(ret);
2806
2807 trans = btrfs_start_transaction(dev_root, 0);
2808 BUG_ON(IS_ERR(trans));
2809
2810 ret = btrfs_grow_device(trans, device, old_size);
2811 BUG_ON(ret);
2812
2813 btrfs_end_transaction(trans, dev_root);
2814 }
2815
2816 /* step two, relocate all the chunks */
2817 path = btrfs_alloc_path();
2818 if (!path) {
2819 ret = -ENOMEM;
2820 goto error;
2821 }
2822
2823 /* zero out stat counters */
2824 spin_lock(&fs_info->balance_lock);
2825 memset(&bctl->stat, 0, sizeof(bctl->stat));
2826 spin_unlock(&fs_info->balance_lock);
2827 again:
2828 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2829 key.offset = (u64)-1;
2830 key.type = BTRFS_CHUNK_ITEM_KEY;
2831
2832 while (1) {
2833 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2834 atomic_read(&fs_info->balance_cancel_req)) {
2835 ret = -ECANCELED;
2836 goto error;
2837 }
2838
2839 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2840 if (ret < 0)
2841 goto error;
2842
2843 /*
2844 * this shouldn't happen, it means the last relocate
2845 * failed
2846 */
2847 if (ret == 0)
2848 BUG(); /* FIXME break ? */
2849
2850 ret = btrfs_previous_item(chunk_root, path, 0,
2851 BTRFS_CHUNK_ITEM_KEY);
2852 if (ret) {
2853 ret = 0;
2854 break;
2855 }
2856
2857 leaf = path->nodes[0];
2858 slot = path->slots[0];
2859 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2860
2861 if (found_key.objectid != key.objectid)
2862 break;
2863
2864 /* chunk zero is special */
2865 if (found_key.offset == 0)
2866 break;
2867
2868 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2869
2870 if (!counting) {
2871 spin_lock(&fs_info->balance_lock);
2872 bctl->stat.considered++;
2873 spin_unlock(&fs_info->balance_lock);
2874 }
2875
2876 ret = should_balance_chunk(chunk_root, leaf, chunk,
2877 found_key.offset);
2878 btrfs_release_path(path);
2879 if (!ret)
2880 goto loop;
2881
2882 if (counting) {
2883 spin_lock(&fs_info->balance_lock);
2884 bctl->stat.expected++;
2885 spin_unlock(&fs_info->balance_lock);
2886 goto loop;
2887 }
2888
2889 ret = btrfs_relocate_chunk(chunk_root,
2890 chunk_root->root_key.objectid,
2891 found_key.objectid,
2892 found_key.offset);
2893 if (ret && ret != -ENOSPC)
2894 goto error;
2895 if (ret == -ENOSPC) {
2896 enospc_errors++;
2897 } else {
2898 spin_lock(&fs_info->balance_lock);
2899 bctl->stat.completed++;
2900 spin_unlock(&fs_info->balance_lock);
2901 }
2902 loop:
2903 key.offset = found_key.offset - 1;
2904 }
2905
2906 if (counting) {
2907 btrfs_release_path(path);
2908 counting = false;
2909 goto again;
2910 }
2911 error:
2912 btrfs_free_path(path);
2913 if (enospc_errors) {
2914 printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
2915 enospc_errors);
2916 if (!ret)
2917 ret = -ENOSPC;
2918 }
2919
2920 return ret;
2921 }
2922
2923 /**
2924 * alloc_profile_is_valid - see if a given profile is valid and reduced
2925 * @flags: profile to validate
2926 * @extended: if true @flags is treated as an extended profile
2927 */
2928 static int alloc_profile_is_valid(u64 flags, int extended)
2929 {
2930 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
2931 BTRFS_BLOCK_GROUP_PROFILE_MASK);
2932
2933 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
2934
2935 /* 1) check that all other bits are zeroed */
2936 if (flags & ~mask)
2937 return 0;
2938
2939 /* 2) see if profile is reduced */
2940 if (flags == 0)
2941 return !extended; /* "0" is valid for usual profiles */
2942
2943 /* true if exactly one bit set */
2944 return (flags & (flags - 1)) == 0;
2945 }
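/*
 * Illustrative sketch (not part of the original source): the final test
 * above is the classic "exactly one bit set" idiom. x & (x - 1) clears
 * the lowest set bit of x, so the expression is zero iff at most one
 * bit is set:
 *
 *   x = 0b0100:  x & (x - 1) = 0b0100 & 0b0011 = 0       (reduced)
 *   x = 0b0110:  x & (x - 1) = 0b0110 & 0b0101 = 0b0100  (not reduced)
 *
 * The x == 0 case is handled separately because "0" (single) is only
 * valid for non-extended profiles.
 */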
2946
2947 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
2948 {
2949 /* cancel requested || normal exit path */
2950 return atomic_read(&fs_info->balance_cancel_req) ||
2951 (atomic_read(&fs_info->balance_pause_req) == 0 &&
2952 atomic_read(&fs_info->balance_cancel_req) == 0);
2953 }
2954
2955 static void __cancel_balance(struct btrfs_fs_info *fs_info)
2956 {
2957 int ret;
2958
2959 unset_balance_control(fs_info);
2960 ret = del_balance_item(fs_info->tree_root);
2961 BUG_ON(ret);
2962 }
2963
2964 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
2965 struct btrfs_ioctl_balance_args *bargs);
2966
2967 /*
2968 * Should be called with both balance and volume mutexes held
2969 */
2970 int btrfs_balance(struct btrfs_balance_control *bctl,
2971 struct btrfs_ioctl_balance_args *bargs)
2972 {
2973 struct btrfs_fs_info *fs_info = bctl->fs_info;
2974 u64 allowed;
2975 int mixed = 0;
2976 int ret;
2977 u64 num_devices;
2978
2979 if (btrfs_fs_closing(fs_info) ||
2980 atomic_read(&fs_info->balance_pause_req) ||
2981 atomic_read(&fs_info->balance_cancel_req)) {
2982 ret = -EINVAL;
2983 goto out;
2984 }
2985
2986 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
2987 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
2988 mixed = 1;
2989
2990 /*
2991 * In case of mixed groups, both data and meta should be picked,
2992 * and identical options should be given for both of them.
2993 */
2994 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
2995 if (mixed && (bctl->flags & allowed)) {
2996 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
2997 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
2998 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
2999 printk(KERN_ERR "btrfs: with mixed groups data and "
3000 "metadata balance options must be the same\n");
3001 ret = -EINVAL;
3002 goto out;
3003 }
3004 }
3005
3006 num_devices = fs_info->fs_devices->num_devices;
3007 btrfs_dev_replace_lock(&fs_info->dev_replace);
3008 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3009 BUG_ON(num_devices < 1);
3010 num_devices--;
3011 }
3012 btrfs_dev_replace_unlock(&fs_info->dev_replace);
3013 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3014 if (num_devices == 1)
3015 allowed |= BTRFS_BLOCK_GROUP_DUP;
3016 else if (num_devices < 4)
3017 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3018 else
3019 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
3020 BTRFS_BLOCK_GROUP_RAID10);
3021
3022 if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3023 (!alloc_profile_is_valid(bctl->data.target, 1) ||
3024 (bctl->data.target & ~allowed))) {
3025 printk(KERN_ERR "btrfs: unable to start balance with target "
3026 "data profile %llu\n",
3027 (unsigned long long)bctl->data.target);
3028 ret = -EINVAL;
3029 goto out;
3030 }
3031 if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3032 (!alloc_profile_is_valid(bctl->meta.target, 1) ||
3033 (bctl->meta.target & ~allowed))) {
3034 printk(KERN_ERR "btrfs: unable to start balance with target "
3035 "metadata profile %llu\n",
3036 (unsigned long long)bctl->meta.target);
3037 ret = -EINVAL;
3038 goto out;
3039 }
3040 if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3041 (!alloc_profile_is_valid(bctl->sys.target, 1) ||
3042 (bctl->sys.target & ~allowed))) {
3043 printk(KERN_ERR "btrfs: unable to start balance with target "
3044 "system profile %llu\n",
3045 (unsigned long long)bctl->sys.target);
3046 ret = -EINVAL;
3047 goto out;
3048 }
3049
3050 /* allow dup'ed data chunks only in mixed mode */
3051 if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3052 (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
3053 printk(KERN_ERR "btrfs: dup for data is not allowed\n");
3054 ret = -EINVAL;
3055 goto out;
3056 }
3057
3058 /* allow reducing meta or sys integrity only if force is set */
3059 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3060 BTRFS_BLOCK_GROUP_RAID10;
3061 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3062 (fs_info->avail_system_alloc_bits & allowed) &&
3063 !(bctl->sys.target & allowed)) ||
3064 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3065 (fs_info->avail_metadata_alloc_bits & allowed) &&
3066 !(bctl->meta.target & allowed))) {
3067 if (bctl->flags & BTRFS_BALANCE_FORCE) {
3068 printk(KERN_INFO "btrfs: force reducing metadata "
3069 "integrity\n");
3070 } else {
3071 printk(KERN_ERR "btrfs: balance will reduce metadata "
3072 "integrity, use force if you want this\n");
3073 ret = -EINVAL;
3074 goto out;
3075 }
3076 }
3077
3078 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3079 int num_tolerated_disk_barrier_failures;
3080 u64 target = bctl->sys.target;
3081
3082 num_tolerated_disk_barrier_failures =
3083 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3084 if (num_tolerated_disk_barrier_failures > 0 &&
3085 (target &
3086 (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3087 BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
3088 num_tolerated_disk_barrier_failures = 0;
3089 else if (num_tolerated_disk_barrier_failures > 1 &&
3090 (target &
3091 (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
3092 num_tolerated_disk_barrier_failures = 1;
3093
3094 fs_info->num_tolerated_disk_barrier_failures =
3095 num_tolerated_disk_barrier_failures;
3096 }
3097
3098 ret = insert_balance_item(fs_info->tree_root, bctl);
3099 if (ret && ret != -EEXIST)
3100 goto out;
3101
3102 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3103 BUG_ON(ret == -EEXIST);
3104 set_balance_control(bctl);
3105 } else {
3106 BUG_ON(ret != -EEXIST);
3107 spin_lock(&fs_info->balance_lock);
3108 update_balance_args(bctl);
3109 spin_unlock(&fs_info->balance_lock);
3110 }
3111
3112 atomic_inc(&fs_info->balance_running);
3113 mutex_unlock(&fs_info->balance_mutex);
3114
3115 ret = __btrfs_balance(fs_info);
3116
3117 mutex_lock(&fs_info->balance_mutex);
3118 atomic_dec(&fs_info->balance_running);
3119
3120 if (bargs) {
3121 memset(bargs, 0, sizeof(*bargs));
3122 update_ioctl_balance_args(fs_info, 0, bargs);
3123 }
3124
3125 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3126 balance_need_close(fs_info)) {
3127 __cancel_balance(fs_info);
3128 }
3129
3130 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3131 fs_info->num_tolerated_disk_barrier_failures =
3132 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3133 }
3134
3135 wake_up(&fs_info->balance_wait_q);
3136
3137 return ret;
3138 out:
3139 if (bctl->flags & BTRFS_BALANCE_RESUME)
3140 __cancel_balance(fs_info);
3141 else
3142 kfree(bctl);
3143 return ret;
3144 }
3145
3146 static int balance_kthread(void *data)
3147 {
3148 struct btrfs_fs_info *fs_info = data;
3149 int ret = 0;
3150
3151 mutex_lock(&fs_info->volume_mutex);
3152 mutex_lock(&fs_info->balance_mutex);
3153
3154 if (fs_info->balance_ctl) {
3155 printk(KERN_INFO "btrfs: continuing balance\n");
3156 ret = btrfs_balance(fs_info->balance_ctl, NULL);
3157 }
3158
3159 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3160 mutex_unlock(&fs_info->balance_mutex);
3161 mutex_unlock(&fs_info->volume_mutex);
3162
3163 return ret;
3164 }
3165
3166 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3167 {
3168 struct task_struct *tsk;
3169
3170 spin_lock(&fs_info->balance_lock);
3171 if (!fs_info->balance_ctl) {
3172 spin_unlock(&fs_info->balance_lock);
3173 return 0;
3174 }
3175 spin_unlock(&fs_info->balance_lock);
3176
3177 if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
3178 printk(KERN_INFO "btrfs: force skipping balance\n");
3179 return 0;
3180 }
3181
3182 WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
3183 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3184 if (IS_ERR(tsk))
3185 return PTR_ERR(tsk);
3186
3187 return 0;
3188 }
3189
3190 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3191 {
3192 struct btrfs_balance_control *bctl;
3193 struct btrfs_balance_item *item;
3194 struct btrfs_disk_balance_args disk_bargs;
3195 struct btrfs_path *path;
3196 struct extent_buffer *leaf;
3197 struct btrfs_key key;
3198 int ret;
3199
3200 path = btrfs_alloc_path();
3201 if (!path)
3202 return -ENOMEM;
3203
3204 key.objectid = BTRFS_BALANCE_OBJECTID;
3205 key.type = BTRFS_BALANCE_ITEM_KEY;
3206 key.offset = 0;
3207
3208 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3209 if (ret < 0)
3210 goto out;
3211 if (ret > 0) { /* ret = -ENOENT; */
3212 ret = 0;
3213 goto out;
3214 }
3215
3216 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3217 if (!bctl) {
3218 ret = -ENOMEM;
3219 goto out;
3220 }
3221
3222 leaf = path->nodes[0];
3223 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3224
3225 bctl->fs_info = fs_info;
3226 bctl->flags = btrfs_balance_flags(leaf, item);
3227 bctl->flags |= BTRFS_BALANCE_RESUME;
3228
3229 btrfs_balance_data(leaf, item, &disk_bargs);
3230 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3231 btrfs_balance_meta(leaf, item, &disk_bargs);
3232 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3233 btrfs_balance_sys(leaf, item, &disk_bargs);
3234 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3235
3236 mutex_lock(&fs_info->volume_mutex);
3237 mutex_lock(&fs_info->balance_mutex);
3238
3239 set_balance_control(bctl);
3240
3241 mutex_unlock(&fs_info->balance_mutex);
3242 mutex_unlock(&fs_info->volume_mutex);
3243 out:
3244 btrfs_free_path(path);
3245 return ret;
3246 }
3247
3248 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
3249 {
3250 int ret = 0;
3251
3252 mutex_lock(&fs_info->balance_mutex);
3253 if (!fs_info->balance_ctl) {
3254 mutex_unlock(&fs_info->balance_mutex);
3255 return -ENOTCONN;
3256 }
3257
3258 if (atomic_read(&fs_info->balance_running)) {
3259 atomic_inc(&fs_info->balance_pause_req);
3260 mutex_unlock(&fs_info->balance_mutex);
3261
3262 wait_event(fs_info->balance_wait_q,
3263 atomic_read(&fs_info->balance_running) == 0);
3264
3265 mutex_lock(&fs_info->balance_mutex);
3266 /* we are good with balance_ctl ripped off from under us */
3267 BUG_ON(atomic_read(&fs_info->balance_running));
3268 atomic_dec(&fs_info->balance_pause_req);
3269 } else {
3270 ret = -ENOTCONN;
3271 }
3272
3273 mutex_unlock(&fs_info->balance_mutex);
3274 return ret;
3275 }
3276
3277 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3278 {
3279 mutex_lock(&fs_info->balance_mutex);
3280 if (!fs_info->balance_ctl) {
3281 mutex_unlock(&fs_info->balance_mutex);
3282 return -ENOTCONN;
3283 }
3284
3285 atomic_inc(&fs_info->balance_cancel_req);
3286 /*
3287 * if we are running, just wait and return; the balance item is
3288 * deleted in btrfs_balance in this case
3289 */
3290 if (atomic_read(&fs_info->balance_running)) {
3291 mutex_unlock(&fs_info->balance_mutex);
3292 wait_event(fs_info->balance_wait_q,
3293 atomic_read(&fs_info->balance_running) == 0);
3294 mutex_lock(&fs_info->balance_mutex);
3295 } else {
3296 /* __cancel_balance needs volume_mutex */
3297 mutex_unlock(&fs_info->balance_mutex);
3298 mutex_lock(&fs_info->volume_mutex);
3299 mutex_lock(&fs_info->balance_mutex);
3300
3301 if (fs_info->balance_ctl)
3302 __cancel_balance(fs_info);
3303
3304 mutex_unlock(&fs_info->volume_mutex);
3305 }
3306
3307 BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3308 atomic_dec(&fs_info->balance_cancel_req);
3309 mutex_unlock(&fs_info->balance_mutex);
3310 return 0;
3311 }
3312
3313 /*
3314 * shrinking a device means finding all of the device extents past
3315 * the new size, and then following the back refs to the chunks.
3316 * The chunk relocation code actually frees the device extents.
3317 */
3318 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3319 {
3320 struct btrfs_trans_handle *trans;
3321 struct btrfs_root *root = device->dev_root;
3322 struct btrfs_dev_extent *dev_extent = NULL;
3323 struct btrfs_path *path;
3324 u64 length;
3325 u64 chunk_tree;
3326 u64 chunk_objectid;
3327 u64 chunk_offset;
3328 int ret;
3329 int slot;
3330 int failed = 0;
3331 bool retried = false;
3332 struct extent_buffer *l;
3333 struct btrfs_key key;
3334 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3335 u64 old_total = btrfs_super_total_bytes(super_copy);
3336 u64 old_size = device->total_bytes;
3337 u64 diff = device->total_bytes - new_size;
3338
3339 if (device->is_tgtdev_for_dev_replace)
3340 return -EINVAL;
3341
3342 path = btrfs_alloc_path();
3343 if (!path)
3344 return -ENOMEM;
3345
3346 path->reada = 2;
3347
3348 lock_chunks(root);
3349
3350 device->total_bytes = new_size;
3351 if (device->writeable) {
3352 device->fs_devices->total_rw_bytes -= diff;
3353 spin_lock(&root->fs_info->free_chunk_lock);
3354 root->fs_info->free_chunk_space -= diff;
3355 spin_unlock(&root->fs_info->free_chunk_lock);
3356 }
3357 unlock_chunks(root);
3358
3359 again:
3360 key.objectid = device->devid;
3361 key.offset = (u64)-1;
3362 key.type = BTRFS_DEV_EXTENT_KEY;
3363
3364 do {
3365 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3366 if (ret < 0)
3367 goto done;
3368
3369 ret = btrfs_previous_item(root, path, 0, key.type);
3370 if (ret < 0)
3371 goto done;
3372 if (ret) {
3373 ret = 0;
3374 btrfs_release_path(path);
3375 break;
3376 }
3377
3378 l = path->nodes[0];
3379 slot = path->slots[0];
3380 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3381
3382 if (key.objectid != device->devid) {
3383 btrfs_release_path(path);
3384 break;
3385 }
3386
3387 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3388 length = btrfs_dev_extent_length(l, dev_extent);
3389
3390 if (key.offset + length <= new_size) {
3391 btrfs_release_path(path);
3392 break;
3393 }
3394
3395 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3396 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3397 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3398 btrfs_release_path(path);
3399
3400 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3401 chunk_offset);
3402 if (ret && ret != -ENOSPC)
3403 goto done;
3404 if (ret == -ENOSPC)
3405 failed++;
3406 } while (key.offset-- > 0);
3407
3408 if (failed && !retried) {
3409 failed = 0;
3410 retried = true;
3411 goto again;
3412 } else if (failed && retried) {
3413 ret = -ENOSPC;
3414 lock_chunks(root);
3415
3416 device->total_bytes = old_size;
3417 if (device->writeable)
3418 device->fs_devices->total_rw_bytes += diff;
3419 spin_lock(&root->fs_info->free_chunk_lock);
3420 root->fs_info->free_chunk_space += diff;
3421 spin_unlock(&root->fs_info->free_chunk_lock);
3422 unlock_chunks(root);
3423 goto done;
3424 }
3425
3426 /* Shrinking succeeded, else we would be at "done". */
3427 trans = btrfs_start_transaction(root, 0);
3428 if (IS_ERR(trans)) {
3429 ret = PTR_ERR(trans);
3430 goto done;
3431 }
3432
3433 lock_chunks(root);
3434
3435 device->disk_total_bytes = new_size;
3436 /* Now btrfs_update_device() will change the on-disk size. */
3437 ret = btrfs_update_device(trans, device);
3438 if (ret) {
3439 unlock_chunks(root);
3440 btrfs_end_transaction(trans, root);
3441 goto done;
3442 }
3443 WARN_ON(diff > old_total);
3444 btrfs_set_super_total_bytes(super_copy, old_total - diff);
3445 unlock_chunks(root);
3446 btrfs_end_transaction(trans, root);
3447 done:
3448 btrfs_free_path(path);
3449 return ret;
3450 }
3451
3452 static int btrfs_add_system_chunk(struct btrfs_root *root,
3453 struct btrfs_key *key,
3454 struct btrfs_chunk *chunk, int item_size)
3455 {
3456 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3457 struct btrfs_disk_key disk_key;
3458 u32 array_size;
3459 u8 *ptr;
3460
3461 array_size = btrfs_super_sys_array_size(super_copy);
3462 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3463 return -EFBIG;
3464
3465 ptr = super_copy->sys_chunk_array + array_size;
3466 btrfs_cpu_key_to_disk(&disk_key, key);
3467 memcpy(ptr, &disk_key, sizeof(disk_key));
3468 ptr += sizeof(disk_key);
3469 memcpy(ptr, chunk, item_size);
3470 item_size += sizeof(disk_key);
3471 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
3472 return 0;
3473 }
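/*
 * Illustrative sketch (not part of the original source): the append
 * above is the inverse of the removal in btrfs_del_sys_chunk(). A new
 * entry takes sizeof(struct btrfs_disk_key) + btrfs_chunk_item_size(n)
 * bytes at the current end of the array:
 *
 *   memcpy(ptr, &disk_key, sizeof(disk_key));
 *   memcpy(ptr + sizeof(disk_key), chunk, item_size);
 *
 * and the -EFBIG check keeps the array within the fixed
 * BTRFS_SYSTEM_CHUNK_ARRAY_SIZE bytes reserved in the superblock.
 */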
3474
3475 /*
3476 * sort the devices in descending order by max_avail, total_avail
3477 */
3478 static int btrfs_cmp_device_info(const void *a, const void *b)
3479 {
3480 const struct btrfs_device_info *di_a = a;
3481 const struct btrfs_device_info *di_b = b;
3482
3483 if (di_a->max_avail > di_b->max_avail)
3484 return -1;
3485 if (di_a->max_avail < di_b->max_avail)
3486 return 1;
3487 if (di_a->total_avail > di_b->total_avail)
3488 return -1;
3489 if (di_a->total_avail < di_b->total_avail)
3490 return 1;
3491 return 0;
3492 }
3493
3494 struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
3495 { 2, 1, 0, 4, 2, 2 /* raid10 */ },
3496 { 1, 1, 2, 2, 2, 2 /* raid1 */ },
3497 { 1, 2, 1, 1, 1, 2 /* dup */ },
3498 { 1, 1, 0, 2, 1, 1 /* raid0 */ },
3499 { 1, 1, 0, 1, 1, 1 /* single */ },
3500 };
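/*
 * Illustrative annotation (not part of the original source): the
 * columns above are { sub_stripes, dev_stripes, devs_max, devs_min,
 * devs_increment, ncopies }, matching struct btrfs_raid_attr. E.g. the
 * raid10 row reads: 2 sub-stripes, 1 stripe per device, no device
 * maximum (0 means unlimited), at least 4 devices, a device count that
 * is a multiple of 2, and 2 copies of the data.
 */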
3501
3502 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3503 struct btrfs_root *extent_root,
3504 struct map_lookup **map_ret,
3505 u64 *num_bytes_out, u64 *stripe_size_out,
3506 u64 start, u64 type)
3507 {
3508 struct btrfs_fs_info *info = extent_root->fs_info;
3509 struct btrfs_fs_devices *fs_devices = info->fs_devices;
3510 struct list_head *cur;
3511 struct map_lookup *map = NULL;
3512 struct extent_map_tree *em_tree;
3513 struct extent_map *em;
3514 struct btrfs_device_info *devices_info = NULL;
3515 u64 total_avail;
3516 int num_stripes; /* total number of stripes to allocate */
3517 int sub_stripes; /* sub_stripes info for map */
3518 int dev_stripes; /* stripes per dev */
3519 int devs_max; /* max devs to use */
3520 int devs_min; /* min devs needed */
3521 int devs_increment; /* ndevs has to be a multiple of this */
3522 int ncopies; /* how many copies the data has */
3523 int ret;
3524 u64 max_stripe_size;
3525 u64 max_chunk_size;
3526 u64 stripe_size;
3527 u64 num_bytes;
3528 int ndevs;
3529 int i;
3530 int j;
3531 int index;
3532
3533 BUG_ON(!alloc_profile_is_valid(type, 0));
3534
3535 if (list_empty(&fs_devices->alloc_list))
3536 return -ENOSPC;
3537
3538 index = __get_raid_index(type);
3539
3540 sub_stripes = btrfs_raid_array[index].sub_stripes;
3541 dev_stripes = btrfs_raid_array[index].dev_stripes;
3542 devs_max = btrfs_raid_array[index].devs_max;
3543 devs_min = btrfs_raid_array[index].devs_min;
3544 devs_increment = btrfs_raid_array[index].devs_increment;
3545 ncopies = btrfs_raid_array[index].ncopies;
3546
3547 if (type & BTRFS_BLOCK_GROUP_DATA) {
3548 max_stripe_size = 1024 * 1024 * 1024;
3549 max_chunk_size = 10 * max_stripe_size;
3550 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3551 /* for larger filesystems, use larger metadata chunks */
3552 if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
3553 max_stripe_size = 1024 * 1024 * 1024;
3554 else
3555 max_stripe_size = 256 * 1024 * 1024;
3556 max_chunk_size = max_stripe_size;
3557 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
3558 max_stripe_size = 32 * 1024 * 1024;
3559 max_chunk_size = 2 * max_stripe_size;
3560 } else {
3561 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
3562 type);
3563 BUG_ON(1);
3564 }
3565
3566 /* we don't want a chunk larger than 10% of writeable space */
3567 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
3568 max_chunk_size);
3569
3570 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
3571 GFP_NOFS);
3572 if (!devices_info)
3573 return -ENOMEM;
3574
3575 cur = fs_devices->alloc_list.next;
3576
3577 /*
3578 * in the first pass through the devices list, we gather information
3579 * about the available holes on each device.
3580 */
3581 ndevs = 0;
3582 while (cur != &fs_devices->alloc_list) {
3583 struct btrfs_device *device;
3584 u64 max_avail;
3585 u64 dev_offset;
3586
3587 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
3588
3589 cur = cur->next;
3590
3591 if (!device->writeable) {
3592 WARN(1, KERN_ERR
3593 "btrfs: read-only device in alloc_list\n");
3594 continue;
3595 }
3596
3597 if (!device->in_fs_metadata ||
3598 device->is_tgtdev_for_dev_replace)
3599 continue;
3600
3601 if (device->total_bytes > device->bytes_used)
3602 total_avail = device->total_bytes - device->bytes_used;
3603 else
3604 total_avail = 0;
3605
3606 /* If there is no space on this device, skip it. */
3607 if (total_avail == 0)
3608 continue;
3609
3610 ret = find_free_dev_extent(device,
3611 max_stripe_size * dev_stripes,
3612 &dev_offset, &max_avail);
3613 if (ret && ret != -ENOSPC)
3614 goto error;
3615
3616 if (ret == 0)
3617 max_avail = max_stripe_size * dev_stripes;
3618
3619 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
3620 continue;
3621
3622 devices_info[ndevs].dev_offset = dev_offset;
3623 devices_info[ndevs].max_avail = max_avail;
3624 devices_info[ndevs].total_avail = total_avail;
3625 devices_info[ndevs].dev = device;
3626 ++ndevs;
3627 WARN_ON(ndevs > fs_devices->rw_devices);
3628 }
3629
3630 /*
3631 * now sort the devices by hole size / available space
3632 */
3633 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
3634 btrfs_cmp_device_info, NULL);
3635
3636 /* round down to number of usable stripes */
3637 ndevs -= ndevs % devs_increment;
3638
3639 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
3640 ret = -ENOSPC;
3641 goto error;
3642 }
3643
3644 if (devs_max && ndevs > devs_max)
3645 ndevs = devs_max;
3646 /*
3647 * the primary goal is to maximize the number of stripes, so use as many
3648 * devices as possible, even if the stripes are not maximum sized.
3649 */
3650 stripe_size = devices_info[ndevs-1].max_avail;
3651 num_stripes = ndevs * dev_stripes;
3652
3653 if (stripe_size * ndevs > max_chunk_size * ncopies) {
3654 stripe_size = max_chunk_size * ncopies;
3655 do_div(stripe_size, ndevs);
3656 }
3657
3658 do_div(stripe_size, dev_stripes);
3659
3660 /* align to BTRFS_STRIPE_LEN */
3661 do_div(stripe_size, BTRFS_STRIPE_LEN);
3662 stripe_size *= BTRFS_STRIPE_LEN;
3663
3664 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3665 if (!map) {
3666 ret = -ENOMEM;
3667 goto error;
3668 }
3669 map->num_stripes = num_stripes;
3670
3671 for (i = 0; i < ndevs; ++i) {
3672 for (j = 0; j < dev_stripes; ++j) {
3673 int s = i * dev_stripes + j;
3674 map->stripes[s].dev = devices_info[i].dev;
3675 map->stripes[s].physical = devices_info[i].dev_offset +
3676 j * stripe_size;
3677 }
3678 }
3679 map->sector_size = extent_root->sectorsize;
3680 map->stripe_len = BTRFS_STRIPE_LEN;
3681 map->io_align = BTRFS_STRIPE_LEN;
3682 map->io_width = BTRFS_STRIPE_LEN;
3683 map->type = type;
3684 map->sub_stripes = sub_stripes;
3685
3686 *map_ret = map;
3687 num_bytes = stripe_size * (num_stripes / ncopies);
3688
3689 *stripe_size_out = stripe_size;
3690 *num_bytes_out = num_bytes;
3691
3692 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
3693
3694 em = alloc_extent_map();
3695 if (!em) {
3696 ret = -ENOMEM;
3697 goto error;
3698 }
3699 em->bdev = (struct block_device *)map;
3700 em->start = start;
3701 em->len = num_bytes;
3702 em->block_start = 0;
3703 em->block_len = em->len;
3704
3705 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3706 write_lock(&em_tree->lock);
3707 ret = add_extent_mapping(em_tree, em);
3708 write_unlock(&em_tree->lock);
3709 free_extent_map(em);
3710 if (ret)
3711 goto error;
3712
3713 ret = btrfs_make_block_group(trans, extent_root, 0, type,
3714 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3715 start, num_bytes);
3716 if (ret)
3717 goto error;
3718
3719 for (i = 0; i < map->num_stripes; ++i) {
3720 struct btrfs_device *device;
3721 u64 dev_offset;
3722
3723 device = map->stripes[i].dev;
3724 dev_offset = map->stripes[i].physical;
3725
3726 ret = btrfs_alloc_dev_extent(trans, device,
3727 info->chunk_root->root_key.objectid,
3728 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3729 start, dev_offset, stripe_size);
3730 if (ret) {
3731 btrfs_abort_transaction(trans, extent_root, ret);
3732 goto error;
3733 }
3734 }
3735
3736 kfree(devices_info);
3737 return 0;
3738
3739 error:
3740 kfree(map);
3741 kfree(devices_info);
3742 return ret;
3743 }
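/*
 * Worked example (illustrative, not part of the original source): a
 * RAID1 data chunk on two devices whose smallest usable hole is
 * 600 MiB gives
 *
 *   stripe_size = 600 MiB              (most constrained device)
 *   num_stripes = ndevs * dev_stripes = 2
 *   num_bytes   = stripe_size * (num_stripes / ncopies) = 600 MiB
 *
 * i.e. a 600 MiB logical chunk backed by a 600 MiB device extent on
 * each device, after rounding stripe_size down to a multiple of
 * BTRFS_STRIPE_LEN.
 */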
3744
3745 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
3746 struct btrfs_root *extent_root,
3747 struct map_lookup *map, u64 chunk_offset,
3748 u64 chunk_size, u64 stripe_size)
3749 {
3750 u64 dev_offset;
3751 struct btrfs_key key;
3752 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3753 struct btrfs_device *device;
3754 struct btrfs_chunk *chunk;
3755 struct btrfs_stripe *stripe;
3756 size_t item_size = btrfs_chunk_item_size(map->num_stripes);
3757 int index = 0;
3758 int ret;
3759
3760 chunk = kzalloc(item_size, GFP_NOFS);
3761 if (!chunk)
3762 return -ENOMEM;
3763
3764 index = 0;
3765 while (index < map->num_stripes) {
3766 device = map->stripes[index].dev;
3767 device->bytes_used += stripe_size;
3768 ret = btrfs_update_device(trans, device);
3769 if (ret)
3770 goto out_free;
3771 index++;
3772 }
3773
3774 spin_lock(&extent_root->fs_info->free_chunk_lock);
3775 extent_root->fs_info->free_chunk_space -= (stripe_size *
3776 map->num_stripes);
3777 spin_unlock(&extent_root->fs_info->free_chunk_lock);
3778
3779 index = 0;
3780 stripe = &chunk->stripe;
3781 while (index < map->num_stripes) {
3782 device = map->stripes[index].dev;
3783 dev_offset = map->stripes[index].physical;
3784
3785 btrfs_set_stack_stripe_devid(stripe, device->devid);
3786 btrfs_set_stack_stripe_offset(stripe, dev_offset);
3787 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
3788 stripe++;
3789 index++;
3790 }
3791
3792 btrfs_set_stack_chunk_length(chunk, chunk_size);
3793 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
3794 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
3795 btrfs_set_stack_chunk_type(chunk, map->type);
3796 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
3797 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
3798 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
3799 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
3800 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
3801
3802 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3803 key.type = BTRFS_CHUNK_ITEM_KEY;
3804 key.offset = chunk_offset;
3805
3806 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
3807
3808 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3809 /*
3810 * TODO: Cleanup of inserted chunk root in case of
3811 * failure.
3812 */
3813 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
3814 item_size);
3815 }
3816
3817 out_free:
3818 kfree(chunk);
3819 return ret;
3820 }
3821
3822 /*
3823 * Chunk allocation falls into two parts. The first part does the work
3824 * that makes the newly allocated chunk usable, but does not perform
3825 * any operation that modifies the chunk tree. The second part does the
3826 * work that requires modifying the chunk tree. This division is
3827 * important for the bootstrap process of adding storage to a seed btrfs.
3828 */
3829 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3830 struct btrfs_root *extent_root, u64 type)
3831 {
3832 u64 chunk_offset;
3833 u64 chunk_size;
3834 u64 stripe_size;
3835 struct map_lookup *map;
3836 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3837 int ret;
3838
3839 ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3840 &chunk_offset);
3841 if (ret)
3842 return ret;
3843
3844 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3845 &stripe_size, chunk_offset, type);
3846 if (ret)
3847 return ret;
3848
3849 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3850 chunk_size, stripe_size);
3851 if (ret)
3852 return ret;
3853 return 0;
3854 }
3855
3856 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3857 struct btrfs_root *root,
3858 struct btrfs_device *device)
3859 {
3860 u64 chunk_offset;
3861 u64 sys_chunk_offset;
3862 u64 chunk_size;
3863 u64 sys_chunk_size;
3864 u64 stripe_size;
3865 u64 sys_stripe_size;
3866 u64 alloc_profile;
3867 struct map_lookup *map;
3868 struct map_lookup *sys_map;
3869 struct btrfs_fs_info *fs_info = root->fs_info;
3870 struct btrfs_root *extent_root = fs_info->extent_root;
3871 int ret;
3872
3873 ret = find_next_chunk(fs_info->chunk_root,
3874 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
3875 if (ret)
3876 return ret;
3877
3878 alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
3879 fs_info->avail_metadata_alloc_bits;
3880 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3881
3882 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3883 &stripe_size, chunk_offset, alloc_profile);
3884 if (ret)
3885 return ret;
3886
3887 sys_chunk_offset = chunk_offset + chunk_size;
3888
3889 alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
3890 fs_info->avail_system_alloc_bits;
3891 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3892
3893 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
3894 &sys_chunk_size, &sys_stripe_size,
3895 sys_chunk_offset, alloc_profile);
3896 if (ret) {
3897 btrfs_abort_transaction(trans, root, ret);
3898 goto out;
3899 }
3900
3901 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
3902 if (ret) {
3903 btrfs_abort_transaction(trans, root, ret);
3904 goto out;
3905 }
3906
3907 /*
3908 * Modifying the chunk tree requires allocating new blocks from
3909 * both the system block group and the metadata block group. So we
3910 * can only do operations that modify the chunk tree after both
3911 * block groups have been created.
3912 */
3913 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3914 chunk_size, stripe_size);
3915 if (ret) {
3916 btrfs_abort_transaction(trans, root, ret);
3917 goto out;
3918 }
3919
3920 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
3921 sys_chunk_offset, sys_chunk_size,
3922 sys_stripe_size);
3923 if (ret)
3924 btrfs_abort_transaction(trans, root, ret);
3925
3926 out:
3927
3928 return ret;
3929 }
3930
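/*
 * Return 1 if the chunk at @chunk_offset has to be treated as read-only:
 * either no mapping is known for it, or at least one of its stripes sits
 * on a device that is not writeable.  Mounting with -o degraded reports
 * every mapped chunk as writeable.
 */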
3931 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
3932 {
3933 struct extent_map *em;
3934 struct map_lookup *map;
3935 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3936 int readonly = 0;
3937 int i;
3938
3939 read_lock(&map_tree->map_tree.lock);
3940 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3941 read_unlock(&map_tree->map_tree.lock);
3942 if (!em)
3943 return 1;
3944
3945 if (btrfs_test_opt(root, DEGRADED)) {
3946 free_extent_map(em);
3947 return 0;
3948 }
3949
3950 map = (struct map_lookup *)em->bdev;
3951 for (i = 0; i < map->num_stripes; i++) {
3952 if (!map->stripes[i].dev->writeable) {
3953 readonly = 1;
3954 break;
3955 }
3956 }
3957 free_extent_map(em);
3958 return readonly;
3959 }
3960
3961 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
3962 {
3963 extent_map_tree_init(&tree->map_tree);
3964 }
3965
3966 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
3967 {
3968 struct extent_map *em;
3969
3970 while (1) {
3971 write_lock(&tree->map_tree.lock);
3972 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
3973 if (em)
3974 remove_extent_mapping(&tree->map_tree, em);
3975 write_unlock(&tree->map_tree.lock);
3976 if (!em)
3977 break;
3978 kfree(em->bdev);
3979 /* once for us */
3980 free_extent_map(em);
3981 /* once for the tree */
3982 free_extent_map(em);
3983 }
3984 }
3985
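/*
 * Return how many copies of the data at @logical exist: num_stripes for
 * DUP and RAID1, sub_stripes for RAID10, and 1 otherwise.  While a
 * dev-replace is running the result is bumped by one, because the
 * target drive can serve reads for the already-copied area.
 */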
3986 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
3987 {
3988 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
3989 struct extent_map *em;
3990 struct map_lookup *map;
3991 struct extent_map_tree *em_tree = &map_tree->map_tree;
3992 int ret;
3993
3994 read_lock(&em_tree->lock);
3995 em = lookup_extent_mapping(em_tree, logical, len);
3996 read_unlock(&em_tree->lock);
3997 BUG_ON(!em);
3998
3999 BUG_ON(em->start > logical || em->start + em->len < logical);
4000 map = (struct map_lookup *)em->bdev;
4001 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
4002 ret = map->num_stripes;
4003 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4004 ret = map->sub_stripes;
4005 else
4006 ret = 1;
4007 free_extent_map(em);
4008
4009 btrfs_dev_replace_lock(&fs_info->dev_replace);
4010 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
4011 ret++;
4012 btrfs_dev_replace_unlock(&fs_info->dev_replace);
4013
4014 return ret;
4015 }
4016
4017 static int find_live_mirror(struct btrfs_fs_info *fs_info,
4018 struct map_lookup *map, int first, int num,
4019 int optimal, int dev_replace_is_ongoing)
4020 {
4021 int i;
4022 int tolerance;
4023 struct btrfs_device *srcdev;
4024
4025 if (dev_replace_is_ongoing &&
4026 fs_info->dev_replace.cont_reading_from_srcdev_mode ==
4027 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
4028 srcdev = fs_info->dev_replace.srcdev;
4029 else
4030 srcdev = NULL;
4031
4032 /*
4033 * try to avoid the drive that is the source drive for a
4034 * dev-replace procedure; only choose it if no other non-missing
4035 * mirror is available
4036 */
4037 for (tolerance = 0; tolerance < 2; tolerance++) {
4038 if (map->stripes[optimal].dev->bdev &&
4039 (tolerance || map->stripes[optimal].dev != srcdev))
4040 return optimal;
4041 for (i = first; i < first + num; i++) {
4042 if (map->stripes[i].dev->bdev &&
4043 (tolerance || map->stripes[i].dev != srcdev))
4044 return i;
4045 }
4046 }
4047
4048 /* we couldn't find one that doesn't fail. Just return something
4049 * and the io error handling code will clean up eventually
4050 */
4051 return optimal;
4052 }
4053
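/*
 * Map a logical address range to the physical stripes that back it and
 * return them in *bbio_ret.  *length is trimmed so that a single bio
 * never crosses a stripe boundary.  As a worked example (numbers
 * assumed for illustration): in a RAID0 chunk over two devices with a
 * 64K stripe_len, an offset of 192K into the chunk gives
 * stripe_nr = 3 (192K / 64K), stripe_index = 1 (3 % 2) and
 * stripe_offset = 0, i.e. the block lives 64K (stripe 3 / 2 devices)
 * into device 1's slice of the chunk.  mirror_num selects a specific
 * copy for reads; writes and discards get one stripe per copy, and an
 * ongoing dev-replace may add target-drive stripes on top of that.
 */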
4054 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4055 u64 logical, u64 *length,
4056 struct btrfs_bio **bbio_ret,
4057 int mirror_num)
4058 {
4059 struct extent_map *em;
4060 struct map_lookup *map;
4061 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4062 struct extent_map_tree *em_tree = &map_tree->map_tree;
4063 u64 offset;
4064 u64 stripe_offset;
4065 u64 stripe_end_offset;
4066 u64 stripe_nr;
4067 u64 stripe_nr_orig;
4068 u64 stripe_nr_end;
4069 int stripe_index;
4070 int i;
4071 int ret = 0;
4072 int num_stripes;
4073 int max_errors = 0;
4074 struct btrfs_bio *bbio = NULL;
4075 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
4076 int dev_replace_is_ongoing = 0;
4077 int num_alloc_stripes;
4078 int patch_the_first_stripe_for_dev_replace = 0;
4079 u64 physical_to_patch_in_first_stripe = 0;
4080
4081 read_lock(&em_tree->lock);
4082 em = lookup_extent_mapping(em_tree, logical, *length);
4083 read_unlock(&em_tree->lock);
4084
4085 if (!em) {
4086 printk(KERN_CRIT "btrfs: unable to find logical %llu len %llu\n",
4087 (unsigned long long)logical,
4088 (unsigned long long)*length);
4089 BUG();
4090 }
4091
4092 BUG_ON(em->start > logical || em->start + em->len < logical);
4093 map = (struct map_lookup *)em->bdev;
4094 offset = logical - em->start;
4095
4096 stripe_nr = offset;
4097 /*
4098 * stripe_nr counts the total number of stripes we have to stride
4099 * to get to this block
4100 */
4101 do_div(stripe_nr, map->stripe_len);
4102
4103 stripe_offset = stripe_nr * map->stripe_len;
4104 BUG_ON(offset < stripe_offset);
4105
4106 /* stripe_offset is the offset of this block in its stripe */
4107 stripe_offset = offset - stripe_offset;
4108
4109 if (rw & REQ_DISCARD)
4110 *length = min_t(u64, em->len - offset, *length);
4111 else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
4112 /* we limit the length of each bio to what fits in a stripe */
4113 *length = min_t(u64, em->len - offset,
4114 map->stripe_len - stripe_offset);
4115 } else {
4116 *length = em->len - offset;
4117 }
4118
4119 if (!bbio_ret)
4120 goto out;
4121
4122 btrfs_dev_replace_lock(dev_replace);
4123 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
4124 if (!dev_replace_is_ongoing)
4125 btrfs_dev_replace_unlock(dev_replace);
4126
4127 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
4128 !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
4129 dev_replace->tgtdev != NULL) {
4130 /*
4131 * in the dev-replace case, for the repair case (that's the only
4132 * case where the mirror is selected explicitly when
4133 * calling btrfs_map_block), blocks left of the left cursor
4134 * can also be read from the target drive.
4135 * For REQ_GET_READ_MIRRORS, the target drive is added as
4136 * the last one to the array of stripes. For READ, it also
4137 * needs to be supported using the same mirror number.
4138 * If the requested block is not left of the left cursor,
4139 * EIO is returned. This can happen because btrfs_num_copies()
4140 * returns one more in the dev-replace case.
4141 */
4142 u64 tmp_length = *length;
4143 struct btrfs_bio *tmp_bbio = NULL;
4144 int tmp_num_stripes;
4145 u64 srcdev_devid = dev_replace->srcdev->devid;
4146 int index_srcdev = 0;
4147 int found = 0;
4148 u64 physical_of_found = 0;
4149
4150 ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
4151 logical, &tmp_length, &tmp_bbio, 0);
4152 if (ret) {
4153 WARN_ON(tmp_bbio != NULL);
4154 goto out;
4155 }
4156
4157 tmp_num_stripes = tmp_bbio->num_stripes;
4158 if (mirror_num > tmp_num_stripes) {
4159 /*
4160 * REQ_GET_READ_MIRRORS does not contain this
4161 * mirror, that means that the requested area
4162 * is not left of the left cursor
4163 */
4164 ret = -EIO;
4165 kfree(tmp_bbio);
4166 goto out;
4167 }
4168
4169 /*
4170 * process the rest of the function using the mirror_num
4171 * of the source drive. Therefore look it up first.
4172 * At the end, patch the device pointer to that of the
4173 * target drive.
4174 */
4175 for (i = 0; i < tmp_num_stripes; i++) {
4176 if (tmp_bbio->stripes[i].dev->devid == srcdev_devid) {
4177 /*
4178 * In case of DUP, in order to keep it
4179 * simple, only add the mirror with the
4180 * lowest physical address
4181 */
4182 if (found &&
4183 physical_of_found <=
4184 tmp_bbio->stripes[i].physical)
4185 continue;
4186 index_srcdev = i;
4187 found = 1;
4188 physical_of_found =
4189 tmp_bbio->stripes[i].physical;
4190 }
4191 }
4192
4193 if (found) {
4194 mirror_num = index_srcdev + 1;
4195 patch_the_first_stripe_for_dev_replace = 1;
4196 physical_to_patch_in_first_stripe = physical_of_found;
4197 } else {
4198 WARN_ON(1);
4199 ret = -EIO;
4200 kfree(tmp_bbio);
4201 goto out;
4202 }
4203
4204 kfree(tmp_bbio);
4205 } else if (mirror_num > map->num_stripes) {
4206 mirror_num = 0;
4207 }
4208
4209 num_stripes = 1;
4210 stripe_index = 0;
4211 stripe_nr_orig = stripe_nr;
4212 stripe_nr_end = (offset + *length + map->stripe_len - 1) &
4213 (~(map->stripe_len - 1));
4214 do_div(stripe_nr_end, map->stripe_len);
4215 stripe_end_offset = stripe_nr_end * map->stripe_len -
4216 (offset + *length);
4217 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4218 if (rw & REQ_DISCARD)
4219 num_stripes = min_t(u64, map->num_stripes,
4220 stripe_nr_end - stripe_nr_orig);
4221 stripe_index = do_div(stripe_nr, map->num_stripes);
4222 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
4223 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
4224 num_stripes = map->num_stripes;
4225 else if (mirror_num)
4226 stripe_index = mirror_num - 1;
4227 else {
4228 stripe_index = find_live_mirror(fs_info, map, 0,
4229 map->num_stripes,
4230 current->pid % map->num_stripes,
4231 dev_replace_is_ongoing);
4232 mirror_num = stripe_index + 1;
4233 }
4234
4235 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
4236 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
4237 num_stripes = map->num_stripes;
4238 } else if (mirror_num) {
4239 stripe_index = mirror_num - 1;
4240 } else {
4241 mirror_num = 1;
4242 }
4243
4244 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4245 int factor = map->num_stripes / map->sub_stripes;
4246
4247 stripe_index = do_div(stripe_nr, factor);
4248 stripe_index *= map->sub_stripes;
4249
4250 if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
4251 num_stripes = map->sub_stripes;
4252 else if (rw & REQ_DISCARD)
4253 num_stripes = min_t(u64, map->sub_stripes *
4254 (stripe_nr_end - stripe_nr_orig),
4255 map->num_stripes);
4256 else if (mirror_num)
4257 stripe_index += mirror_num - 1;
4258 else {
4259 int old_stripe_index = stripe_index;
4260 stripe_index = find_live_mirror(fs_info, map,
4261 stripe_index,
4262 map->sub_stripes, stripe_index +
4263 current->pid % map->sub_stripes,
4264 dev_replace_is_ongoing);
4265 mirror_num = stripe_index - old_stripe_index + 1;
4266 }
4267 } else {
4268 /*
4269 * after this do_div call, stripe_nr is the number of stripes
4270 * on this device we have to walk to find the data, and
4271 * stripe_index is the number of our device in the stripe array
4272 */
4273 stripe_index = do_div(stripe_nr, map->num_stripes);
4274 mirror_num = stripe_index + 1;
4275 }
4276 BUG_ON(stripe_index >= map->num_stripes);
4277
4278 num_alloc_stripes = num_stripes;
4279 if (dev_replace_is_ongoing) {
4280 if (rw & (REQ_WRITE | REQ_DISCARD))
4281 num_alloc_stripes <<= 1;
4282 if (rw & REQ_GET_READ_MIRRORS)
4283 num_alloc_stripes++;
4284 }
4285 bbio = kzalloc(btrfs_bio_size(num_alloc_stripes), GFP_NOFS);
4286 if (!bbio) {
4287 ret = -ENOMEM;
4288 goto out;
4289 }
4290 atomic_set(&bbio->error, 0);
4291
4292 if (rw & REQ_DISCARD) {
4293 int factor = 0;
4294 int sub_stripes = 0;
4295 u64 stripes_per_dev = 0;
4296 u32 remaining_stripes = 0;
4297 u32 last_stripe = 0;
4298
4299 if (map->type &
4300 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
4301 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4302 sub_stripes = 1;
4303 else
4304 sub_stripes = map->sub_stripes;
4305
4306 factor = map->num_stripes / sub_stripes;
4307 stripes_per_dev = div_u64_rem(stripe_nr_end -
4308 stripe_nr_orig,
4309 factor,
4310 &remaining_stripes);
4311 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
4312 last_stripe *= sub_stripes;
4313 }
4314
4315 for (i = 0; i < num_stripes; i++) {
4316 bbio->stripes[i].physical =
4317 map->stripes[stripe_index].physical +
4318 stripe_offset + stripe_nr * map->stripe_len;
4319 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
4320
4321 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
4322 BTRFS_BLOCK_GROUP_RAID10)) {
4323 bbio->stripes[i].length = stripes_per_dev *
4324 map->stripe_len;
4325
4326 if (i / sub_stripes < remaining_stripes)
4327 bbio->stripes[i].length +=
4328 map->stripe_len;
4329
4330 /*
4331 * Special for the first stripe and
4332 * the last stripe:
4333 *
4334 * |-------|...|-------|
4335 * |----------|
4336 * off end_off
4337 */
4338 if (i < sub_stripes)
4339 bbio->stripes[i].length -=
4340 stripe_offset;
4341
4342 if (stripe_index >= last_stripe &&
4343 stripe_index <= (last_stripe +
4344 sub_stripes - 1))
4345 bbio->stripes[i].length -=
4346 stripe_end_offset;
4347
4348 if (i == sub_stripes - 1)
4349 stripe_offset = 0;
4350 } else
4351 bbio->stripes[i].length = *length;
4352
4353 stripe_index++;
4354 if (stripe_index == map->num_stripes) {
4355 /* This could only happen for RAID0/10 */
4356 stripe_index = 0;
4357 stripe_nr++;
4358 }
4359 }
4360 } else {
4361 for (i = 0; i < num_stripes; i++) {
4362 bbio->stripes[i].physical =
4363 map->stripes[stripe_index].physical +
4364 stripe_offset +
4365 stripe_nr * map->stripe_len;
4366 bbio->stripes[i].dev =
4367 map->stripes[stripe_index].dev;
4368 stripe_index++;
4369 }
4370 }
4371
4372 if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) {
4373 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
4374 BTRFS_BLOCK_GROUP_RAID10 |
4375 BTRFS_BLOCK_GROUP_DUP)) {
4376 max_errors = 1;
4377 }
4378 }
4379
4380 if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
4381 dev_replace->tgtdev != NULL) {
4382 int index_where_to_add;
4383 u64 srcdev_devid = dev_replace->srcdev->devid;
4384
4385 /*
4386 * duplicate the write operations while the dev replace
4387 * procedure is running. Since the copying of the old disk
4388 * to the new disk takes place at run time while the
4389 * filesystem is mounted writable, the regular write
4390 * operations to the old disk have to be duplicated to go
4391 * to the new disk as well.
4392 * Note that device->missing is handled by the caller, and
4393 * that the write to the old disk is already set up in the
4394 * stripes array.
4395 */
4396 index_where_to_add = num_stripes;
4397 for (i = 0; i < num_stripes; i++) {
4398 if (bbio->stripes[i].dev->devid == srcdev_devid) {
4399 /* write to new disk, too */
4400 struct btrfs_bio_stripe *new =
4401 bbio->stripes + index_where_to_add;
4402 struct btrfs_bio_stripe *old =
4403 bbio->stripes + i;
4404
4405 new->physical = old->physical;
4406 new->length = old->length;
4407 new->dev = dev_replace->tgtdev;
4408 index_where_to_add++;
4409 max_errors++;
4410 }
4411 }
4412 num_stripes = index_where_to_add;
4413 } else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
4414 dev_replace->tgtdev != NULL) {
4415 u64 srcdev_devid = dev_replace->srcdev->devid;
4416 int index_srcdev = 0;
4417 int found = 0;
4418 u64 physical_of_found = 0;
4419
4420 /*
4421 * During the dev-replace procedure, the target drive can
4422 * also be used to read data in case it is needed to repair
4423 * a corrupt block elsewhere. This is possible if the
4424 * requested area is left of the left cursor. In this area,
4425 * the target drive is a full copy of the source drive.
4426 */
4427 for (i = 0; i < num_stripes; i++) {
4428 if (bbio->stripes[i].dev->devid == srcdev_devid) {
4429 /*
4430 * In case of DUP, in order to keep it
4431 * simple, only add the mirror with the
4432 * lowest physical address
4433 */
4434 if (found &&
4435 physical_of_found <=
4436 bbio->stripes[i].physical)
4437 continue;
4438 index_srcdev = i;
4439 found = 1;
4440 physical_of_found = bbio->stripes[i].physical;
4441 }
4442 }
4443 if (found) {
4444 u64 length = map->stripe_len;
4445
4446 if (physical_of_found + length <=
4447 dev_replace->cursor_left) {
4448 struct btrfs_bio_stripe *tgtdev_stripe =
4449 bbio->stripes + num_stripes;
4450
4451 tgtdev_stripe->physical = physical_of_found;
4452 tgtdev_stripe->length =
4453 bbio->stripes[index_srcdev].length;
4454 tgtdev_stripe->dev = dev_replace->tgtdev;
4455
4456 num_stripes++;
4457 }
4458 }
4459 }
4460
4461 *bbio_ret = bbio;
4462 bbio->num_stripes = num_stripes;
4463 bbio->max_errors = max_errors;
4464 bbio->mirror_num = mirror_num;
4465
4466 /*
4467 * this is the case that REQ_READ && dev_replace_is_ongoing &&
4468 * mirror_num == num_stripes + 1 && dev_replace target drive is
4469 * available as a mirror
4470 */
4471 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
4472 WARN_ON(num_stripes > 1);
4473 bbio->stripes[0].dev = dev_replace->tgtdev;
4474 bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
4475 bbio->mirror_num = map->num_stripes + 1;
4476 }
4477 out:
4478 if (dev_replace_is_ongoing)
4479 btrfs_dev_replace_unlock(dev_replace);
4480 free_extent_map(em);
4481 return ret;
4482 }
4483
4484 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4485 u64 logical, u64 *length,
4486 struct btrfs_bio **bbio_ret, int mirror_num)
4487 {
4488 return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
4489 mirror_num);
4490 }
4491
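/*
 * Reverse mapping: given a @physical byte offset on device @devid inside
 * the chunk at @chunk_start, compute the logical addresses that map to
 * it.  The caller owns the returned *logical array and must kfree() it.
 */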
4492 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
4493 u64 chunk_start, u64 physical, u64 devid,
4494 u64 **logical, int *naddrs, int *stripe_len)
4495 {
4496 struct extent_map_tree *em_tree = &map_tree->map_tree;
4497 struct extent_map *em;
4498 struct map_lookup *map;
4499 u64 *buf;
4500 u64 bytenr;
4501 u64 length;
4502 u64 stripe_nr;
4503 int i, j, nr = 0;
4504
4505 read_lock(&em_tree->lock);
4506 em = lookup_extent_mapping(em_tree, chunk_start, 1);
4507 read_unlock(&em_tree->lock);
4508
4509 BUG_ON(!em || em->start != chunk_start);
4510 map = (struct map_lookup *)em->bdev;
4511
4512 length = em->len;
4513 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4514 do_div(length, map->num_stripes / map->sub_stripes);
4515 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4516 do_div(length, map->num_stripes);
4517
4518 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
4519 BUG_ON(!buf); /* -ENOMEM */
4520
4521 for (i = 0; i < map->num_stripes; i++) {
4522 if (devid && map->stripes[i].dev->devid != devid)
4523 continue;
4524 if (map->stripes[i].physical > physical ||
4525 map->stripes[i].physical + length <= physical)
4526 continue;
4527
4528 stripe_nr = physical - map->stripes[i].physical;
4529 do_div(stripe_nr, map->stripe_len);
4530
4531 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4532 stripe_nr = stripe_nr * map->num_stripes + i;
4533 do_div(stripe_nr, map->sub_stripes);
4534 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4535 stripe_nr = stripe_nr * map->num_stripes + i;
4536 }
4537 bytenr = chunk_start + stripe_nr * map->stripe_len;
4538 WARN_ON(nr >= map->num_stripes);
4539 for (j = 0; j < nr; j++) {
4540 if (buf[j] == bytenr)
4541 break;
4542 }
4543 if (j == nr) {
4544 WARN_ON(nr >= map->num_stripes);
4545 buf[nr++] = bytenr;
4546 }
4547 }
4548
4549 *logical = buf;
4550 *naddrs = nr;
4551 *stripe_len = map->stripe_len;
4552
4553 free_extent_map(em);
4554 return 0;
4555 }
4556
4557 static void *merge_stripe_index_into_bio_private(void *bi_private,
4558 unsigned int stripe_index)
4559 {
4560 /*
4561 * with single, dup, RAID0, RAID1 and RAID10, stripe_index is
4562 * at most 1.
4563 * The alternative solution (instead of stealing bits from the
4564 * pointer) would be to allocate an intermediate structure
4565 * that contains the old private pointer plus the stripe_index.
4566 */
4567 BUG_ON((((uintptr_t)bi_private) & 3) != 0);
4568 BUG_ON(stripe_index > 3);
4569 return (void *)(((uintptr_t)bi_private) | stripe_index);
4570 }
4571
4572 static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
4573 {
4574 return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3));
4575 }
4576
4577 static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
4578 {
4579 return (unsigned int)((uintptr_t)bi_private) & 3;
4580 }
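/*
 * Round trip of the two helpers above: because bi_private is at least
 * 4-byte aligned, the stripe index is stashed in the low two bits, so
 * extract_bbio_from_bio_private() recovers the original pointer and
 * extract_stripe_index_from_bio_private() the index, e.g.
 * extract_stripe_index_from_bio_private(
 *	merge_stripe_index_into_bio_private(bbio, 2)) == 2.
 */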
4581
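/*
 * Completion callback shared by all stripe bios of a btrfs_bio.  It
 * accounts per-device error statistics and, once the last pending
 * stripe finishes, completes the original bio: with -EIO if more
 * stripes failed than max_errors tolerates, successfully otherwise.
 */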
4582 static void btrfs_end_bio(struct bio *bio, int err)
4583 {
4584 struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private);
4585 int is_orig_bio = 0;
4586
4587 if (err) {
4588 atomic_inc(&bbio->error);
4589 if (err == -EIO || err == -EREMOTEIO) {
4590 unsigned int stripe_index =
4591 extract_stripe_index_from_bio_private(
4592 bio->bi_private);
4593 struct btrfs_device *dev;
4594
4595 BUG_ON(stripe_index >= bbio->num_stripes);
4596 dev = bbio->stripes[stripe_index].dev;
4597 if (dev->bdev) {
4598 if (bio->bi_rw & WRITE)
4599 btrfs_dev_stat_inc(dev,
4600 BTRFS_DEV_STAT_WRITE_ERRS);
4601 else
4602 btrfs_dev_stat_inc(dev,
4603 BTRFS_DEV_STAT_READ_ERRS);
4604 if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
4605 btrfs_dev_stat_inc(dev,
4606 BTRFS_DEV_STAT_FLUSH_ERRS);
4607 btrfs_dev_stat_print_on_error(dev);
4608 }
4609 }
4610 }
4611
4612 if (bio == bbio->orig_bio)
4613 is_orig_bio = 1;
4614
4615 if (atomic_dec_and_test(&bbio->stripes_pending)) {
4616 if (!is_orig_bio) {
4617 bio_put(bio);
4618 bio = bbio->orig_bio;
4619 }
4620 bio->bi_private = bbio->private;
4621 bio->bi_end_io = bbio->end_io;
4622 bio->bi_bdev = (struct block_device *)
4623 (unsigned long)bbio->mirror_num;
4624 /* only send an error to the higher layers if it is
4625 * beyond the tolerance of the multi-bio
4626 */
4627 if (atomic_read(&bbio->error) > bbio->max_errors) {
4628 err = -EIO;
4629 } else {
4630 /*
4631 * this bio is actually up to date, we didn't
4632 * go over the max number of errors
4633 */
4634 set_bit(BIO_UPTODATE, &bio->bi_flags);
4635 err = 0;
4636 }
4637 kfree(bbio);
4638
4639 bio_endio(bio, err);
4640 } else if (!is_orig_bio) {
4641 bio_put(bio);
4642 }
4643 }
4644
4645 struct async_sched {
4646 struct bio *bio;
4647 int rw;
4648 struct btrfs_fs_info *info;
4649 struct btrfs_work work;
4650 };
4651
4652 /*
4653 * see run_scheduled_bios for a description of why bios are collected for
4654 * async submit.
4655 *
4656 * This will add one bio to the pending list for a device and make sure
4657 * the work struct is scheduled.
4658 */
4659 static noinline void schedule_bio(struct btrfs_root *root,
4660 struct btrfs_device *device,
4661 int rw, struct bio *bio)
4662 {
4663 int should_queue = 1;
4664 struct btrfs_pending_bios *pending_bios;
4665
4666 /* don't bother with additional async steps for reads, right now */
4667 if (!(rw & REQ_WRITE)) {
4668 bio_get(bio);
4669 btrfsic_submit_bio(rw, bio);
4670 bio_put(bio);
4671 return;
4672 }
4673
4674 /*
4675 * nr_async_bios allows us to reliably return congestion to the
4676 * higher layers. Otherwise, the async bio makes it appear we have
4677 * made progress against dirty pages when we've really just put it
4678 * on a queue for later
4679 */
4680 atomic_inc(&root->fs_info->nr_async_bios);
4681 WARN_ON(bio->bi_next);
4682 bio->bi_next = NULL;
4683 bio->bi_rw |= rw;
4684
4685 spin_lock(&device->io_lock);
4686 if (bio->bi_rw & REQ_SYNC)
4687 pending_bios = &device->pending_sync_bios;
4688 else
4689 pending_bios = &device->pending_bios;
4690
4691 if (pending_bios->tail)
4692 pending_bios->tail->bi_next = bio;
4693
4694 pending_bios->tail = bio;
4695 if (!pending_bios->head)
4696 pending_bios->head = bio;
4697 if (device->running_pending)
4698 should_queue = 0;
4699
4700 spin_unlock(&device->io_lock);
4701
4702 if (should_queue)
4703 btrfs_queue_worker(&root->fs_info->submit_workers,
4704 &device->work);
4705 }
4706
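/*
 * Return 1 if @bio can be sent to @bdev at @sector as it is: the bio
 * must fit within the queue's max_sectors limit and, if the queue has
 * a merge_bvec_fn (stacked devices like dm or md), the last bio_vec
 * must still be acceptable at the new starting sector.
 */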
4707 static int bio_size_ok(struct block_device *bdev, struct bio *bio,
4708 sector_t sector)
4709 {
4710 struct bio_vec *prev;
4711 struct request_queue *q = bdev_get_queue(bdev);
4712 unsigned short max_sectors = queue_max_sectors(q);
4713 struct bvec_merge_data bvm = {
4714 .bi_bdev = bdev,
4715 .bi_sector = sector,
4716 .bi_rw = bio->bi_rw,
4717 };
4718
4719 if (bio->bi_vcnt == 0) {
4720 WARN_ON(1);
4721 return 1;
4722 }
4723
4724 prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
4725 if ((bio->bi_size >> 9) > max_sectors)
4726 return 0;
4727
4728 if (!q->merge_bvec_fn)
4729 return 1;
4730
4731 bvm.bi_size = bio->bi_size - prev->bv_len;
4732 if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
4733 return 0;
4734 return 1;
4735 }
4736
4737 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
4738 struct bio *bio, u64 physical, int dev_nr,
4739 int rw, int async)
4740 {
4741 struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
4742
4743 bio->bi_private = bbio;
4744 bio->bi_private = merge_stripe_index_into_bio_private(
4745 bio->bi_private, (unsigned int)dev_nr);
4746 bio->bi_end_io = btrfs_end_bio;
4747 bio->bi_sector = physical >> 9;
4748 #ifdef DEBUG
4749 {
4750 struct rcu_string *name;
4751
4752 rcu_read_lock();
4753 name = rcu_dereference(dev->name);
4754 pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
4755 "(%s id %llu), size=%u\n", rw,
4756 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
4757 name->str, dev->devid, bio->bi_size);
4758 rcu_read_unlock();
4759 }
4760 #endif
4761 bio->bi_bdev = dev->bdev;
4762 if (async)
4763 schedule_bio(root, dev, rw, bio);
4764 else
4765 btrfsic_submit_bio(rw, bio);
4766 }
4767
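/*
 * The original bio is too big for @dev (bio_size_ok() said no), so
 * rebuild it page by page: whenever bio_add_page() refuses to take the
 * next page, submit what has been collected so far and start a fresh
 * bio at the advanced physical offset.
 */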
4768 static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
4769 struct bio *first_bio, struct btrfs_device *dev,
4770 int dev_nr, int rw, int async)
4771 {
4772 struct bio_vec *bvec = first_bio->bi_io_vec;
4773 struct bio *bio;
4774 int nr_vecs = bio_get_nr_vecs(dev->bdev);
4775 u64 physical = bbio->stripes[dev_nr].physical;
4776
4777 again:
4778 bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS);
4779 if (!bio)
4780 return -ENOMEM;
4781
4782 while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
4783 if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
4784 bvec->bv_offset) < bvec->bv_len) {
4785 u64 len = bio->bi_size;
4786
4787 atomic_inc(&bbio->stripes_pending);
4788 submit_stripe_bio(root, bbio, bio, physical, dev_nr,
4789 rw, async);
4790 physical += len;
4791 goto again;
4792 }
4793 bvec++;
4794 }
4795
4796 submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
4797 return 0;
4798 }
4799
4800 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
4801 {
4802 atomic_inc(&bbio->error);
4803 if (atomic_dec_and_test(&bbio->stripes_pending)) {
4804 bio->bi_private = bbio->private;
4805 bio->bi_end_io = bbio->end_io;
4806 bio->bi_bdev = (struct block_device *)
4807 (unsigned long)bbio->mirror_num;
4808 bio->bi_sector = logical >> 9;
4809 kfree(bbio);
4810 bio_endio(bio, -EIO);
4811 }
4812 }
4813
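/*
 * Entry point for submitting a bio to the volume layer: map its
 * logical range with btrfs_map_block() and submit one bio per
 * resulting stripe.  Every stripe but the last gets a clone; the last
 * one reuses @first_bio, whose private/end_io fields have been saved
 * in the bbio so btrfs_end_bio() can restore them on completion.  For
 * instance, a plain read usually yields a single stripe and first_bio
 * is sent as is, while a RAID1 write yields two stripes and one clone.
 */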
4814 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
4815 int mirror_num, int async_submit)
4816 {
4817 struct btrfs_device *dev;
4818 struct bio *first_bio = bio;
4819 u64 logical = (u64)bio->bi_sector << 9;
4820 u64 length = 0;
4821 u64 map_length;
4822 int ret;
4823 int dev_nr = 0;
4824 int total_devs = 1;
4825 struct btrfs_bio *bbio = NULL;
4826
4827 length = bio->bi_size;
4828 map_length = length;
4829
4830 ret = btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
4831 mirror_num);
4832 if (ret)
4833 return ret;
4834
4835 total_devs = bbio->num_stripes;
4836 if (map_length < length) {
4837 printk(KERN_CRIT "btrfs: mapping failed logical %llu bio len %llu "
4838 "len %llu\n", (unsigned long long)logical,
4839 (unsigned long long)length,
4840 (unsigned long long)map_length);
4841 BUG();
4842 }
4843
4844 bbio->orig_bio = first_bio;
4845 bbio->private = first_bio->bi_private;
4846 bbio->end_io = first_bio->bi_end_io;
4847 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
4848
4849 while (dev_nr < total_devs) {
4850 dev = bbio->stripes[dev_nr].dev;
4851 if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
4852 bbio_error(bbio, first_bio, logical);
4853 dev_nr++;
4854 continue;
4855 }
4856
4857 /*
4858 * Check and see if we're ok with this bio based on its size
4859 * and offset for the given device.
4860 */
4861 if (!bio_size_ok(dev->bdev, first_bio,
4862 bbio->stripes[dev_nr].physical >> 9)) {
4863 ret = breakup_stripe_bio(root, bbio, first_bio, dev,
4864 dev_nr, rw, async_submit);
4865 BUG_ON(ret);
4866 dev_nr++;
4867 continue;
4868 }
4869
4870 if (dev_nr < total_devs - 1) {
4871 bio = bio_clone(first_bio, GFP_NOFS);
4872 BUG_ON(!bio); /* -ENOMEM */
4873 } else {
4874 bio = first_bio;
4875 }
4876
4877 submit_stripe_bio(root, bbio, bio,
4878 bbio->stripes[dev_nr].physical, dev_nr, rw,
4879 async_submit);
4880 dev_nr++;
4881 }
4882 return 0;
4883 }
4884
4885 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
4886 u8 *uuid, u8 *fsid)
4887 {
4888 struct btrfs_device *device;
4889 struct btrfs_fs_devices *cur_devices;
4890
4891 cur_devices = fs_info->fs_devices;
4892 while (cur_devices) {
4893 if (!fsid ||
4894 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4895 device = __find_device(&cur_devices->devices,
4896 devid, uuid);
4897 if (device)
4898 return device;
4899 }
4900 cur_devices = cur_devices->seed;
4901 }
4902 return NULL;
4903 }
4904
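/*
 * Create a stub btrfs_device for a devid that the metadata references
 * but that was not present at scan time.  The stub has no bdev and is
 * flagged ->missing; it only keeps the chunk mappings consistent on
 * -o degraded mounts.
 */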
4905 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
4906 u64 devid, u8 *dev_uuid)
4907 {
4908 struct btrfs_device *device;
4909 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4910
4911 device = kzalloc(sizeof(*device), GFP_NOFS);
4912 if (!device)
4913 return NULL;
4914 list_add(&device->dev_list,
4915 &fs_devices->devices);
4916 device->dev_root = root->fs_info->dev_root;
4917 device->devid = devid;
4918 device->work.func = pending_bios_fn;
4919 device->fs_devices = fs_devices;
4920 device->missing = 1;
4921 fs_devices->num_devices++;
4922 fs_devices->missing_devices++;
4923 spin_lock_init(&device->io_lock);
4924 INIT_LIST_HEAD(&device->dev_alloc_list);
4925 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
4926 return device;
4927 }
4928
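/*
 * Turn one on-disk chunk item into an in-memory map_lookup and insert
 * it into the mapping tree (embedded in an extent_map keyed by the
 * chunk's logical start).  A stripe on an unknown device is an error,
 * unless -o degraded is set, in which case add_missing_dev() supplies
 * a stub device.
 */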
4929 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
4930 struct extent_buffer *leaf,
4931 struct btrfs_chunk *chunk)
4932 {
4933 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4934 struct map_lookup *map;
4935 struct extent_map *em;
4936 u64 logical;
4937 u64 length;
4938 u64 devid;
4939 u8 uuid[BTRFS_UUID_SIZE];
4940 int num_stripes;
4941 int ret;
4942 int i;
4943
4944 logical = key->offset;
4945 length = btrfs_chunk_length(leaf, chunk);
4946
4947 read_lock(&map_tree->map_tree.lock);
4948 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
4949 read_unlock(&map_tree->map_tree.lock);
4950
4951 /* already mapped? */
4952 if (em && em->start <= logical && em->start + em->len > logical) {
4953 free_extent_map(em);
4954 return 0;
4955 } else if (em) {
4956 free_extent_map(em);
4957 }
4958
4959 em = alloc_extent_map();
4960 if (!em)
4961 return -ENOMEM;
4962 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
4963 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4964 if (!map) {
4965 free_extent_map(em);
4966 return -ENOMEM;
4967 }
4968
4969 em->bdev = (struct block_device *)map;
4970 em->start = logical;
4971 em->len = length;
4972 em->orig_start = 0;
4973 em->block_start = 0;
4974 em->block_len = em->len;
4975
4976 map->num_stripes = num_stripes;
4977 map->io_width = btrfs_chunk_io_width(leaf, chunk);
4978 map->io_align = btrfs_chunk_io_align(leaf, chunk);
4979 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
4980 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
4981 map->type = btrfs_chunk_type(leaf, chunk);
4982 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
4983 for (i = 0; i < num_stripes; i++) {
4984 map->stripes[i].physical =
4985 btrfs_stripe_offset_nr(leaf, chunk, i);
4986 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
4987 read_extent_buffer(leaf, uuid, (unsigned long)
4988 btrfs_stripe_dev_uuid_nr(chunk, i),
4989 BTRFS_UUID_SIZE);
4990 map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
4991 uuid, NULL);
4992 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
4993 kfree(map);
4994 free_extent_map(em);
4995 return -EIO;
4996 }
4997 if (!map->stripes[i].dev) {
4998 map->stripes[i].dev =
4999 add_missing_dev(root, devid, uuid);
5000 if (!map->stripes[i].dev) {
5001 kfree(map);
5002 free_extent_map(em);
5003 return -EIO;
5004 }
5005 }
5006 map->stripes[i].dev->in_fs_metadata = 1;
5007 }
5008
5009 write_lock(&map_tree->map_tree.lock);
5010 ret = add_extent_mapping(&map_tree->map_tree, em);
5011 write_unlock(&map_tree->map_tree.lock);
5012 BUG_ON(ret); /* Tree corruption */
5013 free_extent_map(em);
5014
5015 return 0;
5016 }
5017
5018 static void fill_device_from_item(struct extent_buffer *leaf,
5019 struct btrfs_dev_item *dev_item,
5020 struct btrfs_device *device)
5021 {
5022 unsigned long ptr;
5023
5024 device->devid = btrfs_device_id(leaf, dev_item);
5025 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
5026 device->total_bytes = device->disk_total_bytes;
5027 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
5028 device->type = btrfs_device_type(leaf, dev_item);
5029 device->io_align = btrfs_device_io_align(leaf, dev_item);
5030 device->io_width = btrfs_device_io_width(leaf, dev_item);
5031 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
5032 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
5033 device->is_tgtdev_for_dev_replace = 0;
5034
5035 ptr = (unsigned long)btrfs_device_uuid(dev_item);
5036 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
5037 }
5038
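/*
 * Make sure the devices of the seed filesystem @fsid are open
 * (read-only) and linked into the ->seed chain of the current
 * fs_devices.  Fails with -ENOENT if the seed devices were never
 * scanned and with -EINVAL if the found filesystem is not actually a
 * seeding one.
 */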
5039 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
5040 {
5041 struct btrfs_fs_devices *fs_devices;
5042 int ret;
5043
5044 BUG_ON(!mutex_is_locked(&uuid_mutex));
5045
5046 fs_devices = root->fs_info->fs_devices->seed;
5047 while (fs_devices) {
5048 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
5049 ret = 0;
5050 goto out;
5051 }
5052 fs_devices = fs_devices->seed;
5053 }
5054
5055 fs_devices = find_fsid(fsid);
5056 if (!fs_devices) {
5057 ret = -ENOENT;
5058 goto out;
5059 }
5060
5061 fs_devices = clone_fs_devices(fs_devices);
5062 if (IS_ERR(fs_devices)) {
5063 ret = PTR_ERR(fs_devices);
5064 goto out;
5065 }
5066
5067 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
5068 root->fs_info->bdev_holder);
5069 if (ret) {
5070 free_fs_devices(fs_devices);
5071 goto out;
5072 }
5073
5074 if (!fs_devices->seeding) {
5075 __btrfs_close_devices(fs_devices);
5076 free_fs_devices(fs_devices);
5077 ret = -EINVAL;
5078 goto out;
5079 }
5080
5081 fs_devices->seed = root->fs_info->fs_devices->seed;
5082 root->fs_info->fs_devices->seed = fs_devices;
5083 out:
5084 return ret;
5085 }
5086
5087 static int read_one_dev(struct btrfs_root *root,
5088 struct extent_buffer *leaf,
5089 struct btrfs_dev_item *dev_item)
5090 {
5091 struct btrfs_device *device;
5092 u64 devid;
5093 int ret;
5094 u8 fs_uuid[BTRFS_UUID_SIZE];
5095 u8 dev_uuid[BTRFS_UUID_SIZE];
5096
5097 devid = btrfs_device_id(leaf, dev_item);
5098 read_extent_buffer(leaf, dev_uuid,
5099 (unsigned long)btrfs_device_uuid(dev_item),
5100 BTRFS_UUID_SIZE);
5101 read_extent_buffer(leaf, fs_uuid,
5102 (unsigned long)btrfs_device_fsid(dev_item),
5103 BTRFS_UUID_SIZE);
5104
5105 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
5106 ret = open_seed_devices(root, fs_uuid);
5107 if (ret && !btrfs_test_opt(root, DEGRADED))
5108 return ret;
5109 }
5110
5111 device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
5112 if (!device || !device->bdev) {
5113 if (!btrfs_test_opt(root, DEGRADED))
5114 return -EIO;
5115
5116 if (!device) {
5117 printk(KERN_WARNING "warning devid %llu missing\n",
5118 (unsigned long long)devid);
5119 device = add_missing_dev(root, devid, dev_uuid);
5120 if (!device)
5121 return -ENOMEM;
5122 } else if (!device->missing) {
5123 /*
5124 * this happens when a device that was properly set up
5125 * in the device info lists suddenly goes bad.
5126 * device->bdev is NULL, and so we have to set
5127 * device->missing to one here
5128 */
5129 root->fs_info->fs_devices->missing_devices++;
5130 device->missing = 1;
5131 }
5132 }
5133
5134 if (device->fs_devices != root->fs_info->fs_devices) {
5135 BUG_ON(device->writeable);
5136 if (device->generation !=
5137 btrfs_device_generation(leaf, dev_item))
5138 return -EINVAL;
5139 }
5140
5141 fill_device_from_item(leaf, dev_item, device);
5142 device->dev_root = root->fs_info->dev_root;
5143 device->in_fs_metadata = 1;
5144 if (device->writeable && !device->is_tgtdev_for_dev_replace) {
5145 device->fs_devices->total_rw_bytes += device->total_bytes;
5146 spin_lock(&root->fs_info->free_chunk_lock);
5147 root->fs_info->free_chunk_space += device->total_bytes -
5148 device->bytes_used;
5149 spin_unlock(&root->fs_info->free_chunk_lock);
5150 }
5151 ret = 0;
5152 return ret;
5153 }
5154
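/*
 * Parse the sys_chunk_array embedded in the superblock: a packed
 * sequence of (disk key, chunk item) pairs describing the SYSTEM
 * chunks that must be mapped before the chunk tree itself can be
 * read.  The array is accessed through a temporary extent buffer so
 * the regular read_one_chunk() helper can be reused.
 */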
5155 int btrfs_read_sys_array(struct btrfs_root *root)
5156 {
5157 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
5158 struct extent_buffer *sb;
5159 struct btrfs_disk_key *disk_key;
5160 struct btrfs_chunk *chunk;
5161 u8 *ptr;
5162 unsigned long sb_ptr;
5163 int ret = 0;
5164 u32 num_stripes;
5165 u32 array_size;
5166 u32 len = 0;
5167 u32 cur;
5168 struct btrfs_key key;
5169
5170 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
5171 BTRFS_SUPER_INFO_SIZE);
5172 if (!sb)
5173 return -ENOMEM;
5174 btrfs_set_buffer_uptodate(sb);
5175 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
5176 /*
5177 * The sb extent buffer is artificial and just used to read the system array.
5178 * The btrfs_set_buffer_uptodate() call does not properly mark all of its
5179 * pages up-to-date when the page is larger: the extent does not cover the
5180 * whole page and consequently check_page_uptodate does not find all
5181 * the page's extents up-to-date (the hole beyond sb),
5182 * and write_extent_buffer then triggers a WARN_ON.
5183 *
5184 * Regular short extents go through the mark_extent_buffer_dirty/writeback
5185 * cycle, but sb spans only this function. Add an explicit SetPageUptodate
5186 * call to silence the warning, e.g. on PowerPC 64.
5187 */
5188 if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
5189 SetPageUptodate(sb->pages[0]);
5190
5191 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
5192 array_size = btrfs_super_sys_array_size(super_copy);
5193
5194 ptr = super_copy->sys_chunk_array;
5195 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
5196 cur = 0;
5197
5198 while (cur < array_size) {
5199 disk_key = (struct btrfs_disk_key *)ptr;
5200 btrfs_disk_key_to_cpu(&key, disk_key);
5201
5202 len = sizeof(*disk_key); ptr += len;
5203 sb_ptr += len;
5204 cur += len;
5205
5206 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
5207 chunk = (struct btrfs_chunk *)sb_ptr;
5208 ret = read_one_chunk(root, &key, sb, chunk);
5209 if (ret)
5210 break;
5211 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
5212 len = btrfs_chunk_item_size(num_stripes);
5213 } else {
5214 ret = -EIO;
5215 break;
5216 }
5217 ptr += len;
5218 sb_ptr += len;
5219 cur += len;
5220 }
5221 free_extent_buffer(sb);
5222 return ret;
5223 }
5224
5225 int btrfs_read_chunk_tree(struct btrfs_root *root)
5226 {
5227 struct btrfs_path *path;
5228 struct extent_buffer *leaf;
5229 struct btrfs_key key;
5230 struct btrfs_key found_key;
5231 int ret;
5232 int slot;
5233
5234 root = root->fs_info->chunk_root;
5235
5236 path = btrfs_alloc_path();
5237 if (!path)
5238 return -ENOMEM;
5239
5240 mutex_lock(&uuid_mutex);
5241 lock_chunks(root);
5242
5243 /* first we search for all of the device items, and then we
5244 * read in all of the chunk items. This way we can create chunk
5245 * mappings that reference all of the devices that are found
5246 */
5247 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
5248 key.offset = 0;
5249 key.type = 0;
5250 again:
5251 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5252 if (ret < 0)
5253 goto error;
5254 while (1) {
5255 leaf = path->nodes[0];
5256 slot = path->slots[0];
5257 if (slot >= btrfs_header_nritems(leaf)) {
5258 ret = btrfs_next_leaf(root, path);
5259 if (ret == 0)
5260 continue;
5261 if (ret < 0)
5262 goto error;
5263 break;
5264 }
5265 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5266 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
5267 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
5268 break;
5269 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
5270 struct btrfs_dev_item *dev_item;
5271 dev_item = btrfs_item_ptr(leaf, slot,
5272 struct btrfs_dev_item);
5273 ret = read_one_dev(root, leaf, dev_item);
5274 if (ret)
5275 goto error;
5276 }
5277 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
5278 struct btrfs_chunk *chunk;
5279 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
5280 ret = read_one_chunk(root, &found_key, leaf, chunk);
5281 if (ret)
5282 goto error;
5283 }
5284 path->slots[0]++;
5285 }
5286 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
5287 key.objectid = 0;
5288 btrfs_release_path(path);
5289 goto again;
5290 }
5291 ret = 0;
5292 error:
5293 unlock_chunks(root);
5294 mutex_unlock(&uuid_mutex);
5295
5296 btrfs_free_path(path);
5297 return ret;
5298 }
5299
5300 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
5301 {
5302 int i;
5303
5304 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5305 btrfs_dev_stat_reset(dev, i);
5306 }
5307
5308 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
5309 {
5310 struct btrfs_key key;
5311 struct btrfs_key found_key;
5312 struct btrfs_root *dev_root = fs_info->dev_root;
5313 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
5314 struct extent_buffer *eb;
5315 int slot;
5316 int ret = 0;
5317 struct btrfs_device *device;
5318 struct btrfs_path *path = NULL;
5319 int i;
5320
5321 path = btrfs_alloc_path();
5322 if (!path) {
5323 ret = -ENOMEM;
5324 goto out;
5325 }
5326
5327 mutex_lock(&fs_devices->device_list_mutex);
5328 list_for_each_entry(device, &fs_devices->devices, dev_list) {
5329 int item_size;
5330 struct btrfs_dev_stats_item *ptr;
5331
5332 key.objectid = 0;
5333 key.type = BTRFS_DEV_STATS_KEY;
5334 key.offset = device->devid;
5335 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
5336 if (ret) {
5337 __btrfs_reset_dev_stats(device);
5338 device->dev_stats_valid = 1;
5339 btrfs_release_path(path);
5340 continue;
5341 }
5342 slot = path->slots[0];
5343 eb = path->nodes[0];
5344 btrfs_item_key_to_cpu(eb, &found_key, slot);
5345 item_size = btrfs_item_size_nr(eb, slot);
5346
5347 ptr = btrfs_item_ptr(eb, slot,
5348 struct btrfs_dev_stats_item);
5349
5350 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
5351 if (item_size >= (1 + i) * sizeof(__le64))
5352 btrfs_dev_stat_set(device, i,
5353 btrfs_dev_stats_value(eb, ptr, i));
5354 else
5355 btrfs_dev_stat_reset(device, i);
5356 }
5357
5358 device->dev_stats_valid = 1;
5359 btrfs_dev_stat_print_on_load(device);
5360 btrfs_release_path(path);
5361 }
5362 mutex_unlock(&fs_devices->device_list_mutex);
5363
5364 out:
5365 btrfs_free_path(path);
5366 return ret < 0 ? ret : 0;
5367 }
5368
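/*
 * Write the in-memory counters of @device into its persistent
 * dev_stats item.  If an existing item is smaller than the current
 * format (fewer counters), it is deleted and re-created at full size
 * before the values are stored.
 */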
5369 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
5370 struct btrfs_root *dev_root,
5371 struct btrfs_device *device)
5372 {
5373 struct btrfs_path *path;
5374 struct btrfs_key key;
5375 struct extent_buffer *eb;
5376 struct btrfs_dev_stats_item *ptr;
5377 int ret;
5378 int i;
5379
5380 key.objectid = 0;
5381 key.type = BTRFS_DEV_STATS_KEY;
5382 key.offset = device->devid;
5383
5384 path = btrfs_alloc_path();
5385 BUG_ON(!path);
5386 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
5387 if (ret < 0) {
5388 printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
5389 ret, rcu_str_deref(device->name));
5390 goto out;
5391 }
5392
5393 if (ret == 0 &&
5394 btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
5395 /* need to delete old one and insert a new one */
5396 ret = btrfs_del_item(trans, dev_root, path);
5397 if (ret != 0) {
5398 printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
5399 rcu_str_deref(device->name), ret);
5400 goto out;
5401 }
5402 ret = 1;
5403 }
5404
5405 if (ret == 1) {
5406 /* need to insert a new item */
5407 btrfs_release_path(path);
5408 ret = btrfs_insert_empty_item(trans, dev_root, path,
5409 &key, sizeof(*ptr));
5410 if (ret < 0) {
5411 printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
5412 rcu_str_deref(device->name), ret);
5413 goto out;
5414 }
5415 }
5416
5417 eb = path->nodes[0];
5418 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
5419 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5420 btrfs_set_dev_stats_value(eb, ptr, i,
5421 btrfs_dev_stat_read(device, i));
5422 btrfs_mark_buffer_dirty(eb);
5423
5424 out:
5425 btrfs_free_path(path);
5426 return ret;
5427 }
5428
5429 /*
5430 * called from commit_transaction. Writes all changed device stats to disk.
5431 */
5432 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
5433 struct btrfs_fs_info *fs_info)
5434 {
5435 struct btrfs_root *dev_root = fs_info->dev_root;
5436 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
5437 struct btrfs_device *device;
5438 int ret = 0;
5439
5440 mutex_lock(&fs_devices->device_list_mutex);
5441 list_for_each_entry(device, &fs_devices->devices, dev_list) {
5442 if (!device->dev_stats_valid || !device->dev_stats_dirty)
5443 continue;
5444
5445 ret = update_dev_stat_item(trans, dev_root, device);
5446 if (!ret)
5447 device->dev_stats_dirty = 0;
5448 }
5449 mutex_unlock(&fs_devices->device_list_mutex);
5450
5451 return ret;
5452 }
5453
5454 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
5455 {
5456 btrfs_dev_stat_inc(dev, index);
5457 btrfs_dev_stat_print_on_error(dev);
5458 }
5459
5460 void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
5461 {
5462 if (!dev->dev_stats_valid)
5463 return;
5464 printk_ratelimited_in_rcu(KERN_ERR
5465 "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
5466 rcu_str_deref(dev->name),
5467 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
5468 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
5469 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
5470 btrfs_dev_stat_read(dev,
5471 BTRFS_DEV_STAT_CORRUPTION_ERRS),
5472 btrfs_dev_stat_read(dev,
5473 BTRFS_DEV_STAT_GENERATION_ERRS));
5474 }
5475
5476 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
5477 {
5478 int i;
5479
5480 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5481 if (btrfs_dev_stat_read(dev, i) != 0)
5482 break;
5483 if (i == BTRFS_DEV_STAT_VALUES_MAX)
5484 return; /* all values == 0, suppress message */
5485
5486 printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
5487 rcu_str_deref(dev->name),
5488 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
5489 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
5490 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
5491 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
5492 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
5493 }
5494
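/*
 * Back end of the get-dev-stats ioctl: copy up to stats->nr_items
 * counters of the device named by stats->devid into stats->values.
 * With the BTRFS_DEV_STATS_RESET flag each counter is read and zeroed
 * in one step.
 */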
5495 int btrfs_get_dev_stats(struct btrfs_root *root,
5496 struct btrfs_ioctl_get_dev_stats *stats)
5497 {
5498 struct btrfs_device *dev;
5499 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
5500 int i;
5501
5502 mutex_lock(&fs_devices->device_list_mutex);
5503 dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
5504 mutex_unlock(&fs_devices->device_list_mutex);
5505
5506 if (!dev) {
5507 printk(KERN_WARNING
5508 "btrfs: get dev_stats failed, device not found\n");
5509 return -ENODEV;
5510 } else if (!dev->dev_stats_valid) {
5511 printk(KERN_WARNING
5512 "btrfs: get dev_stats failed, not yet valid\n");
5513 return -ENODEV;
5514 } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
5515 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
5516 if (stats->nr_items > i)
5517 stats->values[i] =
5518 btrfs_dev_stat_read_and_reset(dev, i);
5519 else
5520 btrfs_dev_stat_reset(dev, i);
5521 }
5522 } else {
5523 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5524 if (stats->nr_items > i)
5525 stats->values[i] = btrfs_dev_stat_read(dev, i);
5526 }
5527 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
5528 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
5529 return 0;
5530 }
5531
5532 int btrfs_scratch_superblock(struct btrfs_device *device)
5533 {
5534 struct buffer_head *bh;
5535 struct btrfs_super_block *disk_super;
5536
5537 bh = btrfs_read_dev_super(device->bdev);
5538 if (!bh)
5539 return -EINVAL;
5540 disk_super = (struct btrfs_super_block *)bh->b_data;
5541
5542 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
5543 set_buffer_dirty(bh);
5544 sync_dirty_buffer(bh);
5545 brelse(bh);
5546
5547 return 0;
5548 }