spin_lock_init(&fs_info->fs_roots_radix_lock);
spin_lock_init(&fs_info->delayed_iput_lock);
spin_lock_init(&fs_info->defrag_inodes_lock);
- spin_lock_init(&fs_info->free_chunk_lock);
spin_lock_init(&fs_info->tree_mod_seq_lock);
spin_lock_init(&fs_info->super_lock);
spin_lock_init(&fs_info->qgroup_op_lock);
fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
fs_info->metadata_ratio = 0;
fs_info->defrag_inodes = RB_ROOT;
- fs_info->free_chunk_space = 0;
+ atomic64_set(&fs_info->free_chunk_space, 0);
fs_info->tree_mod_log = RB_ROOT;
fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
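With free_chunk_space now an atomic64_t, the free_chunk_lock spinlock goes away entirely. A minimal sketch of how the remaining accounting sites change, assuming the lock-protected arithmetic shown here as the before-state:

	/* before: plain u64 arithmetic serialized by free_chunk_lock */
	spin_lock(&fs_info->free_chunk_lock);
	fs_info->free_chunk_space += device->total_bytes;
	spin_unlock(&fs_info->free_chunk_lock);

	/* after: one atomic read-modify-write, no lock needed */
	atomic64_add(device->total_bytes, &fs_info->free_chunk_space);

	/* readers switch from locked reads to atomic64_read() */
	u64 avail = atomic64_read(&fs_info->free_chunk_space);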
* We FUA the first super.  The others we allow
* to go down lazily.
*/
- if (i == 0)
- ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_FUA, bh);
- else
+ if (i == 0) {
+ ret = btrfsic_submit_bh(REQ_OP_WRITE,
+ REQ_SYNC | REQ_FUA, bh);
+ } else {
ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
+ }
if (ret)
errors++;
}
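For reference, the FUA/SYNC split above expressed as a standalone helper; submit_one_super() is a hypothetical name, not part of the patch. REQ_FUA makes the primary super-block copy durable before the write completes, while the backup copies are merely queued at REQ_SYNC priority and may reach media later:

	static int submit_one_super(struct buffer_head *bh, int copy_num)
	{
		/* the first copy must hit stable media; backups go down lazily */
		int op_flags = (copy_num == 0) ? REQ_SYNC | REQ_FUA : REQ_SYNC;

		return submit_bh(REQ_OP_WRITE, op_flags, bh);
	}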
if (wait) {
bio = device->flush_bio;
if (!bio)
+ /*
+ * This means the allocation has failed with ENOMEM;
+ * we still return 0 here, as it's not a device error.
+ */
return 0;
wait_for_completion(&device->flush_wait);
bio->bi_end_io = btrfs_end_empty_barrier;
bio->bi_bdev = device->bdev;
- bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+ bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
init_completion(&device->flush_wait);
bio->bi_private = &device->flush_wait;
device->flush_bio = bio;
return 0;
}
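+/*
+ * Tally the per-device results recorded in last_flush_error: submit
+ * failures (ENOMEM, or a missing bdev) and real device flush errors
+ * are counted separately, and the volume fails only when either
+ * count exceeds the tolerated number of disk barrier failures.
+ */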
+static int check_barrier_error(struct btrfs_fs_devices *fsdevs)
+{
+ int submit_flush_error = 0;
+ int dev_flush_error = 0;
+ struct btrfs_device *dev;
+ int tolerance;
+
+ list_for_each_entry_rcu(dev, &fsdevs->devices, dev_list) {
+ if (!dev->bdev) {
+ submit_flush_error++;
+ dev_flush_error++;
+ continue;
+ }
+ if (dev->last_flush_error == -ENOMEM)
+ submit_flush_error++;
+ if (dev->last_flush_error && dev->last_flush_error != -ENOMEM)
+ dev_flush_error++;
+ }
+
+ tolerance = fsdevs->fs_info->num_tolerated_disk_barrier_failures;
+ if (submit_flush_error > tolerance || dev_flush_error > tolerance)
+ return -EIO;
+
+ return 0;
+}
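A worked example of the tolerance check above, assuming a two-device RAID1 volume (for RAID1, num_tolerated_disk_barrier_failures is 1; that value comes from the RAID1 profile, not from this hunk):

	dev1: EIO,    dev2: 0      -> dev_flush_error = 1, tolerated
	dev1: EIO,    dev2: ENOMEM -> both counters at 1, still tolerated
	dev1: ENOMEM, dev2: ENOMEM -> submit_flush_error = 2 > 1, -EIO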
+
/*
* send an empty flush down to each device in parallel,
* then wait for them
ret = write_dev_flush(dev, 0);
if (ret)
errors_send++;
+ dev->last_flush_error = ret;
}
/* wait for all the barriers */
continue;
ret = write_dev_flush(dev, 1);
- if (ret)
+ if (ret) {
+ dev->last_flush_error = ret;
errors_wait++;
+ }
+ }
+
+ /*
+ * Try hard in case of flush.  Let's say, in RAID1 we have
+ * the following situation:
+ *  dev1: EIO          dev2: ENOMEM
+ * This is not a fatal error, as we hope to recover from
+ * ENOMEM on the next attempt to flush.
+ * But the following is considered fatal:
+ *  dev1: ENOMEM       dev2: ENOMEM
+ *  dev1: bdev == NULL dev2: ENOMEM
+ */
+ if (errors_send || errors_wait) {
+ /*
+ * To arrive at the volume status we need the status of
+ * all the disks, so the error checking is pushed into a
+ * separate loop in check_barrier_error().
+ */
+ return check_barrier_error(info->fs_devices);
}
- if (errors_send > info->num_tolerated_disk_barrier_failures ||
- errors_wait > info->num_tolerated_disk_barrier_failures)
- return -EIO;
return 0;
}
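For context, this function is invoked from the super-block write path roughly as follows; the sketch is abbreviated from the assumed write_all_supers() caller of this kernel era, not part of the hunks above:

	if (do_barriers) {
		ret = barrier_all_devices(info);
		if (ret) {
			mutex_unlock(&info->fs_devices->device_list_mutex);
			btrfs_handle_fs_error(info, ret,
					"errors while submitting device barriers.");
			return ret;
		}
	}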