/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#ifndef BTRFS_VOLUMES_H
#define BTRFS_VOLUMES_H

#include <linux/bio.h>
#include <linux/sort.h>
#include <linux/btrfs.h>
#include "async-thread.h"

extern struct mutex uuid_mutex;

#define BTRFS_STRIPE_LEN	SZ_64K

struct buffer_head;
struct btrfs_pending_bios {
	struct bio *head;
	struct bio *tail;
};

/*
 * Use sequence counter to get consistent device stat data on
 * 32-bit processors.
 */
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#include <linux/seqlock.h>
#define __BTRFS_NEED_DEVICE_DATA_ORDERED
#define btrfs_device_data_ordered_init(device)	\
	seqcount_init(&device->data_seqcount)
#else
#define btrfs_device_data_ordered_init(device) do { } while (0)
#endif

#define BTRFS_DEV_STATE_WRITEABLE	(0)
#define BTRFS_DEV_STATE_IN_FS_METADATA	(1)
#define BTRFS_DEV_STATE_MISSING		(2)
#define BTRFS_DEV_STATE_REPLACE_TGT	(3)
#define BTRFS_DEV_STATE_FLUSH_SENT	(4)

struct btrfs_device {
	struct list_head dev_list;
	struct list_head dev_alloc_list;
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_fs_info *fs_info;

	struct rcu_string *name;

	u64 generation;

	spinlock_t io_lock ____cacheline_aligned;
	int running_pending;
	/* regular prio bios */
	struct btrfs_pending_bios pending_bios;
	/* sync bios */
	struct btrfs_pending_bios pending_sync_bios;

	struct block_device *bdev;

	/* the mode sent to blkdev_get */
	fmode_t mode;

	unsigned long dev_state;
	blk_status_t last_flush_error;
	int flush_bio_sent;

#ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED
	seqcount_t data_seqcount;
#endif

	/* the internal btrfs device id */
	u64 devid;

	/* size of the device in memory */
	u64 total_bytes;

	/* size of the device on disk */
	u64 disk_total_bytes;

	/* bytes used */
	u64 bytes_used;

	/* optimal io alignment for this device */
	u32 io_align;

	/* optimal io width for this device */
	u32 io_width;
	/* type and info about this device */
	u64 type;

	/* minimal io size for this device */
	u32 sector_size;

	/* physical drive uuid (or lvm uuid) */
	u8 uuid[BTRFS_UUID_SIZE];

	/*
	 * size of the device in the current transaction
	 *
	 * This variant is updated when committing the transaction,
	 * and is protected by device_list_mutex.
	 */
	u64 commit_total_bytes;

	/* bytes used in the current transaction */
	u64 commit_bytes_used;
	/*
	 * used to manage the device which is resized
	 *
	 * It is protected by chunk_lock.
	 */
	struct list_head resized_list;

	/* for sending down flush barriers */
	struct bio *flush_bio;
	struct completion flush_wait;

	/* per-device scrub information */
	struct scrub_ctx *scrub_ctx;

	struct btrfs_work work;
	struct rcu_head rcu;

	/* readahead state */
	atomic_t reada_in_flight;
	u64 reada_next;
	struct reada_zone *reada_curr_zone;
	struct radix_tree_root reada_zones;
	struct radix_tree_root reada_extents;

	/*
	 * Disk I/O failure stats. For a detailed description refer to
	 * enum btrfs_dev_stat_values in ioctl.h.
	 */
	int dev_stats_valid;

	/* Counter to record the change of device stats */
	atomic_t dev_stats_ccnt;
	atomic_t dev_stat_values[BTRFS_DEV_STAT_VALUES_MAX];
};

/*
 * If we read those values while holding the lock that protects them, we
 * don't need the following helpers; reading them directly is safe.
 */
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#define BTRFS_DEVICE_GETSET_FUNCS(name)					\
static inline u64							\
btrfs_device_get_##name(const struct btrfs_device *dev)			\
{									\
	u64 size;							\
	unsigned int seq;						\
									\
	do {								\
		seq = read_seqcount_begin(&dev->data_seqcount);		\
		size = dev->name;					\
	} while (read_seqcount_retry(&dev->data_seqcount, seq));	\
	return size;							\
}									\
									\
static inline void							\
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
{									\
	preempt_disable();						\
	write_seqcount_begin(&dev->data_seqcount);			\
	dev->name = size;						\
	write_seqcount_end(&dev->data_seqcount);			\
	preempt_enable();						\
}
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
#define BTRFS_DEVICE_GETSET_FUNCS(name)					\
static inline u64							\
btrfs_device_get_##name(const struct btrfs_device *dev)			\
{									\
	u64 size;							\
									\
	preempt_disable();						\
	size = dev->name;						\
	preempt_enable();						\
	return size;							\
}									\
									\
static inline void							\
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
{									\
	preempt_disable();						\
	dev->name = size;						\
	preempt_enable();						\
}
#else
#define BTRFS_DEVICE_GETSET_FUNCS(name)					\
static inline u64							\
btrfs_device_get_##name(const struct btrfs_device *dev)			\
{									\
	return dev->name;						\
}									\
									\
static inline void							\
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
{									\
	dev->name = size;						\
}
#endif

BTRFS_DEVICE_GETSET_FUNCS(total_bytes);
BTRFS_DEVICE_GETSET_FUNCS(disk_total_bytes);
BTRFS_DEVICE_GETSET_FUNCS(bytes_used);
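
/*
 * Example usage of the generated helpers (illustrative only, not part of
 * this header's API):
 *
 *	u64 old_size = btrfs_device_get_total_bytes(device);
 *
 *	btrfs_device_set_total_bytes(device, old_size + SZ_1G);
 *
 * On 32-bit SMP the read retries under the seqcount until it sees a
 * consistent value; on 64-bit both helpers reduce to plain accesses.
 */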

struct btrfs_fs_devices {
	u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
	struct list_head fs_list;

	u64 num_devices;
	u64 open_devices;
	u64 rw_devices;
	u64 missing_devices;
	u64 total_rw_bytes;
	u64 total_devices;
	struct block_device *latest_bdev;

	/*
	 * All of the devices in the FS, protected by a mutex so we can
	 * safely walk it to write out the supers without worrying about
	 * adds/removes by the multi-device code. Scrubbing the super
	 * block can kick off super block writes while holding this
	 * mutex lock.
	 */
	struct mutex device_list_mutex;
	struct list_head devices;

	struct list_head resized_devices;
	/* devices not currently being allocated */
	struct list_head alloc_list;

	struct btrfs_fs_devices *seed;
	int seeding;

	int opened;

	/* set when we find or add a device that doesn't have the
	 * nonrot flag set
	 */
	int rotating;

	struct btrfs_fs_info *fs_info;
	/* sysfs kobjects */
	struct kobject fsid_kobj;
	struct kobject *device_dir_kobj;
	struct completion kobj_unregister;
};

#define BTRFS_BIO_INLINE_CSUM_SIZE	64

/*
 * we need the mirror number and stripe index to be passed around
 * the call chain while we are processing end_io (especially errors).
 * Really, what we need is a btrfs_bio structure that has this info
 * and is properly sized with its stripe array, but we're not there
 * quite yet.  We have our own btrfs bioset, and all of the bios
 * we allocate are actually btrfs_io_bios.  We'll cram as much of
 * struct btrfs_bio as we can into this over time.
 */
typedef void (btrfs_io_bio_end_io_t) (struct btrfs_io_bio *bio, int err);
struct btrfs_io_bio {
	unsigned int mirror_num;
	unsigned int stripe_index;
	u64 logical;
	u8 *csum;
	u8 csum_inline[BTRFS_BIO_INLINE_CSUM_SIZE];
	u8 *csum_allocated;
	btrfs_io_bio_end_io_t *end_io;
	struct bvec_iter iter;
	/*
	 * This member must come last, bio_alloc_bioset will allocate enough
	 * bytes for entire btrfs_io_bio but relies on bio being last.
	 */
	struct bio bio;
};

static inline struct btrfs_io_bio *btrfs_io_bio(struct bio *bio)
{
	return container_of(bio, struct btrfs_io_bio, bio);
}
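
/*
 * A sketch of the intended use (the handler name is hypothetical): since
 * every bio allocated from the btrfs bioset is embedded in a
 * struct btrfs_io_bio, an end_io callback can recover the wrapper:
 *
 *	static void example_end_io(struct bio *bio)
 *	{
 *		struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
 *
 *		pr_debug("mirror %u logical %llu\n",
 *			 io_bio->mirror_num, io_bio->logical);
 *	}
 */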

struct btrfs_bio_stripe {
	struct btrfs_device *dev;
	u64 physical;
	u64 length; /* only used for discard mappings */
};

struct btrfs_bio;
typedef void (btrfs_bio_end_io_t) (struct btrfs_bio *bio, int err);

struct btrfs_bio {
	refcount_t refs;
	atomic_t stripes_pending;
	struct btrfs_fs_info *fs_info;
	u64 map_type; /* get from map_lookup->type */
	bio_end_io_t *end_io;
	struct bio *orig_bio;
	unsigned long flags;
	void *private;
	atomic_t error;
	int max_errors;
	int num_stripes;
	int mirror_num;
	int num_tgtdevs;
	int *tgtdev_map;
	/*
	 * logical block numbers for the start of each stripe
	 * The last one or two are p/q.  These are sorted,
	 * so raid_map[0] is the start of our full stripe
	 */
	u64 *raid_map;
	struct btrfs_bio_stripe stripes[];
};

struct btrfs_device_info {
	struct btrfs_device *dev;
	u64 dev_offset;
	u64 max_avail;
	u64 total_avail;
};

struct btrfs_raid_attr {
	int sub_stripes;	/* sub_stripes info for map */
	int dev_stripes;	/* stripes per dev */
	int devs_max;		/* max devs to use */
	int devs_min;		/* min devs needed */
	int tolerated_failures;	/* max tolerated fail devs */
	int devs_increment;	/* ndevs has to be a multiple of this */
	int ncopies;		/* how many copies of the data there are */
	int mindev_error;	/* error code if the minimum device count is not met */
	const char raid_name[8]; /* name of the raid */
	u64 bg_flag;		/* block group flag of the raid */
};

extern const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES];

struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
	u64 stripe_len;
	int num_stripes;
	int sub_stripes;
	struct btrfs_bio_stripe stripes[];
};

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))
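
/*
 * Example (assumed allocation pattern, for illustration only): because
 * stripes[] is a flexible array member, a map for num_stripes stripes is
 * allocated in a single shot:
 *
 *	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
 */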

struct btrfs_balance_args;
struct btrfs_balance_progress;
struct btrfs_balance_control {
	struct btrfs_balance_args data;
	struct btrfs_balance_args meta;
	struct btrfs_balance_args sys;

	u64 flags;

	struct btrfs_balance_progress stat;
};

enum btrfs_map_op {
	BTRFS_MAP_READ,
	BTRFS_MAP_WRITE,
	BTRFS_MAP_DISCARD,
	BTRFS_MAP_GET_READ_MIRRORS,
};

static inline enum btrfs_map_op btrfs_op(struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		return BTRFS_MAP_DISCARD;
	case REQ_OP_WRITE:
		return BTRFS_MAP_WRITE;
	default:
		WARN_ON_ONCE(1);
	case REQ_OP_READ:
		return BTRFS_MAP_READ;
	}
}

int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length);
void btrfs_get_bbio(struct btrfs_bio *bbio);
void btrfs_put_bbio(struct btrfs_bio *bbio);
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		    u64 logical, u64 *length,
		    struct btrfs_bio **bbio_ret, int mirror_num);
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		     u64 logical, u64 *length,
		     struct btrfs_bio **bbio_ret);
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     u64 physical, u64 **logical, int *naddrs, int *stripe_len);
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_fs_info *fs_info, u64 type);
void btrfs_mapping_init(struct btrfs_mapping_tree *tree);
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree);
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
			   int mirror_num, int async_submit);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder);
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret);
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step);
void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
		struct btrfs_device *device, struct btrfs_device *this_dev);
int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
					 const char *device_path,
					 struct btrfs_device **device);
int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid,
					 const char *devpath,
					 struct btrfs_device **device);
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid);
void btrfs_free_device(struct btrfs_device *device);
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
		    const char *device_path, u64 devid);
void __exit btrfs_cleanup_fs_uuids(void);
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size);
struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
				       u8 *uuid, u8 *fsid);
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *path);
int btrfs_balance(struct btrfs_fs_info *fs_info,
		  struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs);
int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info);
int btrfs_recover_balance(struct btrfs_fs_info *fs_info);
int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset);
int find_free_dev_extent_start(struct btrfs_transaction *transaction,
			       struct btrfs_device *device, u64 num_bytes,
			       u64 search_start, u64 *start, u64 *max_avail);
int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *max_avail);
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats);
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info);
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
					struct btrfs_device *srcdev);
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *srcdev);
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *tgtdev);
void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path);
int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info,
			   u64 logical, u64 len);
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
				    u64 logical);
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info,
			     u64 chunk_offset, u64 chunk_size);
int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info, u64 chunk_offset);

static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
				      int index)
{
	atomic_inc(dev->dev_stat_values + index);
	/*
	 * This memory barrier orders stores updating statistics before stores
	 * updating dev_stats_ccnt.
	 *
	 * It pairs with smp_rmb() in btrfs_run_dev_stats().
	 */
	smp_mb__before_atomic();
	atomic_inc(&dev->dev_stats_ccnt);
}
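
/*
 * A minimal sketch of the reader side these barriers pair with (the real
 * consumer is btrfs_run_dev_stats(); the local variable names here are
 * illustrative):
 *
 *	ccnt = atomic_read(&dev->dev_stats_ccnt);
 *	smp_rmb();
 *	val = btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS);
 *
 * If dev_stats_ccnt changes afterwards, the values read may already be
 * stale and the caller can re-sample.
 */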

static inline int btrfs_dev_stat_read(struct btrfs_device *dev,
				      int index)
{
	return atomic_read(dev->dev_stat_values + index);
}

static inline int btrfs_dev_stat_read_and_reset(struct btrfs_device *dev,
						int index)
{
	int ret;

	ret = atomic_xchg(dev->dev_stat_values + index, 0);
	/*
	 * atomic_xchg implies a full memory barrier as per atomic_t.txt:
	 * - RMW operations that have a return value are fully ordered;
	 *
	 * This implicit memory barrier is paired with the smp_rmb() in
	 * btrfs_run_dev_stats().
	 */
	atomic_inc(&dev->dev_stats_ccnt);
	return ret;
}

static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
				      int index, unsigned long val)
{
	atomic_set(dev->dev_stat_values + index, val);
	/*
	 * This memory barrier orders stores updating statistics before stores
	 * updating dev_stats_ccnt.
	 *
	 * It pairs with smp_rmb() in btrfs_run_dev_stats().
	 */
	smp_mb__before_atomic();
	atomic_inc(&dev->dev_stats_ccnt);
}

static inline void btrfs_dev_stat_reset(struct btrfs_device *dev,
					int index)
{
	btrfs_dev_stat_set(dev, index, 0);
}

/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as index to access btrfs_raid_array[].
 */
static inline enum btrfs_raid_types btrfs_bg_flags_to_raid_index(u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_RAID10)
		return BTRFS_RAID_RAID10;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
		return BTRFS_RAID_RAID1;
	else if (flags & BTRFS_BLOCK_GROUP_DUP)
		return BTRFS_RAID_DUP;
	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
		return BTRFS_RAID_RAID0;
	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
		return BTRFS_RAID_RAID5;
	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
		return BTRFS_RAID_RAID6;

	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
}
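
/*
 * Illustrative lookup (not a declaration from this header): the returned
 * index selects a row of btrfs_raid_array, e.g. the number of copies a
 * block group keeps of its data:
 *
 *	ncopies = btrfs_raid_array[btrfs_bg_flags_to_raid_index(flags)].ncopies;
 */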

const char *get_raid_name(enum btrfs_raid_types type);

void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info);
void btrfs_update_commit_device_bytes_used(struct btrfs_transaction *trans);

struct list_head *btrfs_get_fs_uuids(void);
void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info);
void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info);
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
			       struct btrfs_device *failing_dev);

#endif