/*
   md.h : kernel internal structure of the Linux MD driver
          Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#ifndef _MD_MD_H
#define _MD_MD_H

#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/badblocks.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include "md-cluster.h"

#define MaxSector (~(sector_t)0)

/*
 * These flags should really be called "NO_RETRY" rather than
 * "FAILFAST" because they don't make any promise about time lapse,
 * only about the number of retries, which will be zero.
 * REQ_FAILFAST_DRIVER is not included because
 * Commit: 4a27446f3e39 ("[SCSI] modify scsi to handle new fail fast flags.")
 * seems to suggest that the errors it avoids retrying should usually
 * be retried.
 */
#define MD_FAILFAST	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)
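
/*
 * Illustrative sketch (not part of this header): MD_FAILFAST is meant to
 * be OR-ed into the op flags of a bio sent to a device where failing fast
 * is acceptable, e.g. because another copy of the data exists.  The
 * function and variable names below are hypothetical:
 *
 *	static void submit_failfast_read(struct md_rdev *rdev, struct bio *bio)
 *	{
 *		bio->bi_opf = REQ_OP_READ;
 *		if (test_bit(FailFast, &rdev->flags))
 *			bio->bi_opf |= MD_FAILFAST;
 *		submit_bio(bio);
 *	}
 */
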
/*
 * MD's 'extended' device
 */
struct md_rdev {
	struct list_head same_set;	/* RAID devices within the same set */

	sector_t sectors;		/* Device size (in 512-byte sectors) */
	struct mddev *mddev;		/* RAID array if running */
	int last_events;		/* IO event timestamp */

	/*
	 * If meta_bdev is non-NULL, it means that a separate device is
	 * being used to store the metadata (superblock/bitmap) which
	 * would otherwise be contained on the same device as the data (bdev).
	 */
	struct block_device *meta_bdev;
	struct block_device *bdev;	/* block device handle */

	struct page *sb_page, *bb_page;
	int sb_loaded;
	__u64 sb_events;
	sector_t data_offset;		/* start of data in array */
	sector_t new_data_offset;	/* only relevant while reshaping */
	sector_t sb_start;		/* offset of the super block (in 512-byte sectors) */
	int sb_size;			/* bytes in the superblock */
	int preferred_minor;		/* autorun support */

	struct kobject kobj;

	/* A device can be in one of three states based on two flags:
	 * Not working:    faulty==1 in_sync==0
	 * Fully working:  faulty==0 in_sync==1
	 * Working, but not
	 * in sync with array:
	 *                 faulty==0 in_sync==0
	 *
	 * It can never have faulty==1, in_sync==1
	 * This reduces the burden of testing multiple flags in many cases
	 */

	unsigned long flags;		/* bit set of 'enum flag_bits' bits. */
	wait_queue_head_t blocked_wait;

	int desc_nr;			/* descriptor index in the superblock */
	int raid_disk;			/* role of device in array */
	int new_raid_disk;		/* role that the device will have in
					 * the array after a level-change completes.
					 */
	int saved_raid_disk;		/* role that device used to have in the
					 * array and could again if we did a partial
					 * resync from the bitmap
					 */
	union {
		sector_t recovery_offset;/* If this device has been partially
					 * recovered, this is where we were
					 * up to.
					 */
		sector_t journal_tail;	/* If this device is a journal device,
					 * this is the journal tail (journal
					 * recovery start point)
					 */
	};

	atomic_t nr_pending;		/* number of pending requests.
					 * only maintained for arrays that
					 * support hot removal
					 */
	atomic_t read_errors;		/* number of consecutive read errors that
					 * we have tried to ignore.
					 */
	time64_t last_read_error;	/* monotonic time since our
					 * last read error
					 */
	atomic_t corrected_errors;	/* number of corrected read errors,
					 * for reporting to userspace and storing
					 * in superblock.
					 */
	struct work_struct del_work;	/* used for delayed sysfs removal */

	struct kernfs_node *sysfs_state; /* handle for 'state'
					  * sysfs entry */

	struct badblocks badblocks;

	struct {
		short offset;		/* Offset from superblock to start of PPL.
					 * Not used by external metadata. */
		unsigned int size;	/* Size in sectors of the PPL space */
		sector_t sector;	/* First sector of the PPL space */
	} ppl;
};
enum flag_bits {
	Faulty,			/* device is known to have a fault */
	In_sync,		/* device is in_sync with rest of array */
	Bitmap_sync,		/* ..actually, not quite In_sync.  Need a
				 * bitmap-based recovery to get fully in sync
				 */
	WriteMostly,		/* Avoid reading if at all possible */
	AutoDetected,		/* added by auto-detect */
	Blocked,		/* An error occurred but has not yet
				 * been acknowledged by the metadata
				 * handler, so don't allow writes
				 * until it is cleared */
	WriteErrorSeen,		/* A write error has been seen on this
				 * device
				 */
	FaultRecorded,		/* Intermediate state for clearing
				 * Blocked.  The Fault is/will-be
				 * recorded in the metadata, but that
				 * metadata hasn't been stored safely
				 * on disk yet.
				 */
	BlockedBadBlocks,	/* A writer is blocked because they
				 * found an unacknowledged bad-block.
				 * This can safely be cleared at any
				 * time, and the writer will re-check.
				 * It may be set at any time, and at
				 * worst the writer will timeout and
				 * re-check.  So setting it as
				 * accurately as possible is good, but
				 * not absolutely critical.
				 */
	WantReplacement,	/* This device is a candidate to be
				 * hot-replaced, either because it has
				 * reported some faults, or because
				 * of an explicit request.
				 */
	Replacement,		/* This device is a replacement for
				 * a want_replacement device with the
				 * same raid_disk number.
				 */
	Candidate,		/* For clustered environments only:
				 * This device is seen locally but not
				 * by the whole cluster
				 */
	Journal,		/* This device is used as journal for
				 * raid-5/6.
				 * Usually, this device should be faster
				 * than other devices in the array
				 */
	ClusterRemove,
	RemoveSynchronized,	/* synchronize_rcu() was called after
				 * this device was known to be faulty,
				 * so it is safe to remove without
				 * another synchronize_rcu() call.
				 */
	ExternalBbl,		/* External metadata provides bad
				 * block management for a disk
				 */
	FailFast,		/* Minimal retries should be attempted on
				 * this device, so use REQ_FAILFAST_DEV.
				 * Also don't try to repair failed reads.
				 * It is expected that no bad block log
				 * is present.
				 */
	LastDev,		/* Seems to be the last working dev as
				 * it didn't fail, so don't use FailFast
				 * any more for metadata
				 */
};
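
/*
 * Illustrative sketch (not part of this header): the Faulty/In_sync
 * pairing described above maps each rdev onto one of three states.
 * The enum and helper below are hypothetical:
 *
 *	enum rdev_state { RDEV_FAILED, RDEV_FULLY_IN_SYNC, RDEV_RECOVERING };
 *
 *	static enum rdev_state classify_rdev(struct md_rdev *rdev)
 *	{
 *		if (test_bit(Faulty, &rdev->flags))
 *			return RDEV_FAILED;		// faulty==1, in_sync==0
 *		if (test_bit(In_sync, &rdev->flags))
 *			return RDEV_FULLY_IN_SYNC;	// faulty==0, in_sync==1
 *		return RDEV_RECOVERING;			// faulty==0, in_sync==0
 *	}
 */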

static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
			      sector_t *first_bad, int *bad_sectors)
{
	if (unlikely(rdev->badblocks.count)) {
		int rv = badblocks_check(&rdev->badblocks, rdev->data_offset + s,
					 sectors,
					 first_bad, bad_sectors);
		if (rv)
			*first_bad -= rdev->data_offset;
		return rv;
	}
	return 0;
}
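
/*
 * Illustrative sketch (not part of this header): a typical caller checks
 * for bad blocks before issuing a read to a candidate device.  Note that
 * is_badblock() takes an array-relative sector and translates it to a
 * device-relative one internally.  The names below are hypothetical:
 *
 *	static bool rdev_read_ok(struct md_rdev *rdev, sector_t s, int sectors)
 *	{
 *		sector_t first_bad;
 *		int bad_sectors;
 *
 *		if (is_badblock(rdev, s, sectors, &first_bad, &bad_sectors))
 *			return false;	// range overlaps a recorded bad block
 *		return true;
 *	}
 */
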
extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			      int is_new);
extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
				int is_new);
struct md_cluster_info;

/* change UNSUPPORTED_MDDEV_FLAGS for each array type if a new flag is added */
enum mddev_flags {
	MD_ARRAY_FIRST_USE,	/* First use of array, needs initialization */
	MD_CLOSING,		/* If set, we are closing the array, so do
				 * not open it then */
	MD_JOURNAL_CLEAN,	/* A raid with journal is already clean */
	MD_HAS_JOURNAL,		/* The raid array has journal feature set */
	MD_CLUSTER_RESYNC_LOCKED, /* cluster raid only, which means node
				   * already took resync lock, need to
				   * release the lock */
	MD_FAILFAST_SUPPORTED,	/* Using MD_FAILFAST on metadata writes is
				 * supported as calls to md_error() will
				 * never cause the array to become failed.
				 */
	MD_HAS_PPL,		/* The raid array has PPL feature set */
};

enum mddev_sb_flags {
	MD_SB_CHANGE_DEVS,	/* Some device status has changed */
	MD_SB_CHANGE_CLEAN,	/* transition to or from 'clean' */
	MD_SB_CHANGE_PENDING,	/* switch from 'clean' to 'active' in progress */
	MD_SB_NEED_REWRITE,	/* metadata write needs to be repeated */
};

struct mddev {
	void *private;
	struct md_personality *pers;
	dev_t unit;
	int md_minor;
	struct list_head disks;
	unsigned long flags;
	unsigned long sb_flags;

	int suspended;
	atomic_t active_io;
	int ro;
	int sysfs_active;		/* set when sysfs deletes
					 * are happening, so run/
					 * takeover/stop are not safe
					 */
	struct gendisk *gendisk;

	struct kobject kobj;
	int hold_active;
#define UNTIL_IOCTL 1
#define UNTIL_STOP 2

	/* Superblock information */
	int major_version,
	    minor_version,
	    patch_version;
	int persistent;
	int external;			/* metadata is
					 * managed externally */
	char metadata_type[17];		/* externally set */
	int chunk_sectors;
	time64_t ctime, utime;
	int level, layout;
	char clevel[16];
	int raid_disks;
	int max_disks;
	sector_t dev_sectors;		/* used size of
					 * component devices */
	sector_t array_sectors;		/* exported array size */
	int external_size;		/* size managed
					 * externally */
	__u64 events;
	/* If the last 'event' was simply a clean->dirty transition, and
	 * we didn't write it to the spares, then it is safe and simple
	 * to just decrement the event count on a dirty->clean transition.
	 * So we record that possibility here.
	 */
	int can_decrease_events;

	char uuid[16];

	/* If the array is being reshaped, we need to record the
	 * new shape and an indication of where we are up to.
	 * This is written to the superblock.
	 * If reshape_position is MaxSector, then no reshape is happening (yet).
	 */
	sector_t reshape_position;
	int delta_disks, new_level, new_layout;
	int new_chunk_sectors;
	int reshape_backwards;

	struct md_thread *thread;	/* management thread */
	struct md_thread *sync_thread;	/* doing resync or reconstruct */

	/* 'last_sync_action' is initialized to "none".  It is set when a
	 * sync operation (i.e. "data-check", "requested-resync", "resync",
	 * "recovery", or "reshape") is started.  It holds this value even
	 * when the sync thread is "frozen" (interrupted) or "idle" (stopped
	 * or finished).  It is overwritten when a new sync operation is begun.
	 */
	char *last_sync_action;
	sector_t curr_resync;		/* last block scheduled */
	/* As resync requests can complete out of order, we cannot easily track
	 * how much resync has been completed.  So we occasionally pause until
	 * everything completes, then set curr_resync_completed to curr_resync.
	 * As such it may be well behind the real resync mark, but it is a value
	 * we are certain of.
	 */
	sector_t curr_resync_completed;
	unsigned long resync_mark;	/* a recent timestamp */
	sector_t resync_mark_cnt;	/* blocks written at resync_mark */
	sector_t curr_mark_cnt;		/* blocks scheduled now */

	sector_t resync_max_sectors;	/* may be set by personality */

	atomic64_t resync_mismatches;	/* count of sectors where
					 * parity/replica mismatch found
					 */

	/* allow user-space to request suspension of IO to regions of the array */
	sector_t suspend_lo;
	sector_t suspend_hi;
	/* if zero, use the system-wide default */
	int sync_speed_min;
	int sync_speed_max;

	/* resync even though the same disks are shared among md-devices */
	int parallel_resync;

	int ok_start_degraded;

	unsigned long recovery;
	/* If a RAID personality determines that recovery (of a particular
	 * device) will fail due to a read error on the source device, it
	 * takes a copy of this number and does not attempt recovery again
	 * until this number changes.
	 */
	int recovery_disabled;

	int in_sync;			/* known to not need resync */
	/* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
	 * that we are never stopping an array while it is open.
	 * 'reconfig_mutex' protects all other reconfiguration.
	 * These locks are separate due to conflicting interactions
	 * with bdev->bd_mutex.
	 * Lock ordering is:
	 *  reconfig_mutex -> bd_mutex : e.g. do_md_run -> revalidate_disk
	 *  bd_mutex -> open_mutex : e.g. __blkdev_get -> md_open
	 */
	struct mutex open_mutex;
	struct mutex reconfig_mutex;
	atomic_t active;		/* general refcount */
	atomic_t openers;		/* number of active opens */

	int changed;			/* True if we might need to
					 * reread partition info */
	int degraded;			/* whether md should consider
					 * adding a spare
					 */

	atomic_t recovery_active;	/* blocks scheduled, but not written */
	wait_queue_head_t recovery_wait;
	sector_t recovery_cp;
	sector_t resync_min;		/* user requested sync
					 * starts here */
	sector_t resync_max;		/* resync should pause
					 * when it gets here */

	struct kernfs_node *sysfs_state; /* handle for 'array_state'
					  * file in sysfs.
					  */
	struct kernfs_node *sysfs_action; /* handle for 'sync_action' */

	struct work_struct del_work;	/* used for delayed sysfs removal */

	/* "lock" protects:
	 *   flush_bio transition from NULL to !NULL
	 *   rdev superblocks, events
	 *   clearing MD_SB_CHANGE_*
	 *   in_sync - and related safemode and MD_SB_CHANGE changes
	 *   pers (also protected by reconfig_mutex and pending IO).
	 *   clearing ->bitmap
	 *   clearing ->bitmap_info.file
	 *   changing ->resync_{min,max}
	 *   setting MD_RECOVERY_RUNNING (which interacts with resync_{min,max})
	 */
	spinlock_t lock;
	wait_queue_head_t sb_wait;	/* for waiting on superblock updates */
	atomic_t pending_writes;	/* number of active superblock writes */

	unsigned int safemode;		/* if set, update "clean" superblock
					 * when no writes pending.
					 */
	unsigned int safemode_delay;
	struct timer_list safemode_timer;
	struct percpu_ref writes_pending;
	int sync_checkers;		/* # of threads checking writes_pending */
	struct request_queue *queue;	/* for plugging ... */

	struct bitmap *bitmap;		/* the bitmap for the device */
	struct {
		struct file *file;	/* the bitmap file */
		loff_t offset;		/* offset from superblock of
					 * start of bitmap.  May be
					 * negative, but not '0'.
					 * For external metadata, offset
					 * from start of device.
					 */
		unsigned long space;	/* space available at this offset */
		loff_t default_offset;	/* this is the offset to use when
					 * hot-adding a bitmap.  It should
					 * eventually be settable by sysfs.
					 */
		unsigned long default_space; /* space available at
					      * default offset */
		struct mutex mutex;
		unsigned long chunksize;
		unsigned long daemon_sleep; /* how many jiffies between updates? */
		unsigned long max_write_behind; /* write-behind mode */
		int external;
		int nodes;		/* Maximum number of nodes in the cluster */
		char cluster_name[64];	/* Name of the cluster */
	} bitmap_info;

	atomic_t max_corr_read_errors;	/* max read retries */
	struct list_head all_mddevs;

	struct attribute_group *to_remove;

	struct bio_set *bio_set;

	/* Generic flush handling.
	 * The last to finish preflush schedules a worker to submit
	 * the rest of the request (without the REQ_PREFLUSH flag).
	 */
	struct bio *flush_bio;
	atomic_t flush_pending;
	struct work_struct flush_work;
	struct work_struct event_work;	/* used by dm to report failure event */
	void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
	struct md_cluster_info *cluster_info;
	unsigned int good_device_nr;	/* good device num within cluster raid */
};

enum recovery_flags {
	/*
	 * If neither SYNC nor RESHAPE is set, then it is a recovery.
	 */
	MD_RECOVERY_RUNNING,	/* a thread is running, or about to be started */
	MD_RECOVERY_SYNC,	/* actually doing a resync, not a recovery */
	MD_RECOVERY_RECOVER,	/* doing recovery, or need to try it. */
	MD_RECOVERY_INTR,	/* resync needs to be aborted for some reason */
	MD_RECOVERY_DONE,	/* thread is done and is waiting to be reaped */
	MD_RECOVERY_NEEDED,	/* we might need to start a resync/recover */
	MD_RECOVERY_REQUESTED,	/* user-space has requested a sync (used with SYNC) */
	MD_RECOVERY_CHECK,	/* user-space request for check-only, no repair */
	MD_RECOVERY_RESHAPE,	/* A reshape is happening */
	MD_RECOVERY_FROZEN,	/* User request to abort, and not restart, any action */
	MD_RECOVERY_ERROR,	/* sync-action interrupted because of an I/O error */
};
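
/*
 * Illustrative sketch (not part of this header): the common pattern for
 * kicking the management thread after a state change is to set
 * MD_RECOVERY_NEEDED and wake the thread, which then decides whether a
 * resync, recovery or reshape should be started:
 *
 *	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 *	md_wakeup_thread(mddev->thread);
 */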

static inline int __must_check mddev_lock(struct mddev *mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

/* Sometimes we need to take the lock in a situation where
 * failure due to interrupts is not acceptable.
 */
static inline void mddev_lock_nointr(struct mddev *mddev)
{
	mutex_lock(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(struct mddev *mddev)
{
	return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(struct mddev *mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}
extern void mddev_unlock(struct mddev *mddev);
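
/*
 * Illustrative sketch (not part of this header): since mddev_lock() is
 * interruptible and marked __must_check, callers must handle failure,
 * typically by propagating the error to user space.  The function name
 * below is hypothetical:
 *
 *	static int reconfigure_array(struct mddev *mddev)
 *	{
 *		int err = mddev_lock(mddev);
 *
 *		if (err)
 *			return err;	// interrupted by a signal
 *		// ... modify configuration under reconfig_mutex ...
 *		mddev_unlock(mddev);
 *		return 0;
 *	}
 */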

static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
	atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
}

struct md_personality
{
	char *name;
	int level;
	struct list_head list;
	struct module *owner;
	void (*make_request)(struct mddev *mddev, struct bio *bio);
	int (*run)(struct mddev *mddev);
	void (*free)(struct mddev *mddev, void *priv);
	void (*status)(struct seq_file *seq, struct mddev *mddev);
	/* error_handler must set ->faulty and clear ->in_sync
	 * if appropriate, and should abort recovery if needed
	 */
	void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
	int (*spare_active) (struct mddev *mddev);
	sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped);
	int (*resize) (struct mddev *mddev, sector_t sectors);
	sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
	int (*check_reshape) (struct mddev *mddev);
	int (*start_reshape) (struct mddev *mddev);
	void (*finish_reshape) (struct mddev *mddev);
	/* quiesce moves between quiescence states
	 * 0 - fully active
	 * 1 - no new requests allowed
	 * others - reserved
	 */
	void (*quiesce) (struct mddev *mddev, int state);
	/* takeover is used to transition an array from one
	 * personality to another.  The new personality must be able
	 * to handle the data in the current layout.
	 * e.g. 2drive raid1 -> 2drive raid5
	 *      ndrive raid5 -> degraded n+1drive raid6 with special layout
	 * If the takeover succeeds, a new 'private' structure is returned.
	 * This needs to be installed and then ->run used to activate the
	 * array.
	 */
	void *(*takeover) (struct mddev *mddev);
	/* congested implements bdi.congested_fn().
	 * Will not be called while array is 'suspended' */
	int (*congested)(struct mddev *mddev, int bits);
	/* Changes the consistency policy of an active array. */
	int (*change_consistency_policy)(struct mddev *mddev, const char *buf);
};
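
/*
 * Illustrative sketch (not part of this header): a minimal personality
 * fills in the core hooks and registers itself from its module init.
 * All names and the level number here are hypothetical; real
 * personalities (e.g. raid1.c) implement many more hooks:
 *
 *	static struct md_personality demo_personality = {
 *		.name		= "demo",
 *		.level		= -9,	// hypothetical level number
 *		.owner		= THIS_MODULE,
 *		.make_request	= demo_make_request,
 *		.run		= demo_run,
 *		.free		= demo_free,
 *		.status		= demo_status,
 *		.error_handler	= demo_error_handler,
 *	};
 *
 *	static int __init demo_init(void)
 *	{
 *		return register_md_personality(&demo_personality);
 *	}
 */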

struct md_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct mddev *, char *);
	ssize_t (*store)(struct mddev *, const char *, size_t);
};
extern struct attribute_group md_bitmap_group;

static inline struct kernfs_node *sysfs_get_dirent_safe(struct kernfs_node *sd, char *name)
{
	if (sd)
		return sysfs_get_dirent(sd, name);
	return sd;
}
static inline void sysfs_notify_dirent_safe(struct kernfs_node *sd)
{
	if (sd)
		sysfs_notify_dirent(sd);
}

static inline char *mdname(struct mddev *mddev)
{
	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}

static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	char nm[20];

	if (!test_bit(Replacement, &rdev->flags) &&
	    !test_bit(Journal, &rdev->flags) &&
	    mddev->kobj.sd) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
	} else
		return 0;
}

static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	char nm[20];

	if (!test_bit(Replacement, &rdev->flags) &&
	    !test_bit(Journal, &rdev->flags) &&
	    mddev->kobj.sd) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		sysfs_remove_link(&mddev->kobj, nm);
	}
}

/*
 * iterates through some rdev ringlist.  It's safe to remove the
 * current 'rdev'.  Don't touch 'tmp' though.
 */
#define rdev_for_each_list(rdev, tmp, head)				\
	list_for_each_entry_safe(rdev, tmp, head, same_set)

/*
 * iterates through the 'same array disks' ringlist
 */
#define rdev_for_each(rdev, mddev)				\
	list_for_each_entry(rdev, &((mddev)->disks), same_set)

#define rdev_for_each_safe(rdev, tmp, mddev)				\
	list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)

#define rdev_for_each_rcu(rdev, mddev)				\
	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
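
/*
 * Illustrative sketch (not part of this header): counting in-sync devices
 * with the RCU variant; the reader side only needs rcu_read_lock().
 * The function name is hypothetical:
 *
 *	static int count_in_sync(struct mddev *mddev)
 *	{
 *		struct md_rdev *rdev;
 *		int cnt = 0;
 *
 *		rcu_read_lock();
 *		rdev_for_each_rcu(rdev, mddev)
 *			if (test_bit(In_sync, &rdev->flags))
 *				cnt++;
 *		rcu_read_unlock();
 *		return cnt;
 *	}
 */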

struct md_thread {
	void (*run) (struct md_thread *thread);
	struct mddev *mddev;
	wait_queue_head_t wqueue;
	unsigned long flags;
	struct task_struct *tsk;
	unsigned long timeout;
	void *private;
};

#define THREAD_WAKEUP 0

static inline void safe_put_page(struct page *p)
{
	if (p) put_page(p);
}

extern int register_md_personality(struct md_personality *p);
extern int unregister_md_personality(struct md_personality *p);
extern int register_md_cluster_operations(struct md_cluster_operations *ops,
		struct module *module);
extern int unregister_md_cluster_operations(void);
extern int md_setup_cluster(struct mddev *mddev, int nodes);
extern void md_cluster_stop(struct mddev *mddev);
extern struct md_thread *md_register_thread(
	void (*run)(struct md_thread *thread),
	struct mddev *mddev,
	const char *name);
extern void md_unregister_thread(struct md_thread **threadp);
extern void md_wakeup_thread(struct md_thread *thread);
extern void md_check_recovery(struct mddev *mddev);
extern void md_reap_sync_thread(struct mddev *mddev);
extern int mddev_init_writes_pending(struct mddev *mddev);
extern void md_write_start(struct mddev *mddev, struct bio *bi);
extern void md_write_inc(struct mddev *mddev, struct bio *bi);
extern void md_write_end(struct mddev *mddev);
extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev);

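/*
 * Illustrative sketch (not part of this header): the md_thread lifecycle.
 * A personality typically registers a worker at ->run() time and tears it
 * down when it stops; md_wakeup_thread() prods it whenever there is work.
 * md_register_thread() returns NULL on failure.  Names are hypothetical:
 *
 *	static void demo_daemon(struct md_thread *thread)
 *	{
 *		struct mddev *mddev = thread->mddev;
 *		// ... wakeup-driven or periodic housekeeping ...
 *	}
 *
 *	// in ->run():
 *	//	mddev->thread = md_register_thread(demo_daemon, mddev, "demo");
 *	//	if (!mddev->thread)
 *	//		return -ENOMEM;
 *	// in ->free():
 *	//	md_unregister_thread(&mddev->thread);
 */
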
extern int mddev_congested(struct mddev *mddev, int bits);
extern void md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
			   sector_t sector, int size, struct page *page);
extern int md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
			struct page *page, int op, int op_flags,
			bool metadata_op);
extern void md_do_sync(struct md_thread *thread);
extern void md_new_event(struct mddev *mddev);
extern void md_allow_write(struct mddev *mddev);
extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(struct mddev *mddev);
extern int md_integrity_register(struct mddev *mddev);
extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);

extern void mddev_init(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
extern void md_stop(struct mddev *mddev);
extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
extern void md_rdev_clear(struct md_rdev *rdev);

extern void mddev_suspend(struct mddev *mddev);
extern void mddev_resume(struct mddev *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
				   struct mddev *mddev);

extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void md_kick_rdev_from_array(struct md_rdev *rdev);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);

static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
	int faulty = test_bit(Faulty, &rdev->flags);

	if (atomic_dec_and_test(&rdev->nr_pending) && faulty) {
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
	}
}
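
/*
 * Illustrative sketch (not part of this header): nr_pending is raised
 * before I/O is issued to a device and dropped via rdev_dec_pending()
 * on completion, so hot removal only proceeds once in-flight requests
 * have drained.  A hypothetical end_io handler, assuming bi_private
 * holds the rdev:
 *
 *	static void demo_end_read(struct bio *bio)
 *	{
 *		struct md_rdev *rdev = bio->bi_private;
 *
 *		// ... handle completion status ...
 *		rdev_dec_pending(rdev, rdev->mddev);
 *		bio_put(bio);
 *	}
 *
 * with the matching atomic_inc(&rdev->nr_pending) done before submit_bio().
 */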

extern struct md_cluster_operations *md_cluster_ops;
static inline int mddev_is_clustered(struct mddev *mddev)
{
	return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
}

/* clear unsupported mddev_flags */
static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
	unsigned long unsupported_flags)
{
	mddev->flags &= ~unsupported_flags;
}

static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio)
{
	if (bio_op(bio) == REQ_OP_WRITE_SAME &&
	    !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)
		mddev->queue->limits.max_write_same_sectors = 0;
}

static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
{
	if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
	    !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors)
		mddev->queue->limits.max_write_zeroes_sectors = 0;
}

/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
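
/*
 * For example, with the common 4 KiB PAGE_SIZE this works out to
 * RESYNC_PAGES = (65536 + 4095) / 4096 = 16 pages per resync request.
 */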

/* for managing resync I/O pages */
struct resync_pages {
	unsigned idx;	/* for get/put page from the pool */
	void *raid_bio;
	struct page *pages[RESYNC_PAGES];
};

static inline int resync_alloc_pages(struct resync_pages *rp,
				     gfp_t gfp_flags)
{
	int i;

	for (i = 0; i < RESYNC_PAGES; i++) {
		rp->pages[i] = alloc_page(gfp_flags);
		if (!rp->pages[i])
			goto out_free;
	}

	return 0;

out_free:
	while (--i >= 0)
		put_page(rp->pages[i]);
	return -ENOMEM;
}

static inline void resync_free_pages(struct resync_pages *rp)
{
	int i;

	for (i = 0; i < RESYNC_PAGES; i++)
		put_page(rp->pages[i]);
}

static inline void resync_get_all_pages(struct resync_pages *rp)
{
	int i;

	for (i = 0; i < RESYNC_PAGES; i++)
		get_page(rp->pages[i]);
}

static inline struct page *resync_fetch_page(struct resync_pages *rp,
					     unsigned idx)
{
	if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
		return NULL;
	return rp->pages[idx];
}
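
/*
 * Illustrative sketch (not part of this header): typical lifecycle of a
 * resync_pages pool around one resync request.  Everything between the
 * allocation and the final free is elided; names are hypothetical:
 *
 *	static int demo_prepare_resync(struct resync_pages *rp)
 *	{
 *		struct page *page;
 *
 *		if (resync_alloc_pages(rp, GFP_KERNEL))
 *			return -ENOMEM;
 *		page = resync_fetch_page(rp, 0);	// first page of the pool
 *		// ... attach pages to bios, submit, wait ...
 *		resync_free_pages(rp);
 *		return 0;
 *	}
 */
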
#endif /* _MD_MD_H */