/*
   md.h : kernel internal structure of the Linux MD driver
          Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#ifndef _MD_MD_H
#define _MD_MD_H
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include "md-cluster.h"
#define MaxSector (~(sector_t)0)
/* Bad block numbers are stored sorted in a single page.
 * 64 bits are used for each block or extent.
 * 54 bits are sector number, 9 bits are extent size,
 * 1 bit is an 'acknowledged' flag.
 */
#define MD_MAX_BADBLOCKS	(PAGE_SIZE/8)
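/* For example, with 4 KiB pages this allows PAGE_SIZE/8 == 512 entries. */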
/*
 * MD's 'extended' device
 */
struct md_rdev {
	struct list_head same_set;	/* RAID devices within the same set */

	sector_t sectors;		/* Device size (in 512-byte sectors) */
	struct mddev *mddev;		/* RAID array if running */
	int last_events;		/* IO event timestamp */

	/*
	 * If meta_bdev is non-NULL, it means that a separate device is
	 * being used to store the metadata (superblock/bitmap) which
	 * would otherwise be contained on the same device as the data (bdev).
	 */
	struct block_device *meta_bdev;
	struct block_device *bdev;	/* block device handle */

	struct page	*sb_page, *bb_page;
	int		sb_loaded;
	__u64		sb_events;
	sector_t	data_offset;	/* start of data in array */
	sector_t	new_data_offset;/* only relevant while reshaping */
	sector_t	sb_start;	/* offset of the super block (in 512-byte sectors) */
	int		sb_size;	/* bytes in the superblock */
	int		preferred_minor;	/* autorun support */

	struct kobject	kobj;
	/* A device can be in one of three states based on two flags:
	 * Not working:    faulty==1 in_sync==0
	 * Fully working:  faulty==0 in_sync==1
	 * Working, but not
	 * in sync with array
	 *                 faulty==0 in_sync==0
	 *
	 * It can never have faulty==1, in_sync==1
	 * This reduces the burden of testing multiple flags in many cases
	 */

	unsigned long	flags;		/* bit set of 'enum flag_bits' bits. */
	wait_queue_head_t blocked_wait;
	int desc_nr;			/* descriptor index in the superblock */
	int raid_disk;			/* role of device in array */
	int new_raid_disk;		/* role that the device will have in
					 * the array after a level-change completes.
					 */
	int saved_raid_disk;		/* role that device used to have in the
					 * array and could again if we did a partial
					 * resync from the bitmap
					 */
	union {
		sector_t recovery_offset;/* If this device has been partially
					 * recovered, this is where we were
					 * up to.
					 */
		sector_t journal_tail;	/* If this device is a journal device,
					 * this is the journal tail (journal
					 * recovery start point)
					 */
	};
	atomic_t	nr_pending;	/* number of pending requests.
					 * only maintained for arrays that
					 * support hot removal
					 */
	atomic_t	read_errors;	/* number of consecutive read errors that
					 * we have tried to ignore.
					 */
	struct timespec last_read_error;	/* monotonic time since our
						 * last read error
						 */
	atomic_t	corrected_errors; /* number of corrected read errors,
					   * for reporting to userspace and storing
					   * in superblock.
					   */
	struct work_struct del_work;	/* used for delayed sysfs removal */

	struct kernfs_node *sysfs_state; /* handle for 'state'
					  * sysfs entry */

	struct badblocks {
		int	count;		/* count of bad blocks */
		int	unacked_exist;	/* there probably are unacknowledged
					 * bad blocks. This is only cleared
					 * when a read discovers none
					 */
		int	shift;		/* shift from sectors to block size
					 * a -ve shift means badblocks are
					 * disabled.
					 */
		u64	*page;		/* badblock list */
		int	changed;
		seqlock_t lock;

		sector_t sector;
		sector_t size;		/* in sectors */
	} badblocks;
};
enum flag_bits {
	Faulty,			/* device is known to have a fault */
	In_sync,		/* device is in_sync with rest of array */
	Bitmap_sync,		/* ..actually, not quite In_sync. Need a
				 * bitmap-based recovery to get fully in sync
				 */
	WriteMostly,		/* Avoid reading if at all possible */
	AutoDetected,		/* added by auto-detect */
	Blocked,		/* An error occurred but has not yet
				 * been acknowledged by the metadata
				 * handler, so don't allow writes
				 * until it is cleared */
	WriteErrorSeen,		/* A write error has been seen on this
				 * device
				 */
	FaultRecorded,		/* Intermediate state for clearing
				 * Blocked. The Fault is/will-be
				 * recorded in the metadata, but that
				 * metadata hasn't been stored safely
				 * on disk yet.
				 */
	BlockedBadBlocks,	/* A writer is blocked because they
				 * found an unacknowledged bad-block.
				 * This can safely be cleared at any
				 * time, and the writer will re-check.
				 * It may be set at any time, and at
				 * worst the writer will timeout and
				 * re-check. So setting it as
				 * accurately as possible is good, but
				 * not absolutely critical.
				 */
	WantReplacement,	/* This device is a candidate to be
				 * hot-replaced, either because it has
				 * reported some faults, or because
				 * of explicit request.
				 */
	Replacement,		/* This device is a replacement for
				 * a want_replacement device with same
				 * raid_disk number.
				 */
	Candidate,		/* For clustered environments only:
				 * This device is seen locally but not
				 * by the whole cluster
				 */
	Journal,		/* This device is used as journal for
				 * raid-5/6.
				 * Usually, this device should be faster
				 * than other devices in the array
				 */
};
#define BB_LEN_MASK	(0x00000000000001FFULL)
#define BB_OFFSET_MASK	(0x7FFFFFFFFFFFFE00ULL)
#define BB_ACK_MASK	(0x8000000000000000ULL)
#define BB_MAX_LEN	512
#define BB_OFFSET(x)	(((x) & BB_OFFSET_MASK) >> 9)
#define BB_LEN(x)	(((x) & BB_LEN_MASK) + 1)
#define BB_ACK(x)	(!!((x) & BB_ACK_MASK))
#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63))
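/*
 * Illustrative example (not part of the driver): a bad-block entry for
 * an extent starting at sector 4096, 8 sectors long and acknowledged,
 * round-trips through the accessors above as:
 *
 *	u64 e = BB_MAKE(4096, 8, 1);
 *	BB_OFFSET(e) == 4096, BB_LEN(e) == 8, BB_ACK(e) == 1
 */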
extern int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
			  sector_t *first_bad, int *bad_sectors);
static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
			      sector_t *first_bad, int *bad_sectors)
{
	if (unlikely(rdev->badblocks.count)) {
		int rv = md_is_badblock(&rdev->badblocks, rdev->data_offset + s,
					sectors,
					first_bad, bad_sectors);
		if (rv)
			*first_bad -= rdev->data_offset;
		return rv;
	}
	return 0;
}
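/*
 * Illustrative caller pattern (not part of the driver): before issuing
 * IO to 'rdev', a personality typically checks the target range and, if
 * a bad range overlaps, clips or redirects the request at 'first_bad':
 *
 *	sector_t first_bad;
 *	int bad_sectors;
 *
 *	if (is_badblock(rdev, sector, nr_sectors, &first_bad, &bad_sectors))
 *		...shorten or redirect the IO to avoid the bad range...
 */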
extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			      int is_new);
extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
				int is_new);
extern void md_ack_all_badblocks(struct badblocks *bb);
struct md_cluster_info;

struct mddev {
	void				*private;
	struct md_personality		*pers;
	dev_t				unit;
	int				md_minor;
	struct list_head		disks;
	unsigned long			flags;
#define MD_CHANGE_DEVS	0	/* Some device status has changed */
#define MD_CHANGE_CLEAN 1	/* transition to or from 'clean' */
#define MD_CHANGE_PENDING 2	/* switch from 'clean' to 'active' in progress */
#define MD_UPDATE_SB_FLAGS (1 | 2 | 4)	/* If these are set, md_update_sb needed */
#define MD_ARRAY_FIRST_USE 3	/* First use of array, needs initialization */
#define MD_STILL_CLOSED	4	/* If set, then array has not been opened since
				 * md_ioctl checked on it.
				 */
#define MD_JOURNAL_CLEAN 5	/* A raid with journal is already clean */
#define MD_HAS_JOURNAL	6	/* The raid array has journal feature set */
#define MD_RELOAD_SB	7	/* Reload the superblock because another node
				 * updated it.
				 */
	int				sysfs_active;	/* set when sysfs deletes
							 * are happening, so run/
							 * takeover/stop are not safe
							 */
	struct gendisk			*gendisk;

	struct kobject			kobj;
	int				hold_active;
#define	UNTIL_IOCTL	1
#define	UNTIL_STOP	2
	/* Superblock information */
	int				major_version,
					minor_version,
					patch_version;
	int				persistent;
	int				external;	/* metadata is
							 * managed externally */
	char				metadata_type[17]; /* externally set */
	time64_t			ctime, utime;
	sector_t			dev_sectors;	/* used size of
							 * component devices */
	sector_t			array_sectors;	/* exported array size */
	int				external_size;	/* size managed
							 * externally */
	/* If the last 'event' was simply a clean->dirty transition, and
	 * we didn't write it to the spares, then it is safe and simple
	 * to just decrement the event count on a dirty->clean transition.
	 * So we record that possibility here.
	 */
	int				can_decrease_events;
	/* If the array is being reshaped, we need to record the
	 * new shape and an indication of where we are up to.
	 * This is written to the superblock.
	 * If reshape_position is MaxSector, then no reshape is happening (yet).
	 */
	sector_t			reshape_position;
	int				delta_disks, new_level, new_layout;
	int				new_chunk_sectors;
	int				reshape_backwards;
	struct md_thread		*thread;	/* management thread */
	struct md_thread		*sync_thread;	/* doing resync or reconstruct */

	/* 'last_sync_action' is initialized to "none". It is set when a
	 * sync operation (i.e "data-check", "requested-resync", "resync",
	 * "recovery", or "reshape") is started. It holds this value even
	 * when the sync thread is "frozen" (interrupted) or "idle" (stopped
	 * or finished). It is overwritten when a new sync operation is begun.
	 */
	char				*last_sync_action;
	sector_t			curr_resync;	/* last block scheduled */
	/* As resync requests can complete out of order, we cannot easily track
	 * how much resync has been completed. So we occasionally pause until
	 * everything completes, then set curr_resync_completed to curr_resync.
	 * As such it may be well behind the real resync mark, but it is a value
	 * we are certain of.
	 */
	sector_t			curr_resync_completed;
	unsigned long			resync_mark;	/* a recent timestamp */
	sector_t			resync_mark_cnt;/* blocks written at resync_mark */
	sector_t			curr_mark_cnt;	/* blocks scheduled now */

	sector_t			resync_max_sectors; /* may be set by personality */

	atomic64_t			resync_mismatches; /* count of sectors where
							    * parity/replica mismatch found
							    */
	/* allow user-space to request suspension of IO to regions of the array */
	sector_t			suspend_lo;
	sector_t			suspend_hi;
	/* if zero, use the system-wide default */
	int				sync_speed_min;
	int				sync_speed_max;

	/* resync even though the same disks are shared among md-devices */
	int				parallel_resync;

	int				ok_start_degraded;
	/* recovery/resync flags
	 * NEEDED:   we might need to start a resync/recover
	 * RUNNING:  a thread is running, or about to be started
	 * SYNC:     actually doing a resync, not a recovery
	 * RECOVER:  doing recovery, or need to try it.
	 * INTR:     resync needs to be aborted for some reason
	 * DONE:     thread is done and is waiting to be reaped
	 * REQUEST:  user-space has requested a sync (used with SYNC)
	 * CHECK:    user-space request for check-only, no repair
	 * RESHAPE:  A reshape is happening
	 * ERROR:    sync-action interrupted because io-error
	 *
	 * If neither SYNC or RESHAPE are set, then it is a recovery.
	 */
#define	MD_RECOVERY_RUNNING	0
#define	MD_RECOVERY_SYNC	1
#define	MD_RECOVERY_RECOVER	2
#define	MD_RECOVERY_INTR	3
#define	MD_RECOVERY_DONE	4
#define	MD_RECOVERY_NEEDED	5
#define	MD_RECOVERY_REQUESTED	6
#define	MD_RECOVERY_CHECK	7
#define	MD_RECOVERY_RESHAPE	8
#define	MD_RECOVERY_FROZEN	9
#define	MD_RECOVERY_ERROR	10

	unsigned long			recovery;
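	/*
	 * Illustrative usage (not part of the driver): the bits above are
	 * tested and set atomically on ->recovery, e.g.
	 *
	 *	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
	 *	    !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
	 *	    !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
	 *		...a recovery, not a resync or reshape, is running...
	 */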
	/* If a RAID personality determines that recovery (of a particular
	 * device) will fail due to a read error on the source device, it
	 * takes a copy of this number and does not attempt recovery again
	 * until this number changes.
	 */
	int				recovery_disabled;

	int				in_sync;	/* know to not need resync */
	/* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
	 * that we are never stopping an array while it is open.
	 * 'reconfig_mutex' protects all other reconfiguration.
	 * These locks are separate due to conflicting interactions
	 * with bdev->bd_mutex.
	 * Lock ordering is:
	 *  reconfig_mutex -> bd_mutex : e.g. do_md_run -> revalidate_disk
	 *  bd_mutex -> open_mutex:  e.g. __blkdev_get -> md_open
	 */
	struct mutex			open_mutex;
	struct mutex			reconfig_mutex;
	atomic_t			active;		/* general refcount */
	atomic_t			openers;	/* number of active opens */

	int				changed;	/* True if we might need to
							 * reread partition info */
	int				degraded;	/* whether md should consider
							 * adding a spare
							 */
	atomic_t			recovery_active; /* blocks scheduled, but not written */
	wait_queue_head_t		recovery_wait;
	sector_t			recovery_cp;
	sector_t			resync_min;	/* user requested sync
							 * starts here */
	sector_t			resync_max;	/* resync should pause
							 * when it gets here */
	struct kernfs_node		*sysfs_state;	/* handle for 'array_state'
							 * file in sysfs.
							 */
	struct kernfs_node		*sysfs_action;	/* handle for 'sync_action' */

	struct work_struct del_work;	/* used for delayed sysfs removal */
	/* "lock" protects:
	 *   flush_bio transition from NULL to !NULL
	 *   rdev superblocks, events
	 *   clearing MD_CHANGE_*
	 *   in_sync - and related safemode and MD_CHANGE changes
	 *   pers (also protected by reconfig_mutex and pending IO).
	 *   clearing ->bitmap_info.file
	 *   changing ->resync_{min,max}
	 *   setting MD_RECOVERY_RUNNING (which interacts with resync_{min,max})
	 */
	spinlock_t			lock;
	wait_queue_head_t		sb_wait;	/* for waiting on superblock updates */
	atomic_t			pending_writes;	/* number of active superblock writes */

	unsigned int			safemode;	/* if set, update "clean" superblock
							 * when no writes pending.
							 */
	unsigned int			safemode_delay;
	struct timer_list		safemode_timer;
	atomic_t			writes_pending;
	struct request_queue		*queue;	/* for plugging ... */
	struct bitmap			*bitmap; /* the bitmap for the device */
	struct {
		struct file		*file;	/* the bitmap file */
		loff_t			offset;	/* offset from superblock of
						 * start of bitmap. May be
						 * negative, but not '0'
						 * For external metadata, offset
						 * from start of device.
						 */
		unsigned long		space;	/* space available at this offset */
		loff_t			default_offset;	/* this is the offset to use when
							 * hot-adding a bitmap. It should
							 * eventually be settable by sysfs.
							 */
		unsigned long		default_space;	/* space available at
							 * default offset */
		struct mutex		mutex;
		unsigned long		chunksize;
		unsigned long		daemon_sleep;	/* how many jiffies between updates? */
		unsigned long		max_write_behind; /* write-behind mode */
		int			external;
		int			nodes;	/* Maximum number of nodes in the cluster */
		char			cluster_name[64]; /* Name of the cluster */
	} bitmap_info;
	atomic_t			max_corr_read_errors; /* max read retries */
	struct list_head		all_mddevs;

	struct attribute_group		*to_remove;

	struct bio_set			*bio_set;
	/* Generic flush handling.
	 * The last to finish preflush schedules a worker to submit
	 * the rest of the request (without the REQ_FLUSH flag).
	 */
	struct bio			*flush_bio;
	atomic_t			flush_pending;
	struct work_struct		flush_work;
	struct work_struct		event_work;	/* used by dm to report failure event */
	void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
	struct md_cluster_info		*cluster_info;
	unsigned int			good_device_nr;	/* good device num within cluster raid */
};
static inline int __must_check mddev_lock(struct mddev *mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}
/* Sometimes we need to take the lock in a situation where
 * failure due to interrupts is not acceptable.
 */
static inline void mddev_lock_nointr(struct mddev *mddev)
{
	mutex_lock(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(struct mddev *mddev)
{
	return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(struct mddev *mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}
extern void mddev_unlock(struct mddev *mddev);
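/*
 * Illustrative usage (not part of the driver): reconfiguration paths
 * bracket their work with the interruptible lock and mddev_unlock():
 *
 *	if (mddev_lock(mddev))
 *		return -EINTR;
 *	...reconfigure the array...
 *	mddev_unlock(mddev);
 */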
static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
	atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
}
struct md_personality
{
	char *name;
	int level;
	struct list_head list;
	struct module *owner;
	void (*make_request)(struct mddev *mddev, struct bio *bio);
	int (*run)(struct mddev *mddev);
	void (*free)(struct mddev *mddev, void *priv);
	void (*status)(struct seq_file *seq, struct mddev *mddev);
	/* error_handler must set ->faulty and clear ->in_sync
	 * if appropriate, and should abort recovery if needed
	 */
	void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
	int (*spare_active) (struct mddev *mddev);
	sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped);
	int (*resize) (struct mddev *mddev, sector_t sectors);
	sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
	int (*check_reshape) (struct mddev *mddev);
	int (*start_reshape) (struct mddev *mddev);
	void (*finish_reshape) (struct mddev *mddev);
	/* quiesce moves between quiescence states
	 * 0 - fully active
	 * 1 - no new requests allowed
	 * others - reserved
	 */
	void (*quiesce) (struct mddev *mddev, int state);
	/* takeover is used to transition an array from one
	 * personality to another. The new personality must be able
	 * to handle the data in the current layout.
	 * e.g. 2drive raid1 -> 2drive raid5
	 *      ndrive raid5 -> degraded n+1drive raid6 with special layout
	 * If the takeover succeeds, a new 'private' structure is returned.
	 * This needs to be installed and then ->run used to activate the
	 * array.
	 */
	void *(*takeover) (struct mddev *mddev);
	/* congested implements bdi.congested_fn().
	 * Will not be called while array is 'suspended' */
	int (*congested)(struct mddev *mddev, int bits);
};
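/*
 * Illustrative sketch (not part of the driver): a minimal personality
 * fills in the mandatory hooks and registers itself from module init.
 * The 'example_*' names below are hypothetical.
 *
 *	static struct md_personality example_personality = {
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *		.make_request	= example_make_request,
 *		.run		= example_run,
 *		.free		= example_free,
 *		.status		= example_status,
 *		.error_handler	= example_error_handler,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_md_personality(&example_personality);
 *	}
 */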
struct md_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct mddev *, char *);
	ssize_t (*store)(struct mddev *, const char *, size_t);
};
extern struct attribute_group md_bitmap_group;
static inline struct kernfs_node *sysfs_get_dirent_safe(struct kernfs_node *sd, char *name)
{
	if (sd)
		return sysfs_get_dirent(sd, name);
	return sd;
}
static inline void sysfs_notify_dirent_safe(struct kernfs_node *sd)
{
	if (sd)
		sysfs_notify_dirent(sd);
}

static inline char * mdname (struct mddev * mddev)
{
	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}
static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	char nm[20];

	if (!test_bit(Replacement, &rdev->flags) &&
	    !test_bit(Journal, &rdev->flags) &&
	    mddev->kobj.sd) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
	} else
		return 0;
}

static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	char nm[20];

	if (!test_bit(Replacement, &rdev->flags) &&
	    !test_bit(Journal, &rdev->flags) &&
	    mddev->kobj.sd) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		sysfs_remove_link(&mddev->kobj, nm);
	}
}
/*
 * iterates through some rdev ringlist. It's safe to remove the
 * current 'rdev'. Don't touch 'tmp' though.
 */
#define rdev_for_each_list(rdev, tmp, head)				\
	list_for_each_entry_safe(rdev, tmp, head, same_set)

/*
 * iterates through the 'same array disks' ringlist
 */
#define rdev_for_each(rdev, mddev)					\
	list_for_each_entry(rdev, &((mddev)->disks), same_set)

#define rdev_for_each_safe(rdev, tmp, mddev)				\
	list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)

#define rdev_for_each_rcu(rdev, mddev)					\
	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
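/*
 * Illustrative usage (not part of the driver): walk every component
 * device of an array, e.g. to count in-sync members:
 *
 *	struct md_rdev *rdev;
 *	int in_sync_cnt = 0;
 *
 *	rdev_for_each(rdev, mddev)
 *		if (test_bit(In_sync, &rdev->flags))
 *			in_sync_cnt++;
 */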
struct md_thread {
	void			(*run) (struct md_thread *thread);
	struct mddev		*mddev;
	wait_queue_head_t	wqueue;
	unsigned long		flags;
	struct task_struct	*tsk;
	unsigned long		timeout;
	void			*private;
};

#define THREAD_WAKEUP  0
static inline void safe_put_page(struct page *p)
{
	if (p) put_page(p);
}
extern int register_md_personality(struct md_personality *p);
extern int unregister_md_personality(struct md_personality *p);
extern int register_md_cluster_operations(struct md_cluster_operations *ops,
		struct module *module);
extern int unregister_md_cluster_operations(void);
extern int md_setup_cluster(struct mddev *mddev, int nodes);
extern void md_cluster_stop(struct mddev *mddev);
extern struct md_thread *md_register_thread(
	void (*run)(struct md_thread *thread),
	struct mddev *mddev,
	const char *name);
extern void md_unregister_thread(struct md_thread **threadp);
extern void md_wakeup_thread(struct md_thread *thread);
extern void md_check_recovery(struct mddev *mddev);
extern void md_reap_sync_thread(struct mddev *mddev);
extern void md_write_start(struct mddev *mddev, struct bio *bi);
extern void md_write_end(struct mddev *mddev);
extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev);
extern int mddev_congested(struct mddev *mddev, int bits);
extern void md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
			   sector_t sector, int size, struct page *page);
extern void md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
			struct page *page, int rw, bool metadata_op);
extern void md_do_sync(struct md_thread *thread);
extern void md_new_event(struct mddev *mddev);
extern int md_allow_write(struct mddev *mddev);
extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(struct mddev *mddev);
extern int md_integrity_register(struct mddev *mddev);
extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void mddev_init(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
extern void md_stop(struct mddev *mddev);
extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
extern void md_rdev_clear(struct md_rdev *rdev);

extern void mddev_suspend(struct mddev *mddev);
extern void mddev_resume(struct mddev *mddev);
extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
				   struct mddev *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
				   struct mddev *mddev);

extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void md_kick_rdev_from_array(struct md_rdev *rdev);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
static inline int mddev_check_plugged(struct mddev *mddev)
{
	return !!blk_check_plugged(md_unplug, mddev,
				   sizeof(struct blk_plug_cb));
}
static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
	int faulty = test_bit(Faulty, &rdev->flags);
	if (atomic_dec_and_test(&rdev->nr_pending) && faulty) {
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
	}
}
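/*
 * Illustrative pairing (not part of the driver): each IO issued to an
 * rdev takes a reference that this helper drops on completion:
 *
 *	atomic_inc(&rdev->nr_pending);
 *	...submit IO to rdev...
 *	rdev_dec_pending(rdev, mddev);	(in the completion path)
 */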
extern struct md_cluster_operations *md_cluster_ops;
static inline int mddev_is_clustered(struct mddev *mddev)
{
	return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
}
#endif /* _MD_MD_H */