#include <linux/raid/xor.h>
#include <linux/dmaengine.h>
/*
 * Each stripe contains one buffer per device.  Each buffer can be in
 * one of a number of states stored in "flags".  Changes between
 * these states happen *almost* exclusively under the protection of the
 * STRIPE_ACTIVE flag.  Some very specific changes can happen in bi_end_io, and
 * these are not protected by STRIPE_ACTIVE.
 *
 * The flag bits that are used to represent these states are:
 *   R5_UPTODATE and R5_LOCKED
 *
 * State Empty == !UPTODATE, !LOCK
 *        We have no data, and there is no active request
 * State Want == !UPTODATE, LOCK
 *        A read request is being submitted for this block
 * State Dirty == UPTODATE, LOCK
 *        Some new data is in this buffer, and it is being written out
 * State Clean == UPTODATE, !LOCK
 *        We have valid data which is the same as on disc
 *
 * The possible state transitions are:
 *
 *  Empty -> Want   - on read or write to get old data for parity calc
 *  Empty -> Dirty  - on compute_parity to satisfy write/sync request
 *  Empty -> Clean  - on compute_block when computing a block for a failed drive
 *  Want  -> Empty  - on failed read
 *  Want  -> Clean  - on successful completion of read request
 *  Dirty -> Clean  - on successful completion of write request
 *  Dirty -> Clean  - on failed write
 *  Clean -> Dirty  - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW)
 *
 * The Want->Empty, Want->Clean and Dirty->Clean transitions
 * all happen in b_end_io at interrupt time.
 * Each sets the Uptodate bit before releasing the Lock bit.
 * This leaves one multi-stage transition:
 *    Want->Dirty->Clean
 * This is safe because thinking that a Clean buffer is actually dirty
 * will at worst delay some action, and the stripe will be scheduled
 * for attention after the transition is complete.
 *
 * There is one possibility that is not covered by these states.  That
 * is if one drive has failed and there is a spare being rebuilt.  We
 * can't distinguish between a clean block that has been generated
 * from parity calculations, and a clean block that has been
 * successfully written to the spare (or to parity when resyncing).
 * To distinguish these states we have a stripe bit STRIPE_INSYNC that
 * is set whenever a write is scheduled to the spare, or to the parity
 * disc if there is no spare.  A sync request clears this bit, and
 * when we find it set with no buffers locked, we know the sync is
 * complete.
 *
 * Buffers for the md device that arrive via make_request are attached
 * to the appropriate stripe in one of two lists linked on b_reqnext.
 * One list (bh_read) is for read requests, one (bh_write) for writes.
 * There should never be more than one buffer on the two lists
 * together, but we are not guaranteed of that so we allow for more.
 *
 * If a buffer is on the read list when the associated cache buffer is
 * Uptodate, the data is copied into the read buffer and its b_end_io
 * routine is called.  This may happen in the end_request routine only
 * if the buffer has just successfully been read.  end_request should
 * remove the buffers from the list and then set the Uptodate bit on
 * the buffer.  Other threads may do this only if they first check
 * that the Uptodate bit is set.  Once they have checked that they may
 * take buffers off the read queue.
 *
 * When a buffer on the write list is committed for write it is copied
 * into the cache buffer, which is then marked dirty, and moved onto a
 * third list, the written list (bh_written).  Once both the parity
 * block and the cached buffer are successfully written, any buffer on
 * a written list can be returned with b_end_io.
 *
 * The write list and read list both act as fifos.  The read list,
 * write list and written list are protected by the device_lock.
 * The device_lock is only for list manipulations and will only be
 * held for a very short time.  It can be claimed from interrupts.
 *
 * Stripes in the stripe cache can be on one of two lists (or on
 * neither).  The "inactive_list" contains stripes which are not
 * currently being used for any request.  They can freely be reused
 * for another stripe.  The "handle_list" contains stripes that need
 * to be handled in some way.  Both of these are fifo queues.  Each
 * stripe is also (potentially) linked to a hash bucket in the hash
 * table so that it can be found by sector number.  Stripes that are
 * not hashed must be on the inactive_list, and will normally be at
 * the front.  All stripes start life this way.
 *
 * The inactive_list, handle_list and hash bucket lists are all protected by the
 * device_lock.
 *  - stripes have a reference counter. If count==0, they are on a list.
 *  - If a stripe might need handling, STRIPE_HANDLE is set.
 *  - When refcount reaches zero, then if STRIPE_HANDLE it is put on
 *    handle_list else inactive_list
 *
 * This, combined with the fact that STRIPE_HANDLE is only ever
 * cleared while a stripe has a non-zero count, means that if the
 * refcount is 0 and STRIPE_HANDLE is set, then it is on the
 * handle_list, and if the refcount is 0 and STRIPE_HANDLE is not set, then
 * the stripe is on inactive_list.
 *
 * The possible transitions are:
 *  activate an unhashed/inactive stripe (get_active_stripe())
 *     lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev
 *  activate a hashed, possibly active stripe (get_active_stripe())
 *     lockdev check-hash if(!cnt++)unlink-stripe unlockdev
 *  attach a request to an active stripe (add_stripe_bh())
 *     lockdev attach-buffer unlockdev
 *  handle a stripe (handle_stripe())
 *     setSTRIPE_ACTIVE, clrSTRIPE_HANDLE ...
 *		(lockdev check-buffers unlockdev) ..
 *		change-state ..
 *		record io/ops needed clearSTRIPE_ACTIVE schedule io/ops
 *  release an active stripe (release_stripe())
 *     lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
 *
 * The refcount counts each thread that has activated the stripe,
 * plus raid5d if it is handling it, plus one for each active request
 * on a cached buffer, and plus one if the stripe is undergoing stripe
 * operations.
 *
 * The stripe operations are:
 * -copying data between the stripe cache and user application buffers
 * -computing blocks to save a disk access, or to recover a missing block
 * -updating the parity on a write operation (reconstruct write and
 *  read-modify-write)
 * -checking parity correctness
 * -running i/o to disk
 * These operations are carried out by raid5_run_ops which uses the async_tx
 * api to (optionally) offload operations to dedicated hardware engines.
 * When requesting an operation handle_stripe sets the pending bit for the
 * operation and increments the count.  raid5_run_ops is then run whenever
 * the count is non-zero.
 * There are some critical dependencies between the operations that prevent some
 * from being requested while another is in flight.
 * 1/ Parity check operations destroy the in-cache version of the parity block,
 *    so we prevent parity dependent operations like writes and compute_blocks
 *    from starting while a check is in progress.  Some dma engines can perform
 *    the check without damaging the parity block, in these cases the parity
 *    block is re-marked up to date (assuming the check was successful) and is
 *    not re-read from disk.
 * 2/ When a write operation is requested we immediately lock the affected
 *    blocks, and mark them as not up to date.  This causes new read requests
 *    to be held off, as well as parity checks and compute block operations.
 * 3/ Once a compute block operation has been requested handle_stripe treats
 *    that block as if it is up to date.  raid5_run_ops guarantees that any
 *    operation that is dependent on the compute block result is initiated after
 *    the compute block completes.
 */

/*
 * Operations state - intermediate states that are visible outside of
 *   STRIPE_ACTIVE.
 * In general _idle indicates nothing is running, _run indicates a data
 * processing operation is active, and _result means the data processing result
 * is stable and can be acted upon.  For simple operations like biofill and
 * compute that only have an _idle and _run state they are indicated with
 * sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN)
 */
/**
 * enum check_states - handles syncing / repairing a stripe
 * @check_state_idle - check operations are quiesced
 * @check_state_run - check operation is running
 * @check_state_result - set outside lock when check result is valid
 * @check_state_compute_run - check failed and we are repairing
 * @check_state_compute_result - set outside lock when compute result is valid
 */
enum check_states {
	check_state_idle = 0,
	check_state_run,		/* xor parity check */
	check_state_run_q,		/* q-parity check */
	check_state_run_pq,		/* pq dual parity check */
	check_state_check_result,
	check_state_compute_run,	/* parity repair */
	check_state_compute_result,
};

/**
 * enum reconstruct_states - handles writing or expanding a stripe
 */
enum reconstruct_states {
	reconstruct_state_idle = 0,
	reconstruct_state_prexor_drain_run,	/* prexor-write */
	reconstruct_state_drain_run,		/* write */
	reconstruct_state_run,			/* expand */
	reconstruct_state_prexor_drain_result,
	reconstruct_state_drain_result,
	reconstruct_state_result,
};

struct stripe_head {
	struct hlist_node	hash;
	struct list_head	lru;		/* inactive_list or handle_list */
	struct llist_node	release_list;
	struct r5conf		*raid_conf;
	short			generation;	/* increments with every
						 * reshape */
	sector_t		sector;		/* sector of this row */
	short			pd_idx;		/* parity disk index */
	short			qd_idx;		/* 'Q' disk index for raid6 */
	short			ddf_layout;	/* use DDF ordering to calculate Q */
	short			hash_lock_index;
	unsigned long		state;		/* state flags */
	atomic_t		count;		/* nr of active threads/requests */
	int			bm_seq;		/* sequence number for bitmap flushes */
	int			disks;		/* disks in stripe */
	int			overwrite_disks; /* total overwrite disks in stripe,
						  * this is only checked when stripe
						  * has STRIPE_BATCH_READY
						  */
	enum check_states	check_state;
	enum reconstruct_states	reconstruct_state;
	spinlock_t		stripe_lock;
	struct r5worker_group	*group;

	struct stripe_head	*batch_head;	/* protected by stripe lock */
	spinlock_t		batch_lock;	/* only header's lock is useful */
	struct list_head	batch_list;	/* protected by head's batch lock */

	union {
		struct r5l_io_unit	*log_io;
		struct ppl_io_unit	*ppl_io;
	};

	struct list_head	log_list;
	sector_t		log_start;	/* first meta block on the journal */
	struct list_head	r5c;		/* for r5c_cache->stripe_in_journal */

	struct page		*ppl_page;	/* partial parity of this stripe */
	/**
	 * struct stripe_operations
	 * @target - STRIPE_OP_COMPUTE_BLK target
	 * @target2 - 2nd compute target in the raid6 case
	 * @zero_sum_result - P and Q verification flags
	 * @request - async service request flags for raid_run_ops
	 */
	struct stripe_operations {
		int		     target, target2;
		enum sum_check_flags zero_sum_result;
	} ops;
	struct r5dev {
		/* rreq and rvec are used for the replacement device when
		 * writing data to both devices.
		 */
		struct bio	req, rreq;
		struct bio_vec	vec, rvec;
		struct page	*page, *orig_page;
		struct bio	*toread, *read, *towrite, *written;
		sector_t	sector;		/* sector of this page */
		unsigned long	flags;
	} dev[1]; /* allocated with extra space depending on RAID geometry */
};
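
/*
 * Illustrative sketch, not part of the original header: because dev[]
 * is declared with a single element, a stripe_head covering 'disks'
 * devices must be allocated with extra trailing space.  The helper
 * name below is hypothetical; the real allocation happens through a
 * kmem_cache sized along these lines.
 */
static inline size_t stripe_head_size(int disks)
{
	/* one r5dev is already inside struct stripe_head */
	return sizeof(struct stripe_head) + (disks - 1) * sizeof(struct r5dev);
}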

/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
 * for handle_stripe.
 */
struct stripe_head_state {
	/* 'syncing' means that we need to read all devices, either
	 * to check/correct parity, or to reconstruct a missing device.
	 * 'replacing' means we are replacing one or more drives and
	 * the source is valid at this point so we don't need to
	 * read all devices, just the replacement targets.
	 */
	int syncing, expanding, expanded, replacing;
	int locked, uptodate, to_read, to_write, failed, written;
	int to_fill, compute, req_compute, non_overwrite;
	int injournal, just_cached;
	int p_failed, q_failed;
	int dec_preread_active;
	unsigned long ops_request;

	struct bio_list return_bi;
	struct md_rdev *blocked_rdev;
	int handle_bad_blocks;
	int waiting_extra_page;
};

/* Flags for struct r5dev.flags */
enum r5dev_flags {
	R5_UPTODATE,	/* page contains current data */
	R5_LOCKED,	/* IO has been submitted on "req" */
	R5_DOUBLE_LOCKED,/* Cannot clear R5_LOCKED until 2 writes complete */
	R5_OVERWRITE,	/* towrite covers whole page */
/* and some that are internal to handle_stripe */
	R5_Insync,	/* rdev && rdev->in_sync at start */
	R5_Wantread,	/* want to schedule a read */
	R5_Wantwrite,
	R5_Overlap,	/* There is a pending overlapping request
			 * on this block */
	R5_ReadNoMerge,	/* prevent bio from merging in block-layer */
	R5_ReadError,	/* seen a read error here recently */
	R5_ReWrite,	/* have tried to over-write the read error */

	R5_Expanded,	/* This block now has post-expand data */
	R5_Wantcompute,	/* compute_block in progress treat as
			 * uptodate
			 */
	R5_Wantfill,	/* dev->toread contains a bio that needs
			 * filling
			 */
	R5_Wantdrain,	/* dev->towrite needs to be drained */
	R5_WantFUA,	/* Write should be FUA */
	R5_SyncIO,	/* The IO is sync */
	R5_WriteError,	/* got a write error - need to record it */
	R5_MadeGood,	/* A bad block has been fixed by writing to it */
	R5_ReadRepl,	/* Will/did read from replacement rather than orig */
	R5_MadeGoodRepl,/* A bad block on the replacement device has been
			 * fixed by writing to it */
	R5_NeedReplace,	/* This device has a replacement which is not
			 * up-to-date at this stripe. */
	R5_WantReplace,	/* We need to update the replacement, we have read
			 * data in, and now is a good time to write it out.
			 */
	R5_Discard,	/* Discard the stripe */
	R5_SkipCopy,	/* Don't copy data from bio to stripe cache */
	R5_InJournal,	/* data being written is in the journal device.
			 * if R5_InJournal is set for parity pd_idx, all the
			 * data and parity being written are in the journal
			 * device
			 */
	R5_OrigPageUPTDODATE,	/* with write back cache, we read old data into
				 * dev->orig_page for prexor. When this flag is
				 * set, orig_page contains latest data in the
				 * raid disk.
				 */
};
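
/*
 * Illustrative sketch, not part of the original header: the four buffer
 * states described at the top of this file are a pure function of the
 * R5_UPTODATE and R5_LOCKED bits.  The enum and helper below are
 * hypothetical names used only to make that mapping explicit.
 */
enum r5buf_state { R5BUF_EMPTY, R5BUF_WANT, R5BUF_DIRTY, R5BUF_CLEAN };

static inline enum r5buf_state r5buf_state_of(unsigned long flags)
{
	bool uptodate = test_bit(R5_UPTODATE, &flags);
	bool locked = test_bit(R5_LOCKED, &flags);

	if (!uptodate)
		return locked ? R5BUF_WANT : R5BUF_EMPTY;	/* read pending / idle */
	return locked ? R5BUF_DIRTY : R5BUF_CLEAN;		/* write pending / stable */
}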

/*
 * Stripe state
 */
enum {
	STRIPE_ACTIVE,
	STRIPE_HANDLE,
	STRIPE_SYNC_REQUESTED,
	STRIPE_SYNCING,
	STRIPE_INSYNC,
	STRIPE_REPLACED,
	STRIPE_PREREAD_ACTIVE,
	STRIPE_DELAYED,
	STRIPE_DEGRADED,
	STRIPE_BIT_DELAY,
	STRIPE_EXPANDING,
	STRIPE_EXPAND_SOURCE,
	STRIPE_EXPAND_READY,
	STRIPE_IO_STARTED,	/* do not count towards 'bypass_count' */
	STRIPE_FULL_WRITE,	/* all blocks are set to be overwritten */
	STRIPE_BIOFILL_RUN,
	STRIPE_COMPUTE_RUN,
	STRIPE_OPS_REQ_PENDING,
	STRIPE_ON_UNPLUG_LIST,
	STRIPE_DISCARD,
	STRIPE_ON_RELEASE_LIST,
	STRIPE_BATCH_READY,
	STRIPE_BATCH_ERR,
	STRIPE_BITMAP_PENDING,	/* Being added to bitmap, don't add
				 * to batch yet.
				 */
	STRIPE_LOG_TRAPPED,	/* trapped into log (see raid5-cache.c)
				 * this bit is used in two scenarios:
				 *
				 * 1. write-out phase
				 *  set in first entry of r5l_write_stripe
				 *  clear in second entry of r5l_write_stripe
				 *  used to bypass logic in handle_stripe
				 *
				 * 2. caching phase
				 *  set in r5c_try_caching_write()
				 *  clear when journal write is done
				 *  used to initiate r5c_cache_data()
				 *  also used to bypass logic in handle_stripe
				 */
	STRIPE_R5C_CACHING,	/* the stripe is in caching phase
				 * see more detail in the raid5-cache.c
				 */
	STRIPE_R5C_PARTIAL_STRIPE,	/* in r5c cache (to-be/being handled or
					 * in conf->r5c_partial_stripe_list)
					 */
	STRIPE_R5C_FULL_STRIPE,	/* in r5c cache (to-be/being handled or
				 * in conf->r5c_full_stripe_list)
				 */
	STRIPE_R5C_PREFLUSH,	/* need to flush journal device */
};

#define STRIPE_EXPAND_SYNC_FLAGS \
	((1 << STRIPE_EXPAND_SOURCE) |\
	(1 << STRIPE_EXPAND_READY) |\
	(1 << STRIPE_EXPANDING) |\
	(1 << STRIPE_SYNC_REQUESTED))
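
/*
 * Illustrative usage sketch with a hypothetical helper name: a mask
 * like STRIPE_EXPAND_SYNC_FLAGS is meant to be tested against
 * sh->state so that several flag bits are checked at once.
 */
static inline bool stripe_needs_expand_sync(struct stripe_head *sh)
{
	return (sh->state & STRIPE_EXPAND_SYNC_FLAGS) != 0;
}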

/*
 * Operation request flags
 */
enum {
	STRIPE_OP_BIOFILL,
	STRIPE_OP_COMPUTE_BLK,
	STRIPE_OP_PREXOR,
	STRIPE_OP_BIODRAIN,
	STRIPE_OP_RECONSTRUCT,
	STRIPE_OP_CHECK,
	STRIPE_OP_PARTIAL_PARITY,
};

/*
 * RAID parity calculation preferences
 */
enum {
	PARITY_DISABLE_RMW = 0,
	PARITY_ENABLE_RMW,
	PARITY_PREFER_RMW,
};

/*
 * Pages requested from set_syndrome_sources()
 */
enum {
	SYNDROME_SRC_ALL,
	SYNDROME_SRC_WANT_DRAIN,
	SYNDROME_SRC_WRITTEN,
};

/*
 * Plugging:
 *
 * To improve write throughput, we need to delay the handling of some
 * stripes until there has been a chance that several write requests
 * for the one stripe have all been collected.
 * In particular, any write request that would require pre-reading
 * is put on a "delayed" queue until there are no stripes currently
 * in a pre-read phase.  Further, if the "delayed" queue is empty when
 * a stripe is put on it then we "plug" the queue and do not process it
 * until an unplug call is made (the unplug_io_fn() is called).
 *
 * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
 * it to the count of prereading stripes.
 * When write is initiated, or the stripe refcnt == 0 (just in case), we
 * clear the PREREAD_ACTIVE flag and decrement the count.
 * Whenever the 'handle' queue is empty and the device is not plugged, we
 * move any stripes from delayed to handle and clear the DELAYED flag and set
 * PREREAD_ACTIVE.
 * In stripe_handle, if we find pre-reading is necessary, we do it if
 * PREREAD_ACTIVE is set, else we set DELAYED which will send it to the delayed queue.
 * HANDLE gets cleared if stripe_handle leaves nothing locked.
 */
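
/*
 * Illustrative sketch, not part of the original header: the preread
 * accounting described above.  The helper names are hypothetical and
 * the atomic_t parameter stands in for conf->preread_active_stripes;
 * the point is that the PREREAD_ACTIVE bit and the counter always
 * move together.
 */
static inline void stripe_preread_activate(struct stripe_head *sh,
					   atomic_t *preread_active_stripes)
{
	if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
		atomic_inc(preread_active_stripes);
}

static inline void stripe_preread_deactivate(struct stripe_head *sh,
					     atomic_t *preread_active_stripes)
{
	if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
		atomic_dec(preread_active_stripes);
}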

struct disk_info {
	struct md_rdev	*rdev, *replacement;
	struct page	*extra_page; /* extra page to use in prexor */
};

#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE >> 9)
#define IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)
#define MAX_STRIPE_BATCH	8
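
/*
 * Illustrative sketch: how NR_HASH and HASH_MASK are meant to be used
 * to locate the hash bucket for a stripe sector.  This mirrors the
 * stripe_hash() macro on the raid5.c side (stated as an assumption
 * here); the helper name is hypothetical.
 */
static inline struct hlist_head *r5_hash_bucket(struct hlist_head *stripe_hashtbl,
						sector_t sect)
{
	/* stripes are STRIPE_SECTORS apart, so index by stripe number */
	return &stripe_hashtbl[(sect >> STRIPE_SHIFT) & HASH_MASK];
}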

/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This function is used to determine the 'next' bio in the list, given the
 * sector of the current stripe+device
 */
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
{
	int sectors = bio_sectors(bio);

	if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
		return bio->bi_next;
	else
		return NULL;
}
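
/*
 * Illustrative usage sketch with a hypothetical helper name: walking
 * every bio attached to one stripe+device with r5_next_bio(), the
 * pattern the comment above describes.
 */
static inline int r5_count_bios(struct bio *head, sector_t dev_sector)
{
	struct bio *bio = head;
	int n = 0;

	while (bio && bio->bi_iter.bi_sector <
	       dev_sector + STRIPE_SECTORS) {
		n++;
		bio = r5_next_bio(bio, dev_sector);
	}
	return n;
}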

/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_processed_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;

	return (atomic_read(segments) >> 16) & 0xffff;
}

static inline int raid5_dec_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;

	return atomic_sub_return(1, segments) & 0xffff;
}

static inline void raid5_inc_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;

	atomic_inc(segments);
}

static inline void raid5_set_bi_processed_stripes(struct bio *bio,
	unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	int old, new;

	do {
		old = atomic_read(segments);
		new = (old & 0xffff) | (cnt << 16);
	} while (atomic_cmpxchg(segments, old, new) != old);
}

static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;

	atomic_set(segments, cnt);
}
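
/*
 * Illustrative sketch with a hypothetical helper name: decoding both
 * halves of the packed bi_phys_segments word in one read -- the active
 * stripe count lives in the low 16 bits and the processed count in the
 * high 16 bits, exactly as the helpers above maintain them.
 */
static inline void raid5_bi_read_counts(struct bio *bio,
					int *active, int *processed)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	int v = atomic_read(segments);

	*active = v & 0xffff;
	*processed = (v >> 16) & 0xffff;
}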

/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
 * This is because we sometimes take all the spinlocks
 * and creating that much locking depth can cause
 * problems.
 */
#define NR_STRIPE_HASH_LOCKS 8
#define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1)
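
/*
 * Illustrative sketch with a hypothetical helper name: deriving the
 * hash-lock index that a stripe sector maps to; raid5.c computes it
 * this way (stated here as an assumption about the .c side).  It is
 * the same stripe number used for hashing, reduced by the smaller
 * lock mask.
 */
static inline int r5_hash_lock_index(sector_t sect)
{
	return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
}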

struct r5worker {
	struct work_struct work;
	struct r5worker_group *group;
	struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
	bool working;
};

struct r5worker_group {
	struct list_head handle_list;
	struct list_head loprio_list;
	struct r5worker *workers;
	int stripes_cnt;
};

enum r5_cache_state {
	R5_INACTIVE_BLOCKED,	/* release of inactive stripes blocked,
				 * waiting for 25% to be free
				 */
	R5_ALLOC_MORE,		/* It might help to allocate another
				 * stripe.
				 */
	R5_DID_ALLOC,		/* A stripe was allocated, don't allocate
				 * more until at least one has been
				 * released.  This avoids flooding
				 * the cache.
				 */
	R5C_LOG_TIGHT,		/* log device space tight, need to
				 * prioritize stripes at last_checkpoint
				 */
	R5C_LOG_CRITICAL,	/* log device is running out of space,
				 * only process stripes that are already
				 * occupying the log
				 */
	R5C_EXTRA_PAGE_IN_USE,	/* a stripe is using disk_info.extra_page
				 * for prexor
				 */
};

#define PENDING_IO_MAX 512
#define PENDING_IO_ONE_FLUSH 128
struct r5pending_data {
	struct list_head sibling;
	sector_t sector; /* stripe sector */
	struct bio_list bios;
};

struct r5conf {
	struct hlist_head	*stripe_hashtbl;
	/* only protect corresponding hash list and inactive_list */
	spinlock_t		hash_locks[NR_STRIPE_HASH_LOCKS];
	int			level, algorithm, rmw_level;

	/* reshape_progress is the leading edge of a 'reshape'
	 * It has value MaxSector when no reshape is happening
	 * If delta_disks < 0, it is the last sector we started work on,
	 * else is it the next sector to work on.
	 */
	sector_t		reshape_progress;
	/* reshape_safe is the trailing edge of a reshape.  We know that
	 * before (or after) this address, all reshape has completed.
	 */
	sector_t		reshape_safe;
	int			previous_raid_disks;
	int			prev_chunk_sectors;
	short			generation;	/* increments with every reshape */
	seqcount_t		gen_lock;	/* lock against generation changes */
	unsigned long		reshape_checkpoint; /* Time we last updated
						     * metadata */
	long long		min_offset_diff; /* minimum difference between
						  * data_offset and
						  * new_data_offset across all
						  * devices.  May be negative,
						  * but is closest to zero.
						  */

	struct list_head	handle_list; /* stripes needing handling */
	struct list_head	loprio_list; /* low priority stripes */
	struct list_head	hold_list; /* preread ready stripes */
	struct list_head	delayed_list; /* stripes that have plugged requests */
	struct list_head	bitmap_list; /* stripes delaying awaiting bitmap update */
	struct bio		*retry_read_aligned; /* currently retrying aligned bios */
	struct bio		*retry_read_aligned_list; /* aligned bios retry list */
	atomic_t		preread_active_stripes; /* stripes with scheduled io */
	atomic_t		active_aligned_reads;
	atomic_t		pending_full_writes; /* full write backlog */
	int			bypass_count; /* bypassed prereads */
	int			bypass_threshold; /* preread nice */
	int			skip_copy; /* Don't copy data from bio to stripe cache */
	struct list_head	*last_hold; /* detect hold_list promotions */

	atomic_t		reshape_stripes; /* stripes with pending writes for reshape */
	/* unfortunately we need two cache names as we temporarily have
	 * two caches.
	 */
	int			active_name;
	char			cache_name[2][32];
	struct kmem_cache	*slab_cache; /* for allocating stripes */
	struct mutex		cache_size_mutex; /* Protect changes to cache size */

	int			seq_flush, seq_write;

	int			fullsync;  /* set to 1 if a full sync is needed,
					    * (fresh device added).
					    * Cleared when a sync completes.
					    */
	int			recovery_disabled;
	/* per cpu variables */
	struct raid5_percpu {
		struct page	*spare_page; /* Used when checking P/Q in raid6 */
		struct flex_array *scribble; /* space for constructing buffer
					      * lists and performing address
					      * conversions
					      */
	} __percpu *percpu;
	int			scribble_sectors;
	struct hlist_node	node;

	atomic_t		active_stripes;
	struct list_head	inactive_list[NR_STRIPE_HASH_LOCKS];

	atomic_t		r5c_cached_full_stripes;
	struct list_head	r5c_full_stripe_list;
	atomic_t		r5c_cached_partial_stripes;
	struct list_head	r5c_partial_stripe_list;
	atomic_t		r5c_flushing_full_stripes;
	atomic_t		r5c_flushing_partial_stripes;

	atomic_t		empty_inactive_list_nr;
	struct llist_head	released_stripes;
	wait_queue_head_t	wait_for_quiescent;
	wait_queue_head_t	wait_for_stripe;
	wait_queue_head_t	wait_for_overlap;
	unsigned long		cache_state;
	struct shrinker		shrinker;
	int			pool_size; /* number of disks in stripeheads in pool */
	spinlock_t		device_lock;
	struct disk_info	*disks;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct md_thread	*thread;
	struct list_head	temp_inactive_list[NR_STRIPE_HASH_LOCKS];
	struct r5worker_group	*worker_groups;
	int			worker_cnt_per_group;

	spinlock_t		pending_bios_lock;
	bool			batch_bio_dispatch;
	struct r5pending_data	*pending_data;
	struct list_head	free_list;
	struct list_head	pending_list;
	int			pending_data_cnt;
	struct r5pending_data	*next_pending_data;
};
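
/*
 * Illustrative sketch, not part of the original header: the release-side
 * invariant described in the comment at the top of this file.  When the
 * last reference is dropped, a stripe moves to handle_list if
 * STRIPE_HANDLE is set, else to its inactive_list.  This is a simplified
 * stand-in for release_stripe(); the real code also deals with batching,
 * the per-bucket hash locks and the released_stripes llist.
 */
static inline void r5_release_stripe_sketch(struct r5conf *conf,
					    struct stripe_head *sh)
{
	spin_lock(&conf->device_lock);
	if (atomic_dec_and_test(&sh->count)) {
		if (test_bit(STRIPE_HANDLE, &sh->state))
			list_add_tail(&sh->lru, &conf->handle_list);
		else
			list_add_tail(&sh->lru,
				      &conf->inactive_list[sh->hash_lock_index]);
	}
	spin_unlock(&conf->device_lock);
}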

/*
 * Our supported algorithms
 */
#define ALGORITHM_LEFT_ASYMMETRIC	0 /* Rotating Parity N with Data Restart */
#define ALGORITHM_RIGHT_ASYMMETRIC	1 /* Rotating Parity 0 with Data Restart */
#define ALGORITHM_LEFT_SYMMETRIC	2 /* Rotating Parity N with Data Continuation */
#define ALGORITHM_RIGHT_SYMMETRIC	3 /* Rotating Parity 0 with Data Continuation */

/* Define non-rotating (raid4) algorithms.  These allow
 * conversion of raid4 to raid5.
 */
#define ALGORITHM_PARITY_0		4 /* P or P,Q are initial devices */
#define ALGORITHM_PARITY_N		5 /* P or P,Q are final devices. */

/* DDF RAID6 layouts differ from md/raid6 layouts in two ways.
 * Firstly, the exact positioning of the parity block is slightly
 * different between the 'LEFT_*' modes of md and the "_N_*" modes
 * of DDF.
 * Secondly, the order of data blocks over which the Q syndrome is computed
 * is different.
 * Consequently we have different layouts for DDF/raid6 than md/raid6.
 * These layouts are from the DDFv1.2 spec.
 * Interestingly DDFv1.2-Errata-A does not specify N_CONTINUE but
 * leaves RLQ=3 as 'Vendor Specific'.
 */
#define ALGORITHM_ROTATING_ZERO_RESTART	8 /* DDF PRL=6 RLQ=1 */
#define ALGORITHM_ROTATING_N_RESTART	9 /* DDF PRL=6 RLQ=2 */
#define ALGORITHM_ROTATING_N_CONTINUE	10 /* DDF PRL=6 RLQ=3 */

/* For every RAID5 algorithm we define a RAID6 algorithm
 * with exactly the same layout for data and parity, and
 * with the Q block always on the last device (N-1).
 * This allows trivial conversion from RAID5 to RAID6.
 */
#define ALGORITHM_LEFT_ASYMMETRIC_6	16
#define ALGORITHM_RIGHT_ASYMMETRIC_6	17
#define ALGORITHM_LEFT_SYMMETRIC_6	18
#define ALGORITHM_RIGHT_SYMMETRIC_6	19
#define ALGORITHM_PARITY_0_6		20
#define ALGORITHM_PARITY_N_6		ALGORITHM_PARITY_N
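
/*
 * Illustrative sketch with a hypothetical helper name: the _6 layouts
 * are simply the RAID5 layouts shifted by 16, with ALGORITHM_PARITY_N
 * as the one exception (it keeps its RAID5 value), which is what makes
 * the RAID5 -> RAID6 conversion trivial.
 */
static inline int algorithm_raid6_for_raid5(int layout5)
{
	if (layout5 == ALGORITHM_PARITY_N)
		return ALGORITHM_PARITY_N_6;
	return layout5 + 16;
}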

static inline int algorithm_valid_raid5(int layout)
{
	return (layout >= 0) &&
		(layout <= 5);
}

static inline int algorithm_valid_raid6(int layout)
{
	return (layout >= 0 && layout <= 5) ||
		(layout >= 8 && layout <= 10) ||
		(layout >= 16 && layout <= 20);
}

static inline int algorithm_is_DDF(int layout)
{
	return layout >= 8 && layout <= 10;
}

extern void md_raid5_kick_device(struct r5conf *conf);
extern int raid5_set_cache_size(struct mddev *mddev, int size);
extern sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous);
extern void raid5_release_stripe(struct stripe_head *sh);
extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
				     int previous, int *dd_idx,
				     struct stripe_head *sh);
extern struct stripe_head *
raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
			int previous, int noblock, int noquiesce);
extern int raid5_calc_degraded(struct r5conf *conf);