#include <linux/raid/xor.h>
#include <linux/dmaengine.h>
/*
 * Each stripe contains one buffer per disc. Each buffer can be in
 * one of a number of states stored in "flags". Changes between
 * these states happen *almost* exclusively under a per-stripe
 * spinlock. Some very specific changes can happen in bi_end_io, and
 * these are not protected by the spin lock.
 *
 * The flag bits that are used to represent these states are:
 *   R5_UPTODATE and R5_LOCKED
 *
 * State Empty == !UPTODATE, !LOCK
 *        We have no data, and there is no active request
 * State Want == !UPTODATE, LOCK
 *        A read request is being submitted for this block
 * State Dirty == UPTODATE, LOCK
 *        Some new data is in this buffer, and it is being written out
 * State Clean == UPTODATE, !LOCK
 *        We have valid data which is the same as on disc
 *
 * The possible state transitions are:
 *
 *  Empty -> Want   - on read or write to get old data for parity calc
 *  Empty -> Dirty  - on compute_parity to satisfy write/sync request (RECONSTRUCT_WRITE)
 *  Empty -> Clean  - on compute_block when computing a block for a failed drive
 *  Want  -> Empty  - on failed read
 *  Want  -> Clean  - on successful completion of read request
 *  Dirty -> Clean  - on successful completion of write request
 *  Dirty -> Clean  - on failed write
 *  Clean -> Dirty  - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW)
 *
 * The Want->Empty, Want->Clean and Dirty->Clean transitions
 * all happen in b_end_io at interrupt time.
 * Each sets the Uptodate bit before releasing the Lock bit.
 * This leaves one multi-stage transition:
 *    Want->Dirty->Clean
 * This is safe because thinking that a Clean buffer is actually dirty
 * will at worst delay some action, and the stripe will be scheduled
 * for attention after the transition is complete.
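 */

/*
 * A minimal sketch (illustrative only, not part of the driver) of how
 * the four named states above map onto the R5_UPTODATE and R5_LOCKED
 * bits of a buffer's "flags" word. The literal bit numbers 0 and 1
 * match the R5_UPTODATE/R5_LOCKED definitions further down this file.
 */
static inline const char *r5_sketch_state_name(unsigned long flags)
{
	int uptodate = test_bit(0, &flags);	/* R5_UPTODATE, defined below */
	int locked = test_bit(1, &flags);	/* R5_LOCKED, defined below */

	if (!uptodate && !locked)
		return "Empty";		/* no data, no active request */
	if (!uptodate && locked)
		return "Want";		/* read request in flight */
	if (uptodate && locked)
		return "Dirty";		/* new data being written out */
	return "Clean";			/* data matches the disc */
}

/*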
 * There is one possibility that is not covered by these states. That
 * is if one drive has failed and there is a spare being rebuilt. We
 * can't distinguish between a clean block that has been generated
 * from parity calculations, and a clean block that has been
 * successfully written to the spare (or to parity when resyncing).
 * To distinguish these states we have a stripe bit STRIPE_INSYNC that
 * is set whenever a write is scheduled to the spare, or to the parity
 * disc if there is no spare. A sync request clears this bit, and
 * when we find it set with no buffers locked, we know the sync is
 * finished.
 *
 * Buffers for the md device that arrive via make_request are attached
 * to the appropriate stripe in one of two lists linked on b_reqnext.
 * One list (bh_read) is for read requests, one (bh_write) for writes.
 * There should never be more than one buffer on the two lists
 * together, but that is not guaranteed, so we allow for more.
 *
 * If a buffer is on the read list when the associated cache buffer is
 * Uptodate, the data is copied into the read buffer and its b_end_io
 * routine is called. This may happen in the end_request routine only
 * if the buffer has just successfully been read. end_request should
 * remove the buffers from the list and then set the Uptodate bit on
 * the buffer. Other threads may do this only if they first check
 * that the Uptodate bit is set. Once they have checked that, they may
 * take buffers off the read queue.
 *
 * When a buffer on the write list is committed for write, it is copied
 * into the cache buffer, which is then marked dirty and moved onto a
 * third list, the written list (bh_written). Once both the parity
 * block and the cached buffer are successfully written, any buffer on
 * a written list can be returned with b_end_io.
 *
 * The write list and read list both act as fifos. The read list is
 * protected by the device_lock. The write and written lists are
 * protected by the stripe lock. The device_lock, which can be
 * claimed while the stripe lock is held, is only for list
 * manipulations and will only be held for a very short time. It can
 * be claimed from interrupts.
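 */

/*
 * Illustrative lock-ordering sketch (hypothetical helper, not driver
 * code): per the rules above, device_lock nests inside the per-stripe
 * lock and can be claimed from interrupts, so interrupt-safe locking
 * is used for it here. sh_lock and dev_lock stand in for the two
 * locks described above.
 */
static inline void r5_sketch_lock_order(spinlock_t *sh_lock,
					spinlock_t *dev_lock)
{
	unsigned long irqflags;

	spin_lock(sh_lock);			/* per-stripe lock first */
	spin_lock_irqsave(dev_lock, irqflags);	/* device_lock nests inside */
	/* ... brief list manipulation only ... */
	spin_unlock_irqrestore(dev_lock, irqflags);
	spin_unlock(sh_lock);
}

/*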
 * Stripes in the stripe cache can be on one of two lists (or on
 * neither). The "inactive_list" contains stripes which are not
 * currently being used for any request. They can freely be reused
 * for another stripe. The "handle_list" contains stripes that need
 * to be handled in some way. Both of these are fifo queues. Each
 * stripe is also (potentially) linked to a hash bucket in the hash
 * table so that it can be found by sector number. Stripes that are
 * not hashed must be on the inactive_list, and will normally be at
 * the front. All stripes start life this way.
 *
 * The inactive_list, handle_list and hash bucket lists are all protected by the
 * device_lock.
 *  - stripes on the inactive_list never have their stripe_lock held.
 *  - stripes have a reference counter. If count==0, they are on a list.
 *  - If a stripe might need handling, STRIPE_HANDLE is set.
 *  - When the refcount reaches zero, then if STRIPE_HANDLE it is put on
 *    handle_list, else inactive_list.
 *
 * This, combined with the fact that STRIPE_HANDLE is only ever
 * cleared while a stripe has a non-zero count, means that if the
 * refcount is 0 and STRIPE_HANDLE is set, then the stripe is on the
 * handle_list, and if the refcount is 0 and STRIPE_HANDLE is not set,
 * then the stripe is on the inactive_list.
 *
 * The possible transitions are:
 *  activate an unhashed/inactive stripe (get_active_stripe())
 *     lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev
 *  activate a hashed, possibly active stripe (get_active_stripe())
 *     lockdev check-hash if(!cnt++)unlink-stripe unlockdev
 *  attach a request to an active stripe (add_stripe_bh())
 *     lockdev attach-buffer unlockdev
 *  handle a stripe (handle_stripe())
 *     lockstripe clrSTRIPE_HANDLE ...
 *		(lockdev check-buffers unlockdev) ..
 *		change-state ..
 *		record io/ops needed unlockstripe schedule io/ops
 *  release an active stripe (release_stripe())
 *     lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
 *
 * The refcount counts each thread that has activated the stripe,
 * plus raid5d if it is handling it, plus one for each active request
 * on a cached buffer, and plus one if the stripe is undergoing stripe
 * operations.
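 */

/*
 * Illustrative sketch (not the driver's actual release_stripe()) of
 * the refcount/STRIPE_HANDLE invariant above: when the last reference
 * is dropped, STRIPE_HANDLE decides which list the stripe lands on.
 * The caller is assumed to hold device_lock; the parameters stand in
 * for fields of struct stripe_head, which is defined further down.
 */
static inline void r5_sketch_release(atomic_t *count, int handle_wanted,
				     struct list_head *lru,
				     struct list_head *handle_list,
				     struct list_head *inactive_list)
{
	if (atomic_dec_and_test(count)) {
		if (handle_wanted)	/* i.e. STRIPE_HANDLE is set */
			list_add_tail(lru, handle_list);
		else
			list_add_tail(lru, inactive_list);
	}
}

/*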
 * Stripe operations are performed outside the stripe lock.
 * The stripe operations are:
 * -copying data between the stripe cache and user application buffers
 * -computing blocks to save a disk access, or to recover a missing block
 * -updating the parity on a write operation (reconstruct write and
 *  read-modify-write)
 * -checking parity correctness
 * -running i/o to disk
 * These operations are carried out by raid5_run_ops, which uses the async_tx
 * api to (optionally) offload operations to dedicated hardware engines.
 * When requesting an operation, handle_stripe sets the pending bit for the
 * operation and increments the count. raid5_run_ops is then run whenever
 * the count is non-zero.
 * There are some critical dependencies between the operations that prevent some
 * from being requested while another is in flight:
 * 1/ Parity check operations destroy the in-cache version of the parity block,
 *    so we prevent parity-dependent operations like writes and compute_blocks
 *    from starting while a check is in progress. Some dma engines can perform
 *    the check without damaging the parity block; in these cases the parity
 *    block is re-marked up to date (assuming the check was successful) and is
 *    not re-read from disk.
 * 2/ When a write operation is requested, we immediately lock the affected
 *    blocks and mark them as not up to date. This causes new read requests
 *    to be held off, as well as parity checks and compute block operations.
 * 3/ Once a compute block operation has been requested, handle_stripe treats
 *    that block as if it is up to date. raid5_run_ops guarantees that any
 *    operation that is dependent on the compute block result is initiated after
 *    the compute block completes.
 */
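
/*
 * Illustrative sketch of rule 2/ above (not the driver's code): as
 * soon as a write is requested against a block, it is marked locked,
 * stale and wanting a drain, so reads, parity checks and compute
 * operations hold off. The literal bit numbers match the R5_LOCKED,
 * R5_UPTODATE and R5_Wantdrain definitions further down this file.
 */
static inline void r5_sketch_lock_for_write(unsigned long *flags)
{
	set_bit(1, flags);	/* R5_LOCKED: io will be submitted */
	clear_bit(0, flags);	/* !R5_UPTODATE: cache copy is stale */
	set_bit(13, flags);	/* R5_Wantdrain: towrite must be drained */
}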

/*
 * Operations state - intermediate states that are visible outside of sh->lock
 * In general _idle indicates nothing is running, _run indicates a data
 * processing operation is active, and _result means the data processing result
 * is stable and can be acted upon. Simple operations like biofill and
 * compute, which only have an _idle and a _run state, are indicated with
 * sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN).
 */
/**
 * enum check_states - handles syncing / repairing a stripe
 * @check_state_idle - check operations are quiesced
 * @check_state_run - check operation is running
 * @check_state_result - set outside lock when check result is valid
 * @check_state_compute_run - check failed and we are repairing
 * @check_state_compute_result - set outside lock when compute result is valid
 */
enum check_states {
	check_state_idle = 0,
	check_state_run,		/* xor parity check */
	check_state_run_q,		/* q-parity check */
	check_state_run_pq,		/* pq dual parity check */
	check_state_check_result,
	check_state_compute_run,	/* parity repair */
	check_state_compute_result,
};
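
/*
 * Illustrative sketch (hypothetical helper, not the driver's logic)
 * of the raid5 progression through the states above: a check runs,
 * its result is examined, and a failed check is repaired by a parity
 * compute. The real transitions are driven from handle_stripe and
 * the async_tx completion callbacks.
 */
static inline enum check_states r5_sketch_check_next(enum check_states cur,
						     int parity_ok)
{
	switch (cur) {
	case check_state_idle:
		return check_state_run;		/* kick off the xor check */
	case check_state_run:
		return check_state_check_result; /* set by the completion */
	case check_state_check_result:
		return parity_ok ? check_state_idle	/* stripe in sync */
				 : check_state_compute_run; /* repair parity */
	case check_state_compute_run:
		return check_state_compute_result;
	default:
		return check_state_idle;	/* result consumed; quiesce */
	}
}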

/**
 * enum reconstruct_states - handles writing or expanding a stripe
 */
enum reconstruct_states {
	reconstruct_state_idle = 0,
	reconstruct_state_prexor_drain_run,	/* prexor-write */
	reconstruct_state_drain_run,		/* write */
	reconstruct_state_run,			/* expand */
	reconstruct_state_prexor_drain_result,
	reconstruct_state_drain_result,
	reconstruct_state_result,
};

struct stripe_head {
	struct hlist_node	hash;
	struct list_head	lru;		/* inactive_list or handle_list */
	struct raid5_private_data *raid_conf;
	short			generation;	/* increments with every
						 * reshape */
	sector_t		sector;		/* sector of this row */
	short			pd_idx;		/* parity disk index */
	short			qd_idx;		/* 'Q' disk index for raid6 */
	short			ddf_layout;	/* use DDF ordering to calculate Q */
	unsigned long		state;		/* state flags */
	atomic_t		count;		/* nr of active thread/requests */
	int			bm_seq;		/* sequence number for bitmap flushes */
	int			disks;		/* disks in stripe */
	enum check_states	check_state;
	enum reconstruct_states reconstruct_state;
	/**
	 * struct stripe_operations
	 * @target - STRIPE_OP_COMPUTE_BLK target
	 * @target2 - 2nd compute target in the raid6 case
	 * @zero_sum_result - P and Q verification flags
	 * @request - async service request flags for raid_run_ops
	 */
	struct stripe_operations {
		int		     target, target2;
		enum sum_check_flags zero_sum_result;
#ifdef CONFIG_MULTICORE_RAID456
		unsigned long	     request;
		wait_queue_head_t    wait_for_ops;
#endif
	} ops;
	struct r5dev {
		struct bio	req;
		struct bio_vec	vec;
		struct page	*page;
		struct bio	*toread, *read, *towrite, *written;
		sector_t	sector;		/* sector of this page */
		unsigned long	flags;
	} dev[1]; /* allocated with extra space depending on RAID geometry */
};
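
/*
 * Illustrative only: dev[] is declared with a single element and each
 * stripe_head is over-allocated so that dev[] spans every device in
 * the array (the pre-C99 flexible-array idiom). A hypothetical sizing
 * helper:
 */
static inline size_t r5_sketch_stripe_head_size(int devs)
{
	return sizeof(struct stripe_head) +
		(devs - 1) * sizeof(struct r5dev);
}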

/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
 * for handle_stripe. It is only valid under spin_lock(sh->lock);
 */
struct stripe_head_state {
	int syncing, expanding, expanded;
	int locked, uptodate, to_read, to_write, failed, written;
	int to_fill, compute, req_compute, non_overwrite;
	unsigned long ops_request;
};

/* r6_state - extra state data only relevant to r6 */
struct r6_state {
	int p_failed, q_failed, failed_num[2];
};

/* Flags, stored in dev->flags */
#define	R5_UPTODATE	0	/* page contains current data */
#define	R5_LOCKED	1	/* IO has been submitted on "req" */
#define	R5_OVERWRITE	2	/* towrite covers whole page */
/* and some that are internal to handle_stripe */
#define	R5_Insync	3	/* rdev && rdev->in_sync at start */
#define	R5_Wantread	4	/* want to schedule a read */
#define	R5_Wantwrite	5	/* want to schedule a write */
#define	R5_Overlap	7	/* There is a pending overlapping request
				 * on this block */
#define	R5_ReadError	8	/* seen a read error here recently */
#define	R5_ReWrite	9	/* have tried to over-write the readerror */
#define	R5_Expanded	10	/* This block now has post-expand data */
#define	R5_Wantcompute	11	/* compute_block in progress; treat as
				 * uptodate */
#define	R5_Wantfill	12	/* dev->toread contains a bio that needs
				 * filling */
#define	R5_Wantdrain	13	/* dev->towrite needs to be drained */
#define	R5_WantFUA	14	/* Write should be FUA */
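
/*
 * Illustrative only: a minimal per-device tally in the style of
 * handle_stripe, showing how the R5_* bits above are read from
 * dev->flags with the generic bitops (a sketch, not the driver's
 * actual accounting loop).
 */
static inline void r5_sketch_tally_devs(struct stripe_head *sh,
					struct stripe_head_state *s)
{
	int i;

	for (i = 0; i < sh->disks; i++) {
		struct r5dev *dev = &sh->dev[i];

		if (test_bit(R5_LOCKED, &dev->flags))
			s->locked++;		/* io in flight */
		if (test_bit(R5_UPTODATE, &dev->flags))
			s->uptodate++;		/* cache holds valid data */
		if (dev->toread)
			s->to_read++;		/* reader waiting for data */
		if (dev->towrite) {
			s->to_write++;		/* writer waiting to drain */
			if (!test_bit(R5_OVERWRITE, &dev->flags))
				s->non_overwrite++; /* partial-page write */
		}
	}
}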

/*
 * Write method
 */
#define RECONSTRUCT_WRITE	1
#define READ_MODIFY_WRITE	2
/* not a write method, but a compute_parity mode */
#define	CHECK_PARITY		3
/* Additional compute_parity mode -- updates the parity w/o LOCKING */
#define UPDATE_PARITY		4

/*
 * Stripe state
 */
#define STRIPE_HANDLE		2
#define	STRIPE_SYNCING		3
#define	STRIPE_INSYNC		4
#define	STRIPE_PREREAD_ACTIVE	5
#define	STRIPE_DELAYED		6
#define	STRIPE_DEGRADED		7
#define	STRIPE_BIT_DELAY	8
#define	STRIPE_EXPANDING	9
#define	STRIPE_EXPAND_SOURCE	10
#define	STRIPE_EXPAND_READY	11
#define	STRIPE_IO_STARTED	12	/* do not count towards 'bypass_count' */
#define	STRIPE_FULL_WRITE	13	/* all blocks are set to be overwritten */
#define	STRIPE_BIOFILL_RUN	14
#define	STRIPE_COMPUTE_RUN	15
#define	STRIPE_OPS_REQ_PENDING	16

/*
 * Operation request flags
 */
#define STRIPE_OP_BIOFILL	0
#define STRIPE_OP_COMPUTE_BLK	1
#define STRIPE_OP_PREXOR	2
#define STRIPE_OP_BIODRAIN	3
#define STRIPE_OP_RECONSTRUCT	4
#define STRIPE_OP_CHECK		5
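
/*
 * Illustrative sketch (not the driver's scheduling code): handle_stripe
 * communicates work to raid5_run_ops by setting the STRIPE_OP_* bits in
 * an operations request mask such as stripe_head_state.ops_request.
 */
static inline void r5_sketch_request_write_ops(struct stripe_head_state *s)
{
	set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);	 /* copy towrite data
							  * into the cache */
	set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); /* then recompute
							  * parity */
}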

/*
 * Plugging:
 *
 * To improve write throughput, we need to delay the handling of some
 * stripes until there has been a chance that several write requests
 * for the one stripe have all been collected.
 * In particular, any write request that would require pre-reading
 * is put on a "delayed" queue until there are no stripes currently
 * in a pre-read phase. Further, if the "delayed" queue is empty when
 * a stripe is put on it then we "plug" the queue and do not process it
 * until an unplug call is made (the unplug_io_fn() is called).
 *
 * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
 * it to the count of prereading stripes.
 * When write is initiated, or the stripe refcnt == 0 (just in case), we
 * clear the PREREAD_ACTIVE flag and decrement the count.
 * Whenever the 'handle' queue is empty and the device is not plugged, we
 * move any stripes from delayed to handle, clear the DELAYED flag and set
 * PREREAD_ACTIVE.
 * In handle_stripe, if we find pre-reading is necessary, we do it if
 * PREREAD_ACTIVE is set, else we set DELAYED, which will send the stripe
 * to the delayed queue. HANDLE gets cleared if handle_stripe leaves
 * nothing locked. A sketch of the delayed-to-handle promotion follows
 * this comment.
 */
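
/*
 * Illustrative sketch (not the driver's code) of the promotion step
 * described above: on unplug, every delayed stripe is moved to the
 * handle list in one splice. The real code also clears STRIPE_DELAYED
 * and sets STRIPE_PREREAD_ACTIVE per stripe, under device_lock.
 */
static inline void r5_sketch_activate_delayed(struct list_head *delayed_list,
					      struct list_head *handle_list)
{
	list_splice_tail_init(delayed_list, handle_list);
}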

struct raid5_private_data {
	struct hlist_head	*stripe_hashtbl;
	struct disk_info	*spare;
	int			level, algorithm;

	/* reshape_progress is the leading edge of a 'reshape'.
	 * It has value MaxSector when no reshape is happening.
	 * If delta_disks < 0, it is the last sector we started work on,
	 * else it is the next sector to work on.
	 */
	sector_t		reshape_progress;
	/* reshape_safe is the trailing edge of a reshape. We know that
	 * before (or after) this address, all reshape has completed.
	 */
	sector_t		reshape_safe;
	int			previous_raid_disks;
	int			prev_chunk_sectors;
	short			generation;	/* increments with every reshape */
	unsigned long		reshape_checkpoint;	/* Time we last updated
							 * metadata */

	struct list_head	handle_list;	/* stripes needing handling */
	struct list_head	hold_list;	/* preread ready stripes */
	struct list_head	delayed_list;	/* stripes that have plugged requests */
	struct list_head	bitmap_list;	/* stripes delaying awaiting bitmap update */
	struct bio		*retry_read_aligned;	/* currently retrying aligned bios */
	struct bio		*retry_read_aligned_list;	/* aligned bios retry list */
	atomic_t		preread_active_stripes;	/* stripes with scheduled io */
	atomic_t		active_aligned_reads;
	atomic_t		pending_full_writes;	/* full write backlog */
	int			bypass_count;	/* bypassed prereads */
	int			bypass_threshold;	/* preread nice */
	struct list_head	*last_hold;	/* detect hold_list promotions */

	atomic_t		reshape_stripes;	/* stripes with pending writes for reshape */
	/* unfortunately we need two cache names as we temporarily have
	 * two caches.
	 */
	char			cache_name[2][32];
	struct kmem_cache	*slab_cache;	/* for allocating stripes */

	int			seq_flush, seq_write;

	int			fullsync;	/* set to 1 if a full sync is needed
						 * (fresh device added).
						 * Cleared when a sync completes.
						 */
	struct plug_handle	plug;

	/* per cpu variables */
	struct raid5_percpu {
		struct page	*spare_page;	/* Used when checking P/Q in raid6 */
		void		*scribble;	/* space for constructing buffer
						 * lists and performing address
						 * conversions
						 */
	} __percpu *percpu;
	size_t			scribble_len;	/* size of scribble region; must be
						 * associated with conf to handle
						 * cpu hotplug while reshaping
						 */
#ifdef CONFIG_HOTPLUG_CPU
	struct notifier_block	cpu_notify;
#endif

	/*
	 * Free stripes pool
	 */
	atomic_t		active_stripes;
	struct list_head	inactive_list;
	wait_queue_head_t	wait_for_stripe;
	wait_queue_head_t	wait_for_overlap;
	int			inactive_blocked;	/* release of inactive stripes blocked,
							 * waiting for 25% to be free
							 */
	int			pool_size;	/* number of disks in stripeheads in pool */
	spinlock_t		device_lock;
	struct disk_info	*disks;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct mdk_thread_s	*thread;
};

typedef struct raid5_private_data raid5_conf_t;

/*
 * Our supported algorithms
 */
#define ALGORITHM_LEFT_ASYMMETRIC	0 /* Rotating Parity N with Data Restart */
#define ALGORITHM_RIGHT_ASYMMETRIC	1 /* Rotating Parity 0 with Data Restart */
#define ALGORITHM_LEFT_SYMMETRIC	2 /* Rotating Parity N with Data Continuation */
#define ALGORITHM_RIGHT_SYMMETRIC	3 /* Rotating Parity 0 with Data Continuation */

/* Define non-rotating (raid4) algorithms. These allow
 * conversion of raid4 to raid5.
 */
#define ALGORITHM_PARITY_0	4 /* P or P,Q are initial devices */
#define ALGORITHM_PARITY_N	5 /* P or P,Q are final devices. */

/* DDF RAID6 layouts differ from md/raid6 layouts in two ways.
 * Firstly, the exact positioning of the parity block is slightly
 * different between the 'LEFT_*' modes of md and the "_N_*" modes
 * of DDF.
 * Secondly, the order of data blocks over which the Q syndrome is
 * computed is different.
 * Consequently we have different layouts for DDF/raid6 than md/raid6.
 * These layouts are from the DDFv1.2 spec.
 * Interestingly DDFv1.2-Errata-A does not specify N_CONTINUE but
 * leaves RLQ=3 as 'Vendor Specific'.
 */

#define ALGORITHM_ROTATING_ZERO_RESTART	8 /* DDF PRL=6 RLQ=1 */
#define ALGORITHM_ROTATING_N_RESTART	9 /* DDF PRL=6 RLQ=2 */
#define ALGORITHM_ROTATING_N_CONTINUE	10 /* DDF PRL=6 RLQ=3 */

/* For every RAID5 algorithm we define a RAID6 algorithm
 * with exactly the same layout for data and parity, and
 * with the Q block always on the last device (N-1).
 * This allows trivial conversion from RAID5 to RAID6.
 */
#define ALGORITHM_LEFT_ASYMMETRIC_6	16
#define ALGORITHM_RIGHT_ASYMMETRIC_6	17
#define ALGORITHM_LEFT_SYMMETRIC_6	18
#define ALGORITHM_RIGHT_SYMMETRIC_6	19
#define ALGORITHM_PARITY_0_6		20
#define ALGORITHM_PARITY_N_6		ALGORITHM_PARITY_N
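
/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * mapping a RAID5 layout to its RAID6 twin per the comment above.
 * Every _6 variant is the RAID5 value plus 16, except PARITY_N,
 * whose twin is itself.
 */
static inline int r5_sketch_raid6_twin(int layout5)
{
	if (layout5 == ALGORITHM_PARITY_N)
		return ALGORITHM_PARITY_N_6;	/* 5 -> 5 */
	return layout5 + 16;			/* e.g. 0 -> 16, 4 -> 20 */
}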

static inline int algorithm_valid_raid5(int layout)
{
	return (layout >= 0) &&
		(layout <= 5);
}
static inline int algorithm_valid_raid6(int layout)
{
	return (layout >= 0 && layout <= 5)
		||
		(layout >= 8 && layout <= 10)
		||
		(layout >= 16 && layout <= 20);
}

static inline int algorithm_is_DDF(int layout)
{
	return layout >= 8 && layout <= 10;
}

extern int md_raid5_congested(mddev_t *mddev, int bits);
extern void md_raid5_kick_device(raid5_conf_t *conf);
extern int raid5_set_cache_size(mddev_t *mddev, int size);