/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2016 Gvozden Nešković. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/fm/fs/zfs.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_raidz_impl.h>

/*
 * Virtual device vector for RAID-Z.
 *
 * This vdev supports single, double, and triple parity. For single parity,
 * we use a simple XOR of all the data columns. For double or triple parity,
 * we use a special case of Reed-Solomon coding. This extends the
 * technique described in "The mathematics of RAID-6" by H. Peter Anvin by
 * drawing on the system described in "A Tutorial on Reed-Solomon Coding for
 * Fault-Tolerance in RAID-like Systems" by James S. Plank on which the
 * former is also based. The latter is designed to provide higher performance
 * for writes.
 *
 * Note that the Plank paper claimed to support arbitrary N+M, but was then
 * amended six years later identifying a critical flaw that invalidates its
 * claims. Nevertheless, the technique can be adapted to work for up to
 * triple parity. For additional parity, the amendment "Note: Correction to
 * the 1997 Tutorial on Reed-Solomon Coding" by James S. Plank and Ying Ding
 * is viable, but the additional complexity means that write performance will
 * suffer.
 *
 * All of the methods above operate on a Galois field with 2^N elements. In
 * our case we choose N=8 for GF(2^8) so that all elements can be expressed
 * with a single byte. Briefly, the operations on the field are defined as
 * follows:
 *
 *	o addition (+) is represented by a bitwise XOR
 *	o subtraction (-) is therefore identical to addition: A + B = A - B
 *	o multiplication of A by 2 is defined by the following bitwise
 *	  expression:
 *
 *		(A * 2)_7 = A_6
 *		(A * 2)_6 = A_5
 *		(A * 2)_5 = A_4
 *		(A * 2)_4 = A_3 + A_7
 *		(A * 2)_3 = A_2 + A_7
 *		(A * 2)_2 = A_1 + A_7
 *		(A * 2)_1 = A_0
 *		(A * 2)_0 = A_7
 *
 * In C, multiplying by 2 is therefore ((a << 1) ^ ((a & 0x80) ? 0x1d : 0)).
 * As an aside, this multiplication is derived from the error correcting
 * primitive polynomial x^8 + x^4 + x^3 + x^2 + 1.
 *
 * Observe that any number in the field (except for 0) can be expressed as a
 * power of 2 -- a generator for the field. We store a table of the powers of
 * 2 and logs base 2 for quick lookups, and exploit the fact that A * B can
 * be rewritten as 2^(log_2(A) + log_2(B)) (where '+' is normal addition rather
 * than field addition). The inverse of a field element A (A^-1) is therefore
 * A^(255 - 1) = A^254.
 *
 * The up-to-three parity columns, P, Q, R over several data columns,
 * D_0, ... D_n-1, can be expressed by field operations:
 *
 *	P = D_0 + D_1 + ... + D_n-2 + D_n-1
 *	Q = 2^n-1 * D_0 + 2^n-2 * D_1 + ... + 2^1 * D_n-2 + 2^0 * D_n-1
 *	  = ((...((D_0) * 2 + D_1) * 2 + ...) * 2 + D_n-2) * 2 + D_n-1
 *	R = 4^n-1 * D_0 + 4^n-2 * D_1 + ... + 4^1 * D_n-2 + 4^0 * D_n-1
 *	  = ((...((D_0) * 4 + D_1) * 4 + ...) * 4 + D_n-2) * 4 + D_n-1
 *
 * We chose 1, 2, and 4 as our generators because 1 corresponds to the trivial
 * XOR operation, and 2 and 4 can be computed quickly and generate linearly-
 * independent coefficients. (There are no additional coefficients that have
 * this property which is why the uncorrected Plank method breaks down.)
 *
 * See the reconstruction code below for how P, Q and R can be used
 * individually or in concert to recover missing data columns.
 */

#define	VDEV_RAIDZ_P		0
#define	VDEV_RAIDZ_Q		1
#define	VDEV_RAIDZ_R		2

#define	VDEV_RAIDZ_MUL_2(x)	(((x) << 1) ^ (((x) & 0x80) ? 0x1d : 0))
#define	VDEV_RAIDZ_MUL_4(x)	(VDEV_RAIDZ_MUL_2(VDEV_RAIDZ_MUL_2(x)))
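
/*
 * Illustrative sanity check of the multiply-by-2 macro (results as they
 * land in a uint8_t, i.e. truncated to 8 bits):
 *
 *	VDEV_RAIDZ_MUL_2(0x01) == 0x02	(no overflow, plain shift)
 *	VDEV_RAIDZ_MUL_2(0x80) == 0x1d	(top bit set, so the shifted value
 *					 is reduced by the primitive
 *					 polynomial: 0x00 ^ 0x1d)
 */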

/*
 * We provide a mechanism to perform the field multiplication operation on a
 * 64-bit value all at once rather than a byte at a time. This works by
 * creating a mask from the top bit in each byte and using that to
 * conditionally apply the XOR of 0x1d.
 */
#define	VDEV_RAIDZ_64MUL_2(x, mask) \
{ \
	(mask) = (x) & 0x8080808080808080ULL; \
	(mask) = ((mask) << 1) - ((mask) >> 7); \
	(x) = (((x) << 1) & 0xfefefefefefefefeULL) ^ \
	    ((mask) & 0x1d1d1d1d1d1d1d1dULL); \
}

#define	VDEV_RAIDZ_64MUL_4(x, mask) \
{ \
	VDEV_RAIDZ_64MUL_2((x), mask); \
	VDEV_RAIDZ_64MUL_2((x), mask); \
}
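
/*
 * Worked example (illustrative) of the mask trick for a 64-bit word whose
 * low two byte lanes hold { 0x80, 0x01 }, i.e. x == 0x8001:
 *
 *	mask = x & 0x8080808080808080		-> 0x8000
 *	mask = (mask << 1) - (mask >> 7)	-> 0x10000 - 0x100 == 0xff00
 *	x = ((x << 1) & 0xfe..fe) ^ (mask & 0x1d..1d)
 *	  == 0x0002 ^ 0x1d00 == 0x1d02
 *
 * Every lane whose top bit was set (here the 0x80 lane) is reduced by 0x1d
 * after the shift, while the 0x01 lane is simply doubled to 0x02 -- the
 * same results VDEV_RAIDZ_MUL_2() produces one byte at a time.
 */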

void
vdev_raidz_map_free(raidz_map_t *rm)
{
	int c;
	size_t size;

	for (c = 0; c < rm->rm_firstdatacol; c++) {
		abd_free(rm->rm_col[c].rc_abd);

		if (rm->rm_col[c].rc_gdata != NULL)
			zio_buf_free(rm->rm_col[c].rc_gdata,
			    rm->rm_col[c].rc_size);
	}

	size = 0;
	for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
		abd_put(rm->rm_col[c].rc_abd);
		size += rm->rm_col[c].rc_size;
	}

	if (rm->rm_abd_copy != NULL)
		abd_free(rm->rm_abd_copy);

	kmem_free(rm, offsetof(raidz_map_t, rm_col[rm->rm_scols]));
}

static void
vdev_raidz_map_free_vsd(zio_t *zio)
{
	raidz_map_t *rm = zio->io_vsd;

	ASSERT0(rm->rm_freed);
	rm->rm_freed = 1;

	if (rm->rm_reports == 0)
		vdev_raidz_map_free(rm);
}

/*ARGSUSED*/
static void
vdev_raidz_cksum_free(void *arg, size_t ignored)
{
	raidz_map_t *rm = arg;

	ASSERT3U(rm->rm_reports, >, 0);

	if (--rm->rm_reports == 0 && rm->rm_freed != 0)
		vdev_raidz_map_free(rm);
}

static void
vdev_raidz_cksum_finish(zio_cksum_report_t *zcr, const void *good_data)
{
	raidz_map_t *rm = zcr->zcr_cbdata;
	size_t c = zcr->zcr_cbinfo;
	size_t x;

	const char *good = NULL;
	char *bad;

	if (good_data == NULL) {
		zfs_ereport_finish_checksum(zcr, NULL, NULL, B_FALSE);
		return;
	}

	if (c < rm->rm_firstdatacol) {
		/*
		 * The first time through, calculate the parity blocks for
		 * the good data (this relies on the fact that the good
		 * data never changes for a given logical ZIO)
		 */
		if (rm->rm_col[0].rc_gdata == NULL) {
			abd_t *bad_parity[VDEV_RAIDZ_MAXPARITY];
			char *buf;
			int offset;

			/*
			 * Set up the rm_col[]s to generate the parity for
			 * good_data, first saving the parity bufs and
			 * replacing them with buffers to hold the result.
			 */
			for (x = 0; x < rm->rm_firstdatacol; x++) {
				bad_parity[x] = rm->rm_col[x].rc_abd;
				rm->rm_col[x].rc_gdata =
				    zio_buf_alloc(rm->rm_col[x].rc_size);
				rm->rm_col[x].rc_abd =
				    abd_get_from_buf(rm->rm_col[x].rc_gdata,
				    rm->rm_col[x].rc_size);
			}

			/* fill in the data columns from good_data */
			buf = (char *)good_data;
			for (; x < rm->rm_cols; x++) {
				abd_put(rm->rm_col[x].rc_abd);
				rm->rm_col[x].rc_abd = abd_get_from_buf(buf,
				    rm->rm_col[x].rc_size);
				buf += rm->rm_col[x].rc_size;
			}

			/*
			 * Construct the parity from the good data.
			 */
			vdev_raidz_generate_parity(rm);

			/* restore everything back to its original state */
			for (x = 0; x < rm->rm_firstdatacol; x++) {
				abd_put(rm->rm_col[x].rc_abd);
				rm->rm_col[x].rc_abd = bad_parity[x];
			}

			offset = 0;
			for (x = rm->rm_firstdatacol; x < rm->rm_cols; x++) {
				abd_put(rm->rm_col[x].rc_abd);
				rm->rm_col[x].rc_abd = abd_get_offset(
				    rm->rm_abd_copy, offset);
				offset += rm->rm_col[x].rc_size;
			}
		}

		ASSERT3P(rm->rm_col[c].rc_gdata, !=, NULL);
		good = rm->rm_col[c].rc_gdata;
	} else {
		/* adjust good_data to point at the start of our column */
		good = good_data;

		for (x = rm->rm_firstdatacol; x < c; x++)
			good += rm->rm_col[x].rc_size;
	}

	bad = abd_borrow_buf_copy(rm->rm_col[c].rc_abd, rm->rm_col[c].rc_size);
	/* we drop the ereport if it ends up that the data was good */
	zfs_ereport_finish_checksum(zcr, good, bad, B_TRUE);
	abd_return_buf(rm->rm_col[c].rc_abd, bad, rm->rm_col[c].rc_size);
}

/*
 * Invoked indirectly by zfs_ereport_start_checksum(), called
 * below when our read operation fails completely. The main point
 * is to keep a copy of everything we read from disk, so that at
 * vdev_raidz_cksum_finish() time we can compare it with the good data.
 */
static void
vdev_raidz_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *arg)
{
	size_t c = (size_t)(uintptr_t)arg;
	size_t offset;

	raidz_map_t *rm = zio->io_vsd;
	size_t size;

	/* set up the report and bump the refcount */
	zcr->zcr_cbdata = rm;
	zcr->zcr_cbinfo = c;
	zcr->zcr_finish = vdev_raidz_cksum_finish;
	zcr->zcr_free = vdev_raidz_cksum_free;

	rm->rm_reports++;
	ASSERT3U(rm->rm_reports, >, 0);

	if (rm->rm_abd_copy != NULL)
		return;

	/*
	 * It's the first time we're called for this raidz_map_t, so we need
	 * to copy the data aside; there's no guarantee that our zio's buffer
	 * won't be re-used for something else.
	 *
	 * Our parity data is already in separate buffers, so there's no need
	 * to copy them.
	 */

	size = 0;
	for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++)
		size += rm->rm_col[c].rc_size;

	rm->rm_abd_copy =
	    abd_alloc_sametype(rm->rm_col[rm->rm_firstdatacol].rc_abd, size);

	for (offset = 0, c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
		raidz_col_t *col = &rm->rm_col[c];
		abd_t *tmp = abd_get_offset(rm->rm_abd_copy, offset);

		abd_copy(tmp, col->rc_abd, col->rc_size);
		abd_put(col->rc_abd);
		col->rc_abd = tmp;

		offset += col->rc_size;
	}
	ASSERT3U(offset, ==, size);
}

static const zio_vsd_ops_t vdev_raidz_vsd_ops = {
	vdev_raidz_map_free_vsd,
	vdev_raidz_cksum_report
};

/*
 * Divides the IO evenly across all child vdevs; usually, dcols is
 * the number of children in the target vdev.
 *
 * Avoid inlining the function to keep vdev_raidz_io_start(), which
 * is this function's only caller, as small as possible on the stack.
 */
noinline raidz_map_t *
vdev_raidz_map_alloc(zio_t *zio, uint64_t unit_shift, uint64_t dcols,
    uint64_t nparity)
{
	raidz_map_t *rm;
	/* The starting RAIDZ (parent) vdev sector of the block. */
	uint64_t b = zio->io_offset >> unit_shift;
	/* The zio's size in units of the vdev's minimum sector size. */
	uint64_t s = zio->io_size >> unit_shift;
	/* The first column for this stripe. */
	uint64_t f = b % dcols;
	/* The starting byte offset on each child vdev. */
	uint64_t o = (b / dcols) << unit_shift;
	uint64_t q, r, c, bc, col, acols, scols, coff, devidx, asize, tot;
	uint64_t off = 0;

	/*
	 * "Quotient": The number of data sectors for this stripe on all but
	 * the "big column" child vdevs that also contain "remainder" data.
	 */
	q = s / (dcols - nparity);

	/*
	 * "Remainder": The number of partial stripe data sectors in this I/O.
	 * This will add a sector to some, but not all, child vdevs.
	 */
	r = s - q * (dcols - nparity);

	/* The number of "big columns" - those which contain remainder data. */
	bc = (r == 0 ? 0 : r + nparity);

	/*
	 * The total number of data and parity sectors associated with
	 * this I/O.
	 */
	tot = s + nparity * (q + (r == 0 ? 0 : 1));

	/*
	 * acols: The columns that will be accessed.
	 * scols: The columns that will be accessed or skipped.
	 */
	if (q == 0) {
		/* Our I/O request doesn't span all child vdevs. */
		acols = bc;
		scols = MIN(dcols, roundup(bc, nparity + 1));
	} else {
		acols = dcols;
		scols = dcols;
	}

	ASSERT3U(acols, <=, scols);

	rm = kmem_alloc(offsetof(raidz_map_t, rm_col[scols]), KM_SLEEP);

	rm->rm_cols = acols;
	rm->rm_scols = scols;
	rm->rm_bigcols = bc;
	rm->rm_skipstart = bc;
	rm->rm_missingdata = 0;
	rm->rm_missingparity = 0;
	rm->rm_firstdatacol = nparity;
	rm->rm_abd_copy = NULL;
	rm->rm_reports = 0;
	rm->rm_freed = 0;
	rm->rm_ecksuminjected = 0;

	asize = 0;

	for (c = 0; c < scols; c++) {
		col = f + c;
		coff = o;
		if (col >= dcols) {
			col -= dcols;
			coff += 1ULL << unit_shift;
		}
		rm->rm_col[c].rc_devidx = col;
		rm->rm_col[c].rc_offset = coff;
		rm->rm_col[c].rc_abd = NULL;
		rm->rm_col[c].rc_gdata = NULL;
		rm->rm_col[c].rc_error = 0;
		rm->rm_col[c].rc_tried = 0;
		rm->rm_col[c].rc_skipped = 0;

		if (c >= acols)
			rm->rm_col[c].rc_size = 0;
		else if (c < bc)
			rm->rm_col[c].rc_size = (q + 1) << unit_shift;
		else
			rm->rm_col[c].rc_size = q << unit_shift;

		asize += rm->rm_col[c].rc_size;
	}

	ASSERT3U(asize, ==, tot << unit_shift);
	rm->rm_asize = roundup(asize, (nparity + 1) << unit_shift);
	rm->rm_nskip = roundup(tot, nparity + 1) - tot;
	ASSERT3U(rm->rm_asize - asize, ==, rm->rm_nskip << unit_shift);
	ASSERT3U(rm->rm_nskip, <=, nparity);

	for (c = 0; c < rm->rm_firstdatacol; c++)
		rm->rm_col[c].rc_abd =
		    abd_alloc_linear(rm->rm_col[c].rc_size, B_TRUE);

	rm->rm_col[c].rc_abd = abd_get_offset(zio->io_abd, 0);
	off = rm->rm_col[c].rc_size;

	for (c = c + 1; c < acols; c++) {
		rm->rm_col[c].rc_abd = abd_get_offset(zio->io_abd, off);
		off += rm->rm_col[c].rc_size;
	}

	/*
	 * If all data stored spans all columns, there's a danger that parity
	 * will always be on the same device and, since parity isn't read
	 * during normal operation, that that device's I/O bandwidth won't be
	 * used effectively. We therefore switch the parity every 1MB.
	 *
	 * ... at least that was, ostensibly, the theory. As a practical
	 * matter unless we juggle the parity between all devices evenly, we
	 * won't see any benefit. Further, occasional writes that aren't a
	 * multiple of the LCM of the number of children and the minimum
	 * stripe width are sufficient to avoid pessimal behavior.
	 * Unfortunately, this decision created an implicit on-disk format
	 * requirement that we need to support for all eternity, but only
	 * for single-parity RAID-Z.
	 *
	 * If we intend to skip a sector in the zeroth column for padding
	 * we must make sure to note this swap. We will never intend to
	 * skip the first column since at least one data and one parity
	 * column must appear in each row.
	 */
	ASSERT(rm->rm_cols >= 2);
	ASSERT(rm->rm_col[0].rc_size == rm->rm_col[1].rc_size);

	if (rm->rm_firstdatacol == 1 && (zio->io_offset & (1ULL << 20))) {
		devidx = rm->rm_col[0].rc_devidx;
		o = rm->rm_col[0].rc_offset;
		rm->rm_col[0].rc_devidx = rm->rm_col[1].rc_devidx;
		rm->rm_col[0].rc_offset = rm->rm_col[1].rc_offset;
		rm->rm_col[1].rc_devidx = devidx;
		rm->rm_col[1].rc_offset = o;

		if (rm->rm_skipstart == 0)
			rm->rm_skipstart = 1;
	}

	zio->io_vsd = rm;
	zio->io_vsd_ops = &vdev_raidz_vsd_ops;

	/* init RAIDZ parity ops */
	rm->rm_ops = vdev_raidz_math_get_ops();

	return (rm);
}
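
/*
 * Worked example of the geometry above (illustrative numbers): a 1.5K
 * write to a 5-wide raidz1 with 512-byte sectors has s = 3, so
 * q = 3 / (5 - 1) = 0 and r = 3, giving bc = r + nparity = 4 "big
 * columns" of one sector each (one parity plus three data) and
 * acols = 4. tot = 3 + 1 = 4 sectors, and since roundup(4, 2) == 4 no
 * skip sectors are needed. Had s been 2, tot would be 3 and rm_nskip
 * would be 1, padding rm_asize out to a multiple of (nparity + 1)
 * sectors.
 */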

struct pqr_struct {
	uint64_t *p;
	uint64_t *q;
	uint64_t *r;
};

static int
vdev_raidz_p_func(void *buf, size_t size, void *private)
{
	struct pqr_struct *pqr = private;
	const uint64_t *src = buf;
	int i, cnt = size / sizeof (src[0]);

	ASSERT(pqr->p && !pqr->q && !pqr->r);

	for (i = 0; i < cnt; i++, src++, pqr->p++)
		*pqr->p ^= *src;

	return (0);
}

static int
vdev_raidz_pq_func(void *buf, size_t size, void *private)
{
	struct pqr_struct *pqr = private;
	const uint64_t *src = buf;
	uint64_t mask;
	int i, cnt = size / sizeof (src[0]);

	ASSERT(pqr->p && pqr->q && !pqr->r);

	for (i = 0; i < cnt; i++, src++, pqr->p++, pqr->q++) {
		*pqr->p ^= *src;
		VDEV_RAIDZ_64MUL_2(*pqr->q, mask);
		*pqr->q ^= *src;
	}

	return (0);
}

static int
vdev_raidz_pqr_func(void *buf, size_t size, void *private)
{
	struct pqr_struct *pqr = private;
	const uint64_t *src = buf;
	uint64_t mask;
	int i, cnt = size / sizeof (src[0]);

	ASSERT(pqr->p && pqr->q && pqr->r);

	for (i = 0; i < cnt; i++, src++, pqr->p++, pqr->q++, pqr->r++) {
		*pqr->p ^= *src;
		VDEV_RAIDZ_64MUL_2(*pqr->q, mask);
		*pqr->q ^= *src;
		VDEV_RAIDZ_64MUL_4(*pqr->r, mask);
		*pqr->r ^= *src;
	}

	return (0);
}
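
/*
 * The iterators above implement the Horner-style evaluation from the block
 * comment at the top of the file: as each data column is visited, every
 * 64-bit word of Q (or R) is first multiplied by 2 (or 4) and the new
 * column is then XORed in, computing
 * Q = ((...(D_0 * 2 + D_1) * 2 + ...) * 2 + D_n-1) one column at a time.
 */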

static void
vdev_raidz_generate_parity_p(raidz_map_t *rm)
{
	uint64_t *p;
	int c;
	abd_t *src;

	for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
		src = rm->rm_col[c].rc_abd;
		p = abd_to_buf(rm->rm_col[VDEV_RAIDZ_P].rc_abd);

		if (c == rm->rm_firstdatacol) {
			abd_copy_to_buf(p, src, rm->rm_col[c].rc_size);
		} else {
			struct pqr_struct pqr = { p, NULL, NULL };
			(void) abd_iterate_func(src, 0, rm->rm_col[c].rc_size,
			    vdev_raidz_p_func, &pqr);
		}
	}
}

static void
vdev_raidz_generate_parity_pq(raidz_map_t *rm)
{
	uint64_t *p, *q, pcnt, ccnt, mask, i;
	int c;
	abd_t *src;

	pcnt = rm->rm_col[VDEV_RAIDZ_P].rc_size / sizeof (p[0]);
	ASSERT(rm->rm_col[VDEV_RAIDZ_P].rc_size ==
	    rm->rm_col[VDEV_RAIDZ_Q].rc_size);

	for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
		src = rm->rm_col[c].rc_abd;
		p = abd_to_buf(rm->rm_col[VDEV_RAIDZ_P].rc_abd);
		q = abd_to_buf(rm->rm_col[VDEV_RAIDZ_Q].rc_abd);

		ccnt = rm->rm_col[c].rc_size / sizeof (p[0]);

		if (c == rm->rm_firstdatacol) {
			abd_copy_to_buf(p, src, rm->rm_col[c].rc_size);
			(void) memcpy(q, p, rm->rm_col[c].rc_size);
		} else {
			struct pqr_struct pqr = { p, q, NULL };
			(void) abd_iterate_func(src, 0, rm->rm_col[c].rc_size,
			    vdev_raidz_pq_func, &pqr);
		}

		if (c == rm->rm_firstdatacol) {
			for (i = ccnt; i < pcnt; i++) {
				p[i] = 0;
				q[i] = 0;
			}
		} else {
			/*
			 * Treat short columns as though they are full of 0s.
			 * Note that there's therefore nothing needed for P.
			 */
			for (i = ccnt; i < pcnt; i++) {
				VDEV_RAIDZ_64MUL_2(q[i], mask);
			}
		}
	}
}

static void
vdev_raidz_generate_parity_pqr(raidz_map_t *rm)
{
	uint64_t *p, *q, *r, pcnt, ccnt, mask, i;
	int c;
	abd_t *src;

	pcnt = rm->rm_col[VDEV_RAIDZ_P].rc_size / sizeof (p[0]);
	ASSERT(rm->rm_col[VDEV_RAIDZ_P].rc_size ==
	    rm->rm_col[VDEV_RAIDZ_Q].rc_size);
	ASSERT(rm->rm_col[VDEV_RAIDZ_P].rc_size ==
	    rm->rm_col[VDEV_RAIDZ_R].rc_size);

	for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
		src = rm->rm_col[c].rc_abd;
		p = abd_to_buf(rm->rm_col[VDEV_RAIDZ_P].rc_abd);
		q = abd_to_buf(rm->rm_col[VDEV_RAIDZ_Q].rc_abd);
		r = abd_to_buf(rm->rm_col[VDEV_RAIDZ_R].rc_abd);

		ccnt = rm->rm_col[c].rc_size / sizeof (p[0]);

		if (c == rm->rm_firstdatacol) {
			abd_copy_to_buf(p, src, rm->rm_col[c].rc_size);
			(void) memcpy(q, p, rm->rm_col[c].rc_size);
			(void) memcpy(r, p, rm->rm_col[c].rc_size);
		} else {
			struct pqr_struct pqr = { p, q, r };
			(void) abd_iterate_func(src, 0, rm->rm_col[c].rc_size,
			    vdev_raidz_pqr_func, &pqr);
		}

		if (c == rm->rm_firstdatacol) {
			for (i = ccnt; i < pcnt; i++) {
				p[i] = 0;
				q[i] = 0;
				r[i] = 0;
			}
		} else {
			/*
			 * Treat short columns as though they are full of 0s.
			 * Note that there's therefore nothing needed for P.
			 */
			for (i = ccnt; i < pcnt; i++) {
				VDEV_RAIDZ_64MUL_2(q[i], mask);
				VDEV_RAIDZ_64MUL_4(r[i], mask);
			}
		}
	}
}

/*
 * Generate RAID parity in the first virtual columns according to the number of
 * parity columns available.
 */
void
vdev_raidz_generate_parity(raidz_map_t *rm)
{
	/* Generate using the new math implementation */
	if (vdev_raidz_math_generate(rm) != RAIDZ_ORIGINAL_IMPL)
		return;

	switch (rm->rm_firstdatacol) {
	case 1:
		vdev_raidz_generate_parity_p(rm);
		break;
	case 2:
		vdev_raidz_generate_parity_pq(rm);
		break;
	case 3:
		vdev_raidz_generate_parity_pqr(rm);
		break;
	default:
		cmn_err(CE_PANIC, "invalid RAID-Z configuration");
	}
}

/* ARGSUSED */
static int
vdev_raidz_reconst_p_func(void *dbuf, void *sbuf, size_t size, void *private)
{
	uint64_t *dst = dbuf;
	uint64_t *src = sbuf;
	int cnt = size / sizeof (src[0]);
	int i;

	for (i = 0; i < cnt; i++) {
		dst[i] ^= src[i];
	}

	return (0);
}

/* ARGSUSED */
static int
vdev_raidz_reconst_q_pre_func(void *dbuf, void *sbuf, size_t size,
    void *private)
{
	uint64_t *dst = dbuf;
	uint64_t *src = sbuf;
	uint64_t mask;
	int cnt = size / sizeof (dst[0]);
	int i;

	for (i = 0; i < cnt; i++, dst++, src++) {
		VDEV_RAIDZ_64MUL_2(*dst, mask);
		*dst ^= *src;
	}

	return (0);
}

/* ARGSUSED */
static int
vdev_raidz_reconst_q_pre_tail_func(void *buf, size_t size, void *private)
{
	uint64_t *dst = buf;
	uint64_t mask;
	int cnt = size / sizeof (dst[0]);
	int i;

	for (i = 0; i < cnt; i++, dst++) {
		/* same operation as vdev_raidz_reconst_q_pre_func() on dst */
		VDEV_RAIDZ_64MUL_2(*dst, mask);
	}

	return (0);
}

struct reconst_q_struct {
	uint64_t *q;
	int exp;
};

static int
vdev_raidz_reconst_q_post_func(void *buf, size_t size, void *private)
{
	struct reconst_q_struct *rq = private;
	uint64_t *dst = buf;
	int cnt = size / sizeof (dst[0]);
	int i;

	for (i = 0; i < cnt; i++, dst++, rq->q++) {
		int j;
		uint8_t *b;

		*dst ^= *rq->q;
		for (j = 0, b = (uint8_t *)dst; j < 8; j++, b++) {
			*b = vdev_raidz_exp2(*b, rq->exp);
		}
	}

	return (0);
}

struct reconst_pq_struct {
	uint8_t *p;
	uint8_t *q;
	uint8_t *pxy;
	uint8_t *qxy;
	int aexp;
	int bexp;
};

static int
vdev_raidz_reconst_pq_func(void *xbuf, void *ybuf, size_t size, void *private)
{
	struct reconst_pq_struct *rpq = private;
	uint8_t *xd = xbuf;
	uint8_t *yd = ybuf;
	int i;

	for (i = 0; i < size;
	    i++, rpq->p++, rpq->q++, rpq->pxy++, rpq->qxy++, xd++, yd++) {
		*xd = vdev_raidz_exp2(*rpq->p ^ *rpq->pxy, rpq->aexp) ^
		    vdev_raidz_exp2(*rpq->q ^ *rpq->qxy, rpq->bexp);
		*yd = *rpq->p ^ *rpq->pxy ^ *xd;
	}

	return (0);
}

static int
vdev_raidz_reconst_pq_tail_func(void *xbuf, size_t size, void *private)
{
	struct reconst_pq_struct *rpq = private;
	uint8_t *xd = xbuf;
	int i;

	for (i = 0; i < size;
	    i++, rpq->p++, rpq->q++, rpq->pxy++, rpq->qxy++, xd++) {
		/* same operation as vdev_raidz_reconst_pq_func() on xd */
		*xd = vdev_raidz_exp2(*rpq->p ^ *rpq->pxy, rpq->aexp) ^
		    vdev_raidz_exp2(*rpq->q ^ *rpq->qxy, rpq->bexp);
	}

	return (0);
}

static int
vdev_raidz_reconstruct_p(raidz_map_t *rm, int *tgts, int ntgts)
{
	int x = tgts[0];
	int c;
	abd_t *dst, *src;

	ASSERT(ntgts == 1);
	ASSERT(x >= rm->rm_firstdatacol);
	ASSERT(x < rm->rm_cols);

	ASSERT(rm->rm_col[x].rc_size <= rm->rm_col[VDEV_RAIDZ_P].rc_size);
	ASSERT(rm->rm_col[x].rc_size > 0);

	src = rm->rm_col[VDEV_RAIDZ_P].rc_abd;
	dst = rm->rm_col[x].rc_abd;

	abd_copy_from_buf(dst, abd_to_buf(src), rm->rm_col[x].rc_size);

	for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
		uint64_t size = MIN(rm->rm_col[x].rc_size,
		    rm->rm_col[c].rc_size);

		src = rm->rm_col[c].rc_abd;
		dst = rm->rm_col[x].rc_abd;

		if (c == x)
			continue;

		(void) abd_iterate_func2(dst, src, 0, 0, size,
		    vdev_raidz_reconst_p_func, NULL);
	}

	return (1 << VDEV_RAIDZ_P);
}

static int
vdev_raidz_reconstruct_q(raidz_map_t *rm, int *tgts, int ntgts)
{
	int x = tgts[0];
	int c, exp;
	abd_t *dst, *src;
	struct reconst_q_struct rq;

	ASSERT(ntgts == 1);

	ASSERT(rm->rm_col[x].rc_size <= rm->rm_col[VDEV_RAIDZ_Q].rc_size);

	for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
		uint64_t size = (c == x) ? 0 : MIN(rm->rm_col[x].rc_size,
		    rm->rm_col[c].rc_size);

		src = rm->rm_col[c].rc_abd;
		dst = rm->rm_col[x].rc_abd;

		if (c == rm->rm_firstdatacol) {
			abd_copy(dst, src, size);
			if (rm->rm_col[x].rc_size > size)
				abd_zero_off(dst, size,
				    rm->rm_col[x].rc_size - size);

		} else {
			ASSERT3U(size, <=, rm->rm_col[x].rc_size);
			(void) abd_iterate_func2(dst, src, 0, 0, size,
			    vdev_raidz_reconst_q_pre_func, NULL);
			(void) abd_iterate_func(dst,
			    size, rm->rm_col[x].rc_size - size,
			    vdev_raidz_reconst_q_pre_tail_func, NULL);
		}
	}

	src = rm->rm_col[VDEV_RAIDZ_Q].rc_abd;
	dst = rm->rm_col[x].rc_abd;
	exp = 255 - (rm->rm_cols - 1 - x);
	rq.q = abd_to_buf(src);
	rq.exp = exp;

	(void) abd_iterate_func(dst, 0, rm->rm_col[x].rc_size,
	    vdev_raidz_reconst_q_post_func, &rq);

	return (1 << VDEV_RAIDZ_Q);
}
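
/*
 * A note on the exponent chosen above: column x carries coefficient
 * 2^(rm_cols - 1 - x) in Q, so after the partial Q (recomputed with D_x
 * treated as zero) is XORed against the real Q, the surviving term is
 * 2^(rm_cols - 1 - x) * D_x. Multiplying by 2^(255 - (rm_cols - 1 - x))
 * cancels that coefficient, since 2^255 == 1 in GF(2^8).
 */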

static int
vdev_raidz_reconstruct_pq(raidz_map_t *rm, int *tgts, int ntgts)
{
	uint8_t *p, *q, *pxy, *qxy, tmp, a, b, aexp, bexp;
	abd_t *pdata, *qdata;
	uint64_t xsize, ysize;
	int x = tgts[0];
	int y = tgts[1];
	abd_t *xd, *yd;
	struct reconst_pq_struct rpq;

	ASSERT(ntgts == 2);
	ASSERT(x < y);
	ASSERT(x >= rm->rm_firstdatacol);
	ASSERT(y < rm->rm_cols);

	ASSERT(rm->rm_col[x].rc_size >= rm->rm_col[y].rc_size);

	/*
	 * Move the parity data aside -- we're going to compute parity as
	 * though columns x and y were full of zeros -- Pxy and Qxy. We want to
	 * reuse the parity generation mechanism without trashing the actual
	 * parity so we make those columns appear to be full of zeros by
	 * setting their lengths to zero.
	 */
	pdata = rm->rm_col[VDEV_RAIDZ_P].rc_abd;
	qdata = rm->rm_col[VDEV_RAIDZ_Q].rc_abd;
	xsize = rm->rm_col[x].rc_size;
	ysize = rm->rm_col[y].rc_size;

	rm->rm_col[VDEV_RAIDZ_P].rc_abd =
	    abd_alloc_linear(rm->rm_col[VDEV_RAIDZ_P].rc_size, B_TRUE);
	rm->rm_col[VDEV_RAIDZ_Q].rc_abd =
	    abd_alloc_linear(rm->rm_col[VDEV_RAIDZ_Q].rc_size, B_TRUE);
	rm->rm_col[x].rc_size = 0;
	rm->rm_col[y].rc_size = 0;

	vdev_raidz_generate_parity_pq(rm);

	rm->rm_col[x].rc_size = xsize;
	rm->rm_col[y].rc_size = ysize;

	p = abd_to_buf(pdata);
	q = abd_to_buf(qdata);
	pxy = abd_to_buf(rm->rm_col[VDEV_RAIDZ_P].rc_abd);
	qxy = abd_to_buf(rm->rm_col[VDEV_RAIDZ_Q].rc_abd);
	xd = rm->rm_col[x].rc_abd;
	yd = rm->rm_col[y].rc_abd;

	/*
	 * We now have:
	 *	Pxy = P + D_x + D_y
	 *	Qxy = Q + 2^(ndevs - 1 - x) * D_x + 2^(ndevs - 1 - y) * D_y
	 *
	 * We can then solve for D_x:
	 *	D_x = A * (P + Pxy) + B * (Q + Qxy)
	 * where
	 *	A = 2^(x - y) * (2^(x - y) + 1)^-1
	 *	B = 2^(ndevs - 1 - x) * (2^(x - y) + 1)^-1
	 *
	 * With D_x in hand, we can easily solve for D_y:
	 *	D_y = P + Pxy + D_x
	 */

	a = vdev_raidz_pow2[255 + x - y];
	b = vdev_raidz_pow2[255 - (rm->rm_cols - 1 - x)];
	tmp = 255 - vdev_raidz_log2[a ^ 1];

	aexp = vdev_raidz_log2[vdev_raidz_exp2(a, tmp)];
	bexp = vdev_raidz_log2[vdev_raidz_exp2(b, tmp)];

	ASSERT3U(xsize, >=, ysize);
	rpq.p = p;
	rpq.q = q;
	rpq.pxy = pxy;
	rpq.qxy = qxy;
	rpq.aexp = aexp;
	rpq.bexp = bexp;

	(void) abd_iterate_func2(xd, yd, 0, 0, ysize,
	    vdev_raidz_reconst_pq_func, &rpq);
	(void) abd_iterate_func(xd, ysize, xsize - ysize,
	    vdev_raidz_reconst_pq_tail_func, &rpq);

	abd_free(rm->rm_col[VDEV_RAIDZ_P].rc_abd);
	abd_free(rm->rm_col[VDEV_RAIDZ_Q].rc_abd);

	/*
	 * Restore the saved parity data.
	 */
	rm->rm_col[VDEV_RAIDZ_P].rc_abd = pdata;
	rm->rm_col[VDEV_RAIDZ_Q].rc_abd = qdata;

	return ((1 << VDEV_RAIDZ_P) | (1 << VDEV_RAIDZ_Q));
}

/* BEGIN CSTYLED */
/*
 * In the general case of reconstruction, we must solve the system of linear
 * equations defined by the coefficients used to generate parity as well as
 * the contents of the data and parity disks. This can be expressed with
 * vectors for the original data (D) and the actual data (d) and parity (p)
 * and a matrix composed of the identity matrix (I) and a dispersal matrix (V):
 *
 *            __   __                     __     __
 *            |     |         __     __   |  p_0  |
 *            |  V  |         |  D_0  |   | p_m-1 |
 *            |     |    x    |   :   | = |  d_0  |
 *            |  I  |         | D_n-1 |   |   :   |
 *            |     |         ~~     ~~   | d_n-1 |
 *            ~~   ~~                     ~~     ~~
 *
 * I is simply a square identity matrix of size n, and V is a Vandermonde
 * matrix defined by the coefficients we chose for the various parity columns
 * (1, 2, 4). Note that these values were chosen for simplicity and speed of
 * computation as well as for linear separability.
 *
 *            __                  __               __     __
 *            | 1     ..  1  1  1  |               |  p_0  |
 *            | 2^n-1 ..  4  2  1  |   __     __   |   :   |
 *            | 4^n-1 .. 16  4  1  |   |  D_0  |   | p_m-1 |
 *            | 1     ..  0  0  0  |   |  D_1  |   |  d_0  |
 *            | 0     ..  0  0  0  | x |  D_2  | = |  d_1  |
 *            | :         :  :  :  |   |   :   |   |  d_2  |
 *            | 0     ..  1  0  0  |   | D_n-1 |   |   :   |
 *            | 0     ..  0  1  0  |   ~~     ~~   |   :   |
 *            | 0     ..  0  0  1  |               | d_n-1 |
 *            ~~                  ~~               ~~     ~~
 *
 * Note that I, V, d, and p are known. To compute D, we must invert the
 * matrix and use the known data and parity values to reconstruct the unknown
 * data values. We begin by removing the rows in V|I and d|p that correspond
 * to failed or missing columns; we then make V|I square (n x n) and d|p
 * sized n by removing rows corresponding to unused parity from the bottom up
 * to generate (V|I)' and (d|p)'. We can then generate the inverse of (V|I)'
 * using Gauss-Jordan elimination. In the example below we use m=3 parity
 * columns, n=8 data columns, with errors in d_1, d_2, and p_1:
 *
 *            __                               __
 *            |  1   1   1   1   1   1   1   1  |
 *            | 128  64  32  16   8   4   2   1 | <-----+-+-- missing disks
 *            |  19 205 116  29  64  16   4   1 |      / /
 *            |  1   0   0   0   0   0   0   0  |     / /
 *            |  0   1   0   0   0   0   0   0  | <--' /
 *  (V|I)  =  |  0   0   1   0   0   0   0   0  | <---'
 *            |  0   0   0   1   0   0   0   0  |
 *            |  0   0   0   0   1   0   0   0  |
 *            |  0   0   0   0   0   1   0   0  |
 *            |  0   0   0   0   0   0   1   0  |
 *            |  0   0   0   0   0   0   0   1  |
 *            ~~                               ~~
 *            __                               __
 *            |  1   1   1   1   1   1   1   1  |
 *            |  19 205 116  29  64  16   4   1 |
 *            |  1   0   0   0   0   0   0   0  |
 *  (V|I)' =  |  0   0   0   1   0   0   0   0  |
 *            |  0   0   0   0   1   0   0   0  |
 *            |  0   0   0   0   0   1   0   0  |
 *            |  0   0   0   0   0   0   1   0  |
 *            |  0   0   0   0   0   0   0   1  |
 *            ~~                               ~~
 *
 * Here we employ Gauss-Jordan elimination to find the inverse of (V|I)'. We
 * have carefully chosen the seed values 1, 2, and 4 to ensure that this
 * matrix is not singular.
 *  __                                                                 __
 *  |  1   1   1   1   1   1   1   1     1   0   0   0   0   0   0   0  |
 *  | 19 205 116  29  64  16   4   1     0   1   0   0   0   0   0   0  |
 *  |  1   0   0   0   0   0   0   0     0   0   1   0   0   0   0   0  |
 *  |  0   0   0   1   0   0   0   0     0   0   0   1   0   0   0   0  |
 *  |  0   0   0   0   1   0   0   0     0   0   0   0   1   0   0   0  |
 *  |  0   0   0   0   0   1   0   0     0   0   0   0   0   1   0   0  |
 *  |  0   0   0   0   0   0   1   0     0   0   0   0   0   0   1   0  |
 *  |  0   0   0   0   0   0   0   1     0   0   0   0   0   0   0   1  |
 *  ~~                                                                 ~~
 *  __                                                                 __
 *  |  1   0   0   0   0   0   0   0     0   0   1   0   0   0   0   0  |
 *  |  1   1   1   1   1   1   1   1     1   0   0   0   0   0   0   0  |
 *  | 19 205 116  29  64  16   4   1     0   1   0   0   0   0   0   0  |
 *  |  0   0   0   1   0   0   0   0     0   0   0   1   0   0   0   0  |
 *  |  0   0   0   0   1   0   0   0     0   0   0   0   1   0   0   0  |
 *  |  0   0   0   0   0   1   0   0     0   0   0   0   0   1   0   0  |
 *  |  0   0   0   0   0   0   1   0     0   0   0   0   0   0   1   0  |
 *  |  0   0   0   0   0   0   0   1     0   0   0   0   0   0   0   1  |
 *  ~~                                                                 ~~
 *  __                                                                 __
 *  |  1   0   0   0   0   0   0   0     0   0   1   0   0   0   0   0  |
 *  |  0   1   1   0   0   0   0   0     1   0   1   1   1   1   1   1  |
 *  |  0 205 116   0   0   0   0   0     0   1  19  29  64  16   4   1  |
 *  |  0   0   0   1   0   0   0   0     0   0   0   1   0   0   0   0  |
 *  |  0   0   0   0   1   0   0   0     0   0   0   0   1   0   0   0  |
 *  |  0   0   0   0   0   1   0   0     0   0   0   0   0   1   0   0  |
 *  |  0   0   0   0   0   0   1   0     0   0   0   0   0   0   1   0  |
 *  |  0   0   0   0   0   0   0   1     0   0   0   0   0   0   0   1  |
 *  ~~                                                                 ~~
 *  __                                                                 __
 *  |  1   0   0   0   0   0   0   0     0   0   1   0   0   0   0   0  |
 *  |  0   1   1   0   0   0   0   0     1   0   1   1   1   1   1   1  |
 *  |  0   0 185   0   0   0   0   0   205   1 222 208 141 221 201 204  |
 *  |  0   0   0   1   0   0   0   0     0   0   0   1   0   0   0   0  |
 *  |  0   0   0   0   1   0   0   0     0   0   0   0   1   0   0   0  |
 *  |  0   0   0   0   0   1   0   0     0   0   0   0   0   1   0   0  |
 *  |  0   0   0   0   0   0   1   0     0   0   0   0   0   0   1   0  |
 *  |  0   0   0   0   0   0   0   1     0   0   0   0   0   0   0   1  |
 *  ~~                                                                 ~~
 *  __                                                                 __
 *  |  1   0   0   0   0   0   0   0     0   0   1   0   0   0   0   0  |
 *  |  0   1   1   0   0   0   0   0     1   0   1   1   1   1   1   1  |
 *  |  0   0   1   0   0   0   0   0   166 100   4  40 158 168 216 209  |
 *  |  0   0   0   1   0   0   0   0     0   0   0   1   0   0   0   0  |
 *  |  0   0   0   0   1   0   0   0     0   0   0   0   1   0   0   0  |
 *  |  0   0   0   0   0   1   0   0     0   0   0   0   0   1   0   0  |
 *  |  0   0   0   0   0   0   1   0     0   0   0   0   0   0   1   0  |
 *  |  0   0   0   0   0   0   0   1     0   0   0   0   0   0   0   1  |
 *  ~~                                                                 ~~
 *  __                                                                 __
 *  |  1   0   0   0   0   0   0   0     0   0   1   0   0   0   0   0  |
 *  |  0   1   0   0   0   0   0   0   167 100   5  41 159 169 217 208  |
 *  |  0   0   1   0   0   0   0   0   166 100   4  40 158 168 216 209  |
 *  |  0   0   0   1   0   0   0   0     0   0   0   1   0   0   0   0  |
 *  |  0   0   0   0   1   0   0   0     0   0   0   0   1   0   0   0  |
 *  |  0   0   0   0   0   1   0   0     0   0   0   0   0   1   0   0  |
 *  |  0   0   0   0   0   0   1   0     0   0   0   0   0   0   1   0  |
 *  |  0   0   0   0   0   0   0   1     0   0   0   0   0   0   0   1  |
 *  ~~                                                                 ~~
 *
 *                 __                               __
 *                 |   0   0   1   0   0   0   0   0  |
 *                 | 167 100   5  41 159 169 217 208  |
 *                 | 166 100   4  40 158 168 216 209  |
 *  (V|I)'^-1  =   |   0   0   0   1   0   0   0   0  |
 *                 |   0   0   0   0   1   0   0   0  |
 *                 |   0   0   0   0   0   1   0   0  |
 *                 |   0   0   0   0   0   0   1   0  |
 *                 |   0   0   0   0   0   0   0   1  |
 *                 ~~                               ~~
 *
 * We can then simply compute D = (V|I)'^-1 x (d|p)' to discover the values
 * of the missing data.
 *
 * As is apparent from the example above, the only non-trivial rows in the
 * inverse matrix correspond to the data disks that we're trying to
 * reconstruct. Indeed, those are the only rows we need as the others would
 * only be useful for reconstructing data known or assumed to be valid. For
 * that reason, we only build the coefficients in the rows that correspond to
 * targeted columns.
 */
/* END CSTYLED */

static void
vdev_raidz_matrix_init(raidz_map_t *rm, int n, int nmap, int *map,
    uint8_t **rows)
{
	int i, j;
	int pow;

	ASSERT(n == rm->rm_cols - rm->rm_firstdatacol);

	/*
	 * Fill in the missing rows of interest.
	 */
	for (i = 0; i < nmap; i++) {
		ASSERT3S(0, <=, map[i]);
		ASSERT3S(map[i], <=, 2);

		pow = map[i] * n;
		if (pow > 255)
			pow -= 255;
		ASSERT(pow <= 255);

		for (j = 0; j < n; j++) {
			pow -= map[i];
			if (pow < 0)
				pow += 255;
			rows[i][j] = vdev_raidz_pow2[pow];
		}
	}
}

static void
vdev_raidz_matrix_invert(raidz_map_t *rm, int n, int nmissing, int *missing,
    uint8_t **rows, uint8_t **invrows, const uint8_t *used)
{
	int i, j, ii, jj;
	uint8_t log;

	/*
	 * Assert that the first nmissing entries from the array of used
	 * columns correspond to parity columns and that subsequent entries
	 * correspond to data columns.
	 */
	for (i = 0; i < nmissing; i++) {
		ASSERT3S(used[i], <, rm->rm_firstdatacol);
	}
	for (; i < n; i++) {
		ASSERT3S(used[i], >=, rm->rm_firstdatacol);
	}

	/*
	 * First initialize the storage where we'll compute the inverse rows.
	 */
	for (i = 0; i < nmissing; i++) {
		for (j = 0; j < n; j++) {
			invrows[i][j] = (i == j) ? 1 : 0;
		}
	}

	/*
	 * Subtract all trivial rows from the rows of consequence.
	 */
	for (i = 0; i < nmissing; i++) {
		for (j = nmissing; j < n; j++) {
			ASSERT3U(used[j], >=, rm->rm_firstdatacol);
			jj = used[j] - rm->rm_firstdatacol;
			ASSERT3S(jj, <, n);
			invrows[i][j] = rows[i][jj];
			rows[i][jj] = 0;
		}
	}

	/*
	 * For each of the rows of interest, we must normalize it and subtract
	 * a multiple of it from the other rows.
	 */
	for (i = 0; i < nmissing; i++) {
		for (j = 0; j < missing[i]; j++) {
			ASSERT0(rows[i][j]);
		}
		ASSERT3U(rows[i][missing[i]], !=, 0);

		/*
		 * Compute the inverse of the first element and multiply each
		 * element in the row by that value.
		 */
		log = 255 - vdev_raidz_log2[rows[i][missing[i]]];

		for (j = 0; j < n; j++) {
			rows[i][j] = vdev_raidz_exp2(rows[i][j], log);
			invrows[i][j] = vdev_raidz_exp2(invrows[i][j], log);
		}

		for (ii = 0; ii < nmissing; ii++) {
			if (i == ii)
				continue;

			ASSERT3U(rows[ii][missing[i]], !=, 0);

			log = vdev_raidz_log2[rows[ii][missing[i]]];

			for (j = 0; j < n; j++) {
				rows[ii][j] ^=
				    vdev_raidz_exp2(rows[i][j], log);
				invrows[ii][j] ^=
				    vdev_raidz_exp2(invrows[i][j], log);
			}
		}
	}

	/*
	 * Verify that the data that is left in the rows is properly part of
	 * an identity matrix.
	 */
	for (i = 0; i < nmissing; i++) {
		for (j = 0; j < n; j++) {
			if (j == missing[i]) {
				ASSERT3U(rows[i][j], ==, 1);
			} else {
				ASSERT0(rows[i][j]);
			}
		}
	}
}

static void
vdev_raidz_matrix_reconstruct(raidz_map_t *rm, int n, int nmissing,
    int *missing, uint8_t **invrows, const uint8_t *used)
{
	int i, j, x, cc, c;
	uint8_t *src;
	uint64_t ccount;
	uint8_t *dst[VDEV_RAIDZ_MAXPARITY] = { NULL };
	uint64_t dcount[VDEV_RAIDZ_MAXPARITY] = { 0 };
	uint8_t log = 0;
	uint8_t val;
	int ll;
	uint8_t *invlog[VDEV_RAIDZ_MAXPARITY];
	uint8_t *p, *pp;
	size_t psize;

	psize = sizeof (invlog[0][0]) * n * nmissing;
	p = kmem_alloc(psize, KM_SLEEP);

	for (pp = p, i = 0; i < nmissing; i++) {
		invlog[i] = pp;
		pp += n;
	}

	for (i = 0; i < nmissing; i++) {
		for (j = 0; j < n; j++) {
			ASSERT3U(invrows[i][j], !=, 0);
			invlog[i][j] = vdev_raidz_log2[invrows[i][j]];
		}
	}

	for (i = 0; i < n; i++) {
		c = used[i];
		ASSERT3U(c, <, rm->rm_cols);

		src = abd_to_buf(rm->rm_col[c].rc_abd);
		ccount = rm->rm_col[c].rc_size;
		for (j = 0; j < nmissing; j++) {
			cc = missing[j] + rm->rm_firstdatacol;
			ASSERT3U(cc, >=, rm->rm_firstdatacol);
			ASSERT3U(cc, <, rm->rm_cols);
			ASSERT3U(cc, !=, c);

			dst[j] = abd_to_buf(rm->rm_col[cc].rc_abd);
			dcount[j] = rm->rm_col[cc].rc_size;
		}

		ASSERT(ccount >= rm->rm_col[missing[0]].rc_size || i > 0);

		for (x = 0; x < ccount; x++, src++) {
			if (*src != 0)
				log = vdev_raidz_log2[*src];

			for (cc = 0; cc < nmissing; cc++) {
				if (x >= dcount[cc])
					continue;

				if (*src == 0) {
					val = 0;
				} else {
					if ((ll = log + invlog[cc][i]) >= 255)
						ll -= 255;
					val = vdev_raidz_pow2[ll];
				}

				if (i == 0)
					dst[cc][x] = val;
				else
					dst[cc][x] ^= val;
			}
		}
	}

	kmem_free(p, psize);
}
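
/*
 * The inner loop above multiplies in the log domain: for a nonzero source
 * byte, *src * invrows[cc][i] is computed as
 * vdev_raidz_pow2[(vdev_raidz_log2[*src] + invlog[cc][i]) % 255], with the
 * modulo folded into the explicit "ll -= 255" adjustment.
 */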

static int
vdev_raidz_reconstruct_general(raidz_map_t *rm, int *tgts, int ntgts)
{
	int n, i, c, t, tt;
	int nmissing_rows;
	int missing_rows[VDEV_RAIDZ_MAXPARITY];
	int parity_map[VDEV_RAIDZ_MAXPARITY];

	uint8_t *p, *pp;
	size_t psize;

	uint8_t *rows[VDEV_RAIDZ_MAXPARITY];
	uint8_t *invrows[VDEV_RAIDZ_MAXPARITY];
	uint8_t *used;

	abd_t **bufs = NULL;

	int code = 0;

	/*
	 * Matrix reconstruction can't use scatter ABDs yet, so we allocate
	 * temporary linear ABDs.
	 */
	if (!abd_is_linear(rm->rm_col[rm->rm_firstdatacol].rc_abd)) {
		bufs = kmem_alloc(rm->rm_cols * sizeof (abd_t *), KM_PUSHPAGE);

		for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
			raidz_col_t *col = &rm->rm_col[c];

			bufs[c] = col->rc_abd;
			col->rc_abd = abd_alloc_linear(col->rc_size, B_TRUE);
			abd_copy(col->rc_abd, bufs[c], col->rc_size);
		}
	}

	n = rm->rm_cols - rm->rm_firstdatacol;

	/*
	 * Figure out which data columns are missing.
	 */
	nmissing_rows = 0;
	for (t = 0; t < ntgts; t++) {
		if (tgts[t] >= rm->rm_firstdatacol) {
			missing_rows[nmissing_rows++] =
			    tgts[t] - rm->rm_firstdatacol;
		}
	}

	/*
	 * Figure out which parity columns to use to help generate the missing
	 * data columns.
	 */
	for (tt = 0, c = 0, i = 0; i < nmissing_rows; c++) {
		ASSERT(tt < ntgts);
		ASSERT(c < rm->rm_firstdatacol);

		/*
		 * Skip any targeted parity columns.
		 */
		if (c == tgts[tt]) {
			tt++;
			continue;
		}

		code |= 1 << c;

		parity_map[i] = c;
		i++;
	}

	ASSERT(code != 0);
	ASSERT3U(code, <, 1 << VDEV_RAIDZ_MAXPARITY);

	psize = (sizeof (rows[0][0]) + sizeof (invrows[0][0])) *
	    nmissing_rows * n + sizeof (used[0]) * n;
	p = kmem_alloc(psize, KM_SLEEP);

	for (pp = p, i = 0; i < nmissing_rows; i++) {
		rows[i] = pp;
		pp += n;
		invrows[i] = pp;
		pp += n;
	}
	used = pp;

	for (i = 0; i < nmissing_rows; i++) {
		used[i] = parity_map[i];
	}

	for (tt = 0, c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
		if (tt < nmissing_rows &&
		    c == missing_rows[tt] + rm->rm_firstdatacol) {
			tt++;
			continue;
		}

		ASSERT3S(i, <, n);
		used[i] = c;
		i++;
	}

	/*
	 * Initialize the interesting rows of the matrix.
	 */
	vdev_raidz_matrix_init(rm, n, nmissing_rows, parity_map, rows);

	/*
	 * Invert the matrix.
	 */
	vdev_raidz_matrix_invert(rm, n, nmissing_rows, missing_rows, rows,
	    invrows, used);

	/*
	 * Reconstruct the missing data using the generated matrix.
	 */
	vdev_raidz_matrix_reconstruct(rm, n, nmissing_rows, missing_rows,
	    invrows, used);

	kmem_free(p, psize);

	/*
	 * copy back from temporary linear abds and free them
	 */
	if (bufs) {
		for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
			raidz_col_t *col = &rm->rm_col[c];

			abd_copy(bufs[c], col->rc_abd, col->rc_size);
			abd_free(col->rc_abd);
			col->rc_abd = bufs[c];
		}
		kmem_free(bufs, rm->rm_cols * sizeof (abd_t *));
	}

	return (code);
}
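
/*
 * The value returned above (and by the specialized routines) encodes the
 * parity columns consumed as a bitmask; e.g. a reconstruction that used P
 * and R while skipping a damaged Q would return
 * (1 << VDEV_RAIDZ_P) | (1 << VDEV_RAIDZ_R) == 0x5.
 */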

int
vdev_raidz_reconstruct(raidz_map_t *rm, const int *t, int nt)
{
	int tgts[VDEV_RAIDZ_MAXPARITY], *dt;
	int ntgts;
	int i, c, ret;
	int code;
	int nbadparity, nbaddata;
	int parity_valid[VDEV_RAIDZ_MAXPARITY];

	/*
	 * The tgts list must already be sorted.
	 */
	for (i = 1; i < nt; i++) {
		ASSERT(t[i] > t[i - 1]);
	}

	nbadparity = rm->rm_firstdatacol;
	nbaddata = rm->rm_cols - nbadparity;
	ntgts = 0;
	for (i = 0, c = 0; c < rm->rm_cols; c++) {
		if (c < rm->rm_firstdatacol)
			parity_valid[c] = B_FALSE;

		if (i < nt && c == t[i]) {
			tgts[ntgts++] = c;
			i++;
		} else if (rm->rm_col[c].rc_error != 0) {
			tgts[ntgts++] = c;
		} else if (c >= rm->rm_firstdatacol) {
			nbaddata--;
		} else {
			parity_valid[c] = B_TRUE;
			nbadparity--;
		}
	}

	ASSERT(ntgts >= nt);
	ASSERT(nbaddata >= 0);
	ASSERT(nbaddata + nbadparity == ntgts);

	dt = &tgts[nbadparity];

	/* Reconstruct using the new math implementation */
	ret = vdev_raidz_math_reconstruct(rm, parity_valid, dt, nbaddata);
	if (ret != RAIDZ_ORIGINAL_IMPL)
		return (ret);

	/*
	 * See if we can use any of our optimized reconstruction routines.
	 */
	switch (nbaddata) {
	case 1:
		if (parity_valid[VDEV_RAIDZ_P])
			return (vdev_raidz_reconstruct_p(rm, dt, 1));

		ASSERT(rm->rm_firstdatacol > 1);

		if (parity_valid[VDEV_RAIDZ_Q])
			return (vdev_raidz_reconstruct_q(rm, dt, 1));

		ASSERT(rm->rm_firstdatacol > 2);
		break;

	case 2:
		ASSERT(rm->rm_firstdatacol > 1);

		if (parity_valid[VDEV_RAIDZ_P] &&
		    parity_valid[VDEV_RAIDZ_Q])
			return (vdev_raidz_reconstruct_pq(rm, dt, 2));

		ASSERT(rm->rm_firstdatacol > 2);

		break;
	}

	code = vdev_raidz_reconstruct_general(rm, tgts, ntgts);
	ASSERT(code < (1 << VDEV_RAIDZ_MAXPARITY));
	ASSERT(code > 0);
	return (code);
}

static int
vdev_raidz_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
    uint64_t *ashift)
{
	vdev_t *cvd;
	uint64_t nparity = vd->vdev_nparity;
	int c;
	int lasterror = 0;
	int numerrors = 0;

	ASSERT(nparity > 0);

	if (nparity > VDEV_RAIDZ_MAXPARITY ||
	    vd->vdev_children < nparity + 1) {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (SET_ERROR(EINVAL));
	}

	vdev_open_children(vd);

	for (c = 0; c < vd->vdev_children; c++) {
		cvd = vd->vdev_child[c];

		if (cvd->vdev_open_error != 0) {
			lasterror = cvd->vdev_open_error;
			numerrors++;
			continue;
		}

		*asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
		*max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
		*ashift = MAX(*ashift, cvd->vdev_ashift);
	}

	*asize *= vd->vdev_children;
	*max_asize *= vd->vdev_children;

	if (numerrors > nparity) {
		vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
		return (lasterror);
	}

	return (0);
}

static void
vdev_raidz_close(vdev_t *vd)
{
	int c;

	for (c = 0; c < vd->vdev_children; c++)
		vdev_close(vd->vdev_child[c]);
}

static uint64_t
vdev_raidz_asize(vdev_t *vd, uint64_t psize)
{
	uint64_t asize;
	uint64_t ashift = vd->vdev_top->vdev_ashift;
	uint64_t cols = vd->vdev_children;
	uint64_t nparity = vd->vdev_nparity;

	asize = ((psize - 1) >> ashift) + 1;
	asize += nparity * ((asize + cols - nparity - 1) / (cols - nparity));
	asize = roundup(asize, nparity + 1) << ashift;

	return (asize);
}
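
/*
 * For instance (illustrative numbers): an 8K block on a 5-wide raidz1 with
 * ashift = 9 needs 16 data sectors plus
 * 1 * ((16 + 5 - 1 - 1) / (5 - 1)) = 4 parity sectors; 20 is already a
 * multiple of nparity + 1 = 2, so vdev_raidz_asize() returns 20 << 9,
 * i.e. 10K of allocated space.
 */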

static void
vdev_raidz_child_done(zio_t *zio)
{
	raidz_col_t *rc = zio->io_private;

	rc->rc_error = zio->io_error;
	rc->rc_tried = 1;
	rc->rc_skipped = 0;
}

/*
 * Start an IO operation on a RAIDZ VDev
 *
 * Outline:
 * - For write operations:
 *   1. Generate the parity data
 *   2. Create child zio write operations to each column's vdev, for both
 *      data and parity.
 *   3. If the column skips any sectors for padding, create optional dummy
 *      write zio children for those areas to improve aggregation continuity.
 * - For read operations:
 *   1. Create child zio read operations to each data column's vdev to read
 *      the range of data required for zio.
 *   2. If this is a scrub or resilver operation, or if any of the data
 *      vdevs have had errors, then create zio read operations to the parity
 *      columns' VDevs as well.
 */
static void
vdev_raidz_io_start(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_t *tvd = vd->vdev_top;
	vdev_t *cvd;
	raidz_map_t *rm;
	raidz_col_t *rc;
	int c, i;

	rm = vdev_raidz_map_alloc(zio, tvd->vdev_ashift, vd->vdev_children,
	    vd->vdev_nparity);

	ASSERT3U(rm->rm_asize, ==, vdev_psize_to_asize(vd, zio->io_size));

	if (zio->io_type == ZIO_TYPE_WRITE) {
		vdev_raidz_generate_parity(rm);

		for (c = 0; c < rm->rm_cols; c++) {
			rc = &rm->rm_col[c];
			cvd = vd->vdev_child[rc->rc_devidx];
			zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
			    rc->rc_offset, rc->rc_abd, rc->rc_size,
			    zio->io_type, zio->io_priority, 0,
			    vdev_raidz_child_done, rc));
		}

		/*
		 * Generate optional I/Os for any skipped sectors to improve
		 * aggregation contiguity.
		 */
		for (c = rm->rm_skipstart, i = 0; i < rm->rm_nskip; c++, i++) {
			ASSERT(c <= rm->rm_scols);
			if (c == rm->rm_scols)
				c = 0;
			rc = &rm->rm_col[c];
			cvd = vd->vdev_child[rc->rc_devidx];
			zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
			    rc->rc_offset + rc->rc_size, NULL,
			    1 << tvd->vdev_ashift,
			    zio->io_type, zio->io_priority,
			    ZIO_FLAG_NODATA | ZIO_FLAG_OPTIONAL, NULL, NULL));
		}

		zio_execute(zio);
		return;
	}

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	/*
	 * Iterate over the columns in reverse order so that we hit the parity
	 * last -- any errors along the way will force us to read the parity.
	 */
	for (c = rm->rm_cols - 1; c >= 0; c--) {
		rc = &rm->rm_col[c];
		cvd = vd->vdev_child[rc->rc_devidx];
		if (!vdev_readable(cvd)) {
			if (c >= rm->rm_firstdatacol)
				rm->rm_missingdata++;
			else
				rm->rm_missingparity++;
			rc->rc_error = SET_ERROR(ENXIO);
			rc->rc_tried = 1;	/* don't even try */
			rc->rc_skipped = 1;
			continue;
		}
		if (vdev_dtl_contains(cvd, DTL_MISSING, zio->io_txg, 1)) {
			if (c >= rm->rm_firstdatacol)
				rm->rm_missingdata++;
			else
				rm->rm_missingparity++;
			rc->rc_error = SET_ERROR(ESTALE);
			rc->rc_skipped = 1;
			continue;
		}
		if (c >= rm->rm_firstdatacol || rm->rm_missingdata > 0 ||
		    (zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
			zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
			    rc->rc_offset, rc->rc_abd, rc->rc_size,
			    zio->io_type, zio->io_priority, 0,
			    vdev_raidz_child_done, rc));
		}
	}

	zio_execute(zio);
}


/*
 * Report a checksum error for a child of a RAID-Z device.
 */
static void
raidz_checksum_error(zio_t *zio, raidz_col_t *rc, void *bad_data)
{
	void *buf;
	vdev_t *vd = zio->io_vd->vdev_child[rc->rc_devidx];

	if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
		zio_bad_cksum_t zbc;
		raidz_map_t *rm = zio->io_vsd;

		mutex_enter(&vd->vdev_stat_lock);
		vd->vdev_stat.vs_checksum_errors++;
		mutex_exit(&vd->vdev_stat_lock);

		zbc.zbc_has_cksum = 0;
		zbc.zbc_injected = rm->rm_ecksuminjected;

		buf = abd_borrow_buf_copy(rc->rc_abd, rc->rc_size);
		zfs_ereport_post_checksum(zio->io_spa, vd, zio,
		    rc->rc_offset, rc->rc_size, buf, bad_data,
		    &zbc);
		abd_return_buf(rc->rc_abd, buf, rc->rc_size);
	}
}

/*
 * We keep track of whether or not there were any injected errors, so that
 * any ereports we generate can note it.
 */
static int
raidz_checksum_verify(zio_t *zio)
{
	zio_bad_cksum_t zbc;
	raidz_map_t *rm = zio->io_vsd;
	int ret;

	bzero(&zbc, sizeof (zio_bad_cksum_t));

	ret = zio_checksum_error(zio, &zbc);
	if (ret != 0 && zbc.zbc_injected != 0)
		rm->rm_ecksuminjected = 1;

	return (ret);
}

/*
 * Generate the parity from the data columns. If we tried and were able to
 * read the parity without error, verify that the generated parity matches the
 * data we read. If it doesn't, we fire off a checksum error. Return the
 * number of such failures.
 */
static int
raidz_parity_verify(zio_t *zio, raidz_map_t *rm)
{
	void *orig[VDEV_RAIDZ_MAXPARITY];
	int c, ret = 0;
	raidz_col_t *rc;

	blkptr_t *bp = zio->io_bp;
	enum zio_checksum checksum = (bp == NULL ? zio->io_prop.zp_checksum :
	    (BP_IS_GANG(bp) ? ZIO_CHECKSUM_GANG_HEADER : BP_GET_CHECKSUM(bp)));

	if (checksum == ZIO_CHECKSUM_NOPARITY)
		return (ret);

	for (c = 0; c < rm->rm_firstdatacol; c++) {
		rc = &rm->rm_col[c];
		if (!rc->rc_tried || rc->rc_error != 0)
			continue;
		orig[c] = zio_buf_alloc(rc->rc_size);
		abd_copy_to_buf(orig[c], rc->rc_abd, rc->rc_size);
	}

	vdev_raidz_generate_parity(rm);

	for (c = 0; c < rm->rm_firstdatacol; c++) {
		rc = &rm->rm_col[c];
		if (!rc->rc_tried || rc->rc_error != 0)
			continue;
		if (bcmp(orig[c], abd_to_buf(rc->rc_abd), rc->rc_size) != 0) {
			raidz_checksum_error(zio, rc, orig[c]);
			rc->rc_error = SET_ERROR(ECKSUM);
			ret++;
		}
		zio_buf_free(orig[c], rc->rc_size);
	}

	return (ret);
}

static int
vdev_raidz_worst_error(raidz_map_t *rm)
{
	int c, error = 0;

	for (c = 0; c < rm->rm_cols; c++)
		error = zio_worst_error(error, rm->rm_col[c].rc_error);

	return (error);
}
1854
1855 /*
1856 * Iterate over all combinations of bad data and attempt a reconstruction.
1857 * Note that the algorithm below is non-optimal because it doesn't take into
1858 * account how reconstruction is actually performed. For example, with
1859 * triple-parity RAID-Z the reconstruction procedure is the same if column 4
1860 * is targeted as invalid as if columns 1 and 4 are targeted since in both
1861 * cases we'd only use parity information in column 0.
1862 */
1863 static int
1864 vdev_raidz_combrec(zio_t *zio, int total_errors, int data_errors)
1865 {
1866 raidz_map_t *rm = zio->io_vsd;
1867 raidz_col_t *rc;
1868 void *orig[VDEV_RAIDZ_MAXPARITY];
1869 int tstore[VDEV_RAIDZ_MAXPARITY + 2];
1870 int *tgts = &tstore[1];
1871 int curr, next, i, c, n;
1872 int code, ret = 0;
1873
1874 ASSERT(total_errors < rm->rm_firstdatacol);
1875
1876 /*
1877 * This simplifies one edge condition.
1878 */
1879 tgts[-1] = -1;
1880
1881 for (n = 1; n <= rm->rm_firstdatacol - total_errors; n++) {
1882 /*
1883 * Initialize the targets array by finding the first n columns
1884 * that contain no error.
1885 *
1886 * If there were no data errors, we need to ensure that we're
1887 * always explicitly attempting to reconstruct at least one
1888 * data column. To do this, we simply push the highest target
1889 * up into the data columns.
1890 */
1891 for (c = 0, i = 0; i < n; i++) {
1892 if (i == n - 1 && data_errors == 0 &&
1893 c < rm->rm_firstdatacol) {
1894 c = rm->rm_firstdatacol;
1895 }
1896
1897 while (rm->rm_col[c].rc_error != 0) {
1898 c++;
1899 ASSERT3S(c, <, rm->rm_cols);
1900 }
1901
1902 tgts[i] = c++;
1903 }
1904
1905 /*
1906 * Setting tgts[n] simplifies the other edge condition.
1907 */
1908 tgts[n] = rm->rm_cols;
1909
1910 /*
1911 * These buffers were allocated in previous iterations.
1912 */
1913 for (i = 0; i < n - 1; i++) {
1914 ASSERT(orig[i] != NULL);
1915 }
1916
1917 orig[n - 1] = zio_buf_alloc(rm->rm_col[0].rc_size);
1918
1919 curr = 0;
1920 next = tgts[curr];
1921
1922 while (curr != n) {
1923 tgts[curr] = next;
1924 curr = 0;
1925
1926 /*
1927 * Save off the original data that we're going to
1928 * attempt to reconstruct.
1929 */
1930 for (i = 0; i < n; i++) {
1931 ASSERT(orig[i] != NULL);
1932 c = tgts[i];
1933 ASSERT3S(c, >=, 0);
1934 ASSERT3S(c, <, rm->rm_cols);
1935 rc = &rm->rm_col[c];
1936 abd_copy_to_buf(orig[i], rc->rc_abd,
1937 rc->rc_size);
1938 }
1939
1940 /*
1941 * Attempt a reconstruction and exit the outer loop on
1942 * success.
1943 */
1944 code = vdev_raidz_reconstruct(rm, tgts, n);
1945 if (raidz_checksum_verify(zio) == 0) {
1946
1947 for (i = 0; i < n; i++) {
1948 c = tgts[i];
1949 rc = &rm->rm_col[c];
1950 ASSERT(rc->rc_error == 0);
1951 if (rc->rc_tried)
1952 raidz_checksum_error(zio, rc,
1953 orig[i]);
1954 rc->rc_error = SET_ERROR(ECKSUM);
1955 }
1956
1957 ret = code;
1958 goto done;
1959 }
1960
1961 /*
1962 * Restore the original data.
1963 */
1964 for (i = 0; i < n; i++) {
1965 c = tgts[i];
1966 rc = &rm->rm_col[c];
1967 abd_copy_from_buf(rc->rc_abd, orig[i],
1968 rc->rc_size);
1969 }
1970
1971 do {
1972 /*
1973 * Find the next valid column after the curr
1974 * position..
1975 */
1976 for (next = tgts[curr] + 1;
1977 next < rm->rm_cols &&
1978 rm->rm_col[next].rc_error != 0; next++)
1979 continue;
1980
1981 ASSERT(next <= tgts[curr + 1]);
1982
1983 /*
1984 * If that spot doesn't collide with the next target, we're done here.
1985 */
1986 if (next != tgts[curr + 1])
1987 break;
1988
1989 /*
1990 * Otherwise, find the next valid column after
1991 * the previous position.
1992 */
1993 for (c = tgts[curr - 1] + 1;
1994 rm->rm_col[c].rc_error != 0; c++)
1995 continue;
1996
1997 tgts[curr] = c;
1998 curr++;
1999
2000 } while (curr != n);
2001 }
2002 }
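/*
* Every combination was tried without producing valid data. The for
* loop above left n one past its final value, so back it off before
* freeing the buffers allocated in each iteration.
*/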
2003 n--;
2004 done:
2005 for (i = 0; i < n; i++) {
2006 zio_buf_free(orig[i], rm->rm_col[0].rc_size);
2007 }
2008
2009 return (ret);
2010 }
2011
2012 /*
2013 * Complete an IO operation on a RAIDZ VDev
2014 *
2015 * Outline:
2016 * - For write operations:
2017 * 1. Check for errors on the child IOs.
2018 * 2. Return, setting an error code if too few child VDevs were written
2019 * to reconstruct the data later. Note that partial writes are
2020 * considered successful if they can be reconstructed at all.
2021 * - For read operations:
2022 * 1. Check for errors on the child IOs.
2023 * 2. If data errors occurred:
2024 * a. Try to reassemble the data from the parity available.
2025 * b. If we haven't yet read the parity drives, read them now.
2026 * c. If all parity drives have been read but the data still doesn't
2027 * reassemble with a correct checksum, then try combinatorial
2028 * reconstruction.
2029 * d. If that doesn't work, return an error.
2030 * 3. If there were unexpected errors or this is a resilver operation,
2031 * rewrite the vdevs that had errors.
2032 */
2033 static void
2034 vdev_raidz_io_done(zio_t *zio)
2035 {
2036 vdev_t *vd = zio->io_vd;
2037 vdev_t *cvd;
2038 raidz_map_t *rm = zio->io_vsd;
2039 raidz_col_t *rc = NULL;
2040 int unexpected_errors = 0;
2041 int parity_errors = 0;
2042 int parity_untried = 0;
2043 int data_errors = 0;
2044 int total_errors = 0;
2045 int n, c;
2046 int tgts[VDEV_RAIDZ_MAXPARITY];
2047 int code;
2048
2049 ASSERT(zio->io_bp != NULL); /* XXX need to add code to enforce this */
2050
2051 ASSERT(rm->rm_missingparity <= rm->rm_firstdatacol);
2052 ASSERT(rm->rm_missingdata <= rm->rm_cols - rm->rm_firstdatacol);
2053
2054 for (c = 0; c < rm->rm_cols; c++) {
2055 rc = &rm->rm_col[c];
2056
2057 if (rc->rc_error) {
2058 ASSERT(rc->rc_error != ECKSUM); /* child has no bp */
2059
2060 if (c < rm->rm_firstdatacol)
2061 parity_errors++;
2062 else
2063 data_errors++;
2064
2065 if (!rc->rc_skipped)
2066 unexpected_errors++;
2067
2068 total_errors++;
2069 } else if (c < rm->rm_firstdatacol && !rc->rc_tried) {
2070 parity_untried++;
2071 }
2072 }
2073
2074 if (zio->io_type == ZIO_TYPE_WRITE) {
2075 /*
2076 * XXX -- for now, treat partial writes as a success.
2077 * (If we couldn't write enough columns to reconstruct
2078 * the data, the I/O failed. Otherwise, good enough.)
2079 *
2080 * Now that we support write reallocation, it would be better
2081 * to treat partial failure as real failure unless there are
2082 * no non-degraded top-level vdevs left, and not update DTLs
2083 * if we intend to reallocate.
2084 */
2085 /* XXPOLICY */
2086 if (total_errors > rm->rm_firstdatacol)
2087 zio->io_error = vdev_raidz_worst_error(rm);
2088
2089 return;
2090 }
2091
2092 ASSERT(zio->io_type == ZIO_TYPE_READ);
2093 /*
2094 * There are three potential phases for a read:
2095 * 1. produce valid data from the columns read
2096 * 2. read all disks and try again
2097 * 3. perform combinatorial reconstruction
2098 *
2099 * Each phase is progressively both more expensive and less likely to
2100 * occur. If we encounter more errors than we can repair or all phases
2101 * fail, we have no choice but to return an error.
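*
* Phase 1 corresponds to the attempt immediately below, phase 2 to the
* column re-read loop further down, and phase 3 to the
* vdev_raidz_combrec() call near the end of this function.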
2102 */
2103
2104 /*
2105 * If the number of errors we saw was correctable -- less than or equal
2106 * to the number of parity disks read -- attempt to produce data that
2107 * has a valid checksum. Naturally, this case applies in the absence of
2108 * any errors.
2109 */
2110 if (total_errors <= rm->rm_firstdatacol - parity_untried) {
2111 if (data_errors == 0) {
2112 if (raidz_checksum_verify(zio) == 0) {
2113 /*
2114 * If we read parity information (unnecessarily,
2115 * as it happens, since no reconstruction was
2116 * needed), regenerate and verify the parity.
2117 * We also regenerate parity when resilvering
2118 * so we can write it out to the failed device
2119 * later.
2120 */
2121 if (parity_errors + parity_untried <
2122 rm->rm_firstdatacol ||
2123 (zio->io_flags & ZIO_FLAG_RESILVER)) {
2124 n = raidz_parity_verify(zio, rm);
2125 unexpected_errors += n;
2126 ASSERT(parity_errors + n <=
2127 rm->rm_firstdatacol);
2128 }
2129 goto done;
2130 }
2131 } else {
2132 /*
2133 * We either attempt to read all the parity columns or
2134 * none of them. If we didn't try to read parity, we
2135 * wouldn't be here in the correctable case. There must
2136 * also have been fewer parity errors than parity
2137 * columns or, again, we wouldn't be in this code path.
2138 */
2139 ASSERT(parity_untried == 0);
2140 ASSERT(parity_errors < rm->rm_firstdatacol);
2141
2142 /*
2143 * Identify the data columns that reported an error.
2144 */
2145 n = 0;
2146 for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
2147 rc = &rm->rm_col[c];
2148 if (rc->rc_error != 0) {
2149 ASSERT(n < VDEV_RAIDZ_MAXPARITY);
2150 tgts[n++] = c;
2151 }
2152 }
2153
2154 ASSERT(rm->rm_firstdatacol >= n);
2155
2156 code = vdev_raidz_reconstruct(rm, tgts, n);
2157
2158 if (raidz_checksum_verify(zio) == 0) {
2159 /*
2160 * If we read more parity disks than were used
2161 * for reconstruction, confirm that the other
2162 * parity disks produced correct data. This
2163 * routine is suboptimal in that it regenerates
2164 * the parity that we already used in addition
2165 * to the parity that we're attempting to
2166 * verify, but this should be a relatively
2167 * uncommon case, and can be optimized if it
2168 * becomes a problem. Note that we regenerate
2169 * parity when resilvering so we can write it
2170 * out to failed devices later.
2171 */
2172 if (parity_errors < rm->rm_firstdatacol - n ||
2173 (zio->io_flags & ZIO_FLAG_RESILVER)) {
2174 n = raidz_parity_verify(zio, rm);
2175 unexpected_errors += n;
2176 ASSERT(parity_errors + n <=
2177 rm->rm_firstdatacol);
2178 }
2179
2180 goto done;
2181 }
2182 }
2183 }
2184
2185 /*
2186 * This isn't a typical situation -- either we got a read error or
2187 * a child silently returned bad data. Read every block so we can
2188 * try again with as much data and parity as we can track down. If
2189 * we've already been through once before, all children will be marked
2190 * as tried so we'll proceed to combinatorial reconstruction.
2191 */
2192 unexpected_errors = 1;
2193 rm->rm_missingdata = 0;
2194 rm->rm_missingparity = 0;
2195
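/*
* The outer loop locates the first untried column; zio_vdev_io_redone()
* is then called once so that this done handler runs again after the
* reissued children complete, and the inner loop issues a read for
* every remaining untried column.
*/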
2196 for (c = 0; c < rm->rm_cols; c++) {
2197 if (rm->rm_col[c].rc_tried)
2198 continue;
2199
2200 zio_vdev_io_redone(zio);
2201 do {
2202 rc = &rm->rm_col[c];
2203 if (rc->rc_tried)
2204 continue;
2205 zio_nowait(zio_vdev_child_io(zio, NULL,
2206 vd->vdev_child[rc->rc_devidx],
2207 rc->rc_offset, rc->rc_abd, rc->rc_size,
2208 zio->io_type, zio->io_priority, 0,
2209 vdev_raidz_child_done, rc));
2210 } while (++c < rm->rm_cols);
2211
2212 return;
2213 }
2214
2215 /*
2216 * At this point we've attempted to reconstruct the data given the
2217 * errors we detected, and we've attempted to read all columns. There
2218 * must, therefore, be one or more additional problems -- silent errors
2219 * resulting in invalid data rather than explicit I/O errors resulting
2220 * in absent data. We check if there is enough additional data to
2221 * possibly reconstruct the data and then perform combinatorial
2222 * reconstruction over all possible combinations. If that fails,
2223 * we're cooked.
2224 */
2225 if (total_errors > rm->rm_firstdatacol) {
2226 zio->io_error = vdev_raidz_worst_error(rm);
2227
2228 } else if (total_errors < rm->rm_firstdatacol &&
2229 (code = vdev_raidz_combrec(zio, total_errors, data_errors)) != 0) {
2230 /*
2231 * code is a bitmask of the parity columns used in the successful
2232 * reconstruction; if we didn't use all the available parity,
2233 * verify that the remaining parity is correct.
2234 */
2235 if (code != (1 << rm->rm_firstdatacol) - 1)
2236 (void) raidz_parity_verify(zio, rm);
2237 } else {
2238 /*
2239 * We're here because either:
2240 *
2241 * total_errors == rm_firstdatacol, or
2242 * vdev_raidz_combrec() failed
2243 *
2244 * In either case, there is enough bad data to prevent
2245 * reconstruction.
2246 *
2247 * Start checksum ereports for all children that haven't
2248 * failed, provided the I/O wasn't speculative.
2249 */
2250 zio->io_error = SET_ERROR(ECKSUM);
2251
2252 if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
2253 for (c = 0; c < rm->rm_cols; c++) {
2254 rc = &rm->rm_col[c];
2255 if (rc->rc_error == 0) {
2256 zio_bad_cksum_t zbc;
2257 zbc.zbc_has_cksum = 0;
2258 zbc.zbc_injected =
2259 rm->rm_ecksuminjected;
2260
2261 zfs_ereport_start_checksum(
2262 zio->io_spa,
2263 vd->vdev_child[rc->rc_devidx],
2264 zio, rc->rc_offset, rc->rc_size,
2265 (void *)(uintptr_t)c, &zbc);
2266 }
2267 }
2268 }
2269 }
2270
2271 done:
2272 zio_checksum_verified(zio);
2273
2274 if (zio->io_error == 0 && spa_writeable(zio->io_spa) &&
2275 (unexpected_errors || (zio->io_flags & ZIO_FLAG_RESILVER))) {
2276 /*
2277 * Use the good data we have in hand to repair damaged children.
2278 */
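/*
* Repairs are issued as ZIO_FLAG_IO_REPAIR writes; those prompted
* by unexpected errors (rather than by a resilver alone) are
* additionally tagged ZIO_FLAG_SELF_HEAL.
*/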
2279 for (c = 0; c < rm->rm_cols; c++) {
2280 rc = &rm->rm_col[c];
2281 cvd = vd->vdev_child[rc->rc_devidx];
2282
2283 if (rc->rc_error == 0)
2284 continue;
2285
2286 zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
2287 rc->rc_offset, rc->rc_abd, rc->rc_size,
2288 ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
2289 ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
2290 ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
2291 }
2292 }
2293 }
2294
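/*
* Report the top-level raidz vdev's health: it cannot be opened if more
* children have faulted than there is parity to absorb, it is degraded
* if any child is faulted or degraded, and it is healthy otherwise.
*/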
2295 static void
2296 vdev_raidz_state_change(vdev_t *vd, int faulted, int degraded)
2297 {
2298 if (faulted > vd->vdev_nparity)
2299 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
2300 VDEV_AUX_NO_REPLICAS);
2301 else if (degraded + faulted != 0)
2302 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
2303 else
2304 vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
2305 }
2306
2307 vdev_ops_t vdev_raidz_ops = {
2308 vdev_raidz_open,
2309 vdev_raidz_close,
2310 vdev_raidz_asize,
2311 vdev_raidz_io_start,
2312 vdev_raidz_io_done,
2313 vdev_raidz_state_change,
2314 NULL, /* vdev_op_hold */
2315 NULL, /* vdev_op_rele */
2316 VDEV_TYPE_RAIDZ, /* name of this vdev type */
2317 B_FALSE /* not a leaf vdev */
2318 };