/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2016 Gvozden Nešković. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/fm/fs/zfs.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_raidz_impl.h>

/*
 * Virtual device vector for RAID-Z.
 *
 * This vdev supports single, double, and triple parity. For single parity,
 * we use a simple XOR of all the data columns. For double or triple parity,
 * we use a special case of Reed-Solomon coding. This extends the
 * technique described in "The mathematics of RAID-6" by H. Peter Anvin by
 * drawing on the system described in "A Tutorial on Reed-Solomon Coding for
 * Fault-Tolerance in RAID-like Systems" by James S. Plank on which the
 * former is also based. The latter is designed to provide higher performance
 * for writes.
 *
 * Note that the Plank paper claimed to support arbitrary N+M, but was then
 * amended six years later identifying a critical flaw that invalidates its
 * claims. Nevertheless, the technique can be adapted to work for up to
 * triple parity. For additional parity, the amendment "Note: Correction to
 * the 1997 Tutorial on Reed-Solomon Coding" by James S. Plank and Ying Ding
 * is viable, but the additional complexity means that write performance will
 * suffer.
 *
 * All of the methods above operate on a Galois field, defined over the
 * integers mod 2^N. In our case we choose N=8 for GF(2^8) so that all
 * elements can be expressed with a single byte. Briefly, the operations on
 * the field are defined as follows:
 *
 *   o addition (+) is represented by a bitwise XOR
 *   o subtraction (-) is therefore identical to addition: A + B = A - B
 *   o multiplication of A by 2 is defined by the following bitwise expression:
 *
 *	(A * 2)_7 = A_6
 *	(A * 2)_6 = A_5
 *	(A * 2)_5 = A_4
 *	(A * 2)_4 = A_3 + A_7
 *	(A * 2)_3 = A_2 + A_7
 *	(A * 2)_2 = A_1 + A_7
 *	(A * 2)_1 = A_0
 *	(A * 2)_0 = A_7
 *
 * In C, multiplying by 2 is therefore ((a << 1) ^ ((a & 0x80) ? 0x1d : 0)).
 * As an aside, this multiplication is derived from the error correcting
 * primitive polynomial x^8 + x^4 + x^3 + x^2 + 1.
 *
 * Observe that any number in the field (except for 0) can be expressed as a
 * power of 2 -- a generator for the field. We store a table of the powers of
 * 2 and logs base 2 for quick look ups, and exploit the fact that A * B can
 * be rewritten as 2^(log_2(A) + log_2(B)) (where '+' is normal addition rather
 * than field addition). The inverse of a field element A (A^-1) is therefore
 * A ^ (255 - 1) = A^254.
 *
 * The up-to-three parity columns, P, Q, R over several data columns,
 * D_0, ... D_n-1, can be expressed by field operations:
 *
 *	P = D_0 + D_1 + ... + D_n-2 + D_n-1
 *	Q = 2^n-1 * D_0 + 2^n-2 * D_1 + ... + 2^1 * D_n-2 + 2^0 * D_n-1
 *	  = ((...((D_0) * 2 + D_1) * 2 + ...) * 2 + D_n-2) * 2 + D_n-1
 *	R = 4^n-1 * D_0 + 4^n-2 * D_1 + ... + 4^1 * D_n-2 + 4^0 * D_n-1
 *	  = ((...((D_0) * 4 + D_1) * 4 + ...) * 4 + D_n-2) * 4 + D_n-1
 *
 * We chose 1, 2, and 4 as our generators because 1 corresponds to the trivial
 * XOR operation, and 2 and 4 can be computed quickly and generate linearly-
 * independent coefficients. (There are no additional coefficients that have
 * this property which is why the uncorrected Plank method breaks down.)
 *
 * See the reconstruction code below for how P, Q and R can be used
 * individually or in concert to recover missing data columns.
 */
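
/*
 * Illustrative sketch (not part of the driver logic): the field arithmetic
 * above written out as plain C. gf_mul2() matches VDEV_RAIDZ_MUL_2() below,
 * and gf_mul() shows the log/exp-table trick backed by the vdev_raidz_pow2[]
 * and vdev_raidz_log2[] tables:
 *
 *	static uint8_t
 *	gf_mul2(uint8_t a)
 *	{
 *		return ((a << 1) ^ ((a & 0x80) ? 0x1d : 0));
 *	}
 *
 *	static uint8_t
 *	gf_mul(uint8_t a, uint8_t b)
 *	{
 *		if (a == 0 || b == 0)
 *			return (0);
 *		return (vdev_raidz_pow2[(vdev_raidz_log2[a] +
 *		    vdev_raidz_log2[b]) % 255]);
 *	}
 *
 * For example, gf_mul2(0x80) == 0x1d: the high bit carries out as x^8,
 * which the primitive polynomial reduces back to x^4 + x^3 + x^2 + 1.
 */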

#define	VDEV_RAIDZ_P		0
#define	VDEV_RAIDZ_Q		1
#define	VDEV_RAIDZ_R		2

#define	VDEV_RAIDZ_MUL_2(x)	(((x) << 1) ^ (((x) & 0x80) ? 0x1d : 0))
#define	VDEV_RAIDZ_MUL_4(x)	(VDEV_RAIDZ_MUL_2(VDEV_RAIDZ_MUL_2(x)))

/*
 * We provide a mechanism to perform the field multiplication operation on a
 * 64-bit value all at once rather than a byte at a time. This works by
 * creating a mask from the top bit in each byte and using that to
 * conditionally apply the XOR of 0x1d.
 */
#define	VDEV_RAIDZ_64MUL_2(x, mask) \
{ \
	(mask) = (x) & 0x8080808080808080ULL; \
	(mask) = ((mask) << 1) - ((mask) >> 7); \
	(x) = (((x) << 1) & 0xfefefefefefefefeULL) ^ \
	    ((mask) & 0x1d1d1d1d1d1d1d1dULL); \
}

#define	VDEV_RAIDZ_64MUL_4(x, mask) \
{ \
	VDEV_RAIDZ_64MUL_2((x), mask); \
	VDEV_RAIDZ_64MUL_2((x), mask); \
}
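
/*
 * A hedged sanity-check sketch: the 64-bit SWAR macro above should agree
 * with eight independent byte-wise multiplications. A user-space loop along
 * these lines (illustrative only) verifies the equivalence:
 *
 *	uint64_t x = 0x0102040810204080ULL, m;
 *	uint64_t y = x;
 *	VDEV_RAIDZ_64MUL_2(y, m);
 *	for (int i = 0; i < 8; i++) {
 *		uint8_t b = (x >> (8 * i)) & 0xff;
 *		assert(((y >> (8 * i)) & 0xff) ==
 *		    (uint8_t)VDEV_RAIDZ_MUL_2(b));
 *	}
 *
 * The mask trick works because ((mask) << 1) - ((mask) >> 7) turns each
 * 0x80 byte into 0xff and each 0x00 byte into 0x00, selecting exactly the
 * bytes that need the 0x1d reduction applied.
 */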

void
vdev_raidz_map_free(raidz_map_t *rm)
{
	int c;
	size_t size;

	for (c = 0; c < rm->rm_firstdatacol; c++) {
		abd_free(rm->rm_col[c].rc_abd);

		if (rm->rm_col[c].rc_gdata != NULL)
			zio_buf_free(rm->rm_col[c].rc_gdata,
			    rm->rm_col[c].rc_size);
	}

	size = 0;
	for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
		abd_put(rm->rm_col[c].rc_abd);
		size += rm->rm_col[c].rc_size;
	}

	if (rm->rm_abd_copy != NULL)
		abd_free(rm->rm_abd_copy);

	kmem_free(rm, offsetof(raidz_map_t, rm_col[rm->rm_scols]));
}

static void
vdev_raidz_map_free_vsd(zio_t *zio)
{
	raidz_map_t *rm = zio->io_vsd;

	ASSERT0(rm->rm_freed);
	rm->rm_freed = 1;

	if (rm->rm_reports == 0)
		vdev_raidz_map_free(rm);
}

/*ARGSUSED*/
static void
vdev_raidz_cksum_free(void *arg, size_t ignored)
{
	raidz_map_t *rm = arg;

	ASSERT3U(rm->rm_reports, >, 0);

	if (--rm->rm_reports == 0 && rm->rm_freed != 0)
		vdev_raidz_map_free(rm);
}

static void
vdev_raidz_cksum_finish(zio_cksum_report_t *zcr, const void *good_data)
{
	raidz_map_t *rm = zcr->zcr_cbdata;
	size_t c = zcr->zcr_cbinfo;
	size_t x;

	const char *good = NULL;
	char *bad;

	if (good_data == NULL) {
		zfs_ereport_finish_checksum(zcr, NULL, NULL, B_FALSE);
		return;
	}

	if (c < rm->rm_firstdatacol) {
		/*
		 * The first time through, calculate the parity blocks for
		 * the good data (this relies on the fact that the good
		 * data never changes for a given logical ZIO)
		 */
		if (rm->rm_col[0].rc_gdata == NULL) {
			abd_t *bad_parity[VDEV_RAIDZ_MAXPARITY];
			char *buf;
			int offset;

			/*
			 * Set up the rm_col[]s to generate the parity for
			 * good_data, first saving the parity bufs and
			 * replacing them with buffers to hold the result.
			 */
			for (x = 0; x < rm->rm_firstdatacol; x++) {
				bad_parity[x] = rm->rm_col[x].rc_abd;
				rm->rm_col[x].rc_gdata =
				    zio_buf_alloc(rm->rm_col[x].rc_size);
				rm->rm_col[x].rc_abd =
				    abd_get_from_buf(rm->rm_col[x].rc_gdata,
				    rm->rm_col[x].rc_size);
			}

			/* fill in the data columns from good_data */
			buf = (char *)good_data;
			for (; x < rm->rm_cols; x++) {
				abd_put(rm->rm_col[x].rc_abd);
				rm->rm_col[x].rc_abd = abd_get_from_buf(buf,
				    rm->rm_col[x].rc_size);
				buf += rm->rm_col[x].rc_size;
			}

			/*
			 * Construct the parity from the good data.
			 */
			vdev_raidz_generate_parity(rm);

			/* restore everything back to its original state */
			for (x = 0; x < rm->rm_firstdatacol; x++) {
				abd_put(rm->rm_col[x].rc_abd);
				rm->rm_col[x].rc_abd = bad_parity[x];
			}

			offset = 0;
			for (x = rm->rm_firstdatacol; x < rm->rm_cols; x++) {
				abd_put(rm->rm_col[x].rc_abd);
				rm->rm_col[x].rc_abd = abd_get_offset_size(
				    rm->rm_abd_copy, offset,
				    rm->rm_col[x].rc_size);
				offset += rm->rm_col[x].rc_size;
			}
		}

		ASSERT3P(rm->rm_col[c].rc_gdata, !=, NULL);
		good = rm->rm_col[c].rc_gdata;
	} else {
		/* adjust good_data to point at the start of our column */
		good = good_data;

		for (x = rm->rm_firstdatacol; x < c; x++)
			good += rm->rm_col[x].rc_size;
	}

	bad = abd_borrow_buf_copy(rm->rm_col[c].rc_abd, rm->rm_col[c].rc_size);
	/* we drop the ereport if it ends up that the data was good */
	zfs_ereport_finish_checksum(zcr, good, bad, B_TRUE);
	abd_return_buf(rm->rm_col[c].rc_abd, bad, rm->rm_col[c].rc_size);
}

/*
 * Invoked indirectly by zfs_ereport_start_checksum(), called
 * below when our read operation fails completely. The main point
 * is to keep a copy of everything we read from disk, so that at
 * vdev_raidz_cksum_finish() time we can compare it with the good data.
 */
static void
vdev_raidz_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *arg)
{
	size_t c = (size_t)(uintptr_t)arg;
	size_t offset;

	raidz_map_t *rm = zio->io_vsd;
	size_t size;

	/* set up the report and bump the refcount */
	zcr->zcr_cbdata = rm;
	zcr->zcr_cbinfo = c;
	zcr->zcr_finish = vdev_raidz_cksum_finish;
	zcr->zcr_free = vdev_raidz_cksum_free;

	rm->rm_reports++;
	ASSERT3U(rm->rm_reports, >, 0);

	if (rm->rm_abd_copy != NULL)
		return;

	/*
	 * It's the first time we're called for this raidz_map_t, so we need
	 * to copy the data aside; there's no guarantee that our zio's buffer
	 * won't be re-used for something else.
	 *
	 * Our parity data is already in separate buffers, so there's no need
	 * to copy them.
	 */

	size = 0;
	for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++)
		size += rm->rm_col[c].rc_size;

	rm->rm_abd_copy =
	    abd_alloc_sametype(rm->rm_col[rm->rm_firstdatacol].rc_abd, size);

	for (offset = 0, c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
		raidz_col_t *col = &rm->rm_col[c];
		abd_t *tmp = abd_get_offset_size(rm->rm_abd_copy, offset,
		    col->rc_size);

		abd_copy(tmp, col->rc_abd, col->rc_size);
		abd_put(col->rc_abd);
		col->rc_abd = tmp;

		offset += col->rc_size;
	}
	ASSERT3U(offset, ==, size);
}

static const zio_vsd_ops_t vdev_raidz_vsd_ops = {
	vdev_raidz_map_free_vsd,
	vdev_raidz_cksum_report
};

/*
 * Divides the IO evenly across all child vdevs; usually, dcols is
 * the number of children in the target vdev.
 *
 * Avoid inlining the function to keep vdev_raidz_io_start(), which
 * is this function's only caller, as small as possible on the stack.
 */
noinline raidz_map_t *
vdev_raidz_map_alloc(zio_t *zio, uint64_t unit_shift, uint64_t dcols,
    uint64_t nparity)
{
	raidz_map_t *rm;
	/* The starting RAIDZ (parent) vdev sector of the block. */
	uint64_t b = zio->io_offset >> unit_shift;
	/* The zio's size in units of the vdev's minimum sector size. */
	uint64_t s = zio->io_size >> unit_shift;
	/* The first column for this stripe. */
	uint64_t f = b % dcols;
	/* The starting byte offset on each child vdev. */
	uint64_t o = (b / dcols) << unit_shift;
	uint64_t q, r, c, bc, col, acols, scols, coff, devidx, asize, tot;
	uint64_t off = 0;

	/*
	 * "Quotient": The number of data sectors for this stripe on all but
	 * the "big column" child vdevs that also contain "remainder" data.
	 */
	q = s / (dcols - nparity);

	/*
	 * "Remainder": The number of partial stripe data sectors in this I/O.
	 * This will add a sector to some, but not all, child vdevs.
	 */
	r = s - q * (dcols - nparity);

	/* The number of "big columns" - those which contain remainder data. */
	bc = (r == 0 ? 0 : r + nparity);

	/*
	 * The total number of data and parity sectors associated with
	 * this I/O.
	 */
	tot = s + nparity * (q + (r == 0 ? 0 : 1));

	/* acols: The columns that will be accessed. */
	/* scols: The columns that will be accessed or skipped. */
	if (q == 0) {
		/* Our I/O request doesn't span all child vdevs. */
		acols = bc;
		scols = MIN(dcols, roundup(bc, nparity + 1));
	} else {
		acols = dcols;
		scols = dcols;
	}

	ASSERT3U(acols, <=, scols);

	rm = kmem_alloc(offsetof(raidz_map_t, rm_col[scols]), KM_SLEEP);

	rm->rm_cols = acols;
	rm->rm_scols = scols;
	rm->rm_bigcols = bc;
	rm->rm_skipstart = bc;
	rm->rm_missingdata = 0;
	rm->rm_missingparity = 0;
	rm->rm_firstdatacol = nparity;
	rm->rm_abd_copy = NULL;
	rm->rm_reports = 0;
	rm->rm_freed = 0;
	rm->rm_ecksuminjected = 0;

	asize = 0;

	for (c = 0; c < scols; c++) {
		col = f + c;
		coff = o;
		if (col >= dcols) {
			col -= dcols;
			coff += 1ULL << unit_shift;
		}
		rm->rm_col[c].rc_devidx = col;
		rm->rm_col[c].rc_offset = coff;
		rm->rm_col[c].rc_abd = NULL;
		rm->rm_col[c].rc_gdata = NULL;
		rm->rm_col[c].rc_error = 0;
		rm->rm_col[c].rc_tried = 0;
		rm->rm_col[c].rc_skipped = 0;

		if (c >= acols)
			rm->rm_col[c].rc_size = 0;
		else if (c < bc)
			rm->rm_col[c].rc_size = (q + 1) << unit_shift;
		else
			rm->rm_col[c].rc_size = q << unit_shift;

		asize += rm->rm_col[c].rc_size;
	}

	ASSERT3U(asize, ==, tot << unit_shift);
	rm->rm_asize = roundup(asize, (nparity + 1) << unit_shift);
	rm->rm_nskip = roundup(tot, nparity + 1) - tot;
	ASSERT3U(rm->rm_asize - asize, ==, rm->rm_nskip << unit_shift);
	ASSERT3U(rm->rm_nskip, <=, nparity);

	for (c = 0; c < rm->rm_firstdatacol; c++)
		rm->rm_col[c].rc_abd =
		    abd_alloc_linear(rm->rm_col[c].rc_size, B_FALSE);
	rm->rm_col[c].rc_abd = abd_get_offset_size(zio->io_abd, 0,
	    rm->rm_col[c].rc_size);
	off = rm->rm_col[c].rc_size;

	for (c = c + 1; c < acols; c++) {
		rm->rm_col[c].rc_abd = abd_get_offset_size(zio->io_abd, off,
		    rm->rm_col[c].rc_size);
		off += rm->rm_col[c].rc_size;
	}

	/*
	 * If all data stored spans all columns, there's a danger that parity
	 * will always be on the same device and, since parity isn't read
	 * during normal operation, that that device's I/O bandwidth won't be
	 * used effectively. We therefore switch the parity every 1MB.
	 *
	 * ... at least that was, ostensibly, the theory. As a practical
	 * matter unless we juggle the parity between all devices evenly, we
	 * won't see any benefit. Further, occasional writes that aren't a
	 * multiple of the LCM of the number of children and the minimum
	 * stripe width are sufficient to avoid pessimal behavior.
	 * Unfortunately, this decision created an implicit on-disk format
	 * requirement that we need to support for all eternity, but only
	 * for single-parity RAID-Z.
	 *
	 * If we intend to skip a sector in the zeroth column for padding
	 * we must make sure to note this swap. We will never intend to
	 * skip the first column since at least one data and one parity
	 * column must appear in each row.
	 */
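	/*
	 * Worked example (illustrative only): a single-parity block whose
	 * io_offset is 0x100000 has bit 20 set and therefore trades columns
	 * 0 and 1 below, while one at io_offset 0x200000 (bit 20 clear)
	 * keeps its parity in column 0's original position.
	 */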
	ASSERT(rm->rm_cols >= 2);
	ASSERT(rm->rm_col[0].rc_size == rm->rm_col[1].rc_size);

	if (rm->rm_firstdatacol == 1 && (zio->io_offset & (1ULL << 20))) {
		devidx = rm->rm_col[0].rc_devidx;
		o = rm->rm_col[0].rc_offset;
		rm->rm_col[0].rc_devidx = rm->rm_col[1].rc_devidx;
		rm->rm_col[0].rc_offset = rm->rm_col[1].rc_offset;
		rm->rm_col[1].rc_devidx = devidx;
		rm->rm_col[1].rc_offset = o;

		if (rm->rm_skipstart == 0)
			rm->rm_skipstart = 1;
	}

	zio->io_vsd = rm;
	zio->io_vsd_ops = &vdev_raidz_vsd_ops;

	/* init RAIDZ parity ops */
	rm->rm_ops = vdev_raidz_math_get_ops();

	return (rm);
}
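
/*
 * Worked example of the geometry above (illustrative): consider a raidz2
 * vdev with dcols = 6, nparity = 2, unit_shift = 9 (512-byte sectors), and
 * a zio of 7 sectors (s = 7):
 *
 *	q   = 7 / (6 - 2) = 1	sector on every column
 *	r   = 7 - 1 * 4   = 3	leftover data sectors
 *	bc  = 3 + 2       = 5	"big columns" get one extra sector
 *	tot = 7 + 2 * 2   = 11	data + parity sectors overall
 *
 * Since q != 0, acols = scols = 6; columns 0-4 hold (q + 1) = 2 sectors
 * each, column 5 holds 1, and rm_nskip = roundup(11, 3) - 11 = 1 padding
 * sector is accounted for in rm_asize.
 */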

struct pqr_struct {
	uint64_t *p;
	uint64_t *q;
	uint64_t *r;
};

static int
vdev_raidz_p_func(void *buf, size_t size, void *private)
{
	struct pqr_struct *pqr = private;
	const uint64_t *src = buf;
	int i, cnt = size / sizeof (src[0]);

	ASSERT(pqr->p && !pqr->q && !pqr->r);

	for (i = 0; i < cnt; i++, src++, pqr->p++)
		*pqr->p ^= *src;

	return (0);
}

static int
vdev_raidz_pq_func(void *buf, size_t size, void *private)
{
	struct pqr_struct *pqr = private;
	const uint64_t *src = buf;
	uint64_t mask;
	int i, cnt = size / sizeof (src[0]);

	ASSERT(pqr->p && pqr->q && !pqr->r);

	for (i = 0; i < cnt; i++, src++, pqr->p++, pqr->q++) {
		*pqr->p ^= *src;
		VDEV_RAIDZ_64MUL_2(*pqr->q, mask);
		*pqr->q ^= *src;
	}

	return (0);
}

static int
vdev_raidz_pqr_func(void *buf, size_t size, void *private)
{
	struct pqr_struct *pqr = private;
	const uint64_t *src = buf;
	uint64_t mask;
	int i, cnt = size / sizeof (src[0]);

	ASSERT(pqr->p && pqr->q && pqr->r);

	for (i = 0; i < cnt; i++, src++, pqr->p++, pqr->q++, pqr->r++) {
		*pqr->p ^= *src;
		VDEV_RAIDZ_64MUL_2(*pqr->q, mask);
		*pqr->q ^= *src;
		VDEV_RAIDZ_64MUL_4(*pqr->r, mask);
		*pqr->r ^= *src;
	}

	return (0);
}

static void
vdev_raidz_generate_parity_p(raidz_map_t *rm)
{
	uint64_t *p;
	int c;
	abd_t *src;

	for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
		src = rm->rm_col[c].rc_abd;
		p = abd_to_buf(rm->rm_col[VDEV_RAIDZ_P].rc_abd);

		if (c == rm->rm_firstdatacol) {
			abd_copy_to_buf(p, src, rm->rm_col[c].rc_size);
		} else {
			struct pqr_struct pqr = { p, NULL, NULL };
			(void) abd_iterate_func(src, 0, rm->rm_col[c].rc_size,
			    vdev_raidz_p_func, &pqr);
		}
	}
}

static void
vdev_raidz_generate_parity_pq(raidz_map_t *rm)
{
	uint64_t *p, *q, pcnt, ccnt, mask, i;
	int c;
	abd_t *src;

	pcnt = rm->rm_col[VDEV_RAIDZ_P].rc_size / sizeof (p[0]);
	ASSERT(rm->rm_col[VDEV_RAIDZ_P].rc_size ==
	    rm->rm_col[VDEV_RAIDZ_Q].rc_size);

	for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
		src = rm->rm_col[c].rc_abd;
		p = abd_to_buf(rm->rm_col[VDEV_RAIDZ_P].rc_abd);
		q = abd_to_buf(rm->rm_col[VDEV_RAIDZ_Q].rc_abd);

		ccnt = rm->rm_col[c].rc_size / sizeof (p[0]);

		if (c == rm->rm_firstdatacol) {
			abd_copy_to_buf(p, src, rm->rm_col[c].rc_size);
			(void) memcpy(q, p, rm->rm_col[c].rc_size);
		} else {
			struct pqr_struct pqr = { p, q, NULL };
			(void) abd_iterate_func(src, 0, rm->rm_col[c].rc_size,
			    vdev_raidz_pq_func, &pqr);
		}

		if (c == rm->rm_firstdatacol) {
			for (i = ccnt; i < pcnt; i++) {
				p[i] = 0;
				q[i] = 0;
			}
		} else {

			/*
			 * Treat short columns as though they are full of 0s.
			 * Note that there's therefore nothing needed for P.
			 */
			for (i = ccnt; i < pcnt; i++) {
				VDEV_RAIDZ_64MUL_2(q[i], mask);
			}
		}
	}
}

static void
vdev_raidz_generate_parity_pqr(raidz_map_t *rm)
{
	uint64_t *p, *q, *r, pcnt, ccnt, mask, i;
	int c;
	abd_t *src;

	pcnt = rm->rm_col[VDEV_RAIDZ_P].rc_size / sizeof (p[0]);
	ASSERT(rm->rm_col[VDEV_RAIDZ_P].rc_size ==
	    rm->rm_col[VDEV_RAIDZ_Q].rc_size);
	ASSERT(rm->rm_col[VDEV_RAIDZ_P].rc_size ==
	    rm->rm_col[VDEV_RAIDZ_R].rc_size);

	for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
		src = rm->rm_col[c].rc_abd;
		p = abd_to_buf(rm->rm_col[VDEV_RAIDZ_P].rc_abd);
		q = abd_to_buf(rm->rm_col[VDEV_RAIDZ_Q].rc_abd);
		r = abd_to_buf(rm->rm_col[VDEV_RAIDZ_R].rc_abd);

		ccnt = rm->rm_col[c].rc_size / sizeof (p[0]);

		if (c == rm->rm_firstdatacol) {
			abd_copy_to_buf(p, src, rm->rm_col[c].rc_size);
			(void) memcpy(q, p, rm->rm_col[c].rc_size);
			(void) memcpy(r, p, rm->rm_col[c].rc_size);
		} else {
			struct pqr_struct pqr = { p, q, r };
			(void) abd_iterate_func(src, 0, rm->rm_col[c].rc_size,
			    vdev_raidz_pqr_func, &pqr);
		}

		if (c == rm->rm_firstdatacol) {
			for (i = ccnt; i < pcnt; i++) {
				p[i] = 0;
				q[i] = 0;
				r[i] = 0;
			}
		} else {
			/*
			 * Treat short columns as though they are full of 0s.
			 * Note that there's therefore nothing needed for P.
			 */
			for (i = ccnt; i < pcnt; i++) {
				VDEV_RAIDZ_64MUL_2(q[i], mask);
				VDEV_RAIDZ_64MUL_4(r[i], mask);
			}
		}
	}
}

/*
 * Generate RAID parity in the first virtual columns according to the number of
 * parity columns available.
 */
void
vdev_raidz_generate_parity(raidz_map_t *rm)
{
	/* Generate using the new math implementation */
	if (vdev_raidz_math_generate(rm) != RAIDZ_ORIGINAL_IMPL)
		return;

	switch (rm->rm_firstdatacol) {
	case 1:
		vdev_raidz_generate_parity_p(rm);
		break;
	case 2:
		vdev_raidz_generate_parity_pq(rm);
		break;
	case 3:
		vdev_raidz_generate_parity_pqr(rm);
		break;
	default:
		cmn_err(CE_PANIC, "invalid RAID-Z configuration");
	}
}
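
/*
 * A minimal byte-wise sketch of what the PQ path computes, using Horner's
 * rule from the block comment at the top of the file (user-space pseudo-C,
 * assuming gf_mul2() from the earlier sketch; not the driver's actual
 * vectorized path):
 *
 *	p = q = 0;
 *	for (c = 0; c < ndata; c++) {	// D_0 first, D_n-1 last
 *		p ^= d[c];		// P = D_0 + D_1 + ... + D_n-1
 *		q = gf_mul2(q) ^ d[c];	// Q = (...(D_0*2 + D_1)*2...) + D_n-1
 *	}
 *
 * The functions above perform the same recurrence eight bytes at a time
 * with VDEV_RAIDZ_64MUL_2(); once a short column runs out, it simply stops
 * contributing while the remaining words keep being doubled.
 */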

/* ARGSUSED */
static int
vdev_raidz_reconst_p_func(void *dbuf, void *sbuf, size_t size, void *private)
{
	uint64_t *dst = dbuf;
	uint64_t *src = sbuf;
	int cnt = size / sizeof (src[0]);
	int i;

	for (i = 0; i < cnt; i++) {
		dst[i] ^= src[i];
	}

	return (0);
}

/* ARGSUSED */
static int
vdev_raidz_reconst_q_pre_func(void *dbuf, void *sbuf, size_t size,
    void *private)
{
	uint64_t *dst = dbuf;
	uint64_t *src = sbuf;
	uint64_t mask;
	int cnt = size / sizeof (dst[0]);
	int i;

	for (i = 0; i < cnt; i++, dst++, src++) {
		VDEV_RAIDZ_64MUL_2(*dst, mask);
		*dst ^= *src;
	}

	return (0);
}

/* ARGSUSED */
static int
vdev_raidz_reconst_q_pre_tail_func(void *buf, size_t size, void *private)
{
	uint64_t *dst = buf;
	uint64_t mask;
	int cnt = size / sizeof (dst[0]);
	int i;

	for (i = 0; i < cnt; i++, dst++) {
		/* same operation as vdev_raidz_reconst_q_pre_func() on dst */
		VDEV_RAIDZ_64MUL_2(*dst, mask);
	}

	return (0);
}

struct reconst_q_struct {
	uint64_t *q;
	int exp;
};

static int
vdev_raidz_reconst_q_post_func(void *buf, size_t size, void *private)
{
	struct reconst_q_struct *rq = private;
	uint64_t *dst = buf;
	int cnt = size / sizeof (dst[0]);
	int i;

	for (i = 0; i < cnt; i++, dst++, rq->q++) {
		int j;
		uint8_t *b;

		*dst ^= *rq->q;
		for (j = 0, b = (uint8_t *)dst; j < 8; j++, b++) {
			*b = vdev_raidz_exp2(*b, rq->exp);
		}
	}

	return (0);
}

struct reconst_pq_struct {
	uint8_t *p;
	uint8_t *q;
	uint8_t *pxy;
	uint8_t *qxy;
	int aexp;
	int bexp;
};

static int
vdev_raidz_reconst_pq_func(void *xbuf, void *ybuf, size_t size, void *private)
{
	struct reconst_pq_struct *rpq = private;
	uint8_t *xd = xbuf;
	uint8_t *yd = ybuf;
	int i;

	for (i = 0; i < size;
	    i++, rpq->p++, rpq->q++, rpq->pxy++, rpq->qxy++, xd++, yd++) {
		*xd = vdev_raidz_exp2(*rpq->p ^ *rpq->pxy, rpq->aexp) ^
		    vdev_raidz_exp2(*rpq->q ^ *rpq->qxy, rpq->bexp);
		*yd = *rpq->p ^ *rpq->pxy ^ *xd;
	}

	return (0);
}

static int
vdev_raidz_reconst_pq_tail_func(void *xbuf, size_t size, void *private)
{
	struct reconst_pq_struct *rpq = private;
	uint8_t *xd = xbuf;
	int i;

	for (i = 0; i < size;
	    i++, rpq->p++, rpq->q++, rpq->pxy++, rpq->qxy++, xd++) {
		/* same operation as vdev_raidz_reconst_pq_func() on xd */
		*xd = vdev_raidz_exp2(*rpq->p ^ *rpq->pxy, rpq->aexp) ^
		    vdev_raidz_exp2(*rpq->q ^ *rpq->qxy, rpq->bexp);
	}

	return (0);
}

static int
vdev_raidz_reconstruct_p(raidz_map_t *rm, int *tgts, int ntgts)
{
	int x = tgts[0];
	int c;
	abd_t *dst, *src;

	ASSERT(ntgts == 1);
	ASSERT(x >= rm->rm_firstdatacol);
	ASSERT(x < rm->rm_cols);

	ASSERT(rm->rm_col[x].rc_size <= rm->rm_col[VDEV_RAIDZ_P].rc_size);
	ASSERT(rm->rm_col[x].rc_size > 0);

	src = rm->rm_col[VDEV_RAIDZ_P].rc_abd;
	dst = rm->rm_col[x].rc_abd;

	abd_copy_from_buf(dst, abd_to_buf(src), rm->rm_col[x].rc_size);

	for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
		uint64_t size = MIN(rm->rm_col[x].rc_size,
		    rm->rm_col[c].rc_size);

		src = rm->rm_col[c].rc_abd;
		dst = rm->rm_col[x].rc_abd;

		if (c == x)
			continue;

		(void) abd_iterate_func2(dst, src, 0, 0, size,
		    vdev_raidz_reconst_p_func, NULL);
	}

	return (1 << VDEV_RAIDZ_P);
}

static int
vdev_raidz_reconstruct_q(raidz_map_t *rm, int *tgts, int ntgts)
{
	int x = tgts[0];
	int c, exp;
	abd_t *dst, *src;
	struct reconst_q_struct rq;

	ASSERT(ntgts == 1);

	ASSERT(rm->rm_col[x].rc_size <= rm->rm_col[VDEV_RAIDZ_Q].rc_size);

	for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
		uint64_t size = (c == x) ? 0 : MIN(rm->rm_col[x].rc_size,
		    rm->rm_col[c].rc_size);

		src = rm->rm_col[c].rc_abd;
		dst = rm->rm_col[x].rc_abd;

		if (c == rm->rm_firstdatacol) {
			abd_copy(dst, src, size);
			if (rm->rm_col[x].rc_size > size)
				abd_zero_off(dst, size,
				    rm->rm_col[x].rc_size - size);

		} else {
			ASSERT3U(size, <=, rm->rm_col[x].rc_size);
			(void) abd_iterate_func2(dst, src, 0, 0, size,
			    vdev_raidz_reconst_q_pre_func, NULL);
			(void) abd_iterate_func(dst,
			    size, rm->rm_col[x].rc_size - size,
			    vdev_raidz_reconst_q_pre_tail_func, NULL);
		}
	}

	src = rm->rm_col[VDEV_RAIDZ_Q].rc_abd;
	dst = rm->rm_col[x].rc_abd;
	exp = 255 - (rm->rm_cols - 1 - x);
	rq.q = abd_to_buf(src);
	rq.exp = exp;

	(void) abd_iterate_func(dst, 0, rm->rm_col[x].rc_size,
	    vdev_raidz_reconst_q_post_func, &rq);

	return (1 << VDEV_RAIDZ_Q);
}
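
/*
 * Why exp = 255 - (rm_cols - 1 - x) works (a worked note, not code): the
 * pre-pass above recomputes Q with the missing column treated as zero, so
 * XORing in the real Q leaves 2^(ndevs - 1 - x) * D_x in the target buffer.
 * Multiplying by 2^(255 - (ndevs - 1 - x)) cancels that coefficient, since
 * every nonzero field element satisfies A^255 = 1, recovering D_x.
 */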

static int
vdev_raidz_reconstruct_pq(raidz_map_t *rm, int *tgts, int ntgts)
{
	uint8_t *p, *q, *pxy, *qxy, tmp, a, b, aexp, bexp;
	abd_t *pdata, *qdata;
	uint64_t xsize, ysize;
	int x = tgts[0];
	int y = tgts[1];
	abd_t *xd, *yd;
	struct reconst_pq_struct rpq;

	ASSERT(ntgts == 2);
	ASSERT(x < y);
	ASSERT(x >= rm->rm_firstdatacol);
	ASSERT(y < rm->rm_cols);

	ASSERT(rm->rm_col[x].rc_size >= rm->rm_col[y].rc_size);

	/*
	 * Move the parity data aside -- we're going to compute parity as
	 * though columns x and y were full of zeros -- Pxy and Qxy. We want to
	 * reuse the parity generation mechanism without trashing the actual
	 * parity so we make those columns appear to be full of zeros by
	 * setting their lengths to zero.
	 */
	pdata = rm->rm_col[VDEV_RAIDZ_P].rc_abd;
	qdata = rm->rm_col[VDEV_RAIDZ_Q].rc_abd;
	xsize = rm->rm_col[x].rc_size;
	ysize = rm->rm_col[y].rc_size;

	rm->rm_col[VDEV_RAIDZ_P].rc_abd =
	    abd_alloc_linear(rm->rm_col[VDEV_RAIDZ_P].rc_size, B_TRUE);
	rm->rm_col[VDEV_RAIDZ_Q].rc_abd =
	    abd_alloc_linear(rm->rm_col[VDEV_RAIDZ_Q].rc_size, B_TRUE);
	rm->rm_col[x].rc_size = 0;
	rm->rm_col[y].rc_size = 0;

	vdev_raidz_generate_parity_pq(rm);

	rm->rm_col[x].rc_size = xsize;
	rm->rm_col[y].rc_size = ysize;

	p = abd_to_buf(pdata);
	q = abd_to_buf(qdata);
	pxy = abd_to_buf(rm->rm_col[VDEV_RAIDZ_P].rc_abd);
	qxy = abd_to_buf(rm->rm_col[VDEV_RAIDZ_Q].rc_abd);
	xd = rm->rm_col[x].rc_abd;
	yd = rm->rm_col[y].rc_abd;

	/*
	 * We now have:
	 *	Pxy = P + D_x + D_y
	 *	Qxy = Q + 2^(ndevs - 1 - x) * D_x + 2^(ndevs - 1 - y) * D_y
	 *
	 * We can then solve for D_x:
	 *	D_x = A * (P + Pxy) + B * (Q + Qxy)
	 * where
	 *	A = 2^(x - y) * (2^(x - y) + 1)^-1
	 *	B = 2^(ndevs - 1 - x) * (2^(x - y) + 1)^-1
	 *
	 * With D_x in hand, we can easily solve for D_y:
	 *	D_y = P + Pxy + D_x
	 */

	a = vdev_raidz_pow2[255 + x - y];
	b = vdev_raidz_pow2[255 - (rm->rm_cols - 1 - x)];
	tmp = 255 - vdev_raidz_log2[a ^ 1];

	aexp = vdev_raidz_log2[vdev_raidz_exp2(a, tmp)];
	bexp = vdev_raidz_log2[vdev_raidz_exp2(b, tmp)];

	ASSERT3U(xsize, >=, ysize);
	rpq.p = p;
	rpq.q = q;
	rpq.pxy = pxy;
	rpq.qxy = qxy;
	rpq.aexp = aexp;
	rpq.bexp = bexp;

	(void) abd_iterate_func2(xd, yd, 0, 0, ysize,
	    vdev_raidz_reconst_pq_func, &rpq);
	(void) abd_iterate_func(xd, ysize, xsize - ysize,
	    vdev_raidz_reconst_pq_tail_func, &rpq);

	abd_free(rm->rm_col[VDEV_RAIDZ_P].rc_abd);
	abd_free(rm->rm_col[VDEV_RAIDZ_Q].rc_abd);

	/*
	 * Restore the saved parity data.
	 */
	rm->rm_col[VDEV_RAIDZ_P].rc_abd = pdata;
	rm->rm_col[VDEV_RAIDZ_Q].rc_abd = qdata;

	return ((1 << VDEV_RAIDZ_P) | (1 << VDEV_RAIDZ_Q));
}
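
/*
 * A short derivation check for the coefficient setup above (not code):
 * adding 2^(ndevs - 1 - y) * (P + Pxy) to (Q + Qxy) eliminates D_y, leaving
 * (2^(ndevs - 1 - x) + 2^(ndevs - 1 - y)) * D_x. Dividing by that
 * coefficient and factoring out 2^(ndevs - 1 - x) yields exactly A and B:
 * a = 2^(x - y) is the ratio of the two Q coefficients, b = 2^-(ndevs-1-x),
 * and tmp is the log of (a + 1)^-1, so aexp and bexp are the logs of A and
 * B consumed by vdev_raidz_reconst_pq_func().
 */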

/* BEGIN CSTYLED */
/*
 * In the general case of reconstruction, we must solve the system of linear
 * equations defined by the coefficients used to generate parity as well as
 * the contents of the data and parity disks. This can be expressed with
 * vectors for the original data (D) and the actual data (d) and parity (p)
 * and a matrix composed of the identity matrix (I) and a dispersal matrix (V):
 *
 *            __   __                     __     __
 *            |     |         __     __   |  p_0  |
 *            |  V  |         |  D_0  |   | p_m-1 |
 *            |     |    x    |   :   | = |  d_0  |
 *            |  I  |         | D_n-1 |   |   :   |
 *            |     |         ~~     ~~   | d_n-1 |
 *            ~~   ~~                     ~~     ~~
 *
 * I is simply a square identity matrix of size n, and V is a Vandermonde
 * matrix defined by the coefficients we chose for the various parity columns
 * (1, 2, 4). Note that these values were chosen both for simplicity and
 * speed of computation, as well as for linear separability.
 *
 *      __                 __               __     __
 *      |     1 ..  1  1  1 |               |  p_0  |
 *      | 2^n-1 ..  4  2  1 |   __     __   |   :   |
 *      | 4^n-1 .. 16  4  1 |   |  D_0  |   | p_m-1 |
 *      |     1 ..  0  0  0 |   |  D_1  |   |  d_0  |
 *      |     0 ..  0  0  0 | x |  D_2  | = |  d_1  |
 *      |     :     :  :  : |   |   :   |   |  d_2  |
 *      |     0 ..  1  0  0 |   | D_n-1 |   |   :   |
 *      |     0 ..  0  1  0 |   ~~     ~~   |   :   |
 *      |     0 ..  0  0  1 |               | d_n-1 |
 *      ~~                 ~~               ~~     ~~
 *
 * Note that I, V, d, and p are known. To compute D, we must invert the
 * matrix and use the known data and parity values to reconstruct the unknown
 * data values. We begin by removing the rows in V|I and d|p that correspond
 * to failed or missing columns; we then make V|I square (n x n) and d|p
 * sized n by removing rows corresponding to unused parity from the bottom up
 * to generate (V|I)' and (d|p)'. We can then generate the inverse of (V|I)'
 * using Gauss-Jordan elimination. In the example below we use m=3 parity
 * columns, n=8 data columns, with errors in d_1, d_2, and p_1:
 *
 *            __                                __
 *            |   1   1   1   1   1   1   1   1 |
 *            | 128  64  32  16   8   4   2   1 | <-----+-+-- missing disks
 *            |  19 205 116  29  64  16   4   1 |      / /
 *            |   1   0   0   0   0   0   0   0 |     / /
 *            |   0   1   0   0   0   0   0   0 | <--' /
 *   (V|I) =  |   0   0   1   0   0   0   0   0 | <---'
 *            |   0   0   0   1   0   0   0   0 |
 *            |   0   0   0   0   1   0   0   0 |
 *            |   0   0   0   0   0   1   0   0 |
 *            |   0   0   0   0   0   0   1   0 |
 *            |   0   0   0   0   0   0   0   1 |
 *            ~~                                ~~
 *            __                                __
 *            |   1   1   1   1   1   1   1   1 |
 *            |  19 205 116  29  64  16   4   1 |
 *            |   1   0   0   0   0   0   0   0 |
 *   (V|I)' = |   0   0   0   1   0   0   0   0 |
 *            |   0   0   0   0   1   0   0   0 |
 *            |   0   0   0   0   0   1   0   0 |
 *            |   0   0   0   0   0   0   1   0 |
 *            |   0   0   0   0   0   0   0   1 |
 *            ~~                                ~~
 *
 * Here we employ Gauss-Jordan elimination to find the inverse of (V|I)'. We
 * have carefully chosen the seed values 1, 2, and 4 to ensure that this
 * matrix is not singular.
 * __                                                                 __
 * |   1   1   1   1   1   1   1   1     1   0   0   0   0   0   0   0 |
 * |  19 205 116  29  64  16   4   1     0   1   0   0   0   0   0   0 |
 * |   1   0   0   0   0   0   0   0     0   0   1   0   0   0   0   0 |
 * |   0   0   0   1   0   0   0   0     0   0   0   1   0   0   0   0 |
 * |   0   0   0   0   1   0   0   0     0   0   0   0   1   0   0   0 |
 * |   0   0   0   0   0   1   0   0     0   0   0   0   0   1   0   0 |
 * |   0   0   0   0   0   0   1   0     0   0   0   0   0   0   1   0 |
 * |   0   0   0   0   0   0   0   1     0   0   0   0   0   0   0   1 |
 * ~~                                                                 ~~
 * __                                                                 __
 * |   1   0   0   0   0   0   0   0     0   0   1   0   0   0   0   0 |
 * |   1   1   1   1   1   1   1   1     1   0   0   0   0   0   0   0 |
 * |  19 205 116  29  64  16   4   1     0   1   0   0   0   0   0   0 |
 * |   0   0   0   1   0   0   0   0     0   0   0   1   0   0   0   0 |
 * |   0   0   0   0   1   0   0   0     0   0   0   0   1   0   0   0 |
 * |   0   0   0   0   0   1   0   0     0   0   0   0   0   1   0   0 |
 * |   0   0   0   0   0   0   1   0     0   0   0   0   0   0   1   0 |
 * |   0   0   0   0   0   0   0   1     0   0   0   0   0   0   0   1 |
 * ~~                                                                 ~~
 * __                                                                 __
 * |   1   0   0   0   0   0   0   0     0   0   1   0   0   0   0   0 |
 * |   0   1   1   0   0   0   0   0     1   0   1   1   1   1   1   1 |
 * |   0 205 116   0   0   0   0   0     0   1  19  29  64  16   4   1 |
 * |   0   0   0   1   0   0   0   0     0   0   0   1   0   0   0   0 |
 * |   0   0   0   0   1   0   0   0     0   0   0   0   1   0   0   0 |
 * |   0   0   0   0   0   1   0   0     0   0   0   0   0   1   0   0 |
 * |   0   0   0   0   0   0   1   0     0   0   0   0   0   0   1   0 |
 * |   0   0   0   0   0   0   0   1     0   0   0   0   0   0   0   1 |
 * ~~                                                                 ~~
 * __                                                                 __
 * |   1   0   0   0   0   0   0   0     0   0   1   0   0   0   0   0 |
 * |   0   1   1   0   0   0   0   0     1   0   1   1   1   1   1   1 |
 * |   0   0 185   0   0   0   0   0   205   1 222 208 141 221 201 204 |
 * |   0   0   0   1   0   0   0   0     0   0   0   1   0   0   0   0 |
 * |   0   0   0   0   1   0   0   0     0   0   0   0   1   0   0   0 |
 * |   0   0   0   0   0   1   0   0     0   0   0   0   0   1   0   0 |
 * |   0   0   0   0   0   0   1   0     0   0   0   0   0   0   1   0 |
 * |   0   0   0   0   0   0   0   1     0   0   0   0   0   0   0   1 |
 * ~~                                                                 ~~
 * __                                                                 __
 * |   1   0   0   0   0   0   0   0     0   0   1   0   0   0   0   0 |
 * |   0   1   1   0   0   0   0   0     1   0   1   1   1   1   1   1 |
 * |   0   0   1   0   0   0   0   0   166 100   4  40 158 168 216 209 |
 * |   0   0   0   1   0   0   0   0     0   0   0   1   0   0   0   0 |
 * |   0   0   0   0   1   0   0   0     0   0   0   0   1   0   0   0 |
 * |   0   0   0   0   0   1   0   0     0   0   0   0   0   1   0   0 |
 * |   0   0   0   0   0   0   1   0     0   0   0   0   0   0   1   0 |
 * |   0   0   0   0   0   0   0   1     0   0   0   0   0   0   0   1 |
 * ~~                                                                 ~~
 * __                                                                 __
 * |   1   0   0   0   0   0   0   0     0   0   1   0   0   0   0   0 |
 * |   0   1   0   0   0   0   0   0   167 100   5  41 159 169 217 208 |
 * |   0   0   1   0   0   0   0   0   166 100   4  40 158 168 216 209 |
 * |   0   0   0   1   0   0   0   0     0   0   0   1   0   0   0   0 |
 * |   0   0   0   0   1   0   0   0     0   0   0   0   1   0   0   0 |
 * |   0   0   0   0   0   1   0   0     0   0   0   0   0   1   0   0 |
 * |   0   0   0   0   0   0   1   0     0   0   0   0   0   0   1   0 |
 * |   0   0   0   0   0   0   0   1     0   0   0   0   0   0   0   1 |
 * ~~                                                                 ~~
 *
 *                  __                                __
 *                  |   0   0   1   0   0   0   0   0 |
 *                  | 167 100   5  41 159 169 217 208 |
 *                  | 166 100   4  40 158 168 216 209 |
 *     (V|I)'^-1 =  |   0   0   0   1   0   0   0   0 |
 *                  |   0   0   0   0   1   0   0   0 |
 *                  |   0   0   0   0   0   1   0   0 |
 *                  |   0   0   0   0   0   0   1   0 |
 *                  |   0   0   0   0   0   0   0   1 |
 *                  ~~                                ~~
 *
 * We can then simply compute D = (V|I)'^-1 x (d|p)' to discover the values
 * of the missing data.
 *
 * As is apparent from the example above, the only non-trivial rows in the
 * inverse matrix correspond to the data disks that we're trying to
 * reconstruct. Indeed, those are the only rows we need as the others would
 * only be useful for reconstructing data known or assumed to be valid. For
 * that reason, we only build the coefficients in the rows that correspond to
 * targeted columns.
 */
/* END CSTYLED */
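
/*
 * Worked note for vdev_raidz_matrix_init() below: the dispersal-matrix row
 * for parity index k (0 = P, 1 = Q, 2 = R) holds the coefficients
 * (2^k)^(n-1), ..., (2^k)^1, (2^k)^0. With n = 8 and k = 2 that is
 * 4^7 .. 4^0, which reduced modulo the field polynomial gives exactly the
 * R row in the example above: 19 205 116 29 64 16 4 1.
 */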

static void
vdev_raidz_matrix_init(raidz_map_t *rm, int n, int nmap, int *map,
    uint8_t **rows)
{
	int i, j;
	int pow;

	ASSERT(n == rm->rm_cols - rm->rm_firstdatacol);

	/*
	 * Fill in the missing rows of interest.
	 */
	for (i = 0; i < nmap; i++) {
		ASSERT3S(0, <=, map[i]);
		ASSERT3S(map[i], <=, 2);

		pow = map[i] * n;
		if (pow > 255)
			pow -= 255;
		ASSERT(pow <= 255);

		for (j = 0; j < n; j++) {
			pow -= map[i];
			if (pow < 0)
				pow += 255;
			rows[i][j] = vdev_raidz_pow2[pow];
		}
	}
}

static void
vdev_raidz_matrix_invert(raidz_map_t *rm, int n, int nmissing, int *missing,
    uint8_t **rows, uint8_t **invrows, const uint8_t *used)
{
	int i, j, ii, jj;
	uint8_t log;

	/*
	 * Assert that the first nmissing entries from the array of used
	 * columns correspond to parity columns and that subsequent entries
	 * correspond to data columns.
	 */
	for (i = 0; i < nmissing; i++) {
		ASSERT3S(used[i], <, rm->rm_firstdatacol);
	}
	for (; i < n; i++) {
		ASSERT3S(used[i], >=, rm->rm_firstdatacol);
	}

	/*
	 * First initialize the storage where we'll compute the inverse rows.
	 */
	for (i = 0; i < nmissing; i++) {
		for (j = 0; j < n; j++) {
			invrows[i][j] = (i == j) ? 1 : 0;
		}
	}

	/*
	 * Subtract all trivial rows from the rows of consequence.
	 */
	for (i = 0; i < nmissing; i++) {
		for (j = nmissing; j < n; j++) {
			ASSERT3U(used[j], >=, rm->rm_firstdatacol);
			jj = used[j] - rm->rm_firstdatacol;
			ASSERT3S(jj, <, n);
			invrows[i][j] = rows[i][jj];
			rows[i][jj] = 0;
		}
	}

	/*
	 * For each of the rows of interest, we must normalize it and subtract
	 * a multiple of it from the other rows.
	 */
	for (i = 0; i < nmissing; i++) {
		for (j = 0; j < missing[i]; j++) {
			ASSERT0(rows[i][j]);
		}
		ASSERT3U(rows[i][missing[i]], !=, 0);

		/*
		 * Compute the inverse of the first element and multiply each
		 * element in the row by that value.
		 */
		log = 255 - vdev_raidz_log2[rows[i][missing[i]]];

		for (j = 0; j < n; j++) {
			rows[i][j] = vdev_raidz_exp2(rows[i][j], log);
			invrows[i][j] = vdev_raidz_exp2(invrows[i][j], log);
		}

		for (ii = 0; ii < nmissing; ii++) {
			if (i == ii)
				continue;

			ASSERT3U(rows[ii][missing[i]], !=, 0);

			log = vdev_raidz_log2[rows[ii][missing[i]]];

			for (j = 0; j < n; j++) {
				rows[ii][j] ^=
				    vdev_raidz_exp2(rows[i][j], log);
				invrows[ii][j] ^=
				    vdev_raidz_exp2(invrows[i][j], log);
			}
		}
	}

	/*
	 * Verify that the data that is left in the rows is properly part of
	 * an identity matrix.
	 */
	for (i = 0; i < nmissing; i++) {
		for (j = 0; j < n; j++) {
			if (j == missing[i]) {
				ASSERT3U(rows[i][j], ==, 1);
			} else {
				ASSERT0(rows[i][j]);
			}
		}
	}
}

static void
vdev_raidz_matrix_reconstruct(raidz_map_t *rm, int n, int nmissing,
    int *missing, uint8_t **invrows, const uint8_t *used)
{
	int i, j, x, cc, c;
	uint8_t *src;
	uint64_t ccount;
	uint8_t *dst[VDEV_RAIDZ_MAXPARITY] = { NULL };
	uint64_t dcount[VDEV_RAIDZ_MAXPARITY] = { 0 };
	uint8_t log = 0;
	uint8_t val;
	int ll;
	uint8_t *invlog[VDEV_RAIDZ_MAXPARITY];
	uint8_t *p, *pp;
	size_t psize;

	psize = sizeof (invlog[0][0]) * n * nmissing;
	p = kmem_alloc(psize, KM_SLEEP);

	for (pp = p, i = 0; i < nmissing; i++) {
		invlog[i] = pp;
		pp += n;
	}

	for (i = 0; i < nmissing; i++) {
		for (j = 0; j < n; j++) {
			ASSERT3U(invrows[i][j], !=, 0);
			invlog[i][j] = vdev_raidz_log2[invrows[i][j]];
		}
	}

	for (i = 0; i < n; i++) {
		c = used[i];
		ASSERT3U(c, <, rm->rm_cols);

		src = abd_to_buf(rm->rm_col[c].rc_abd);
		ccount = rm->rm_col[c].rc_size;
		for (j = 0; j < nmissing; j++) {
			cc = missing[j] + rm->rm_firstdatacol;
			ASSERT3U(cc, >=, rm->rm_firstdatacol);
			ASSERT3U(cc, <, rm->rm_cols);
			ASSERT3U(cc, !=, c);

			dst[j] = abd_to_buf(rm->rm_col[cc].rc_abd);
			dcount[j] = rm->rm_col[cc].rc_size;
		}

		ASSERT(ccount >= rm->rm_col[missing[0]].rc_size || i > 0);

		for (x = 0; x < ccount; x++, src++) {
			if (*src != 0)
				log = vdev_raidz_log2[*src];

			for (cc = 0; cc < nmissing; cc++) {
				if (x >= dcount[cc])
					continue;

				if (*src == 0) {
					val = 0;
				} else {
					if ((ll = log + invlog[cc][i]) >= 255)
						ll -= 255;
					val = vdev_raidz_pow2[ll];
				}

				if (i == 0)
					dst[cc][x] = val;
				else
					dst[cc][x] ^= val;
			}
		}
	}

	kmem_free(p, psize);
}

static int
vdev_raidz_reconstruct_general(raidz_map_t *rm, int *tgts, int ntgts)
{
	int n, i, c, t, tt;
	int nmissing_rows;
	int missing_rows[VDEV_RAIDZ_MAXPARITY];
	int parity_map[VDEV_RAIDZ_MAXPARITY];

	uint8_t *p, *pp;
	size_t psize;

	uint8_t *rows[VDEV_RAIDZ_MAXPARITY];
	uint8_t *invrows[VDEV_RAIDZ_MAXPARITY];
	uint8_t *used;

	abd_t **bufs = NULL;

	int code = 0;

	/*
	 * Matrix reconstruction can't use scatter ABDs yet, so we allocate
	 * temporary linear ABDs.
	 */
	if (!abd_is_linear(rm->rm_col[rm->rm_firstdatacol].rc_abd)) {
		bufs = kmem_alloc(rm->rm_cols * sizeof (abd_t *), KM_PUSHPAGE);

		for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
			raidz_col_t *col = &rm->rm_col[c];

			bufs[c] = col->rc_abd;
			col->rc_abd = abd_alloc_linear(col->rc_size, B_TRUE);
			abd_copy(col->rc_abd, bufs[c], col->rc_size);
		}
	}

	n = rm->rm_cols - rm->rm_firstdatacol;

	/*
	 * Figure out which data columns are missing.
	 */
	nmissing_rows = 0;
	for (t = 0; t < ntgts; t++) {
		if (tgts[t] >= rm->rm_firstdatacol) {
			missing_rows[nmissing_rows++] =
			    tgts[t] - rm->rm_firstdatacol;
		}
	}

	/*
	 * Figure out which parity columns to use to help generate the missing
	 * data columns.
	 */
	for (tt = 0, c = 0, i = 0; i < nmissing_rows; c++) {
		ASSERT(tt < ntgts);
		ASSERT(c < rm->rm_firstdatacol);

		/*
		 * Skip any targeted parity columns.
		 */
		if (c == tgts[tt]) {
			tt++;
			continue;
		}

		code |= 1 << c;

		parity_map[i] = c;
		i++;
	}

	ASSERT(code != 0);
	ASSERT3U(code, <, 1 << VDEV_RAIDZ_MAXPARITY);

	psize = (sizeof (rows[0][0]) + sizeof (invrows[0][0])) *
	    nmissing_rows * n + sizeof (used[0]) * n;
	p = kmem_alloc(psize, KM_SLEEP);

	for (pp = p, i = 0; i < nmissing_rows; i++) {
		rows[i] = pp;
		pp += n;
		invrows[i] = pp;
		pp += n;
	}
	used = pp;

	for (i = 0; i < nmissing_rows; i++) {
		used[i] = parity_map[i];
	}

	for (tt = 0, c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
		if (tt < nmissing_rows &&
		    c == missing_rows[tt] + rm->rm_firstdatacol) {
			tt++;
			continue;
		}

		ASSERT3S(i, <, n);
		used[i] = c;
		i++;
	}

	/*
	 * Initialize the interesting rows of the matrix.
	 */
	vdev_raidz_matrix_init(rm, n, nmissing_rows, parity_map, rows);

	/*
	 * Invert the matrix.
	 */
	vdev_raidz_matrix_invert(rm, n, nmissing_rows, missing_rows, rows,
	    invrows, used);

	/*
	 * Reconstruct the missing data using the generated matrix.
	 */
	vdev_raidz_matrix_reconstruct(rm, n, nmissing_rows, missing_rows,
	    invrows, used);

	kmem_free(p, psize);

	/*
	 * copy back from temporary linear abds and free them
	 */
	if (bufs) {
		for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
			raidz_col_t *col = &rm->rm_col[c];

			abd_copy(bufs[c], col->rc_abd, col->rc_size);
			abd_free(col->rc_abd);
			col->rc_abd = bufs[c];
		}
		kmem_free(bufs, rm->rm_cols * sizeof (abd_t *));
	}

	return (code);
}

int
vdev_raidz_reconstruct(raidz_map_t *rm, const int *t, int nt)
{
	int tgts[VDEV_RAIDZ_MAXPARITY], *dt;
	int ntgts;
	int i, c, ret;
	int code;
	int nbadparity, nbaddata;
	int parity_valid[VDEV_RAIDZ_MAXPARITY];

	/*
	 * The tgts list must already be sorted.
	 */
	for (i = 1; i < nt; i++) {
		ASSERT(t[i] > t[i - 1]);
	}

	nbadparity = rm->rm_firstdatacol;
	nbaddata = rm->rm_cols - nbadparity;
	ntgts = 0;
	for (i = 0, c = 0; c < rm->rm_cols; c++) {
		if (c < rm->rm_firstdatacol)
			parity_valid[c] = B_FALSE;

		if (i < nt && c == t[i]) {
			tgts[ntgts++] = c;
			i++;
		} else if (rm->rm_col[c].rc_error != 0) {
			tgts[ntgts++] = c;
		} else if (c >= rm->rm_firstdatacol) {
			nbaddata--;
		} else {
			parity_valid[c] = B_TRUE;
			nbadparity--;
		}
	}

	ASSERT(ntgts >= nt);
	ASSERT(nbaddata >= 0);
	ASSERT(nbaddata + nbadparity == ntgts);

	dt = &tgts[nbadparity];

	/* Reconstruct using the new math implementation */
	ret = vdev_raidz_math_reconstruct(rm, parity_valid, dt, nbaddata);
	if (ret != RAIDZ_ORIGINAL_IMPL)
		return (ret);

	/*
	 * See if we can use any of our optimized reconstruction routines.
	 */
	switch (nbaddata) {
	case 1:
		if (parity_valid[VDEV_RAIDZ_P])
			return (vdev_raidz_reconstruct_p(rm, dt, 1));

		ASSERT(rm->rm_firstdatacol > 1);

		if (parity_valid[VDEV_RAIDZ_Q])
			return (vdev_raidz_reconstruct_q(rm, dt, 1));

		ASSERT(rm->rm_firstdatacol > 2);
		break;

	case 2:
		ASSERT(rm->rm_firstdatacol > 1);

		if (parity_valid[VDEV_RAIDZ_P] &&
		    parity_valid[VDEV_RAIDZ_Q])
			return (vdev_raidz_reconstruct_pq(rm, dt, 2));

		ASSERT(rm->rm_firstdatacol > 2);

		break;
	}

	code = vdev_raidz_reconstruct_general(rm, tgts, ntgts);
	ASSERT(code < (1 << VDEV_RAIDZ_MAXPARITY));
	ASSERT(code > 0);
	return (code);
}

static int
vdev_raidz_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
    uint64_t *ashift)
{
	vdev_t *cvd;
	uint64_t nparity = vd->vdev_nparity;
	int c;
	int lasterror = 0;
	int numerrors = 0;

	ASSERT(nparity > 0);

	if (nparity > VDEV_RAIDZ_MAXPARITY ||
	    vd->vdev_children < nparity + 1) {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (SET_ERROR(EINVAL));
	}

	vdev_open_children(vd);

	for (c = 0; c < vd->vdev_children; c++) {
		cvd = vd->vdev_child[c];

		if (cvd->vdev_open_error != 0) {
			lasterror = cvd->vdev_open_error;
			numerrors++;
			continue;
		}

		*asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
		*max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
		*ashift = MAX(*ashift, cvd->vdev_ashift);
	}

	*asize *= vd->vdev_children;
	*max_asize *= vd->vdev_children;

	if (numerrors > nparity) {
		vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
		return (lasterror);
	}

	return (0);
}

static void
vdev_raidz_close(vdev_t *vd)
{
	int c;

	for (c = 0; c < vd->vdev_children; c++)
		vdev_close(vd->vdev_child[c]);
}

static uint64_t
vdev_raidz_asize(vdev_t *vd, uint64_t psize)
{
	uint64_t asize;
	uint64_t ashift = vd->vdev_top->vdev_ashift;
	uint64_t cols = vd->vdev_children;
	uint64_t nparity = vd->vdev_nparity;

	asize = ((psize - 1) >> ashift) + 1;
	asize += nparity * ((asize + cols - nparity - 1) / (cols - nparity));
	asize = roundup(asize, nparity + 1) << ashift;

	return (asize);
}
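
/*
 * Worked example (illustrative): a 6-wide raidz2 (cols = 6, nparity = 2)
 * with ashift = 9 asked about psize = 3584 bytes:
 *
 *	asize = ((3584 - 1) >> 9) + 1            = 7 data sectors
 *	asize += 2 * ((7 + 6 - 2 - 1) / (6 - 2)) = 7 + 4 = 11
 *	asize = roundup(11, 3) << 9              = 12 sectors = 6144 bytes
 *
 * which matches the 11 data+parity sectors plus 1 skip sector from the
 * vdev_raidz_map_alloc() example above.
 */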

static void
vdev_raidz_child_done(zio_t *zio)
{
	raidz_col_t *rc = zio->io_private;

	rc->rc_error = zio->io_error;
	rc->rc_tried = 1;
	rc->rc_skipped = 0;
}

/*
 * Start an IO operation on a RAIDZ VDev
 *
 * Outline:
 * - For write operations:
 *   1. Generate the parity data
 *   2. Create child zio write operations to each column's vdev, for both
 *      data and parity.
 *   3. If the column skips any sectors for padding, create optional dummy
 *      write zio children for those areas to improve aggregation continuity.
 * - For read operations:
 *   1. Create child zio read operations to each data column's vdev to read
 *      the range of data required for zio.
 *   2. If this is a scrub or resilver operation, or if any of the data
 *      vdevs have had errors, then create zio read operations to the parity
 *      columns' VDevs as well.
 */
static void
vdev_raidz_io_start(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_t *tvd = vd->vdev_top;
	vdev_t *cvd;
	raidz_map_t *rm;
	raidz_col_t *rc;
	int c, i;

	rm = vdev_raidz_map_alloc(zio, tvd->vdev_ashift, vd->vdev_children,
	    vd->vdev_nparity);

	ASSERT3U(rm->rm_asize, ==, vdev_psize_to_asize(vd, zio->io_size));

	if (zio->io_type == ZIO_TYPE_WRITE) {
		vdev_raidz_generate_parity(rm);

		for (c = 0; c < rm->rm_cols; c++) {
			rc = &rm->rm_col[c];
			cvd = vd->vdev_child[rc->rc_devidx];
			zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
			    rc->rc_offset, rc->rc_abd, rc->rc_size,
			    zio->io_type, zio->io_priority, 0,
			    vdev_raidz_child_done, rc));
		}

		/*
		 * Generate optional I/Os for any skipped sectors to improve
		 * aggregation contiguity.
		 */
		for (c = rm->rm_skipstart, i = 0; i < rm->rm_nskip; c++, i++) {
			ASSERT(c <= rm->rm_scols);
			if (c == rm->rm_scols)
				c = 0;
			rc = &rm->rm_col[c];
			cvd = vd->vdev_child[rc->rc_devidx];
			zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
			    rc->rc_offset + rc->rc_size, NULL,
			    1 << tvd->vdev_ashift,
			    zio->io_type, zio->io_priority,
			    ZIO_FLAG_NODATA | ZIO_FLAG_OPTIONAL, NULL, NULL));
		}

		zio_execute(zio);
		return;
	}

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	/*
	 * Iterate over the columns in reverse order so that we hit the parity
	 * last -- any errors along the way will force us to read the parity.
	 */
	for (c = rm->rm_cols - 1; c >= 0; c--) {
		rc = &rm->rm_col[c];
		cvd = vd->vdev_child[rc->rc_devidx];
		if (!vdev_readable(cvd)) {
			if (c >= rm->rm_firstdatacol)
				rm->rm_missingdata++;
			else
				rm->rm_missingparity++;
			rc->rc_error = SET_ERROR(ENXIO);
			rc->rc_tried = 1;	/* don't even try */
			rc->rc_skipped = 1;
			continue;
		}
		if (vdev_dtl_contains(cvd, DTL_MISSING, zio->io_txg, 1)) {
			if (c >= rm->rm_firstdatacol)
				rm->rm_missingdata++;
			else
				rm->rm_missingparity++;
			rc->rc_error = SET_ERROR(ESTALE);
			rc->rc_skipped = 1;
			continue;
		}
		if (c >= rm->rm_firstdatacol || rm->rm_missingdata > 0 ||
		    (zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
			zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
			    rc->rc_offset, rc->rc_abd, rc->rc_size,
			    zio->io_type, zio->io_priority, 0,
			    vdev_raidz_child_done, rc));
		}
	}

	zio_execute(zio);
}

/*
 * Report a checksum error for a child of a RAID-Z device.
 */
static void
raidz_checksum_error(zio_t *zio, raidz_col_t *rc, void *bad_data)
{
	void *buf;
	vdev_t *vd = zio->io_vd->vdev_child[rc->rc_devidx];

	if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
		zio_bad_cksum_t zbc;
		raidz_map_t *rm = zio->io_vsd;

		mutex_enter(&vd->vdev_stat_lock);
		vd->vdev_stat.vs_checksum_errors++;
		mutex_exit(&vd->vdev_stat_lock);

		zbc.zbc_has_cksum = 0;
		zbc.zbc_injected = rm->rm_ecksuminjected;

		buf = abd_borrow_buf_copy(rc->rc_abd, rc->rc_size);
		zfs_ereport_post_checksum(zio->io_spa, vd, zio,
		    rc->rc_offset, rc->rc_size, buf, bad_data,
		    &zbc);
		abd_return_buf(rc->rc_abd, buf, rc->rc_size);
	}
}

/*
 * We keep track of whether or not there were any injected errors, so that
 * any ereports we generate can note it.
 */
static int
raidz_checksum_verify(zio_t *zio)
{
	zio_bad_cksum_t zbc;
	raidz_map_t *rm = zio->io_vsd;
	int ret;

	bzero(&zbc, sizeof (zio_bad_cksum_t));

	ret = zio_checksum_error(zio, &zbc);
	if (ret != 0 && zbc.zbc_injected != 0)
		rm->rm_ecksuminjected = 1;

	return (ret);
}

/*
 * Generate the parity from the data columns. If we tried and were able to
 * read the parity without error, verify that the generated parity matches the
 * data we read. If it doesn't, we fire off a checksum error. Return the
 * number of such failures.
 */
static int
raidz_parity_verify(zio_t *zio, raidz_map_t *rm)
{
	void *orig[VDEV_RAIDZ_MAXPARITY];
	int c, ret = 0;
	raidz_col_t *rc;

	blkptr_t *bp = zio->io_bp;
	enum zio_checksum checksum = (bp == NULL ? zio->io_prop.zp_checksum :
	    (BP_IS_GANG(bp) ? ZIO_CHECKSUM_GANG_HEADER : BP_GET_CHECKSUM(bp)));

	if (checksum == ZIO_CHECKSUM_NOPARITY)
		return (ret);

	for (c = 0; c < rm->rm_firstdatacol; c++) {
		rc = &rm->rm_col[c];
		if (!rc->rc_tried || rc->rc_error != 0)
			continue;
		orig[c] = zio_buf_alloc(rc->rc_size);
		abd_copy_to_buf(orig[c], rc->rc_abd, rc->rc_size);
	}

	vdev_raidz_generate_parity(rm);

	for (c = 0; c < rm->rm_firstdatacol; c++) {
		rc = &rm->rm_col[c];
		if (!rc->rc_tried || rc->rc_error != 0)
			continue;
		if (bcmp(orig[c], abd_to_buf(rc->rc_abd), rc->rc_size) != 0) {
			raidz_checksum_error(zio, rc, orig[c]);
			rc->rc_error = SET_ERROR(ECKSUM);
			ret++;
		}
		zio_buf_free(orig[c], rc->rc_size);
	}

	return (ret);
}

static int
vdev_raidz_worst_error(raidz_map_t *rm)
{
	int c, error = 0;

	for (c = 0; c < rm->rm_cols; c++)
		error = zio_worst_error(error, rm->rm_col[c].rc_error);

	return (error);
}

/*
 * Iterate over all combinations of bad data and attempt a reconstruction.
 * Note that the algorithm below is non-optimal because it doesn't take into
 * account how reconstruction is actually performed. For example, with
 * triple-parity RAID-Z the reconstruction procedure is the same if column 4
 * is targeted as invalid as if columns 1 and 4 are targeted since in both
 * cases we'd only use parity information in column 0.
 */
1867 static int
1868 vdev_raidz_combrec(zio_t *zio, int total_errors, int data_errors)
1869 {
1870 raidz_map_t *rm = zio->io_vsd;
1871 raidz_col_t *rc;
1872 void *orig[VDEV_RAIDZ_MAXPARITY];
1873 int tstore[VDEV_RAIDZ_MAXPARITY + 2];
1874 int *tgts = &tstore[1];
1875 int curr, next, i, c, n;
1876 int code, ret = 0;
1877
1878 ASSERT(total_errors < rm->rm_firstdatacol);
1879
1880 /*
1881 * This simplifies one edge condition.
1882 */
1883 tgts[-1] = -1;
1884
1885 for (n = 1; n <= rm->rm_firstdatacol - total_errors; n++) {
1886 /*
1887 * Initialize the targets array by finding the first n columns
1888 * that contain no error.
1889 *
1890 * If there were no data errors, we need to ensure that we're
1891 * always explicitly attempting to reconstruct at least one
1892 * data column. To do this, we simply push the highest target
1893 * up into the data columns.
1894 */
1895 for (c = 0, i = 0; i < n; i++) {
1896 if (i == n - 1 && data_errors == 0 &&
1897 c < rm->rm_firstdatacol) {
1898 c = rm->rm_firstdatacol;
1899 }
1900
1901 while (rm->rm_col[c].rc_error != 0) {
1902 c++;
1903 ASSERT3S(c, <, rm->rm_cols);
1904 }
1905
1906 tgts[i] = c++;
1907 }
1908
1909 /*
1910 * Setting tgts[n] simplifies the other edge condition.
1911 */
1912 tgts[n] = rm->rm_cols;
1913
1914 /*
1915 * These buffers were allocated in previous iterations.
1916 */
1917 for (i = 0; i < n - 1; i++) {
1918 ASSERT(orig[i] != NULL);
1919 }
1920
		orig[n - 1] = zio_buf_alloc(rm->rm_col[0].rc_size);

		curr = 0;
		next = tgts[curr];

		while (curr != n) {
			tgts[curr] = next;
			curr = 0;

			/*
			 * Save off the original data that we're going to
			 * attempt to reconstruct.
			 */
			for (i = 0; i < n; i++) {
				ASSERT(orig[i] != NULL);
				c = tgts[i];
				ASSERT3S(c, >=, 0);
				ASSERT3S(c, <, rm->rm_cols);
				rc = &rm->rm_col[c];
				abd_copy_to_buf(orig[i], rc->rc_abd,
				    rc->rc_size);
			}

			/*
			 * Attempt a reconstruction and exit the outer loop on
			 * success.
			 */
			code = vdev_raidz_reconstruct(rm, tgts, n);
			if (raidz_checksum_verify(zio) == 0) {

				for (i = 0; i < n; i++) {
					c = tgts[i];
					rc = &rm->rm_col[c];
					ASSERT(rc->rc_error == 0);
					if (rc->rc_tried)
						raidz_checksum_error(zio, rc,
						    orig[i]);
					rc->rc_error = SET_ERROR(ECKSUM);
				}

				ret = code;
				goto done;
			}

			/*
			 * Restore the original data.
			 */
			for (i = 0; i < n; i++) {
				c = tgts[i];
				rc = &rm->rm_col[c];
				abd_copy_from_buf(rc->rc_abd, orig[i],
				    rc->rc_size);
			}

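			/*
			 * Advance 'tgts' to the next combination of n target
			 * columns, odometer-style: try to bump the lowest
			 * target to the next error-free column; if it would
			 * collide with the target above it, reset it to the
			 * first error-free column past its left neighbor and
			 * carry the increment upward.
			 */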
			do {
				/*
				 * Find the next valid column after the curr
				 * position.
				 */
				for (next = tgts[curr] + 1;
				    next < rm->rm_cols &&
				    rm->rm_col[next].rc_error != 0; next++)
					continue;

				ASSERT(next <= tgts[curr + 1]);

				/*
				 * If that spot is available, we're done here.
				 */
				if (next != tgts[curr + 1])
					break;

				/*
				 * Otherwise, find the next valid column after
				 * the previous position.
				 */
				for (c = tgts[curr - 1] + 1;
				    rm->rm_col[c].rc_error != 0; c++)
					continue;

				tgts[curr] = c;
				curr++;

			} while (curr != n);
		}
	}
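	/*
	 * Every combination failed.  The loop above exited with n one past
	 * the number of 'orig' buffers actually allocated, so step it back
	 * before freeing them below.  (The goto above skips this because a
	 * successful iteration leaves n equal to the buffer count.)
	 */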
	n--;
done:
	for (i = 0; i < n; i++) {
		zio_buf_free(orig[i], rm->rm_col[0].rc_size);
	}

	return (ret);
}

/*
 * Complete an IO operation on a RAIDZ VDev
 *
 * Outline:
 * - For write operations:
 *   1. Check for errors on the child IOs.
 *   2. Return, setting an error code if too few child VDevs were written
 *      to reconstruct the data later.  Note that partial writes are
 *      considered successful if they can be reconstructed at all.
 * - For read operations:
 *   1. Check for errors on the child IOs.
 *   2. If data errors occurred:
 *      a. Try to reassemble the data from the parity available.
 *      b. If we haven't yet read the parity drives, read them now.
 *      c. If all parity drives have been read but the data still doesn't
 *         reassemble with a correct checksum, then try combinatorial
 *         reconstruction.
 *      d. If that doesn't work, return an error.
 *   3. If there were unexpected errors or this is a resilver operation,
 *      rewrite the vdevs that had errors.
 */
static void
vdev_raidz_io_done(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_t *cvd;
	raidz_map_t *rm = zio->io_vsd;
	raidz_col_t *rc = NULL;
	int unexpected_errors = 0;
	int parity_errors = 0;
	int parity_untried = 0;
	int data_errors = 0;
	int total_errors = 0;
	int n, c;
	int tgts[VDEV_RAIDZ_MAXPARITY];
	int code;

	ASSERT(zio->io_bp != NULL);	/* XXX need to add code to enforce this */

	ASSERT(rm->rm_missingparity <= rm->rm_firstdatacol);
	ASSERT(rm->rm_missingdata <= rm->rm_cols - rm->rm_firstdatacol);

	for (c = 0; c < rm->rm_cols; c++) {
		rc = &rm->rm_col[c];

		if (rc->rc_error) {
			ASSERT(rc->rc_error != ECKSUM);	/* child has no bp */

			if (c < rm->rm_firstdatacol)
				parity_errors++;
			else
				data_errors++;

			if (!rc->rc_skipped)
				unexpected_errors++;

			total_errors++;
		} else if (c < rm->rm_firstdatacol && !rc->rc_tried) {
			parity_untried++;
		}
	}

	if (zio->io_type == ZIO_TYPE_WRITE) {
		/*
		 * XXX -- for now, treat partial writes as a success.
		 * (If we couldn't write enough columns to reconstruct
		 * the data, the I/O failed.  Otherwise, good enough.)
		 *
		 * Now that we support write reallocation, it would be better
		 * to treat partial failure as real failure unless there are
		 * no non-degraded top-level vdevs left, and not update DTLs
		 * if we intend to reallocate.
		 */
		/* XXPOLICY */
		if (total_errors > rm->rm_firstdatacol)
			zio->io_error = vdev_raidz_worst_error(rm);

		return;
	}

	ASSERT(zio->io_type == ZIO_TYPE_READ);
	/*
	 * There are three potential phases for a read:
	 *	1. produce valid data from the columns read
	 *	2. read all disks and try again
	 *	3. perform combinatorial reconstruction
	 *
	 * Each phase is progressively both more expensive and less likely to
	 * occur. If we encounter more errors than we can repair or all phases
	 * fail, we have no choice but to return an error.
	 */

	/*
	 * If the number of errors we saw was correctable -- less than or equal
	 * to the number of parity disks read -- attempt to produce data that
	 * has a valid checksum.  Naturally, this case applies in the absence
	 * of any errors.
	 */
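	/*
	 * For example, in a raidz2 map (rm_firstdatacol == 2) with one data
	 * error and one unread parity column, 1 <= 2 - 1 holds, so the
	 * parity actually read is sufficient to attempt reconstruction here.
	 */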
	if (total_errors <= rm->rm_firstdatacol - parity_untried) {
		if (data_errors == 0) {
			if (raidz_checksum_verify(zio) == 0) {
				/*
				 * If we read parity information (unnecessarily
				 * as it happens, since no reconstruction was
				 * needed), regenerate and verify the parity.
				 * We also regenerate parity when resilvering
				 * so we can write it out to the failed device
				 * later.
				 */
				if (parity_errors + parity_untried <
				    rm->rm_firstdatacol ||
				    (zio->io_flags & ZIO_FLAG_RESILVER)) {
					n = raidz_parity_verify(zio, rm);
					unexpected_errors += n;
					ASSERT(parity_errors + n <=
					    rm->rm_firstdatacol);
				}
				goto done;
			}
		} else {
			/*
			 * We either attempt to read all the parity columns or
			 * none of them.  If we didn't try to read parity, we
			 * wouldn't be here in the correctable case.  There must
			 * also have been fewer parity errors than parity
			 * columns or, again, we wouldn't be in this code path.
			 */
			ASSERT(parity_untried == 0);
			ASSERT(parity_errors < rm->rm_firstdatacol);

			/*
			 * Identify the data columns that reported an error.
			 */
			n = 0;
			for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
				rc = &rm->rm_col[c];
				if (rc->rc_error != 0) {
					ASSERT(n < VDEV_RAIDZ_MAXPARITY);
					tgts[n++] = c;
				}
			}

			ASSERT(rm->rm_firstdatacol >= n);

			code = vdev_raidz_reconstruct(rm, tgts, n);

			if (raidz_checksum_verify(zio) == 0) {
				/*
				 * If we read more parity disks than were used
				 * for reconstruction, confirm that the other
				 * parity disks produced correct data.  This
				 * routine is suboptimal in that it regenerates
				 * the parity that we already used in addition
				 * to the parity that we're attempting to
				 * verify, but this should be a relatively
				 * uncommon case, and can be optimized if it
				 * becomes a problem.  Note that we regenerate
				 * parity when resilvering so we can write it
				 * out to failed devices later.
				 */
				if (parity_errors < rm->rm_firstdatacol - n ||
				    (zio->io_flags & ZIO_FLAG_RESILVER)) {
					n = raidz_parity_verify(zio, rm);
					unexpected_errors += n;
					ASSERT(parity_errors + n <=
					    rm->rm_firstdatacol);
				}

				goto done;
			}
		}
	}

	/*
	 * This isn't a typical situation -- either we got a read error or
	 * a child silently returned bad data.  Read every block so we can
	 * try again with as much data and parity as we can track down.  If
	 * we've already been through once before, all children will be marked
	 * as tried so we'll proceed to combinatorial reconstruction.
	 */
	unexpected_errors = 1;
	rm->rm_missingdata = 0;
	rm->rm_missingparity = 0;

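	/*
	 * The outer loop only finds the first untried column; once one is
	 * found, the zio is marked for a retry pass and the inner loop
	 * issues reads for every remaining untried column in a single sweep
	 * before returning.
	 */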
	for (c = 0; c < rm->rm_cols; c++) {
		if (rm->rm_col[c].rc_tried)
			continue;

		zio_vdev_io_redone(zio);
		do {
			rc = &rm->rm_col[c];
			if (rc->rc_tried)
				continue;
			zio_nowait(zio_vdev_child_io(zio, NULL,
			    vd->vdev_child[rc->rc_devidx],
			    rc->rc_offset, rc->rc_abd, rc->rc_size,
			    zio->io_type, zio->io_priority, 0,
			    vdev_raidz_child_done, rc));
		} while (++c < rm->rm_cols);

		return;
	}

	/*
	 * At this point we've attempted to reconstruct the data given the
	 * errors we detected, and we've attempted to read all columns.  There
	 * must, therefore, be one or more additional problems -- silent errors
	 * resulting in invalid data rather than explicit I/O errors resulting
	 * in absent data.  We check if there is enough additional data to
	 * possibly reconstruct the data and then perform combinatorial
	 * reconstruction over all possible combinations.  If that fails,
	 * we're cooked.
	 */
	if (total_errors > rm->rm_firstdatacol) {
		zio->io_error = vdev_raidz_worst_error(rm);

	} else if (total_errors < rm->rm_firstdatacol &&
	    (code = vdev_raidz_combrec(zio, total_errors, data_errors)) != 0) {
		/*
		 * If we didn't use all the available parity for the
		 * combinatorial reconstruction, verify that the remaining
		 * parity is correct.
		 */
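		/*
		 * The code returned by vdev_raidz_combrec() identifies which
		 * parity columns participated in the reconstruction;
		 * (1 << rm->rm_firstdatacol) - 1 is the mask with every
		 * parity column set, so any other value means some parity
		 * went unused and should still be checked.
		 */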
		if (code != (1 << rm->rm_firstdatacol) - 1)
			(void) raidz_parity_verify(zio, rm);
	} else {
		/*
		 * We're here because either:
		 *
		 *	total_errors == rm_firstdatacol, or
		 *	vdev_raidz_combrec() failed
		 *
		 * In either case, there is enough bad data to prevent
		 * reconstruction.
		 *
		 * Start checksum ereports for all children that haven't
		 * failed, as long as the IO wasn't speculative.
		 */
		zio->io_error = SET_ERROR(ECKSUM);

		if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
			for (c = 0; c < rm->rm_cols; c++) {
				rc = &rm->rm_col[c];
				if (rc->rc_error == 0) {
					zio_bad_cksum_t zbc;
					zbc.zbc_has_cksum = 0;
					zbc.zbc_injected =
					    rm->rm_ecksuminjected;

					zfs_ereport_start_checksum(
					    zio->io_spa,
					    vd->vdev_child[rc->rc_devidx],
					    zio, rc->rc_offset, rc->rc_size,
					    (void *)(uintptr_t)c, &zbc);
				}
			}
		}
	}

done:
	zio_checksum_verified(zio);

	if (zio->io_error == 0 && spa_writeable(zio->io_spa) &&
	    (unexpected_errors || (zio->io_flags & ZIO_FLAG_RESILVER))) {
		/*
		 * Use the good data we have in hand to repair damaged children.
		 */
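		/*
		 * Repair writes are flagged IO_REPAIR; SELF_HEAL is added
		 * only when the errors were unexpected (silent corruption),
		 * not when this is an ordinary resilver pass.
		 */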
		for (c = 0; c < rm->rm_cols; c++) {
			rc = &rm->rm_col[c];
			cvd = vd->vdev_child[rc->rc_devidx];

			if (rc->rc_error == 0)
				continue;

			zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
			    rc->rc_offset, rc->rc_abd, rc->rc_size,
			    ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
			    ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
		}
	}
}

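/*
 * React to a change in the state of our children: fault the raidz vdev
 * only when more children have faulted than parity can cover; otherwise
 * mark it degraded or healthy as appropriate.
 */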
static void
vdev_raidz_state_change(vdev_t *vd, int faulted, int degraded)
{
	if (faulted > vd->vdev_nparity)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_NO_REPLICAS);
	else if (degraded + faulted != 0)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
	else
		vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}

vdev_ops_t vdev_raidz_ops = {
	vdev_raidz_open,
	vdev_raidz_close,
	vdev_raidz_asize,
	vdev_raidz_io_start,
	vdev_raidz_io_done,
	vdev_raidz_state_change,
	NULL,			/* vdev_op_hold */
	NULL,			/* vdev_op_rele */
	VDEV_TYPE_RAIDZ,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};