Commit | Line | Data |
---|---|---|
70e083d2 TG |
1 | /* |
2 | * CDDL HEADER START | |
3 | * | |
4 | * The contents of this file are subject to the terms of the | |
5 | * Common Development and Distribution License (the "License"). | |
6 | * You may not use this file except in compliance with the License. | |
7 | * | |
8 | * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE | |
9 | * or http://www.opensolaris.org/os/licensing. | |
10 | * See the License for the specific language governing permissions | |
11 | * and limitations under the License. | |
12 | * | |
13 | * When distributing Covered Code, include this CDDL HEADER in each | |
14 | * file and include the License file at usr/src/OPENSOLARIS.LICENSE. | |
15 | * If applicable, add the following below this CDDL HEADER, with the | |
16 | * fields enclosed by brackets "[]" replaced with your own identifying | |
17 | * information: Portions Copyright [yyyy] [name of copyright owner] | |
18 | * | |
19 | * CDDL HEADER END | |
20 | */ | |
21 | /* | |
22 | * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. | |
70e083d2 | 23 | * Copyright 2011 Nexenta Systems, Inc. All rights reserved. |
86e3c28a | 24 | * Copyright (c) 2011, 2015 by Delphix. All rights reserved. |
70e083d2 | 25 | * Copyright (c) 2014, Joyent, Inc. All rights reserved. |
86e3c28a CIK |
26 | * Copyright 2014 HybridCluster. All rights reserved. |
27 | * Copyright 2016 RackTop Systems. | |
70e083d2 TG |
28 | * Copyright (c) 2016 Actifio, Inc. All rights reserved. |
29 | */ | |
30 | ||
31 | #include <sys/dmu.h> | |
32 | #include <sys/dmu_impl.h> | |
33 | #include <sys/dmu_tx.h> | |
34 | #include <sys/dbuf.h> | |
35 | #include <sys/dnode.h> | |
36 | #include <sys/zfs_context.h> | |
37 | #include <sys/dmu_objset.h> | |
38 | #include <sys/dmu_traverse.h> | |
39 | #include <sys/dsl_dataset.h> | |
40 | #include <sys/dsl_dir.h> | |
41 | #include <sys/dsl_prop.h> | |
42 | #include <sys/dsl_pool.h> | |
43 | #include <sys/dsl_synctask.h> | |
44 | #include <sys/spa_impl.h> | |
45 | #include <sys/zfs_ioctl.h> | |
46 | #include <sys/zap.h> | |
47 | #include <sys/zio_checksum.h> | |
48 | #include <sys/zfs_znode.h> | |
49 | #include <zfs_fletcher.h> | |
50 | #include <sys/avl.h> | |
51 | #include <sys/ddt.h> | |
52 | #include <sys/zfs_onexit.h> | |
53 | #include <sys/dmu_send.h> | |
54 | #include <sys/dsl_destroy.h> | |
55 | #include <sys/blkptr.h> | |
56 | #include <sys/dsl_bookmark.h> | |
57 | #include <sys/zfeature.h> | |
86e3c28a | 58 | #include <sys/bqueue.h> |
70e083d2 | 59 | #include <sys/zvol.h> |
86e3c28a | 60 | #include <sys/policy.h> |
70e083d2 TG |
61 | |
62 | /* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */ | |
63 | int zfs_send_corrupt_data = B_FALSE; | |
86e3c28a CIK |
64 | int zfs_send_queue_length = 16 * 1024 * 1024; |
65 | int zfs_recv_queue_length = 16 * 1024 * 1024; | |
66 | /* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */ | |
67 | int zfs_send_set_freerecords_bit = B_TRUE; | |
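/*
 * Tuning note (a sketch, not a guarantee): on Linux builds these queue
 * lengths are typically exposed as module parameters, so the send/recv
 * queue depth can usually be raised at runtime along the lines of the
 * assumed paths below; the exact parameter names and paths depend on the
 * module_param() declarations compiled into the module.
 *
 *   echo 33554432 > /sys/module/zfs/parameters/zfs_send_queue_length
 *   echo 33554432 > /sys/module/zfs/parameters/zfs_recv_queue_length
 */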
70e083d2 TG |
68 | |
69 | static char *dmu_recv_tag = "dmu_recv_tag"; | |
86e3c28a CIK |
70 | const char *recv_clone_name = "%recv"; |
71 | ||
72 | #define BP_SPAN(datablkszsec, indblkshift, level) \ | |
73 | (((uint64_t)datablkszsec) << (SPA_MINBLOCKSHIFT + \ | |
74 | (level) * (indblkshift - SPA_BLKPTRSHIFT))) | |
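/*
 * Worked example (a sketch, assuming the usual SPA_MINBLOCKSHIFT of 9 and
 * SPA_BLKPTRSHIFT of 7): for 128K data blocks (datablkszsec == 256) and
 * 128K indirect blocks (indblkshift == 17),
 *
 *   BP_SPAN(256, 17, 0) == 256 << 9        == 128K  (one data block)
 *   BP_SPAN(256, 17, 1) == 256 << (9 + 10) == 128M  (1024 data blocks)
 *
 * i.e. each level multiplies the span by 2^(indblkshift - SPA_BLKPTRSHIFT),
 * the number of block pointers that fit in one indirect block.
 */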
75 | ||
76 | static void byteswap_record(dmu_replay_record_t *drr); | |
77 | ||
78 | struct send_thread_arg { | |
79 | bqueue_t q; | |
80 | dsl_dataset_t *ds; /* Dataset to traverse */ | |
81 | uint64_t fromtxg; /* Traverse from this txg */ | |
82 | int flags; /* flags to pass to traverse_dataset */ | |
83 | int error_code; | |
84 | boolean_t cancel; | |
85 | zbookmark_phys_t resume; | |
86 | }; | |
87 | ||
88 | struct send_block_record { | |
89 | boolean_t eos_marker; /* Marks the end of the stream */ | |
90 | blkptr_t bp; | |
91 | zbookmark_phys_t zb; | |
92 | uint8_t indblkshift; | |
93 | uint16_t datablkszsec; | |
94 | bqueue_node_t ln; | |
95 | }; | |
70e083d2 TG |
96 | |
97 | typedef struct dump_bytes_io { | |
98 | dmu_sendarg_t *dbi_dsp; | |
99 | void *dbi_buf; | |
100 | int dbi_len; | |
101 | } dump_bytes_io_t; | |
102 | ||
103 | static void | |
104 | dump_bytes_cb(void *arg) | |
105 | { | |
106 | dump_bytes_io_t *dbi = (dump_bytes_io_t *)arg; | |
107 | dmu_sendarg_t *dsp = dbi->dbi_dsp; | |
86e3c28a | 108 | dsl_dataset_t *ds = dmu_objset_ds(dsp->dsa_os); |
70e083d2 | 109 | ssize_t resid; /* have to get resid to get detailed errno */ |
86e3c28a CIK |
110 | |
111 | /* | |
112 | * The code does not rely on this (len being a multiple of 8). We keep | |
113 | * this assertion because of the corresponding assertion in | |
114 | * receive_read(). Keeping this assertion ensures that we do not | |
115 | * inadvertently break backwards compatibility (causing the assertion | |
116 | * in receive_read() to trigger on old software). | |
117 | * | |
118 | * Removing the assertions could be rolled into a new feature that uses | |
119 | * data that isn't 8-byte aligned; if the assertions were removed, a | |
120 | * feature flag would have to be added. | |
121 | */ | |
122 | ||
70e083d2 TG |
123 | ASSERT0(dbi->dbi_len % 8); |
124 | ||
70e083d2 TG |
125 | dsp->dsa_err = vn_rdwr(UIO_WRITE, dsp->dsa_vp, |
126 | (caddr_t)dbi->dbi_buf, dbi->dbi_len, | |
127 | 0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid); | |
128 | ||
129 | mutex_enter(&ds->ds_sendstream_lock); | |
130 | *dsp->dsa_off += dbi->dbi_len; | |
131 | mutex_exit(&ds->ds_sendstream_lock); | |
132 | } | |
133 | ||
134 | static int | |
135 | dump_bytes(dmu_sendarg_t *dsp, void *buf, int len) | |
136 | { | |
137 | dump_bytes_io_t dbi; | |
138 | ||
139 | dbi.dbi_dsp = dsp; | |
140 | dbi.dbi_buf = buf; | |
141 | dbi.dbi_len = len; | |
142 | ||
143 | #if defined(HAVE_LARGE_STACKS) | |
144 | dump_bytes_cb(&dbi); | |
145 | #else | |
146 | /* | |
147 | * The vn_rdwr() call is performed in a taskq to ensure that there is | |
148 | * always enough stack space to write safely to the target filesystem. | |
149 | * The ZIO_TYPE_FREE threads are used because there can be a lot of | |
150 | * them and they are used in vdev_file.c for a similar purpose. | |
151 | */ | |
152 | spa_taskq_dispatch_sync(dmu_objset_spa(dsp->dsa_os), ZIO_TYPE_FREE, | |
153 | ZIO_TASKQ_ISSUE, dump_bytes_cb, &dbi, TQ_SLEEP); | |
154 | #endif /* HAVE_LARGE_STACKS */ | |
155 | ||
156 | return (dsp->dsa_err); | |
157 | } | |
158 | ||
86e3c28a CIK |
159 | /* |
160 | * For all record types except BEGIN, fill in the checksum (overlaid in | |
161 | * drr_u.drr_checksum.drr_checksum). The checksum verifies everything | |
162 | * up to the start of the checksum itself. | |
163 | */ | |
164 | static int | |
165 | dump_record(dmu_sendarg_t *dsp, void *payload, int payload_len) | |
166 | { | |
167 | ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum), | |
168 | ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t)); | |
169 | (void) fletcher_4_incremental_native(dsp->dsa_drr, | |
170 | offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum), | |
171 | &dsp->dsa_zc); | |
172 | if (dsp->dsa_drr->drr_type == DRR_BEGIN) { | |
173 | dsp->dsa_sent_begin = B_TRUE; | |
174 | } else { | |
175 | ASSERT(ZIO_CHECKSUM_IS_ZERO(&dsp->dsa_drr->drr_u. | |
176 | drr_checksum.drr_checksum)); | |
177 | dsp->dsa_drr->drr_u.drr_checksum.drr_checksum = dsp->dsa_zc; | |
178 | } | |
179 | if (dsp->dsa_drr->drr_type == DRR_END) { | |
180 | dsp->dsa_sent_end = B_TRUE; | |
181 | } | |
182 | (void) fletcher_4_incremental_native(&dsp->dsa_drr-> | |
183 | drr_u.drr_checksum.drr_checksum, | |
184 | sizeof (zio_cksum_t), &dsp->dsa_zc); | |
185 | if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0) | |
186 | return (SET_ERROR(EINTR)); | |
187 | if (payload_len != 0) { | |
188 | (void) fletcher_4_incremental_native(payload, payload_len, | |
189 | &dsp->dsa_zc); | |
190 | if (dump_bytes(dsp, payload, payload_len) != 0) | |
191 | return (SET_ERROR(EINTR)); | |
192 | } | |
193 | return (0); | |
194 | } | |
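/*
 * Reading aid (a sketch of the logic above): the fletcher-4 checksum in
 * dsa_zc is a running value over the entire stream.  Each record is folded
 * in as (1) the header up to the drr_checksum field, (2) the drr_checksum
 * field itself, which for non-BEGIN records is first filled with the
 * running value covering everything sent so far, and (3) the payload, if
 * any; dump_bytes() writes the bytes out in the same order.
 */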
195 | ||
196 | /* | |
197 | * Fill in the drr_free struct, or perform aggregation if the previous record is | |
198 | * also a free record, and the two are adjacent. | |
199 | * | |
200 | * Note that we send free records even for a full send, because we want to be | |
201 | * able to receive a full send as a clone, which requires a list of all the free | |
202 | * and freeobject records that were generated on the source. | |
203 | */ | |
70e083d2 TG |
204 | static int |
205 | dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset, | |
206 | uint64_t length) | |
207 | { | |
208 | struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free); | |
209 | ||
210 | /* | |
211 | * When we receive a free record, dbuf_free_range() assumes | |
212 | * that the receiving system doesn't have any dbufs in the range | |
213 | * being freed. This is always true because there is a one-record | |
214 | * constraint: we only send one WRITE record for any given | |
86e3c28a | 215 | * object,offset. We know that the one-record constraint is |
70e083d2 TG |
216 | * true because we always send data in increasing order by |
217 | * object,offset. | |
218 | * | |
219 | * If the increasing-order constraint ever changes, we should find | |
220 | * another way to assert that the one-record constraint is still | |
221 | * satisfied. | |
222 | */ | |
223 | ASSERT(object > dsp->dsa_last_data_object || | |
224 | (object == dsp->dsa_last_data_object && | |
225 | offset > dsp->dsa_last_data_offset)); | |
226 | ||
70e083d2 TG |
227 | /* |
228 | * If there is a pending op, but it's not PENDING_FREE, push it out, | |
229 | * since free block aggregation can only be done for blocks of the | |
230 | * same type (i.e., DRR_FREE records can only be aggregated with | |
231 | * other DRR_FREE records. DRR_FREEOBJECTS records can only be | |
232 | * aggregated with other DRR_FREEOBJECTS records). | |
233 | */ | |
234 | if (dsp->dsa_pending_op != PENDING_NONE && | |
235 | dsp->dsa_pending_op != PENDING_FREE) { | |
86e3c28a | 236 | if (dump_record(dsp, NULL, 0) != 0) |
70e083d2 TG |
237 | return (SET_ERROR(EINTR)); |
238 | dsp->dsa_pending_op = PENDING_NONE; | |
239 | } | |
240 | ||
241 | if (dsp->dsa_pending_op == PENDING_FREE) { | |
242 | /* | |
b49151d6 CIK |
243 | * There should never be a PENDING_FREE if length is |
244 | * DMU_OBJECT_END (because dump_dnode is the only place where | |
245 | * this function is called with a DMU_OBJECT_END, and only after | |
246 | * flushing any pending record). | |
70e083d2 | 247 | */ |
b49151d6 | 248 | ASSERT(length != DMU_OBJECT_END); |
70e083d2 TG |
249 | /* |
250 | * Check to see whether this free block can be aggregated | |
251 | * with the pending one. | |
252 | */ | |
253 | if (drrf->drr_object == object && drrf->drr_offset + | |
254 | drrf->drr_length == offset) { | |
b49151d6 CIK |
255 | if (offset + length < offset) |
256 | drrf->drr_length = DMU_OBJECT_END; | |
257 | else | |
258 | drrf->drr_length += length; | |
70e083d2 TG |
259 | return (0); |
260 | } else { | |
261 | /* not a continuation. Push out pending record */ | |
86e3c28a | 262 | if (dump_record(dsp, NULL, 0) != 0) |
70e083d2 TG |
263 | return (SET_ERROR(EINTR)); |
264 | dsp->dsa_pending_op = PENDING_NONE; | |
265 | } | |
266 | } | |
267 | /* create a FREE record and make it pending */ | |
268 | bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t)); | |
269 | dsp->dsa_drr->drr_type = DRR_FREE; | |
270 | drrf->drr_object = object; | |
271 | drrf->drr_offset = offset; | |
b49151d6 CIK |
272 | if (offset + length < offset) |
273 | drrf->drr_length = DMU_OBJECT_END; | |
274 | else | |
275 | drrf->drr_length = length; | |
70e083d2 | 276 | drrf->drr_toguid = dsp->dsa_toguid; |
b49151d6 | 277 | if (length == DMU_OBJECT_END) { |
86e3c28a | 278 | if (dump_record(dsp, NULL, 0) != 0) |
70e083d2 TG |
279 | return (SET_ERROR(EINTR)); |
280 | } else { | |
281 | dsp->dsa_pending_op = PENDING_FREE; | |
282 | } | |
283 | ||
284 | return (0); | |
285 | } | |
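/*
 * Aggregation example (illustrative values only): a call freeing
 * (object 42, offset 0, length 128K) leaves a pending DRR_FREE record;
 * a following call for (object 42, offset 128K, length 128K) satisfies
 * drr_offset + drr_length == offset for the same object, so drr_length
 * simply grows to 256K and only one record is eventually written.
 */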
286 | ||
287 | static int | |
288 | dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type, | |
86e3c28a CIK |
289 | uint64_t object, uint64_t offset, int lsize, int psize, const blkptr_t *bp, |
290 | void *data) | |
70e083d2 | 291 | { |
86e3c28a | 292 | uint64_t payload_size; |
70e083d2 TG |
293 | struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write); |
294 | ||
295 | /* | |
296 | * We send data in increasing object, offset order. | |
297 | * See comment in dump_free() for details. | |
298 | */ | |
299 | ASSERT(object > dsp->dsa_last_data_object || | |
300 | (object == dsp->dsa_last_data_object && | |
301 | offset > dsp->dsa_last_data_offset)); | |
302 | dsp->dsa_last_data_object = object; | |
86e3c28a | 303 | dsp->dsa_last_data_offset = offset + lsize - 1; |
70e083d2 TG |
304 | |
305 | /* | |
306 | * If there is any kind of pending aggregation (currently either | |
307 | * a grouping of free objects or free blocks), push it out to | |
308 | * the stream, since aggregation can't be done across operations | |
309 | * of different types. | |
310 | */ | |
311 | if (dsp->dsa_pending_op != PENDING_NONE) { | |
86e3c28a | 312 | if (dump_record(dsp, NULL, 0) != 0) |
70e083d2 TG |
313 | return (SET_ERROR(EINTR)); |
314 | dsp->dsa_pending_op = PENDING_NONE; | |
315 | } | |
86e3c28a | 316 | /* write a WRITE record */ |
70e083d2 TG |
317 | bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t)); |
318 | dsp->dsa_drr->drr_type = DRR_WRITE; | |
319 | drrw->drr_object = object; | |
320 | drrw->drr_type = type; | |
321 | drrw->drr_offset = offset; | |
70e083d2 | 322 | drrw->drr_toguid = dsp->dsa_toguid; |
86e3c28a CIK |
323 | drrw->drr_logical_size = lsize; |
324 | ||
325 | /* only set the compression fields if the buf is compressed */ | |
326 | if (lsize != psize) { | |
327 | ASSERT(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_COMPRESSED); | |
328 | ASSERT(!BP_IS_EMBEDDED(bp)); | |
329 | ASSERT(!BP_SHOULD_BYTESWAP(bp)); | |
330 | ASSERT(!DMU_OT_IS_METADATA(BP_GET_TYPE(bp))); | |
331 | ASSERT3U(BP_GET_COMPRESS(bp), !=, ZIO_COMPRESS_OFF); | |
332 | ASSERT3S(psize, >, 0); | |
333 | ASSERT3S(lsize, >=, psize); | |
334 | ||
335 | drrw->drr_compressiontype = BP_GET_COMPRESS(bp); | |
336 | drrw->drr_compressed_size = psize; | |
337 | payload_size = drrw->drr_compressed_size; | |
338 | } else { | |
339 | payload_size = drrw->drr_logical_size; | |
340 | } | |
341 | ||
70e083d2 TG |
342 | if (bp == NULL || BP_IS_EMBEDDED(bp)) { |
343 | /* | |
344 | * There's no pre-computed checksum for partial-block | |
345 | * writes or embedded BP's, so (like | |
346 | * fletcher4-checksummed blocks) userland will have to | |
347 | * compute a dedup-capable checksum itself. | |
348 | */ | |
349 | drrw->drr_checksumtype = ZIO_CHECKSUM_OFF; | |
350 | } else { | |
351 | drrw->drr_checksumtype = BP_GET_CHECKSUM(bp); | |
86e3c28a CIK |
352 | if (zio_checksum_table[drrw->drr_checksumtype].ci_flags & |
353 | ZCHECKSUM_FLAG_DEDUP) | |
70e083d2 TG |
354 | drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP; |
355 | DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp)); | |
356 | DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp)); | |
357 | DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp)); | |
358 | drrw->drr_key.ddk_cksum = bp->blk_cksum; | |
359 | } | |
360 | ||
86e3c28a | 361 | if (dump_record(dsp, data, payload_size) != 0) |
70e083d2 TG |
362 | return (SET_ERROR(EINTR)); |
363 | return (0); | |
364 | } | |
365 | ||
366 | static int | |
367 | dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset, | |
368 | int blksz, const blkptr_t *bp) | |
369 | { | |
370 | char buf[BPE_PAYLOAD_SIZE]; | |
371 | struct drr_write_embedded *drrw = | |
372 | &(dsp->dsa_drr->drr_u.drr_write_embedded); | |
373 | ||
374 | if (dsp->dsa_pending_op != PENDING_NONE) { | |
86e3c28a | 375 | if (dump_record(dsp, NULL, 0) != 0) |
70e083d2 TG |
376 | return (EINTR); |
377 | dsp->dsa_pending_op = PENDING_NONE; | |
378 | } | |
379 | ||
380 | ASSERT(BP_IS_EMBEDDED(bp)); | |
381 | ||
382 | bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t)); | |
383 | dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED; | |
384 | drrw->drr_object = object; | |
385 | drrw->drr_offset = offset; | |
386 | drrw->drr_length = blksz; | |
387 | drrw->drr_toguid = dsp->dsa_toguid; | |
388 | drrw->drr_compression = BP_GET_COMPRESS(bp); | |
389 | drrw->drr_etype = BPE_GET_ETYPE(bp); | |
390 | drrw->drr_lsize = BPE_GET_LSIZE(bp); | |
391 | drrw->drr_psize = BPE_GET_PSIZE(bp); | |
392 | ||
393 | decode_embedded_bp_compressed(bp, buf); | |
394 | ||
86e3c28a | 395 | if (dump_record(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0) |
70e083d2 TG |
396 | return (EINTR); |
397 | return (0); | |
398 | } | |
399 | ||
400 | static int | |
401 | dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data) | |
402 | { | |
403 | struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill); | |
404 | ||
405 | if (dsp->dsa_pending_op != PENDING_NONE) { | |
86e3c28a | 406 | if (dump_record(dsp, NULL, 0) != 0) |
70e083d2 TG |
407 | return (SET_ERROR(EINTR)); |
408 | dsp->dsa_pending_op = PENDING_NONE; | |
409 | } | |
410 | ||
411 | /* write a SPILL record */ | |
412 | bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t)); | |
413 | dsp->dsa_drr->drr_type = DRR_SPILL; | |
414 | drrs->drr_object = object; | |
415 | drrs->drr_length = blksz; | |
416 | drrs->drr_toguid = dsp->dsa_toguid; | |
417 | ||
86e3c28a | 418 | if (dump_record(dsp, data, blksz) != 0) |
70e083d2 TG |
419 | return (SET_ERROR(EINTR)); |
420 | return (0); | |
421 | } | |
422 | ||
423 | static int | |
424 | dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs) | |
425 | { | |
426 | struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects); | |
86e3c28a CIK |
427 | uint64_t maxobj = DNODES_PER_BLOCK * |
428 | (DMU_META_DNODE(dsp->dsa_os)->dn_maxblkid + 1); | |
70e083d2 | 429 | |
86e3c28a CIK |
430 | /* |
431 | * ZoL < 0.7 does not handle large FREEOBJECTS records correctly, | |
432 | * leading to zfs recv never completing. To avoid this issue, don't | |
433 | * send FREEOBJECTS records for object IDs which cannot exist on the | |
434 | * receiving side. | |
435 | */ | |
436 | if (maxobj > 0) { | |
437 | if (maxobj < firstobj) | |
438 | return (0); | |
439 | ||
440 | if (maxobj < firstobj + numobjs) | |
441 | numobjs = maxobj - firstobj; | |
442 | } | |
70e083d2 TG |
443 | |
444 | /* | |
445 | * If there is a pending op, but it's not PENDING_FREEOBJECTS, | |
446 | * push it out, since free block aggregation can only be done for | |
447 | * blocks of the same type (i.e., DRR_FREE records can only be | |
448 | * aggregated with other DRR_FREE records. DRR_FREEOBJECTS records | |
449 | * can only be aggregated with other DRR_FREEOBJECTS records). |
450 | */ | |
451 | if (dsp->dsa_pending_op != PENDING_NONE && | |
452 | dsp->dsa_pending_op != PENDING_FREEOBJECTS) { | |
86e3c28a | 453 | if (dump_record(dsp, NULL, 0) != 0) |
70e083d2 TG |
454 | return (SET_ERROR(EINTR)); |
455 | dsp->dsa_pending_op = PENDING_NONE; | |
456 | } | |
457 | if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) { | |
458 | /* | |
459 | * See whether this free object array can be aggregated | |
460 | * with the pending one |
461 | */ | |
462 | if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) { | |
463 | drrfo->drr_numobjs += numobjs; | |
464 | return (0); | |
465 | } else { | |
466 | /* can't be aggregated. Push out pending record */ | |
86e3c28a | 467 | if (dump_record(dsp, NULL, 0) != 0) |
70e083d2 TG |
468 | return (SET_ERROR(EINTR)); |
469 | dsp->dsa_pending_op = PENDING_NONE; | |
470 | } | |
471 | } | |
472 | ||
473 | /* write a FREEOBJECTS record */ | |
474 | bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t)); | |
475 | dsp->dsa_drr->drr_type = DRR_FREEOBJECTS; | |
476 | drrfo->drr_firstobj = firstobj; | |
477 | drrfo->drr_numobjs = numobjs; | |
478 | drrfo->drr_toguid = dsp->dsa_toguid; | |
479 | ||
480 | dsp->dsa_pending_op = PENDING_FREEOBJECTS; | |
481 | ||
482 | return (0); | |
483 | } | |
484 | ||
485 | static int | |
486 | dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp) | |
487 | { | |
488 | struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object); | |
489 | ||
86e3c28a CIK |
490 | if (object < dsp->dsa_resume_object) { |
491 | /* | |
492 | * Note: when resuming, we will visit all the dnodes in | |
493 | * the block of dnodes that we are resuming from. In | |
494 | * this case it's unnecessary to send the dnodes prior to | |
495 | * the one we are resuming from. We should be at most one | |
496 | * block's worth of dnodes behind the resume point. | |
497 | */ | |
498 | ASSERT3U(dsp->dsa_resume_object - object, <, | |
499 | 1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT)); | |
500 | return (0); | |
501 | } | |
502 | ||
70e083d2 TG |
503 | if (dnp == NULL || dnp->dn_type == DMU_OT_NONE) |
504 | return (dump_freeobjects(dsp, object, 1)); | |
505 | ||
506 | if (dsp->dsa_pending_op != PENDING_NONE) { | |
86e3c28a | 507 | if (dump_record(dsp, NULL, 0) != 0) |
70e083d2 TG |
508 | return (SET_ERROR(EINTR)); |
509 | dsp->dsa_pending_op = PENDING_NONE; | |
510 | } | |
511 | ||
512 | /* write an OBJECT record */ | |
513 | bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t)); | |
514 | dsp->dsa_drr->drr_type = DRR_OBJECT; | |
515 | drro->drr_object = object; | |
516 | drro->drr_type = dnp->dn_type; | |
517 | drro->drr_bonustype = dnp->dn_bonustype; | |
518 | drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT; | |
519 | drro->drr_bonuslen = dnp->dn_bonuslen; | |
86e3c28a | 520 | drro->drr_dn_slots = dnp->dn_extra_slots + 1; |
70e083d2 TG |
521 | drro->drr_checksumtype = dnp->dn_checksum; |
522 | drro->drr_compress = dnp->dn_compress; | |
523 | drro->drr_toguid = dsp->dsa_toguid; | |
524 | ||
525 | if (!(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) && | |
526 | drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE) | |
527 | drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE; | |
528 | ||
86e3c28a CIK |
529 | if (dump_record(dsp, DN_BONUS(dnp), |
530 | P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0) { | |
70e083d2 | 531 | return (SET_ERROR(EINTR)); |
86e3c28a | 532 | } |
70e083d2 TG |
533 | |
534 | /* Free anything past the end of the file. */ | |
535 | if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) * | |
b49151d6 | 536 | (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), DMU_OBJECT_END) != 0) |
70e083d2 TG |
537 | return (SET_ERROR(EINTR)); |
538 | if (dsp->dsa_err != 0) | |
539 | return (SET_ERROR(EINTR)); | |
540 | return (0); | |
541 | } | |
542 | ||
543 | static boolean_t | |
544 | backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp) | |
545 | { | |
546 | if (!BP_IS_EMBEDDED(bp)) | |
547 | return (B_FALSE); | |
548 | ||
549 | /* | |
550 | * Compression function must be legacy, or explicitly enabled. | |
551 | */ | |
552 | if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS && | |
86e3c28a | 553 | !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LZ4))) |
70e083d2 TG |
554 | return (B_FALSE); |
555 | ||
556 | /* | |
557 | * Embed type must be explicitly enabled. | |
558 | */ | |
559 | switch (BPE_GET_ETYPE(bp)) { | |
560 | case BP_EMBEDDED_TYPE_DATA: | |
561 | if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) | |
562 | return (B_TRUE); | |
563 | break; | |
564 | default: | |
565 | return (B_FALSE); | |
566 | } | |
567 | return (B_FALSE); | |
568 | } | |
569 | ||
86e3c28a CIK |
570 | /* |
571 | * This is the callback function to traverse_dataset that acts as the worker | |
572 | * thread for dmu_send_impl. | |
573 | */ | |
574 | /*ARGSUSED*/ | |
575 | static int | |
576 | send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, | |
577 | const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg) | |
578 | { | |
579 | struct send_thread_arg *sta = arg; | |
580 | struct send_block_record *record; | |
581 | uint64_t record_size; | |
582 | int err = 0; | |
583 | ||
584 | ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT || | |
585 | zb->zb_object >= sta->resume.zb_object); | |
70e083d2 | 586 | |
86e3c28a CIK |
587 | if (sta->cancel) |
588 | return (SET_ERROR(EINTR)); | |
589 | ||
590 | if (bp == NULL) { | |
591 | ASSERT3U(zb->zb_level, ==, ZB_DNODE_LEVEL); | |
592 | return (0); | |
593 | } else if (zb->zb_level < 0) { | |
594 | return (0); | |
595 | } | |
596 | ||
597 | record = kmem_zalloc(sizeof (struct send_block_record), KM_SLEEP); | |
598 | record->eos_marker = B_FALSE; | |
599 | record->bp = *bp; | |
600 | record->zb = *zb; | |
601 | record->indblkshift = dnp->dn_indblkshift; | |
602 | record->datablkszsec = dnp->dn_datablkszsec; | |
603 | record_size = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT; | |
604 | bqueue_enqueue(&sta->q, record, record_size); | |
605 | ||
606 | return (err); | |
607 | } | |
608 | ||
609 | /* | |
610 | * This function kicks off the traverse_dataset. It also handles setting the | |
611 | * error code of the thread in case something goes wrong, and pushes the End of | |
612 | * Stream record when the traverse_dataset call has finished. If there is no | |
613 | * dataset to traverse, the thread immediately pushes the End of Stream marker. | |
614 | */ | |
615 | static void | |
616 | send_traverse_thread(void *arg) | |
617 | { | |
618 | struct send_thread_arg *st_arg = arg; | |
619 | int err; | |
620 | struct send_block_record *data; | |
621 | fstrans_cookie_t cookie = spl_fstrans_mark(); | |
622 | ||
623 | if (st_arg->ds != NULL) { | |
624 | err = traverse_dataset_resume(st_arg->ds, | |
625 | st_arg->fromtxg, &st_arg->resume, | |
626 | st_arg->flags, send_cb, st_arg); | |
627 | ||
628 | if (err != EINTR) | |
629 | st_arg->error_code = err; | |
630 | } | |
631 | data = kmem_zalloc(sizeof (*data), KM_SLEEP); | |
632 | data->eos_marker = B_TRUE; | |
633 | bqueue_enqueue(&st_arg->q, data, 1); | |
634 | spl_fstrans_unmark(cookie); | |
635 | thread_exit(); | |
636 | } | |
637 | ||
638 | /* | |
639 | * This function actually handles figuring out what kind of record needs to be | |
640 | * dumped, reading the data (which has hopefully been prefetched), and calling | |
641 | * the appropriate helper function. | |
642 | */ | |
70e083d2 | 643 | static int |
86e3c28a | 644 | do_dump(dmu_sendarg_t *dsa, struct send_block_record *data) |
70e083d2 | 645 | { |
86e3c28a CIK |
646 | dsl_dataset_t *ds = dmu_objset_ds(dsa->dsa_os); |
647 | const blkptr_t *bp = &data->bp; | |
648 | const zbookmark_phys_t *zb = &data->zb; | |
649 | uint8_t indblkshift = data->indblkshift; | |
650 | uint16_t dblkszsec = data->datablkszsec; | |
651 | spa_t *spa = ds->ds_dir->dd_pool->dp_spa; | |
70e083d2 TG |
652 | dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE; |
653 | int err = 0; | |
86e3c28a | 654 | uint64_t dnobj; |
70e083d2 | 655 | |
86e3c28a CIK |
656 | ASSERT3U(zb->zb_level, >=, 0); |
657 | ||
658 | ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT || | |
659 | zb->zb_object >= dsa->dsa_resume_object); | |
70e083d2 TG |
660 | |
661 | if (zb->zb_object != DMU_META_DNODE_OBJECT && | |
662 | DMU_OBJECT_IS_SPECIAL(zb->zb_object)) { | |
663 | return (0); | |
70e083d2 TG |
664 | } else if (BP_IS_HOLE(bp) && |
665 | zb->zb_object == DMU_META_DNODE_OBJECT) { | |
86e3c28a | 666 | uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level); |
70e083d2 | 667 | uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT; |
86e3c28a | 668 | err = dump_freeobjects(dsa, dnobj, span >> DNODE_SHIFT); |
70e083d2 | 669 | } else if (BP_IS_HOLE(bp)) { |
86e3c28a CIK |
670 | uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level); |
671 | uint64_t offset = zb->zb_blkid * span; | |
b49151d6 CIK |
672 | /* Don't dump free records for offsets > DMU_OBJECT_END */ |
673 | if (zb->zb_blkid == 0 || span <= DMU_OBJECT_END / zb->zb_blkid) | |
674 | err = dump_free(dsa, zb->zb_object, offset, span); | |
70e083d2 TG |
675 | } else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) { |
676 | return (0); | |
677 | } else if (type == DMU_OT_DNODE) { | |
678 | dnode_phys_t *blk; | |
86e3c28a | 679 | int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT; |
70e083d2 TG |
680 | arc_flags_t aflags = ARC_FLAG_WAIT; |
681 | arc_buf_t *abuf; | |
86e3c28a CIK |
682 | int i; |
683 | ||
684 | ASSERT0(zb->zb_level); | |
70e083d2 TG |
685 | |
686 | if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf, | |
687 | ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, | |
688 | &aflags, zb) != 0) | |
689 | return (SET_ERROR(EIO)); | |
690 | ||
691 | blk = abuf->b_data; | |
86e3c28a CIK |
692 | dnobj = zb->zb_blkid * epb; |
693 | for (i = 0; i < epb; i += blk[i].dn_extra_slots + 1) { | |
694 | err = dump_dnode(dsa, dnobj + i, blk + i); | |
70e083d2 TG |
695 | if (err != 0) |
696 | break; | |
697 | } | |
86e3c28a | 698 | arc_buf_destroy(abuf, &abuf); |
70e083d2 TG |
699 | } else if (type == DMU_OT_SA) { |
700 | arc_flags_t aflags = ARC_FLAG_WAIT; | |
701 | arc_buf_t *abuf; | |
702 | int blksz = BP_GET_LSIZE(bp); | |
703 | ||
704 | if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf, | |
705 | ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, | |
706 | &aflags, zb) != 0) | |
707 | return (SET_ERROR(EIO)); | |
708 | ||
86e3c28a CIK |
709 | err = dump_spill(dsa, zb->zb_object, blksz, abuf->b_data); |
710 | arc_buf_destroy(abuf, &abuf); | |
711 | } else if (backup_do_embed(dsa, bp)) { | |
70e083d2 | 712 | /* it's an embedded level-0 block of a regular object */ |
86e3c28a CIK |
713 | int blksz = dblkszsec << SPA_MINBLOCKSHIFT; |
714 | ASSERT0(zb->zb_level); | |
715 | err = dump_write_embedded(dsa, zb->zb_object, | |
70e083d2 | 716 | zb->zb_blkid * blksz, blksz, bp); |
86e3c28a CIK |
717 | } else { |
718 | /* it's a level-0 block of a regular object */ | |
70e083d2 TG |
719 | arc_flags_t aflags = ARC_FLAG_WAIT; |
720 | arc_buf_t *abuf; | |
86e3c28a CIK |
721 | int blksz = dblkszsec << SPA_MINBLOCKSHIFT; |
722 | uint64_t offset; | |
723 | ||
724 | /* | |
725 | * If we have large blocks stored on disk but the send flags | |
726 | * don't allow us to send large blocks, we split the data from | |
727 | * the arc buf into chunks. | |
728 | */ | |
729 | boolean_t split_large_blocks = blksz > SPA_OLD_MAXBLOCKSIZE && | |
730 | !(dsa->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS); | |
731 | /* | |
732 | * We should only request compressed data from the ARC if all | |
733 | * the following are true: | |
734 | * - stream compression was requested | |
735 | * - we aren't splitting large blocks into smaller chunks | |
736 | * - the data won't need to be byteswapped before sending | |
737 | * - this isn't an embedded block | |
738 | * - this isn't metadata (if receiving on a different endian | |
739 | * system it can be byteswapped more easily) | |
740 | */ | |
741 | boolean_t request_compressed = | |
742 | (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_COMPRESSED) && | |
743 | !split_large_blocks && !BP_SHOULD_BYTESWAP(bp) && | |
744 | !BP_IS_EMBEDDED(bp) && !DMU_OT_IS_METADATA(BP_GET_TYPE(bp)); | |
70e083d2 | 745 | |
70e083d2 | 746 | ASSERT0(zb->zb_level); |
86e3c28a CIK |
747 | ASSERT(zb->zb_object > dsa->dsa_resume_object || |
748 | (zb->zb_object == dsa->dsa_resume_object && | |
749 | zb->zb_blkid * blksz >= dsa->dsa_resume_offset)); | |
750 | ||
751 | ASSERT3U(blksz, ==, BP_GET_LSIZE(bp)); | |
752 | ||
753 | enum zio_flag zioflags = ZIO_FLAG_CANFAIL; | |
754 | if (request_compressed) | |
755 | zioflags |= ZIO_FLAG_RAW; | |
756 | ||
70e083d2 | 757 | if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf, |
86e3c28a | 758 | ZIO_PRIORITY_ASYNC_READ, zioflags, &aflags, zb) != 0) { |
70e083d2 | 759 | if (zfs_send_corrupt_data) { |
70e083d2 | 760 | /* Send a block filled with 0x"zfs badd bloc" */ |
86e3c28a CIK |
761 | abuf = arc_alloc_buf(spa, &abuf, ARC_BUFC_DATA, |
762 | blksz); | |
763 | uint64_t *ptr; | |
70e083d2 TG |
764 | for (ptr = abuf->b_data; |
765 | (char *)ptr < (char *)abuf->b_data + blksz; | |
766 | ptr++) | |
767 | *ptr = 0x2f5baddb10cULL; | |
768 | } else { | |
769 | return (SET_ERROR(EIO)); | |
770 | } | |
771 | } | |
772 | ||
773 | offset = zb->zb_blkid * blksz; | |
774 | ||
86e3c28a CIK |
775 | if (split_large_blocks) { |
776 | ASSERT3U(arc_get_compression(abuf), ==, | |
777 | ZIO_COMPRESS_OFF); | |
70e083d2 TG |
778 | char *buf = abuf->b_data; |
779 | while (blksz > 0 && err == 0) { | |
780 | int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE); | |
86e3c28a CIK |
781 | err = dump_write(dsa, type, zb->zb_object, |
782 | offset, n, n, NULL, buf); | |
70e083d2 TG |
783 | offset += n; |
784 | buf += n; | |
785 | blksz -= n; | |
786 | } | |
787 | } else { | |
86e3c28a CIK |
788 | err = dump_write(dsa, type, zb->zb_object, offset, |
789 | blksz, arc_buf_size(abuf), bp, | |
790 | abuf->b_data); | |
70e083d2 | 791 | } |
86e3c28a | 792 | arc_buf_destroy(abuf, &abuf); |
70e083d2 TG |
793 | } |
794 | ||
795 | ASSERT(err == 0 || err == EINTR); | |
796 | return (err); | |
797 | } | |
798 | ||
799 | /* | |
86e3c28a CIK |
800 | * Pop the new data off the queue, and free the old data. |
801 | */ | |
802 | static struct send_block_record * | |
803 | get_next_record(bqueue_t *bq, struct send_block_record *data) | |
804 | { | |
805 | struct send_block_record *tmp = bqueue_dequeue(bq); | |
806 | kmem_free(data, sizeof (*data)); | |
807 | return (tmp); | |
808 | } | |
809 | ||
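/*
 * Shape of the send pipeline (a sketch): dmu_send_impl() below creates a
 * bqueue and spawns send_traverse_thread(), which walks the dataset via
 * traverse_dataset_resume() and lets send_cb() enqueue one
 * send_block_record per block pointer, followed by an eos_marker record.
 * The original thread drains the queue with get_next_record(), calling
 * do_dump() on each record until it sees the eos_marker.
 */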
810 | /* | |
811 | * Actually do the bulk of the work in a zfs send. | |
812 | * | |
813 | * Note: Releases dp using the specified tag. | |
70e083d2 TG |
814 | */ |
815 | static int | |
86e3c28a CIK |
816 | dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *to_ds, |
817 | zfs_bookmark_phys_t *ancestor_zb, boolean_t is_clone, | |
818 | boolean_t embedok, boolean_t large_block_ok, boolean_t compressok, | |
819 | int outfd, uint64_t resumeobj, uint64_t resumeoff, | |
820 | vnode_t *vp, offset_t *off) | |
70e083d2 TG |
821 | { |
822 | objset_t *os; | |
823 | dmu_replay_record_t *drr; | |
824 | dmu_sendarg_t *dsp; | |
825 | int err; | |
826 | uint64_t fromtxg = 0; | |
827 | uint64_t featureflags = 0; | |
86e3c28a CIK |
828 | struct send_thread_arg to_arg; |
829 | void *payload = NULL; | |
830 | size_t payload_len = 0; | |
831 | struct send_block_record *to_data; | |
70e083d2 | 832 | |
86e3c28a | 833 | err = dmu_objset_from_ds(to_ds, &os); |
70e083d2 TG |
834 | if (err != 0) { |
835 | dsl_pool_rele(dp, tag); | |
836 | return (err); | |
837 | } | |
838 | ||
839 | drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP); | |
840 | drr->drr_type = DRR_BEGIN; | |
841 | drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC; | |
842 | DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo, | |
843 | DMU_SUBSTREAM); | |
844 | ||
86e3c28a CIK |
845 | bzero(&to_arg, sizeof (to_arg)); |
846 | ||
70e083d2 TG |
847 | #ifdef _KERNEL |
848 | if (dmu_objset_type(os) == DMU_OST_ZFS) { | |
849 | uint64_t version; | |
850 | if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) { | |
851 | kmem_free(drr, sizeof (dmu_replay_record_t)); | |
852 | dsl_pool_rele(dp, tag); | |
853 | return (SET_ERROR(EINVAL)); | |
854 | } | |
855 | if (version >= ZPL_VERSION_SA) { | |
856 | featureflags |= DMU_BACKUP_FEATURE_SA_SPILL; | |
857 | } | |
858 | } | |
859 | #endif | |
860 | ||
86e3c28a | 861 | if (large_block_ok && to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_BLOCKS]) |
70e083d2 | 862 | featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS; |
86e3c28a CIK |
863 | if (to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_DNODE]) |
864 | featureflags |= DMU_BACKUP_FEATURE_LARGE_DNODE; | |
70e083d2 TG |
865 | if (embedok && |
866 | spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) { | |
867 | featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA; | |
86e3c28a CIK |
868 | } |
869 | if (compressok) { | |
870 | featureflags |= DMU_BACKUP_FEATURE_COMPRESSED; | |
871 | } | |
872 | if ((featureflags & | |
873 | (DMU_BACKUP_FEATURE_EMBED_DATA | DMU_BACKUP_FEATURE_COMPRESSED)) != | |
874 | 0 && spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS)) { | |
875 | featureflags |= DMU_BACKUP_FEATURE_LZ4; | |
876 | } | |
877 | ||
878 | if (resumeobj != 0 || resumeoff != 0) { | |
879 | featureflags |= DMU_BACKUP_FEATURE_RESUMING; | |
70e083d2 TG |
880 | } |
881 | ||
882 | DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo, | |
883 | featureflags); | |
884 | ||
885 | drr->drr_u.drr_begin.drr_creation_time = | |
86e3c28a | 886 | dsl_dataset_phys(to_ds)->ds_creation_time; |
70e083d2 TG |
887 | drr->drr_u.drr_begin.drr_type = dmu_objset_type(os); |
888 | if (is_clone) | |
889 | drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE; | |
86e3c28a CIK |
890 | drr->drr_u.drr_begin.drr_toguid = dsl_dataset_phys(to_ds)->ds_guid; |
891 | if (dsl_dataset_phys(to_ds)->ds_flags & DS_FLAG_CI_DATASET) | |
70e083d2 | 892 | drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA; |
86e3c28a CIK |
893 | if (zfs_send_set_freerecords_bit) |
894 | drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_FREERECORDS; | |
70e083d2 | 895 | |
86e3c28a CIK |
896 | if (ancestor_zb != NULL) { |
897 | drr->drr_u.drr_begin.drr_fromguid = | |
898 | ancestor_zb->zbm_guid; | |
899 | fromtxg = ancestor_zb->zbm_creation_txg; | |
70e083d2 | 900 | } |
86e3c28a CIK |
901 | dsl_dataset_name(to_ds, drr->drr_u.drr_begin.drr_toname); |
902 | if (!to_ds->ds_is_snapshot) { | |
70e083d2 TG |
903 | (void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--", |
904 | sizeof (drr->drr_u.drr_begin.drr_toname)); | |
905 | } | |
906 | ||
907 | dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP); | |
908 | ||
909 | dsp->dsa_drr = drr; | |
910 | dsp->dsa_vp = vp; | |
911 | dsp->dsa_outfd = outfd; | |
912 | dsp->dsa_proc = curproc; | |
913 | dsp->dsa_os = os; | |
914 | dsp->dsa_off = off; | |
86e3c28a | 915 | dsp->dsa_toguid = dsl_dataset_phys(to_ds)->ds_guid; |
70e083d2 | 916 | dsp->dsa_pending_op = PENDING_NONE; |
70e083d2 | 917 | dsp->dsa_featureflags = featureflags; |
86e3c28a CIK |
918 | dsp->dsa_resume_object = resumeobj; |
919 | dsp->dsa_resume_offset = resumeoff; | |
70e083d2 | 920 | |
86e3c28a CIK |
921 | mutex_enter(&to_ds->ds_sendstream_lock); |
922 | list_insert_head(&to_ds->ds_sendstreams, dsp); | |
923 | mutex_exit(&to_ds->ds_sendstream_lock); | |
70e083d2 | 924 | |
86e3c28a | 925 | dsl_dataset_long_hold(to_ds, FTAG); |
70e083d2 TG |
926 | dsl_pool_rele(dp, tag); |
927 | ||
86e3c28a CIK |
928 | if (resumeobj != 0 || resumeoff != 0) { |
929 | dmu_object_info_t to_doi; | |
930 | nvlist_t *nvl; | |
931 | err = dmu_object_info(os, resumeobj, &to_doi); | |
932 | if (err != 0) | |
933 | goto out; | |
934 | SET_BOOKMARK(&to_arg.resume, to_ds->ds_object, resumeobj, 0, | |
935 | resumeoff / to_doi.doi_data_block_size); | |
936 | ||
937 | nvl = fnvlist_alloc(); | |
938 | fnvlist_add_uint64(nvl, "resume_object", resumeobj); | |
939 | fnvlist_add_uint64(nvl, "resume_offset", resumeoff); | |
940 | payload = fnvlist_pack(nvl, &payload_len); | |
941 | drr->drr_payloadlen = payload_len; | |
942 | fnvlist_free(nvl); | |
943 | } | |
944 | ||
945 | err = dump_record(dsp, payload, payload_len); | |
946 | fnvlist_pack_free(payload, payload_len); | |
947 | if (err != 0) { | |
70e083d2 TG |
948 | err = dsp->dsa_err; |
949 | goto out; | |
950 | } | |
951 | ||
86e3c28a CIK |
952 | err = bqueue_init(&to_arg.q, zfs_send_queue_length, |
953 | offsetof(struct send_block_record, ln)); | |
954 | to_arg.error_code = 0; | |
955 | to_arg.cancel = B_FALSE; | |
956 | to_arg.ds = to_ds; | |
957 | to_arg.fromtxg = fromtxg; | |
958 | to_arg.flags = TRAVERSE_PRE | TRAVERSE_PREFETCH; | |
959 | (void) thread_create(NULL, 0, send_traverse_thread, &to_arg, 0, curproc, | |
960 | TS_RUN, minclsyspri); | |
961 | ||
962 | to_data = bqueue_dequeue(&to_arg.q); | |
963 | ||
964 | while (!to_data->eos_marker && err == 0) { | |
965 | err = do_dump(dsp, to_data); | |
966 | to_data = get_next_record(&to_arg.q, to_data); | |
967 | if (issig(JUSTLOOKING) && issig(FORREAL)) | |
968 | err = EINTR; | |
969 | } | |
970 | ||
971 | if (err != 0) { | |
972 | to_arg.cancel = B_TRUE; | |
973 | while (!to_data->eos_marker) { | |
974 | to_data = get_next_record(&to_arg.q, to_data); | |
975 | } | |
976 | } | |
977 | kmem_free(to_data, sizeof (*to_data)); | |
978 | ||
979 | bqueue_destroy(&to_arg.q); | |
980 | ||
981 | if (err == 0 && to_arg.error_code != 0) | |
982 | err = to_arg.error_code; | |
983 | ||
984 | if (err != 0) | |
985 | goto out; | |
70e083d2 TG |
986 | |
987 | if (dsp->dsa_pending_op != PENDING_NONE) | |
86e3c28a | 988 | if (dump_record(dsp, NULL, 0) != 0) |
70e083d2 TG |
989 | err = SET_ERROR(EINTR); |
990 | ||
991 | if (err != 0) { | |
992 | if (err == EINTR && dsp->dsa_err != 0) | |
993 | err = dsp->dsa_err; | |
994 | goto out; | |
995 | } | |
996 | ||
997 | bzero(drr, sizeof (dmu_replay_record_t)); | |
998 | drr->drr_type = DRR_END; | |
999 | drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc; | |
1000 | drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid; | |
1001 | ||
86e3c28a | 1002 | if (dump_record(dsp, NULL, 0) != 0) |
70e083d2 | 1003 | err = dsp->dsa_err; |
70e083d2 TG |
1004 | |
1005 | out: | |
86e3c28a CIK |
1006 | mutex_enter(&to_ds->ds_sendstream_lock); |
1007 | list_remove(&to_ds->ds_sendstreams, dsp); | |
1008 | mutex_exit(&to_ds->ds_sendstream_lock); | |
1009 | ||
1010 | VERIFY(err != 0 || (dsp->dsa_sent_begin && dsp->dsa_sent_end)); | |
70e083d2 TG |
1011 | |
1012 | kmem_free(drr, sizeof (dmu_replay_record_t)); | |
1013 | kmem_free(dsp, sizeof (dmu_sendarg_t)); | |
1014 | ||
86e3c28a | 1015 | dsl_dataset_long_rele(to_ds, FTAG); |
70e083d2 TG |
1016 | |
1017 | return (err); | |
1018 | } | |
1019 | ||
1020 | int | |
1021 | dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap, | |
86e3c28a | 1022 | boolean_t embedok, boolean_t large_block_ok, boolean_t compressok, |
70e083d2 TG |
1023 | int outfd, vnode_t *vp, offset_t *off) |
1024 | { | |
1025 | dsl_pool_t *dp; | |
1026 | dsl_dataset_t *ds; | |
1027 | dsl_dataset_t *fromds = NULL; | |
1028 | int err; | |
1029 | ||
1030 | err = dsl_pool_hold(pool, FTAG, &dp); | |
1031 | if (err != 0) | |
1032 | return (err); | |
1033 | ||
1034 | err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds); | |
1035 | if (err != 0) { | |
1036 | dsl_pool_rele(dp, FTAG); | |
1037 | return (err); | |
1038 | } | |
1039 | ||
1040 | if (fromsnap != 0) { | |
1041 | zfs_bookmark_phys_t zb; | |
1042 | boolean_t is_clone; | |
1043 | ||
1044 | err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds); | |
1045 | if (err != 0) { | |
1046 | dsl_dataset_rele(ds, FTAG); | |
1047 | dsl_pool_rele(dp, FTAG); | |
1048 | return (err); | |
1049 | } | |
1050 | if (!dsl_dataset_is_before(ds, fromds, 0)) | |
1051 | err = SET_ERROR(EXDEV); | |
1052 | zb.zbm_creation_time = | |
1053 | dsl_dataset_phys(fromds)->ds_creation_time; | |
1054 | zb.zbm_creation_txg = dsl_dataset_phys(fromds)->ds_creation_txg; | |
1055 | zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid; | |
1056 | is_clone = (fromds->ds_dir != ds->ds_dir); | |
1057 | dsl_dataset_rele(fromds, FTAG); | |
1058 | err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone, | |
86e3c28a | 1059 | embedok, large_block_ok, compressok, outfd, 0, 0, vp, off); |
70e083d2 TG |
1060 | } else { |
1061 | err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE, | |
86e3c28a | 1062 | embedok, large_block_ok, compressok, outfd, 0, 0, vp, off); |
70e083d2 TG |
1063 | } |
1064 | dsl_dataset_rele(ds, FTAG); | |
1065 | return (err); | |
1066 | } | |
1067 | ||
1068 | int | |
86e3c28a CIK |
1069 | dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok, |
1070 | boolean_t large_block_ok, boolean_t compressok, int outfd, | |
1071 | uint64_t resumeobj, uint64_t resumeoff, | |
1072 | vnode_t *vp, offset_t *off) | |
70e083d2 TG |
1073 | { |
1074 | dsl_pool_t *dp; | |
1075 | dsl_dataset_t *ds; | |
1076 | int err; | |
1077 | boolean_t owned = B_FALSE; | |
1078 | ||
1079 | if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL) | |
1080 | return (SET_ERROR(EINVAL)); | |
1081 | ||
1082 | err = dsl_pool_hold(tosnap, FTAG, &dp); | |
1083 | if (err != 0) | |
1084 | return (err); | |
1085 | ||
1086 | if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) { | |
1087 | /* | |
1088 | * We are sending a filesystem or volume. Ensure | |
1089 | * that it doesn't change by owning the dataset. | |
1090 | */ | |
1091 | err = dsl_dataset_own(dp, tosnap, FTAG, &ds); | |
1092 | owned = B_TRUE; | |
1093 | } else { | |
1094 | err = dsl_dataset_hold(dp, tosnap, FTAG, &ds); | |
1095 | } | |
1096 | if (err != 0) { | |
1097 | dsl_pool_rele(dp, FTAG); | |
1098 | return (err); | |
1099 | } | |
1100 | ||
1101 | if (fromsnap != NULL) { | |
1102 | zfs_bookmark_phys_t zb; | |
1103 | boolean_t is_clone = B_FALSE; | |
1104 | int fsnamelen = strchr(tosnap, '@') - tosnap; | |
1105 | ||
1106 | /* | |
1107 | * If the fromsnap is in a different filesystem, then | |
1108 | * mark the send stream as a clone. | |
1109 | */ | |
1110 | if (strncmp(tosnap, fromsnap, fsnamelen) != 0 || | |
1111 | (fromsnap[fsnamelen] != '@' && | |
1112 | fromsnap[fsnamelen] != '#')) { | |
1113 | is_clone = B_TRUE; | |
1114 | } | |
1115 | ||
1116 | if (strchr(fromsnap, '@')) { | |
1117 | dsl_dataset_t *fromds; | |
1118 | err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds); | |
1119 | if (err == 0) { | |
1120 | if (!dsl_dataset_is_before(ds, fromds, 0)) | |
1121 | err = SET_ERROR(EXDEV); | |
1122 | zb.zbm_creation_time = | |
1123 | dsl_dataset_phys(fromds)->ds_creation_time; | |
1124 | zb.zbm_creation_txg = | |
1125 | dsl_dataset_phys(fromds)->ds_creation_txg; | |
1126 | zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid; | |
1127 | is_clone = (ds->ds_dir != fromds->ds_dir); | |
1128 | dsl_dataset_rele(fromds, FTAG); | |
1129 | } | |
1130 | } else { | |
1131 | err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb); | |
1132 | } | |
1133 | if (err != 0) { | |
1134 | dsl_dataset_rele(ds, FTAG); | |
1135 | dsl_pool_rele(dp, FTAG); | |
1136 | return (err); | |
1137 | } | |
1138 | err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone, | |
86e3c28a CIK |
1139 | embedok, large_block_ok, compressok, |
1140 | outfd, resumeobj, resumeoff, vp, off); | |
70e083d2 TG |
1141 | } else { |
1142 | err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE, | |
86e3c28a CIK |
1143 | embedok, large_block_ok, compressok, |
1144 | outfd, resumeobj, resumeoff, vp, off); | |
70e083d2 TG |
1145 | } |
1146 | if (owned) | |
1147 | dsl_dataset_disown(ds, FTAG); | |
1148 | else | |
1149 | dsl_dataset_rele(ds, FTAG); | |
1150 | return (err); | |
1151 | } | |
1152 | ||
1153 | static int | |
86e3c28a CIK |
1154 | dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t uncompressed, |
1155 | uint64_t compressed, boolean_t stream_compressed, uint64_t *sizep) | |
70e083d2 TG |
1156 | { |
1157 | int err; | |
86e3c28a | 1158 | uint64_t size; |
70e083d2 TG |
1159 | /* |
1160 | * Assume that space (both on-disk and in-stream) is dominated by | |
1161 | * data. We will adjust for indirect blocks and the copies property, | |
1162 | * but ignore per-object space used (e.g., dnodes and DRR_OBJECT records). | |
1163 | */ | |
1164 | ||
86e3c28a CIK |
1165 | uint64_t recordsize; |
1166 | uint64_t record_count; | |
1167 | objset_t *os; | |
1168 | VERIFY0(dmu_objset_from_ds(ds, &os)); | |
1169 | ||
1170 | /* Assume all (uncompressed) blocks are recordsize. */ | |
1171 | if (os->os_phys->os_type == DMU_OST_ZVOL) { | |
1172 | err = dsl_prop_get_int_ds(ds, | |
1173 | zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &recordsize); | |
1174 | } else { | |
1175 | err = dsl_prop_get_int_ds(ds, | |
1176 | zfs_prop_to_name(ZFS_PROP_RECORDSIZE), &recordsize); | |
1177 | } | |
1178 | if (err != 0) | |
1179 | return (err); | |
1180 | record_count = uncompressed / recordsize; | |
1181 | ||
1182 | /* | |
1183 | * If we're estimating a send size for a compressed stream, use the | |
1184 | * compressed data size to estimate the stream size. Otherwise, use the | |
1185 | * uncompressed data size. | |
1186 | */ | |
1187 | size = stream_compressed ? compressed : uncompressed; | |
1188 | ||
70e083d2 TG |
1189 | /* |
1190 | * Subtract out approximate space used by indirect blocks. | |
1191 | * Assume most space is used by data blocks (non-indirect, non-dnode). | |
86e3c28a | 1192 | * Assume no ditto blocks or internal fragmentation. |
70e083d2 TG |
1193 | * |
1194 | * Therefore, space used by indirect blocks is sizeof(blkptr_t) per | |
86e3c28a | 1195 | * block. |
70e083d2 | 1196 | */ |
86e3c28a | 1197 | size -= record_count * sizeof (blkptr_t); |
70e083d2 TG |
1198 | |
1199 | /* Add in the space for the record associated with each block. */ | |
86e3c28a | 1200 | size += record_count * sizeof (dmu_replay_record_t); |
70e083d2 TG |
1201 | |
1202 | *sizep = size; | |
1203 | ||
1204 | return (0); | |
1205 | } | |
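/*
 * Worked example (a sketch; sizeof (dmu_replay_record_t) is taken only as
 * "a few hundred bytes"): for 1 GiB of uncompressed data at a 128K
 * recordsize, record_count is 8192.  The estimate subtracts
 * 8192 * sizeof (blkptr_t) (8192 * 128 bytes = 1 MiB of indirect-block
 * overhead) and adds 8192 * sizeof (dmu_replay_record_t) (a couple of MiB
 * of per-record headers), so the result stays within a few MiB of the
 * raw data size.
 */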
1206 | ||
1207 | int | |
86e3c28a CIK |
1208 | dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, |
1209 | boolean_t stream_compressed, uint64_t *sizep) | |
70e083d2 TG |
1210 | { |
1211 | int err; | |
86e3c28a | 1212 | uint64_t uncomp, comp; |
70e083d2 TG |
1213 | |
1214 | ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool)); | |
1215 | ||
1216 | /* tosnap must be a snapshot */ | |
1217 | if (!ds->ds_is_snapshot) | |
1218 | return (SET_ERROR(EINVAL)); | |
1219 | ||
1220 | /* fromsnap, if provided, must be a snapshot */ | |
1221 | if (fromds != NULL && !fromds->ds_is_snapshot) | |
1222 | return (SET_ERROR(EINVAL)); | |
1223 | ||
1224 | /* | |
1225 | * fromsnap must be an earlier snapshot from the same fs as tosnap, | |
1226 | * or the origin's fs. | |
1227 | */ | |
1228 | if (fromds != NULL && !dsl_dataset_is_before(ds, fromds, 0)) | |
1229 | return (SET_ERROR(EXDEV)); | |
1230 | ||
86e3c28a | 1231 | /* Get compressed and uncompressed size estimates of changed data. */ |
70e083d2 | 1232 | if (fromds == NULL) { |
86e3c28a CIK |
1233 | uncomp = dsl_dataset_phys(ds)->ds_uncompressed_bytes; |
1234 | comp = dsl_dataset_phys(ds)->ds_compressed_bytes; | |
70e083d2 | 1235 | } else { |
86e3c28a | 1236 | uint64_t used; |
70e083d2 | 1237 | err = dsl_dataset_space_written(fromds, ds, |
86e3c28a | 1238 | &used, &comp, &uncomp); |
70e083d2 TG |
1239 | if (err != 0) |
1240 | return (err); | |
1241 | } | |
1242 | ||
86e3c28a CIK |
1243 | err = dmu_adjust_send_estimate_for_indirects(ds, uncomp, comp, |
1244 | stream_compressed, sizep); | |
1245 | /* | |
1246 | * Add the size of the BEGIN and END records to the estimate. | |
1247 | */ | |
1248 | *sizep += 2 * sizeof (dmu_replay_record_t); | |
70e083d2 TG |
1249 | return (err); |
1250 | } | |
1251 | ||
86e3c28a CIK |
1252 | struct calculate_send_arg { |
1253 | uint64_t uncompressed; | |
1254 | uint64_t compressed; | |
1255 | }; | |
1256 | ||
70e083d2 TG |
1257 | /* |
1258 | * Simple callback used to traverse the blocks of a snapshot and sum their | |
86e3c28a | 1259 | * uncompressed and compressed sizes. |
70e083d2 TG |
1260 | */ |
1261 | /* ARGSUSED */ | |
1262 | static int | |
1263 | dmu_calculate_send_traversal(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, | |
1264 | const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) | |
1265 | { | |
86e3c28a | 1266 | struct calculate_send_arg *space = arg; |
70e083d2 | 1267 | if (bp != NULL && !BP_IS_HOLE(bp)) { |
86e3c28a CIK |
1268 | space->uncompressed += BP_GET_UCSIZE(bp); |
1269 | space->compressed += BP_GET_PSIZE(bp); | |
70e083d2 TG |
1270 | } |
1271 | return (0); | |
1272 | } | |
1273 | ||
1274 | /* | |
1275 | * Given a destination snapshot and a TXG, calculate the approximate size of a | |
1276 | * send stream sent from that TXG. from_txg may be zero, indicating that the | |
1277 | * whole snapshot will be sent. | |
1278 | */ | |
1279 | int | |
1280 | dmu_send_estimate_from_txg(dsl_dataset_t *ds, uint64_t from_txg, | |
86e3c28a | 1281 | boolean_t stream_compressed, uint64_t *sizep) |
70e083d2 TG |
1282 | { |
1283 | int err; | |
86e3c28a | 1284 | struct calculate_send_arg size = { 0 }; |
70e083d2 TG |
1285 | |
1286 | ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool)); | |
1287 | ||
1288 | /* tosnap must be a snapshot */ | |
1289 | if (!dsl_dataset_is_snapshot(ds)) | |
1290 | return (SET_ERROR(EINVAL)); | |
1291 | ||
1292 | /* verify that from_txg is before the provided snapshot was taken */ | |
1293 | if (from_txg >= dsl_dataset_phys(ds)->ds_creation_txg) { | |
1294 | return (SET_ERROR(EXDEV)); | |
1295 | } | |
1296 | /* | |
1297 | * traverse the blocks of the snapshot with birth times after | |
1298 | * from_txg, summing their uncompressed and compressed sizes | |
1299 | */ | |
1300 | err = traverse_dataset(ds, from_txg, TRAVERSE_POST, | |
1301 | dmu_calculate_send_traversal, &size); | |
86e3c28a | 1302 | |
70e083d2 TG |
1303 | if (err) |
1304 | return (err); | |
1305 | ||
86e3c28a CIK |
1306 | err = dmu_adjust_send_estimate_for_indirects(ds, size.uncompressed, |
1307 | size.compressed, stream_compressed, sizep); | |
70e083d2 TG |
1308 | return (err); |
1309 | } | |
1310 | ||
1311 | typedef struct dmu_recv_begin_arg { | |
1312 | const char *drba_origin; | |
1313 | dmu_recv_cookie_t *drba_cookie; | |
1314 | cred_t *drba_cred; | |
1315 | uint64_t drba_snapobj; | |
1316 | } dmu_recv_begin_arg_t; | |
1317 | ||
1318 | static int | |
1319 | recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds, | |
1320 | uint64_t fromguid) | |
1321 | { | |
1322 | uint64_t val; | |
1323 | int error; | |
1324 | dsl_pool_t *dp = ds->ds_dir->dd_pool; | |
1325 | ||
1326 | /* temporary clone name must not exist */ | |
1327 | error = zap_lookup(dp->dp_meta_objset, | |
1328 | dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name, | |
1329 | 8, 1, &val); | |
1330 | if (error != ENOENT) | |
1331 | return (error == 0 ? EBUSY : error); | |
1332 | ||
1333 | /* new snapshot name must not exist */ | |
1334 | error = zap_lookup(dp->dp_meta_objset, | |
1335 | dsl_dataset_phys(ds)->ds_snapnames_zapobj, | |
1336 | drba->drba_cookie->drc_tosnap, 8, 1, &val); | |
1337 | if (error != ENOENT) | |
1338 | return (error == 0 ? EEXIST : error); | |
1339 | ||
1340 | /* | |
1341 | * Check snapshot limit before receiving. We'll recheck again at the | |
1342 | * end, but might as well abort before receiving if we're already over | |
1343 | * the limit. | |
1344 | * | |
1345 | * Note that we do not check the file system limit with | |
1346 | * dsl_dir_fscount_check because the temporary %clones don't count | |
1347 | * against that limit. | |
1348 | */ | |
1349 | error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT, | |
1350 | NULL, drba->drba_cred); | |
1351 | if (error != 0) | |
1352 | return (error); | |
1353 | ||
1354 | if (fromguid != 0) { | |
1355 | dsl_dataset_t *snap; | |
1356 | uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj; | |
1357 | ||
1358 | /* Find snapshot in this dir that matches fromguid. */ | |
1359 | while (obj != 0) { | |
1360 | error = dsl_dataset_hold_obj(dp, obj, FTAG, | |
1361 | &snap); | |
1362 | if (error != 0) | |
1363 | return (SET_ERROR(ENODEV)); | |
1364 | if (snap->ds_dir != ds->ds_dir) { | |
1365 | dsl_dataset_rele(snap, FTAG); | |
1366 | return (SET_ERROR(ENODEV)); | |
1367 | } | |
1368 | if (dsl_dataset_phys(snap)->ds_guid == fromguid) | |
1369 | break; | |
1370 | obj = dsl_dataset_phys(snap)->ds_prev_snap_obj; | |
1371 | dsl_dataset_rele(snap, FTAG); | |
1372 | } | |
1373 | if (obj == 0) | |
1374 | return (SET_ERROR(ENODEV)); | |
1375 | ||
1376 | if (drba->drba_cookie->drc_force) { | |
1377 | drba->drba_snapobj = obj; | |
1378 | } else { | |
1379 | /* | |
1380 | * If we are not forcing, there must be no | |
1381 | * changes since fromsnap. | |
1382 | */ | |
1383 | if (dsl_dataset_modified_since_snap(ds, snap)) { | |
1384 | dsl_dataset_rele(snap, FTAG); | |
1385 | return (SET_ERROR(ETXTBSY)); | |
1386 | } | |
1387 | drba->drba_snapobj = ds->ds_prev->ds_object; | |
1388 | } | |
1389 | ||
1390 | dsl_dataset_rele(snap, FTAG); | |
1391 | } else { | |
1392 | /* if full, then must be forced */ | |
1393 | if (!drba->drba_cookie->drc_force) | |
1394 | return (SET_ERROR(EEXIST)); | |
1395 | /* start from $ORIGIN@$ORIGIN, if supported */ | |
1396 | drba->drba_snapobj = dp->dp_origin_snap != NULL ? | |
1397 | dp->dp_origin_snap->ds_object : 0; | |
1398 | } | |
1399 | ||
1400 | return (0); | |
1401 | ||
1402 | } | |
1403 | ||
static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t fromguid = drrb->drr_fromguid;
	int flags = drrb->drr_flags;
	int error;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES ||
	    ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	if (drba->drba_cookie->drc_resumable &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
	 * record to a plain WRITE record, so the pool must have the
	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
	 * records.  Same with WRITE_EMBEDDED records that use LZ4 compression.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate large blocks
	 * to smaller ones, so the pool must have the LARGE_BLOCKS
	 * feature enabled if the stream has LARGE_BLOCKS.  Same with
	 * large dnodes.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_DNODE))
		return (SET_ERROR(ENOTSUP));

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* target fs already exists; recv into temp clone */

		/* Can't recv a clone into an existing fs */
		if (flags & DRR_FLAG_CLONE || drba->drba_origin) {
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = recv_begin_check_existing_impl(drba, ds, fromguid);
		dsl_dataset_rele(ds, FTAG);
	} else if (error == ENOENT) {
		/* target fs does not exist; must be a full backup or clone */
		char buf[ZFS_MAX_DATASET_NAME_LEN];

		/*
		 * If it's a non-clone incremental, we are missing the
		 * target fs, so fail the recv.
		 */
		if (fromguid != 0 && !(flags & DRR_FLAG_CLONE ||
		    drba->drba_origin))
			return (SET_ERROR(ENOENT));

		/*
		 * If we're receiving a full send as a clone, and it doesn't
		 * contain all the necessary free records and freeobject
		 * records, reject it.
		 */
		if (fromguid == 0 && drba->drba_origin &&
		    !(flags & DRR_FLAG_FREERECORDS))
			return (SET_ERROR(EINVAL));

		/* Open the parent of tofs */
		ASSERT3U(strlen(tofs), <, sizeof (buf));
		(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
		error = dsl_dataset_hold(dp, buf, FTAG, &ds);
		if (error != 0)
			return (error);

		/*
		 * Check filesystem and snapshot limits before receiving.
		 * We'll recheck snapshot limits again at the end (we create
		 * the filesystems and increment those counts during
		 * begin_sync).
		 */
		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		if (drba->drba_origin != NULL) {
			dsl_dataset_t *origin;
			error = dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin);
			if (error != 0) {
				dsl_dataset_rele(ds, FTAG);
				return (error);
			}
			if (!origin->ds_is_snapshot) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}
			if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
			    fromguid != 0) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(ENODEV));
			}
			dsl_dataset_rele(origin, FTAG);
		}
		dsl_dataset_rele(ds, FTAG);
		error = 0;
	}
	return (error);
}

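/*
 * Begin-sync for a fresh receive: create the dataset the stream will be
 * applied to (a temporary %recv clone if tofs already exists, otherwise a
 * new filesystem), persist any resume state in its ZAP, and mark it
 * inconsistent until the receive completes.
 */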
static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	const char *tofs = drba->drba_cookie->drc_tofs;
	dsl_dataset_t *ds, *newds;
	uint64_t dsobj;
	int error;
	uint64_t crflags = 0;

	if (drrb->drr_flags & DRR_FLAG_CI_DATA)
		crflags |= DS_FLAG_CI_DATASET;

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* create temporary clone */
		dsl_dataset_t *snap = NULL;
		if (drba->drba_snapobj != 0) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    drba->drba_snapobj, FTAG, &snap));
		}
		dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
		    snap, crflags, drba->drba_cred, tx);
		if (drba->drba_snapobj != 0)
			dsl_dataset_rele(snap, FTAG);
		dsl_dataset_rele(ds, FTAG);
	} else {
		dsl_dir_t *dd;
		const char *tail;
		dsl_dataset_t *origin = NULL;

		VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));

		if (drba->drba_origin != NULL) {
			VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin));
		}

		/* Create new dataset. */
		dsobj = dsl_dataset_create_sync(dd,
		    strrchr(tofs, '/') + 1,
		    origin, crflags, drba->drba_cred, tx);
		if (origin != NULL)
			dsl_dataset_rele(origin, FTAG);
		dsl_dir_rele(dd, FTAG);
		drba->drba_cookie->drc_newfs = B_TRUE;
	}
	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));

	if (drba->drba_cookie->drc_resumable) {
		uint64_t one = 1;
		uint64_t zero = 0;

		dsl_dataset_zapify(newds, tx);
		if (drrb->drr_fromguid != 0) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
			    8, 1, &drrb->drr_fromguid, tx));
		}
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
		    8, 1, &drrb->drr_toguid, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
		    1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
		    8, 1, &one, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
		    8, 1, &zero, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
		    8, 1, &zero, tx));
		if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
		    DMU_BACKUP_FEATURE_LARGE_BLOCKS) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK,
			    8, 1, &one, tx));
		}
		if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
		    DMU_BACKUP_FEATURE_EMBED_DATA) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
			    8, 1, &one, tx));
		}
		if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
		    DMU_BACKUP_FEATURE_COMPRESSED) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_COMPRESSOK,
			    8, 1, &one, tx));
		}
	}

	dmu_buf_will_dirty(newds->ds_dbuf, tx);
	dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;

	/*
	 * If we actually created a non-clone, we need to create the
	 * objset in our new dataset.
	 */
	rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG);
	if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
		(void) dmu_objset_create_impl(dp->dp_spa,
		    newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
	}
	rrw_exit(&newds->ds_bp_rwlock, FTAG);

	drba->drba_cookie->drc_ds = newds;

	spa_history_log_internal_ds(newds, "receive", tx, "");
}

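/*
 * Begin-check for a resumed receive: rather than creating anything, verify
 * that the partially received dataset (tofs/%recv, or tofs itself) exists,
 * is still marked inconsistent, and carries resume state that matches the
 * resuming stream.
 */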
static int
dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	int error;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;
	uint64_t val;

	/* 6 extra bytes for /%recv */
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(featureflags & DMU_BACKUP_FEATURE_RESUMING);

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES)
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
	 * record to a plain WRITE record, so the pool must have the
	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
	 * records.  Same with WRITE_EMBEDDED records that use LZ4 compression.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate large blocks
	 * to smaller ones, so the pool must have the LARGE_BLOCKS
	 * feature enabled if the stream has LARGE_BLOCKS.  Same with
	 * large dnodes.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_DNODE))
		return (SET_ERROR(ENOTSUP));

	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
	    tofs, recv_clone_name);

	if (dsl_dataset_hold(dp, recvname, FTAG, &ds) != 0) {
		/* %recv does not exist; continue in tofs */
		error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
		if (error != 0)
			return (error);
	}

	/* check that ds is marked inconsistent */
	if (!DS_IS_INCONSISTENT(ds)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/* check that there is resuming data, and that the toguid matches */
	if (!dsl_dataset_is_zapified(ds)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}
	error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
	if (error != 0 || drrb->drr_toguid != val) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Check if the receive is still running.  If so, it will be owned.
	 * Note that nothing else can own the dataset (e.g. after the receive
	 * fails) because it will be marked inconsistent.
	 */
	if (dsl_dataset_has_owner(ds)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EBUSY));
	}

	/* There should not be any snapshots of this fs yet. */
	if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Note: resume point will be checked when we process the first WRITE
	 * record.
	 */

	/* check that the origin matches */
	val = 0;
	(void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
	if (drrb->drr_fromguid != val) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}

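/*
 * Begin-sync for a resumed receive: reacquire ownership of the partially
 * received dataset and flag it inconsistent again while the remainder of
 * the stream is applied.
 */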
static void
dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	const char *tofs = drba->drba_cookie->drc_tofs;
	dsl_dataset_t *ds;
	uint64_t dsobj;
	/* 6 extra bytes for /%recv */
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];

	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
	    tofs, recv_clone_name);

	if (dsl_dataset_hold(dp, recvname, FTAG, &ds) != 0) {
		/* %recv does not exist; continue in tofs */
		VERIFY0(dsl_dataset_hold(dp, tofs, FTAG, &ds));
		drba->drba_cookie->drc_newfs = B_TRUE;
	}

	/* clear the inconsistent flag so that we can own it */
	ASSERT(DS_IS_INCONSISTENT(ds));
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
	dsobj = ds->ds_object;
	dsl_dataset_rele(ds, FTAG);

	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &ds));

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;

	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)));
	rrw_exit(&ds->ds_bp_rwlock, FTAG);

	drba->drba_cookie->drc_ds = ds;

	spa_history_log_internal_ds(ds, "resume receive", tx, "");
}

/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
    boolean_t force, boolean_t resumable, char *origin, dmu_recv_cookie_t *drc)
{
	dmu_recv_begin_arg_t drba = { 0 };

	bzero(drc, sizeof (dmu_recv_cookie_t));
	drc->drc_drr_begin = drr_begin;
	drc->drc_drrb = &drr_begin->drr_u.drr_begin;
	drc->drc_tosnap = tosnap;
	drc->drc_tofs = tofs;
	drc->drc_force = force;
	drc->drc_resumable = resumable;
	drc->drc_cred = CRED();
	drc->drc_clone = (origin != NULL);

	if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
		drc->drc_byteswap = B_TRUE;
		(void) fletcher_4_incremental_byteswap(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
		byteswap_record(drr_begin);
	} else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
		(void) fletcher_4_incremental_native(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	} else {
		return (SET_ERROR(EINVAL));
	}

	drba.drba_origin = origin;
	drba.drba_cookie = drc;
	drba.drba_cred = CRED();

	if (DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
	    DMU_BACKUP_FEATURE_RESUMING) {
		return (dsl_sync_task(tofs,
		    dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
		    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
	} else {
		return (dsl_sync_task(tofs,
		    dmu_recv_begin_check, dmu_recv_begin_sync,
		    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
	}
}

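/*
 * The receive path is split across two threads joined by a bqueue: the main
 * thread (struct receive_arg) reads records and payloads off the stream and
 * issues prefetches, while the writer thread (struct receive_writer_arg)
 * dequeues each receive_record_arg and applies it to the objset.  The
 * structures below carry that shared state.
 */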
struct receive_record_arg {
	dmu_replay_record_t header;
	void *payload; /* Pointer to a buffer containing the payload */
	/*
	 * If the record is a write, pointer to the arc_buf_t containing the
	 * payload.
	 */
	arc_buf_t *write_buf;
	int payload_size;
	uint64_t bytes_read; /* bytes read from stream when record created */
	boolean_t eos_marker; /* Marks the end of the stream */
	bqueue_node_t node;
};

struct receive_writer_arg {
	objset_t *os;
	boolean_t byteswap;
	bqueue_t q;

	/*
	 * These three args are used to signal to the main thread that we're
	 * done.
	 */
	kmutex_t mutex;
	kcondvar_t cv;
	boolean_t done;

	int err;
	/* A map from guid to dataset to help handle dedup'd streams. */
	avl_tree_t *guid_to_ds_map;
	boolean_t resumable;
	uint64_t last_object;
	uint64_t last_offset;
	uint64_t max_object; /* highest object ID referenced in stream */
	uint64_t bytes_read; /* bytes read when current record created */
};

struct objlist {
	list_t list; /* List of struct receive_objnode. */
	/*
	 * Last object looked up. Used to assert that objects are being looked
	 * up in ascending order.
	 */
	uint64_t last_lookup;
};

struct receive_objnode {
	list_node_t node;
	uint64_t object;
};

struct receive_arg {
	objset_t *os;
	vnode_t *vp; /* The vnode to read the stream from */
	uint64_t voff; /* The current offset in the stream */
	uint64_t bytes_read;
	/*
	 * A record that has had its payload read in, but hasn't yet been
	 * handed off to the worker thread.
	 */
	struct receive_record_arg *rrd;
	/* A record that has had its header read in, but not its payload. */
	struct receive_record_arg *next_rrd;
	zio_cksum_t cksum;
	zio_cksum_t prev_cksum;
	int err;
	boolean_t byteswap;
	/* Sorted list of objects not to issue prefetches for. */
	struct objlist ignore_objlist;
};

typedef struct guid_map_entry {
	uint64_t guid;
	dsl_dataset_t *gme_ds;
	avl_node_t avlnode;
} guid_map_entry_t;

static int
guid_compare(const void *arg1, const void *arg2)
{
	const guid_map_entry_t *gmep1 = (const guid_map_entry_t *)arg1;
	const guid_map_entry_t *gmep2 = (const guid_map_entry_t *)arg2;

	return (AVL_CMP(gmep1->guid, gmep2->guid));
}

static void
free_guid_map_onexit(void *arg)
{
	avl_tree_t *ca = arg;
	void *cookie = NULL;
	guid_map_entry_t *gmep;

	while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
		dsl_dataset_long_rele(gmep->gme_ds, gmep);
		dsl_dataset_rele(gmep->gme_ds, gmep);
		kmem_free(gmep, sizeof (guid_map_entry_t));
	}
	avl_destroy(ca);
	kmem_free(ca, sizeof (avl_tree_t));
}

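/*
 * Read exactly len bytes from the stream vnode into buf, looping over short
 * reads.  A read that makes no progress means the stream ended early; that
 * is reported as ECKSUM so that a resumable receive can pick up where it
 * left off.
 */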
static int
receive_read(struct receive_arg *ra, int len, void *buf)
{
	int done = 0;

	/*
	 * The code doesn't rely on this (lengths being multiples of 8).  See
	 * comment in dump_bytes.
	 */
	ASSERT0(len % 8);

	while (done < len) {
		ssize_t resid;

		ra->err = vn_rdwr(UIO_READ, ra->vp,
		    (char *)buf + done, len - done,
		    ra->voff, UIO_SYSSPACE, FAPPEND,
		    RLIM64_INFINITY, CRED(), &resid);

		if (resid == len - done) {
			/*
			 * Note: ECKSUM indicates that the receive
			 * was interrupted and can potentially be resumed.
			 */
			ra->err = SET_ERROR(ECKSUM);
		}
		ra->voff += len - done - resid;
		done = len - resid;
		if (ra->err != 0)
			return (ra->err);
	}

	ra->bytes_read += len;

	ASSERT3U(done, ==, len);
	return (0);
}

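/*
 * Byteswap a replay record in place, dispatching on the record type.  This
 * is needed when the stream was generated on a host of the opposite
 * endianness.
 */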
noinline static void
byteswap_record(dmu_replay_record_t *drr)
{
#define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
	drr->drr_type = BSWAP_32(drr->drr_type);
	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);

	switch (drr->drr_type) {
	case DRR_BEGIN:
		DO64(drr_begin.drr_magic);
		DO64(drr_begin.drr_versioninfo);
		DO64(drr_begin.drr_creation_time);
		DO32(drr_begin.drr_type);
		DO32(drr_begin.drr_flags);
		DO64(drr_begin.drr_toguid);
		DO64(drr_begin.drr_fromguid);
		break;
	case DRR_OBJECT:
		DO64(drr_object.drr_object);
		DO32(drr_object.drr_type);
		DO32(drr_object.drr_bonustype);
		DO32(drr_object.drr_blksz);
		DO32(drr_object.drr_bonuslen);
		DO64(drr_object.drr_toguid);
		break;
	case DRR_FREEOBJECTS:
		DO64(drr_freeobjects.drr_firstobj);
		DO64(drr_freeobjects.drr_numobjs);
		DO64(drr_freeobjects.drr_toguid);
		break;
	case DRR_WRITE:
		DO64(drr_write.drr_object);
		DO32(drr_write.drr_type);
		DO64(drr_write.drr_offset);
		DO64(drr_write.drr_logical_size);
		DO64(drr_write.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
		DO64(drr_write.drr_key.ddk_prop);
		DO64(drr_write.drr_compressed_size);
		break;
	case DRR_WRITE_BYREF:
		DO64(drr_write_byref.drr_object);
		DO64(drr_write_byref.drr_offset);
		DO64(drr_write_byref.drr_length);
		DO64(drr_write_byref.drr_toguid);
		DO64(drr_write_byref.drr_refguid);
		DO64(drr_write_byref.drr_refobject);
		DO64(drr_write_byref.drr_refoffset);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write_byref.
		    drr_key.ddk_cksum);
		DO64(drr_write_byref.drr_key.ddk_prop);
		break;
	case DRR_WRITE_EMBEDDED:
		DO64(drr_write_embedded.drr_object);
		DO64(drr_write_embedded.drr_offset);
		DO64(drr_write_embedded.drr_length);
		DO64(drr_write_embedded.drr_toguid);
		DO32(drr_write_embedded.drr_lsize);
		DO32(drr_write_embedded.drr_psize);
		break;
	case DRR_FREE:
		DO64(drr_free.drr_object);
		DO64(drr_free.drr_offset);
		DO64(drr_free.drr_length);
		DO64(drr_free.drr_toguid);
		break;
	case DRR_SPILL:
		DO64(drr_spill.drr_object);
		DO64(drr_spill.drr_length);
		DO64(drr_spill.drr_toguid);
		break;
	case DRR_END:
		DO64(drr_end.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
		break;
	default:
		break;
	}

	if (drr->drr_type != DRR_BEGIN) {
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
	}

#undef DO64
#undef DO32
}

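/*
 * Deduce how many block pointers the object's dnode will have.  A DMU_OT_SA
 * bonus buffer may grow into the spill area, so it implies a single blkptr;
 * otherwise each unused 128-byte (1 << SPA_BLKPTRSHIFT) chunk of the legacy
 * bonus area yields one additional blkptr.
 */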
static inline uint8_t
deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
{
	if (bonus_type == DMU_OT_SA) {
		return (1);
	} else {
		return (1 +
		    ((DN_OLD_MAX_BONUSLEN -
		    MIN(DN_OLD_MAX_BONUSLEN, bonus_size)) >> SPA_BLKPTRSHIFT));
	}
}

static void
save_resume_state(struct receive_writer_arg *rwa,
    uint64_t object, uint64_t offset, dmu_tx_t *tx)
{
	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;

	if (!rwa->resumable)
		return;

	/*
	 * We use ds_resume_bytes[] != 0 to indicate that we need to
	 * update this on disk, so it must not be 0.
	 */
	ASSERT(rwa->bytes_read != 0);

	/*
	 * We only resume from write records, which have a valid
	 * (non-meta-dnode) object number.
	 */
	ASSERT(object != 0);

	/*
	 * For resuming to work correctly, we must receive records in order,
	 * sorted by object,offset.  This is checked by the callers, but
	 * assert it here for good measure.
	 */
	ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
	ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
	    offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
	ASSERT3U(rwa->bytes_read, >=,
	    rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);

	rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
	rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
	rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
}

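/*
 * Apply a DRR_OBJECT record: validate it, free the old object contents
 * first if the block size or blkptr count must shrink, then (re)allocate
 * the dnode and copy in the bonus buffer.
 */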
noinline static int
receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
    void *data)
{
	dmu_object_info_t doi;
	dmu_tx_t *tx;
	uint64_t object;
	int err;

	if (drro->drr_type == DMU_OT_NONE ||
	    !DMU_OT_IS_VALID(drro->drr_type) ||
	    !DMU_OT_IS_VALID(drro->drr_bonustype) ||
	    drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
	    drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
	    drro->drr_bonuslen >
	    DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
	    drro->drr_dn_slots >
	    (spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) {
		return (SET_ERROR(EINVAL));
	}

	err = dmu_object_info(rwa->os, drro->drr_object, &doi);

	if (err != 0 && err != ENOENT)
		return (SET_ERROR(EINVAL));
	object = err == 0 ? drro->drr_object : DMU_NEW_OBJECT;

	if (drro->drr_object > rwa->max_object)
		rwa->max_object = drro->drr_object;

	/*
	 * If we are losing blkptrs or changing the block size this must
	 * be a new file instance.  We must clear out the previous file
	 * contents before we can change this type of metadata in the dnode.
	 */
	if (err == 0) {
		int nblkptr;

		nblkptr = deduce_nblkptr(drro->drr_bonustype,
		    drro->drr_bonuslen);

		if (drro->drr_blksz != doi.doi_data_block_size ||
		    nblkptr < doi.doi_nblkptr) {
			err = dmu_free_long_range(rwa->os, drro->drr_object,
			    0, DMU_OBJECT_END);
			if (err != 0)
				return (SET_ERROR(EINVAL));
		}
	}

	tx = dmu_tx_create(rwa->os);
	dmu_tx_hold_bonus(tx, object);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	if (object == DMU_NEW_OBJECT) {
		/* currently free, want to be allocated */
		err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen,
		    drro->drr_dn_slots << DNODE_SHIFT, tx);
	} else if (drro->drr_type != doi.doi_type ||
	    drro->drr_blksz != doi.doi_data_block_size ||
	    drro->drr_bonustype != doi.doi_bonus_type ||
	    drro->drr_bonuslen != doi.doi_bonus_size) {
		/* currently allocated, but with different properties */
		err = dmu_object_reclaim(rwa->os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen, tx);
	}
	if (err != 0) {
		dmu_tx_commit(tx);
		return (SET_ERROR(EINVAL));
	}

	dmu_object_set_checksum(rwa->os, drro->drr_object,
	    drro->drr_checksumtype, tx);
	dmu_object_set_compress(rwa->os, drro->drr_object,
	    drro->drr_compress, tx);

	if (data != NULL) {
		dmu_buf_t *db;

		VERIFY0(dmu_bonus_hold(rwa->os, drro->drr_object, FTAG, &db));
		dmu_buf_will_dirty(db, tx);

		ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
		bcopy(data, db->db_data, drro->drr_bonuslen);
		if (rwa->byteswap) {
			dmu_object_byteswap_t byteswap =
			    DMU_OT_BYTESWAP(drro->drr_bonustype);
			dmu_ot_byteswap[byteswap].ob_func(db->db_data,
			    drro->drr_bonuslen);
		}
		dmu_buf_rele(db, FTAG);
	}
	dmu_tx_commit(tx);

	return (0);
}

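/*
 * Apply a DRR_FREEOBJECTS record by freeing every allocated object in the
 * range [drr_firstobj, drr_firstobj + drr_numobjs).
 */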
/* ARGSUSED */
noinline static int
receive_freeobjects(struct receive_writer_arg *rwa,
    struct drr_freeobjects *drrfo)
{
	uint64_t obj;
	int next_err = 0;

	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
		return (SET_ERROR(EINVAL));

	for (obj = drrfo->drr_firstobj == 0 ? 1 : drrfo->drr_firstobj;
	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs && next_err == 0;
	    next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) {
		dmu_object_info_t doi;
		int err;

		err = dmu_object_info(rwa->os, obj, &doi);
		if (err == ENOENT)
			continue;
		else if (err != 0)
			return (err);

		err = dmu_free_long_object(rwa->os, obj);
		if (err != 0)
			return (err);

		if (obj > rwa->max_object)
			rwa->max_object = obj;
	}
	if (next_err != ESRCH)
		return (next_err);
	return (0);
}

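/*
 * Apply a DRR_WRITE record: enforce that writes arrive in increasing
 * (object, offset) order so that resuming works, assign the loaned ARC
 * buffer directly to the destination dbuf, and save the resume state in
 * the same transaction.
 */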
noinline static int
receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
    arc_buf_t *abuf)
{
	dmu_tx_t *tx;
	dmu_buf_t *bonus;
	int err;

	if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset ||
	    !DMU_OT_IS_VALID(drrw->drr_type))
		return (SET_ERROR(EINVAL));

	/*
	 * For resuming to work, records must be in increasing order
	 * by (object, offset).
	 */
	if (drrw->drr_object < rwa->last_object ||
	    (drrw->drr_object == rwa->last_object &&
	    drrw->drr_offset < rwa->last_offset)) {
		return (SET_ERROR(EINVAL));
	}
	rwa->last_object = drrw->drr_object;
	rwa->last_offset = drrw->drr_offset;

	if (rwa->last_object > rwa->max_object)
		rwa->max_object = rwa->last_object;

	if (dmu_object_info(rwa->os, drrw->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_write(tx, drrw->drr_object,
	    drrw->drr_offset, drrw->drr_logical_size);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	if (rwa->byteswap) {
		dmu_object_byteswap_t byteswap =
		    DMU_OT_BYTESWAP(drrw->drr_type);
		dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
		    DRR_WRITE_PAYLOAD_SIZE(drrw));
	}

	/* use the bonus buf to look up the dnode in dmu_assign_arcbuf */
	if (dmu_bonus_hold(rwa->os, drrw->drr_object, FTAG, &bonus) != 0) {
		/* the tx is already assigned, so commit it rather than leak it */
		dmu_tx_commit(tx);
		return (SET_ERROR(EINVAL));
	}
	dmu_assign_arcbuf(bonus, drrw->drr_offset, abuf, tx);

	/*
	 * Note: If the receive fails, we want the resume stream to start
	 * with the same record that we last successfully received (as opposed
	 * to the next record), so that we can verify that we are
	 * resuming from the correct location.
	 */
	save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
	dmu_tx_commit(tx);
	dmu_buf_rele(bonus, FTAG);

	return (0);
}

/*
 * Handle a DRR_WRITE_BYREF record.  This record is used in dedup'ed
 * streams to refer to a copy of the data that is already on the
 * system because it came in earlier in the stream.  This function
 * finds the earlier copy of the data, and uses that copy instead of
 * data from the stream to fulfill this write.
 */
static int
receive_write_byref(struct receive_writer_arg *rwa,
    struct drr_write_byref *drrwbr)
{
	dmu_tx_t *tx;
	int err;
	guid_map_entry_t gmesrch;
	guid_map_entry_t *gmep;
	avl_index_t where;
	objset_t *ref_os = NULL;
	dmu_buf_t *dbp;

	if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
		return (SET_ERROR(EINVAL));

	/*
	 * If the GUID of the referenced dataset is different from the
	 * GUID of the target dataset, find the referenced dataset.
	 */
	if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
		gmesrch.guid = drrwbr->drr_refguid;
		if ((gmep = avl_find(rwa->guid_to_ds_map, &gmesrch,
		    &where)) == NULL) {
			return (SET_ERROR(EINVAL));
		}
		if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
			return (SET_ERROR(EINVAL));
	} else {
		ref_os = rwa->os;
	}

	if (drrwbr->drr_object > rwa->max_object)
		rwa->max_object = drrwbr->drr_object;

	err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
	    drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH);
	if (err != 0)
		return (err);

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_write(tx, drrwbr->drr_object,
	    drrwbr->drr_offset, drrwbr->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		/* release the hold on the referenced buffer before bailing */
		dmu_buf_rele(dbp, FTAG);
		dmu_tx_abort(tx);
		return (err);
	}
	dmu_write(rwa->os, drrwbr->drr_object,
	    drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
	dmu_buf_rele(dbp, FTAG);

	/* See comment in receive_write. */
	save_resume_state(rwa, drrwbr->drr_object, drrwbr->drr_offset, tx);
	dmu_tx_commit(tx);
	return (0);
}

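/*
 * Apply a DRR_WRITE_EMBEDDED record: the (possibly compressed) payload is
 * small enough to live directly inside a block pointer, so it is handed to
 * dmu_write_embedded() rather than being written to a separate data block.
 */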
static int
receive_write_embedded(struct receive_writer_arg *rwa,
    struct drr_write_embedded *drrwe, void *data)
{
	dmu_tx_t *tx;
	int err;

	if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
		return (SET_ERROR(EINVAL));

	if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
		return (SET_ERROR(EINVAL));

	if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
		return (SET_ERROR(EINVAL));
	if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
		return (SET_ERROR(EINVAL));

	if (drrwe->drr_object > rwa->max_object)
		rwa->max_object = drrwe->drr_object;

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_write(tx, drrwe->drr_object,
	    drrwe->drr_offset, drrwe->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	dmu_write_embedded(rwa->os, drrwe->drr_object,
	    drrwe->drr_offset, data, drrwe->drr_etype,
	    drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
	    rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);

	/* See comment in receive_write. */
	save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
	dmu_tx_commit(tx);
	return (0);
}

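/*
 * Apply a DRR_SPILL record by copying the payload into the object's spill
 * block, growing the spill block first if it is currently too small.
 */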
static int
receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
    void *data)
{
	dmu_tx_t *tx;
	dmu_buf_t *db, *db_spill;
	int err;

	if (drrs->drr_length < SPA_MINBLOCKSIZE ||
	    drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
		return (SET_ERROR(EINVAL));

	if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	if (drrs->drr_object > rwa->max_object)
		rwa->max_object = drrs->drr_object;

	VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
	if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
		dmu_buf_rele(db, FTAG);
		return (err);
	}

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_spill(tx, db->db_object);

	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_buf_rele(db, FTAG);
		dmu_buf_rele(db_spill, FTAG);
		dmu_tx_abort(tx);
		return (err);
	}
	dmu_buf_will_dirty(db_spill, tx);

	if (db_spill->db_size < drrs->drr_length)
		VERIFY0(dbuf_spill_set_blksz(db_spill,
		    drrs->drr_length, tx));
	bcopy(data, db_spill->db_data, drrs->drr_length);

	dmu_buf_rele(db, FTAG);
	dmu_buf_rele(db_spill, FTAG);

	dmu_tx_commit(tx);
	return (0);
}

/* ARGSUSED */
noinline static int
receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
{
	int err;

	if (drrf->drr_length != DMU_OBJECT_END &&
	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
		return (SET_ERROR(EINVAL));

	if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	if (drrf->drr_object > rwa->max_object)
		rwa->max_object = drrf->drr_object;

	err = dmu_free_long_range(rwa->os, drrf->drr_object,
	    drrf->drr_offset, drrf->drr_length);

	return (err);
}

/* used to destroy the drc_ds on error */
static void
dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
{
	if (drc->drc_resumable) {
		/* wait for our resume state to be written to disk */
		txg_wait_synced(drc->drc_ds->ds_dir->dd_pool, 0);
		dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
	} else {
		char name[ZFS_MAX_DATASET_NAME_LEN];
		dsl_dataset_name(drc->drc_ds, name);
		dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
		(void) dsl_destroy_head(name);
	}
}

static void
receive_cksum(struct receive_arg *ra, int len, void *buf)
{
	if (ra->byteswap) {
		(void) fletcher_4_incremental_byteswap(buf, len, &ra->cksum);
	} else {
		(void) fletcher_4_incremental_native(buf, len, &ra->cksum);
	}
}

/*
 * Read the payload into a buffer of size len, and update the current record's
 * payload field.
 * Allocate ra->next_rrd and read the next record's header into
 * ra->next_rrd->header.
 * Verify checksum of payload and next record.
 */
static int
receive_read_payload_and_next_header(struct receive_arg *ra, int len, void *buf)
{
	int err;
	zio_cksum_t cksum_orig;
	zio_cksum_t *cksump;

	if (len != 0) {
		ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
		err = receive_read(ra, len, buf);
		if (err != 0)
			return (err);
		receive_cksum(ra, len, buf);

		/* note: rrd is NULL when reading the begin record's payload */
		if (ra->rrd != NULL) {
			ra->rrd->payload = buf;
			ra->rrd->payload_size = len;
			ra->rrd->bytes_read = ra->bytes_read;
		}
	}

	ra->prev_cksum = ra->cksum;

	ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP);
	err = receive_read(ra, sizeof (ra->next_rrd->header),
	    &ra->next_rrd->header);
	ra->next_rrd->bytes_read = ra->bytes_read;
	if (err != 0) {
		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
		ra->next_rrd = NULL;
		return (err);
	}
	if (ra->next_rrd->header.drr_type == DRR_BEGIN) {
		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
		ra->next_rrd = NULL;
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Note: checksum is of everything up to but not including the
	 * checksum itself.
	 */
	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
	receive_cksum(ra,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    &ra->next_rrd->header);

	cksum_orig = ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
	cksump = &ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;

	if (ra->byteswap)
		byteswap_record(&ra->next_rrd->header);

	if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
	    !ZIO_CHECKSUM_EQUAL(ra->cksum, *cksump)) {
		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
		ra->next_rrd = NULL;
		return (SET_ERROR(ECKSUM));
	}

	receive_cksum(ra, sizeof (cksum_orig), &cksum_orig);

	return (0);
}

static void
objlist_create(struct objlist *list)
{
	list_create(&list->list, sizeof (struct receive_objnode),
	    offsetof(struct receive_objnode, node));
	list->last_lookup = 0;
}

static void
objlist_destroy(struct objlist *list)
{
	struct receive_objnode *n;

	for (n = list_remove_head(&list->list);
	    n != NULL; n = list_remove_head(&list->list)) {
		kmem_free(n, sizeof (*n));
	}
	list_destroy(&list->list);
}

/*
 * This function looks through the objlist to see if the specified object
 * number is contained in the objlist.  In the process, it will remove all
 * object numbers in the list that are smaller than the specified object
 * number.  Thus, any lookup of an object number smaller than a previously
 * looked up object number will always return false; therefore, all lookups
 * should be done in ascending order.
 */
static boolean_t
objlist_exists(struct objlist *list, uint64_t object)
{
	struct receive_objnode *node = list_head(&list->list);
	ASSERT3U(object, >=, list->last_lookup);
	list->last_lookup = object;
	while (node != NULL && node->object < object) {
		VERIFY3P(node, ==, list_remove_head(&list->list));
		kmem_free(node, sizeof (*node));
		node = list_head(&list->list);
	}
	return (node != NULL && node->object == object);
}

/*
 * The objlist is a list of object numbers stored in ascending order.
 * However, the insertion of new object numbers does not seek out the correct
 * location to store a new object number; instead, it appends it to the list
 * for simplicity.  Thus, any users must take care to only insert new object
 * numbers in ascending order.
 */
static void
objlist_insert(struct objlist *list, uint64_t object)
{
	struct receive_objnode *node = kmem_zalloc(sizeof (*node), KM_SLEEP);
	node->object = object;
#ifdef ZFS_DEBUG
	{
		struct receive_objnode *last_object = list_tail(&list->list);
		uint64_t last_objnum = (last_object != NULL ?
		    last_object->object : 0);
		ASSERT3U(node->object, >, last_objnum);
	}
#endif
	list_insert_tail(&list->list, node);
}

/*
 * Issue the prefetch reads for any necessary indirect blocks.
 *
 * We use the object ignore list to tell us whether or not to issue prefetches
 * for a given object.  We do this for both correctness (in case the blocksize
 * of an object has changed) and performance (if the object doesn't exist,
 * don't needlessly try to issue prefetches).  We also trim the list as we go
 * through the stream to prevent it from growing to an unbounded size.
 *
 * The object numbers within will always be in sorted order, and any write
 * records we see will also be in sorted order, but they're not sorted with
 * respect to each other (i.e. we can get several object records before
 * receiving each object's write records).  As a result, once we've reached a
 * given object number, we can safely remove any reference to lower object
 * numbers in the ignore list.  In practice, we receive up to 32 object records
 * before receiving write records, so the list can have up to 32 nodes in it.
 */
/* ARGSUSED */
static void
receive_read_prefetch(struct receive_arg *ra,
    uint64_t object, uint64_t offset, uint64_t length)
{
	if (!objlist_exists(&ra->ignore_objlist, object)) {
		dmu_prefetch(ra->os, object, 1, offset, length,
		    ZIO_PRIORITY_SYNC_READ);
	}
}

/*
 * Read records off the stream, issuing any necessary prefetches.
 */
static int
receive_read_record(struct receive_arg *ra)
{
	int err;

	switch (ra->rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &ra->rrd->header.drr_u.drr_object;
		uint32_t size = P2ROUNDUP(drro->drr_bonuslen, 8);
		void *buf = kmem_zalloc(size, KM_SLEEP);
		dmu_object_info_t doi;
		err = receive_read_payload_and_next_header(ra, size, buf);
		if (err != 0) {
			kmem_free(buf, size);
			return (err);
		}
		err = dmu_object_info(ra->os, drro->drr_object, &doi);
		/*
		 * See receive_read_prefetch for an explanation why we're
		 * storing this object in the ignore_objlist.
		 */
		if (err == ENOENT ||
		    (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
			objlist_insert(&ra->ignore_objlist, drro->drr_object);
			err = 0;
		}
		return (err);
	}
	case DRR_FREEOBJECTS:
	{
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		return (err);
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &ra->rrd->header.drr_u.drr_write;
		arc_buf_t *abuf;
		boolean_t is_meta = DMU_OT_IS_METADATA(drrw->drr_type);
		if (DRR_WRITE_COMPRESSED(drrw)) {
			ASSERT3U(drrw->drr_compressed_size, >, 0);
			ASSERT3U(drrw->drr_logical_size, >=,
			    drrw->drr_compressed_size);
			ASSERT(!is_meta);
			abuf = arc_loan_compressed_buf(
			    dmu_objset_spa(ra->os),
			    drrw->drr_compressed_size, drrw->drr_logical_size,
			    drrw->drr_compressiontype);
		} else {
			abuf = arc_loan_buf(dmu_objset_spa(ra->os),
			    is_meta, drrw->drr_logical_size);
		}

		err = receive_read_payload_and_next_header(ra,
		    DRR_WRITE_PAYLOAD_SIZE(drrw), abuf->b_data);
		if (err != 0) {
			dmu_return_arcbuf(abuf);
			return (err);
		}
		ra->rrd->write_buf = abuf;
		receive_read_prefetch(ra, drrw->drr_object, drrw->drr_offset,
		    drrw->drr_logical_size);
		return (err);
	}
	case DRR_WRITE_BYREF:
	{
		struct drr_write_byref *drrwb =
		    &ra->rrd->header.drr_u.drr_write_byref;
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		receive_read_prefetch(ra, drrwb->drr_object, drrwb->drr_offset,
		    drrwb->drr_length);
		return (err);
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &ra->rrd->header.drr_u.drr_write_embedded;
		uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
		void *buf = kmem_zalloc(size, KM_SLEEP);

		err = receive_read_payload_and_next_header(ra, size, buf);
		if (err != 0) {
			kmem_free(buf, size);
			return (err);
		}

		receive_read_prefetch(ra, drrwe->drr_object, drrwe->drr_offset,
		    drrwe->drr_length);
		return (err);
	}
	case DRR_FREE:
	{
		/*
		 * It might be beneficial to prefetch indirect blocks here, but
		 * we don't really have the data to decide for sure.
		 */
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		return (err);
	}
	case DRR_END:
	{
		struct drr_end *drre = &ra->rrd->header.drr_u.drr_end;
		if (!ZIO_CHECKSUM_EQUAL(ra->prev_cksum, drre->drr_checksum))
			return (SET_ERROR(ECKSUM));
		return (0);
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &ra->rrd->header.drr_u.drr_spill;
		void *buf = kmem_zalloc(drrs->drr_length, KM_SLEEP);
		err = receive_read_payload_and_next_header(ra, drrs->drr_length,
		    buf);
		if (err != 0)
			kmem_free(buf, drrs->drr_length);
		return (err);
	}
	default:
		return (SET_ERROR(EINVAL));
	}
}

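/*
 * Log a one-line summary of a record and the error it produced; dprintf()
 * is compiled out unless ZFS debugging is enabled, so this costs nothing
 * in production builds.
 */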
static void
dprintf_drr(struct receive_record_arg *rrd, int err)
{
	switch (rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &rrd->header.drr_u.drr_object;
		dprintf("drr_type = OBJECT obj = %llu type = %u "
		    "bonustype = %u blksz = %u bonuslen = %u cksumtype = %u "
		    "compress = %u dn_slots = %u err = %d\n",
		    drro->drr_object, drro->drr_type, drro->drr_bonustype,
		    drro->drr_blksz, drro->drr_bonuslen,
		    drro->drr_checksumtype, drro->drr_compress,
		    drro->drr_dn_slots, err);
		break;
	}
	case DRR_FREEOBJECTS:
	{
		struct drr_freeobjects *drrfo =
		    &rrd->header.drr_u.drr_freeobjects;
		dprintf("drr_type = FREEOBJECTS firstobj = %llu "
		    "numobjs = %llu err = %d\n",
		    drrfo->drr_firstobj, drrfo->drr_numobjs, err);
		break;
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &rrd->header.drr_u.drr_write;
		dprintf("drr_type = WRITE obj = %llu type = %u offset = %llu "
		    "lsize = %llu cksumtype = %u cksumflags = %u "
		    "compress = %u psize = %llu err = %d\n",
		    drrw->drr_object, drrw->drr_type, drrw->drr_offset,
		    drrw->drr_logical_size, drrw->drr_checksumtype,
		    drrw->drr_checksumflags, drrw->drr_compressiontype,
		    drrw->drr_compressed_size, err);
		break;
	}
	case DRR_WRITE_BYREF:
	{
		struct drr_write_byref *drrwbr =
		    &rrd->header.drr_u.drr_write_byref;
		dprintf("drr_type = WRITE_BYREF obj = %llu offset = %llu "
		    "length = %llu toguid = %llx refguid = %llx "
		    "refobject = %llu refoffset = %llu cksumtype = %u "
		    "cksumflags = %u err = %d\n",
		    drrwbr->drr_object, drrwbr->drr_offset,
		    drrwbr->drr_length, drrwbr->drr_toguid,
		    drrwbr->drr_refguid, drrwbr->drr_refobject,
		    drrwbr->drr_refoffset, drrwbr->drr_checksumtype,
		    drrwbr->drr_checksumflags, err);
		break;
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &rrd->header.drr_u.drr_write_embedded;
		dprintf("drr_type = WRITE_EMBEDDED obj = %llu offset = %llu "
		    "length = %llu compress = %u etype = %u lsize = %u "
		    "psize = %u err = %d\n",
		    drrwe->drr_object, drrwe->drr_offset, drrwe->drr_length,
		    drrwe->drr_compression, drrwe->drr_etype,
		    drrwe->drr_lsize, drrwe->drr_psize, err);
		break;
	}
	case DRR_FREE:
	{
		struct drr_free *drrf = &rrd->header.drr_u.drr_free;
		dprintf("drr_type = FREE obj = %llu offset = %llu "
		    "length = %lld err = %d\n",
		    drrf->drr_object, drrf->drr_offset, drrf->drr_length,
		    err);
		break;
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
		dprintf("drr_type = SPILL obj = %llu length = %llu "
		    "err = %d\n", drrs->drr_object, drrs->drr_length, err);
		break;
	}
	default:
		return;
	}
}

/*
 * Commit the records to the pool.
 */
static int
receive_process_record(struct receive_writer_arg *rwa,
    struct receive_record_arg *rrd)
{
	int err;

	/* Processing in order, therefore bytes_read should be increasing. */
	ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
	rwa->bytes_read = rrd->bytes_read;

	switch (rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &rrd->header.drr_u.drr_object;
		err = receive_object(rwa, drro, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		break;
	}
	case DRR_FREEOBJECTS:
	{
		struct drr_freeobjects *drrfo =
		    &rrd->header.drr_u.drr_freeobjects;
		err = receive_freeobjects(rwa, drrfo);
		break;
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &rrd->header.drr_u.drr_write;
		err = receive_write(rwa, drrw, rrd->write_buf);
		/* if receive_write() is successful, it consumes the arc_buf */
		if (err != 0)
2958 | dmu_return_arcbuf(rrd->write_buf); | |
2959 | rrd->write_buf = NULL; | |
2960 | rrd->payload = NULL; | |
2961 | break; | |
2962 | } | |
2963 | case DRR_WRITE_BYREF: | |
2964 | { | |
2965 | struct drr_write_byref *drrwbr = | |
2966 | &rrd->header.drr_u.drr_write_byref; | |
2967 | err = receive_write_byref(rwa, drrwbr); | |
2968 | break; | |
2969 | } | |
2970 | case DRR_WRITE_EMBEDDED: | |
2971 | { | |
2972 | struct drr_write_embedded *drrwe = | |
2973 | &rrd->header.drr_u.drr_write_embedded; | |
2974 | err = receive_write_embedded(rwa, drrwe, rrd->payload); | |
2975 | kmem_free(rrd->payload, rrd->payload_size); | |
2976 | rrd->payload = NULL; | |
2977 | break; | |
2978 | } | |
2979 | case DRR_FREE: | |
2980 | { | |
2981 | struct drr_free *drrf = &rrd->header.drr_u.drr_free; | |
2982 | err = receive_free(rwa, drrf); | |
2983 | break; | |
2984 | } | |
2985 | case DRR_SPILL: | |
2986 | { | |
2987 | struct drr_spill *drrs = &rrd->header.drr_u.drr_spill; | |
2988 | err = receive_spill(rwa, drrs, rrd->payload); | |
2989 | kmem_free(rrd->payload, rrd->payload_size); | |
2990 | rrd->payload = NULL; | |
2991 | break; | |
2992 | } | |
2993 | default: | |
2994 | return (SET_ERROR(EINVAL)); | |
2995 | } | |
2996 | ||
2997 | if (err != 0) | |
2998 | dprintf_drr(rrd, err); | |
2999 | ||
3000 | return (err); | |
3001 | } | |
3002 | ||
3003 | /* | |
3004 | * dmu_recv_stream's worker thread; pull records off the queue, and then call | |
3005 | * receive_process_record. When we're done, signal the main thread and exit. | |
3006 | */ | |
3007 | static void | |
3008 | receive_writer_thread(void *arg) | |
3009 | { | |
3010 | struct receive_writer_arg *rwa = arg; | |
3011 | struct receive_record_arg *rrd; | |
3012 | fstrans_cookie_t cookie = spl_fstrans_mark(); | |
3013 | ||
3014 | for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker; | |
3015 | rrd = bqueue_dequeue(&rwa->q)) { | |
3016 | /* | |
3017 | * If there's an error, the main thread will stop putting things | |
3018 | * on the queue, but we need to clear everything in it before we | |
3019 | * can exit. | |
3020 | */ | |
3021 | if (rwa->err == 0) { | |
3022 | rwa->err = receive_process_record(rwa, rrd); | |
3023 | } else if (rrd->write_buf != NULL) { | |
3024 | dmu_return_arcbuf(rrd->write_buf); | |
3025 | rrd->write_buf = NULL; | |
3026 | rrd->payload = NULL; | |
3027 | } else if (rrd->payload != NULL) { | |
3028 | kmem_free(rrd->payload, rrd->payload_size); | |
3029 | rrd->payload = NULL; | |
3030 | } | |
3031 | kmem_free(rrd, sizeof (*rrd)); | |
3032 | } | |
3033 | kmem_free(rrd, sizeof (*rrd)); | |
3034 | mutex_enter(&rwa->mutex); | |
3035 | rwa->done = B_TRUE; | |
3036 | cv_signal(&rwa->cv); | |
3037 | mutex_exit(&rwa->mutex); | |
3038 | spl_fstrans_unmark(cookie); | |
3039 | thread_exit(); | |
3040 | } | |
3041 | ||
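/*
 * For a resumed receive, verify that the resume point in the stream's BEGIN
 * payload (resume_object/resume_offset) matches the resume state recorded
 * on the partially received dataset; if not, reject the stream with EINVAL.
 */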
3042 | static int | |
3043 | resume_check(struct receive_arg *ra, nvlist_t *begin_nvl) | |
3044 | { | |
3045 | uint64_t val; | |
3046 | objset_t *mos = dmu_objset_pool(ra->os)->dp_meta_objset; | |
3047 | uint64_t dsobj = dmu_objset_id(ra->os); | |
3048 | uint64_t resume_obj, resume_off; | |
3049 | ||
3050 | if (nvlist_lookup_uint64(begin_nvl, | |
3051 | "resume_object", &resume_obj) != 0 || | |
3052 | nvlist_lookup_uint64(begin_nvl, | |
3053 | "resume_offset", &resume_off) != 0) { | |
3054 | return (SET_ERROR(EINVAL)); | |
3055 | } | |
3056 | VERIFY0(zap_lookup(mos, dsobj, | |
3057 | DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val)); | |
3058 | if (resume_obj != val) | |
3059 | return (SET_ERROR(EINVAL)); | |
3060 | VERIFY0(zap_lookup(mos, dsobj, | |
3061 | DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val)); | |
3062 | if (resume_off != val) | |
3063 | return (SET_ERROR(EINVAL)); | |
3064 | ||
3065 | return (0); | |
3066 | } | |
3067 | ||
3068 | /* | |
3069 | * Read in the stream's records, one by one, and apply them to the pool. There | |
3070 | * are two threads involved; the thread that calls this function will spin up a | |
3071 | * worker thread, read the records off the stream one by one, and issue | |
3072 | * prefetches for any necessary indirect blocks. It will then push the records | |
3073 | * onto an internal blocking queue. The worker thread will pull the records off | |
3074 | * the queue, and actually write the data into the DMU. This way, the worker | |
3075 | * thread doesn't have to wait for reads to complete, since everything it needs | |
3076 | * (the indirect blocks) will be prefetched. | |
3077 | * | |
70e083d2 TG |
3078 | * NB: callers *must* call dmu_recv_end() if this succeeds. |
3079 | */ | |
3080 | int | |
3081 | dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp, | |
3082 | int cleanup_fd, uint64_t *action_handlep) | |
3083 | { | |
86e3c28a CIK |
3084 | int err = 0; |
3085 | struct receive_arg *ra; | |
3086 | struct receive_writer_arg *rwa; | |
70e083d2 | 3087 | int featureflags; |
86e3c28a CIK |
3088 | uint32_t payloadlen; |
3089 | void *payload; | |
3090 | nvlist_t *begin_nvl = NULL; | |
3091 | ||
3092 | ra = kmem_zalloc(sizeof (*ra), KM_SLEEP); | |
3093 | rwa = kmem_zalloc(sizeof (*rwa), KM_SLEEP); | |
3094 | ||
3095 | ra->byteswap = drc->drc_byteswap; | |
3096 | ra->cksum = drc->drc_cksum; | |
3097 | ra->vp = vp; | |
3098 | ra->voff = *voffp; | |
3099 | ||
3100 | if (dsl_dataset_is_zapified(drc->drc_ds)) { | |
3101 | (void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset, | |
3102 | drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES, | |
3103 | sizeof (ra->bytes_read), 1, &ra->bytes_read); | |
3104 | } | |
70e083d2 | 3105 | |
86e3c28a | 3106 | objlist_create(&ra->ignore_objlist); |
70e083d2 TG |
3107 | |
3108 | /* these were verified in dmu_recv_begin */ | |
3109 | ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==, | |
3110 | DMU_SUBSTREAM); | |
3111 | ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES); | |
3112 | ||
3113 | /* | |
3114 | * Open the objset we are modifying. | |
3115 | */ | |
86e3c28a | 3116 | VERIFY0(dmu_objset_from_ds(drc->drc_ds, &ra->os)); |
70e083d2 TG |
3117 | |
3118 | ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT); | |
3119 | ||
3120 | featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo); | |
3121 | ||
3122 | /* if this stream is dedup'ed, set up the avl tree for guid mapping */ | |
3123 | if (featureflags & DMU_BACKUP_FEATURE_DEDUP) { | |
3124 | minor_t minor; | |
3125 | ||
3126 | if (cleanup_fd == -1) { | |
86e3c28a | 3127 | ra->err = SET_ERROR(EBADF); |
70e083d2 TG |
3128 | goto out; |
3129 | } | |
86e3c28a CIK |
3130 | ra->err = zfs_onexit_fd_hold(cleanup_fd, &minor); |
3131 | if (ra->err != 0) { | |
70e083d2 TG |
3132 | cleanup_fd = -1; |
3133 | goto out; | |
3134 | } | |
3135 | ||
3136 | if (*action_handlep == 0) { | |
86e3c28a | 3137 | rwa->guid_to_ds_map = |
70e083d2 | 3138 | kmem_alloc(sizeof (avl_tree_t), KM_SLEEP); |
86e3c28a | 3139 | avl_create(rwa->guid_to_ds_map, guid_compare, |
70e083d2 TG |
3140 | sizeof (guid_map_entry_t), |
3141 | offsetof(guid_map_entry_t, avlnode)); | |
86e3c28a CIK |
3142 | err = zfs_onexit_add_cb(minor, |
3143 | free_guid_map_onexit, rwa->guid_to_ds_map, | |
70e083d2 | 3144 | action_handlep); |
86e3c28a | 3145 | if (err != 0) |
70e083d2 TG |
3146 | goto out; |
3147 | } else { | |
86e3c28a CIK |
3148 | err = zfs_onexit_cb_data(minor, *action_handlep, |
3149 | (void **)&rwa->guid_to_ds_map); | |
3150 | if (err != 0) | |
70e083d2 TG |
3151 | goto out; |
3152 | } | |
3153 | ||
86e3c28a | 3154 | drc->drc_guid_to_ds_map = rwa->guid_to_ds_map; |
70e083d2 TG |
3155 | } |
3156 | ||
86e3c28a CIK |
3157 | payloadlen = drc->drc_drr_begin->drr_payloadlen; |
3158 | payload = NULL; | |
3159 | if (payloadlen != 0) | |
3160 | payload = kmem_alloc(payloadlen, KM_SLEEP); | |
3161 | ||
3162 | err = receive_read_payload_and_next_header(ra, payloadlen, payload); | |
3163 | if (err != 0) { | |
3164 | if (payloadlen != 0) | |
3165 | kmem_free(payload, payloadlen); | |
3166 | goto out; | |
3167 | } | |
3168 | if (payloadlen != 0) { | |
3169 | err = nvlist_unpack(payload, payloadlen, &begin_nvl, KM_SLEEP); | |
3170 | kmem_free(payload, payloadlen); | |
3171 | if (err != 0) | |
3172 | goto out; | |
3173 | } | |
3174 | ||
3175 | if (featureflags & DMU_BACKUP_FEATURE_RESUMING) { | |
3176 | err = resume_check(ra, begin_nvl); | |
3177 | if (err != 0) | |
3178 | goto out; | |
3179 | } | |
3180 | ||
3181 | (void) bqueue_init(&rwa->q, zfs_recv_queue_length, | |
3182 | offsetof(struct receive_record_arg, node)); | |
3183 | cv_init(&rwa->cv, NULL, CV_DEFAULT, NULL); | |
3184 | mutex_init(&rwa->mutex, NULL, MUTEX_DEFAULT, NULL); | |
3185 | rwa->os = ra->os; | |
3186 | rwa->byteswap = drc->drc_byteswap; | |
3187 | rwa->resumable = drc->drc_resumable; | |
3188 | ||
3189 | (void) thread_create(NULL, 0, receive_writer_thread, rwa, 0, curproc, | |
3190 | TS_RUN, minclsyspri); | |
70e083d2 | 3191 | /* |
86e3c28a CIK |
3192 | * We're reading rwa->err without locks, which is safe since we are the |
3193 | * only reader, and the worker thread is the only writer. It's ok if we | |
3194 | * miss a write for an iteration or two of the loop, since the writer | |
3195 | * thread will keep freeing records we send it until we send it an eos | |
3196 | * marker. | |
3197 | * | |
3198 | * We can leave this loop in 3 ways: First, if rwa->err is | |
3199 | * non-zero. In that case, the writer thread will free the rrd we just | |
3200 | * pushed. Second, if we're interrupted; in that case, either it's the | |
3201 | * first loop and ra->rrd was never allocated, or it's later and ra->rrd | |
3202 | * has been handed off to the writer thread who will free it. Finally, | |
3203 | * if receive_read_record fails or we're at the end of the stream, then | |
3204 | * we free ra->rrd and exit. | |
70e083d2 | 3205 | */ |
86e3c28a | 3206 | while (rwa->err == 0) { |
70e083d2 | 3207 | if (issig(JUSTLOOKING) && issig(FORREAL)) { |
86e3c28a CIK |
3208 | err = SET_ERROR(EINTR); |
3209 | break; | |
70e083d2 TG |
3210 | } |
3211 | ||
86e3c28a CIK |
3212 | ASSERT3P(ra->rrd, ==, NULL); |
3213 | ra->rrd = ra->next_rrd; | |
3214 | ra->next_rrd = NULL; | |
3215 | /* Allocates and loads header into ra->next_rrd */ | |
3216 | err = receive_read_record(ra); | |
70e083d2 | 3217 | |
86e3c28a CIK |
3218 | if (ra->rrd->header.drr_type == DRR_END || err != 0) { |
3219 | kmem_free(ra->rrd, sizeof (*ra->rrd)); | |
3220 | ra->rrd = NULL; | |
70e083d2 TG |
3221 | break; |
3222 | } | |
86e3c28a CIK |
3223 | |
3224 | bqueue_enqueue(&rwa->q, ra->rrd, | |
3225 | sizeof (struct receive_record_arg) + ra->rrd->payload_size); | |
3226 | ra->rrd = NULL; | |
3227 | } | |
3228 | if (ra->next_rrd == NULL) | |
3229 | ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP); | |
3230 | ra->next_rrd->eos_marker = B_TRUE; | |
3231 | bqueue_enqueue(&rwa->q, ra->next_rrd, 1); | |
3232 | ||
3233 | mutex_enter(&rwa->mutex); | |
3234 | while (!rwa->done) { | |
3235 | cv_wait(&rwa->cv, &rwa->mutex); | |
3236 | } | |
3237 | mutex_exit(&rwa->mutex); | |
3238 | ||
3239 | /* | |
3240 | * If we are receiving a full stream as a clone, all object IDs which | |
3241 | * are greater than the maximum ID referenced in the stream are | |
3242 | * by definition unused and must be freed. | |
3243 | */ | |
3244 | if (drc->drc_clone && drc->drc_drrb->drr_fromguid == 0) { | |
3245 | uint64_t obj = rwa->max_object + 1; | |
3246 | int free_err = 0; | |
3247 | int next_err = 0; | |
3248 | ||
3249 | while (next_err == 0) { | |
3250 | free_err = dmu_free_long_object(rwa->os, obj); | |
3251 | if (free_err != 0 && free_err != ENOENT) | |
3252 | break; | |
3253 | ||
3254 | next_err = dmu_object_next(rwa->os, &obj, FALSE, 0); | |
70e083d2 | 3255 | } |
86e3c28a CIK |
3256 | |
3257 | if (err == 0) { | |
3258 | if (free_err != 0 && free_err != ENOENT) | |
3259 | err = free_err; | |
3260 | else if (next_err != ESRCH) | |
3261 | err = next_err; | |
70e083d2 | 3262 | } |
70e083d2 | 3263 | } |
86e3c28a CIK |
3264 | |
3265 | cv_destroy(&rwa->cv); | |
3266 | mutex_destroy(&rwa->mutex); | |
3267 | bqueue_destroy(&rwa->q); | |
3268 | if (err == 0) | |
3269 | err = rwa->err; | |
70e083d2 TG |
3270 | |
3271 | out: | |
86e3c28a | 3272 | nvlist_free(begin_nvl); |
70e083d2 TG |
3273 | if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1)) |
3274 | zfs_onexit_fd_rele(cleanup_fd); | |
3275 | ||
86e3c28a | 3276 | if (err != 0) { |
70e083d2 | 3277 | /* |
86e3c28a CIK |
3278 | * Clean up references. If receive is not resumable, |
3279 | * destroy what we created, so we don't leave it in | |
3280 | * an inconsistent state. | |
70e083d2 TG |
3281 | */ |
3282 | dmu_recv_cleanup_ds(drc); | |
3283 | } | |
3284 | ||
86e3c28a CIK |
3285 | *voffp = ra->voff; |
3286 | objlist_destroy(&ra->ignore_objlist); | |
3287 | kmem_free(ra, sizeof (*ra)); | |
3288 | kmem_free(rwa, sizeof (*rwa)); | |
3289 | return (err); | |
70e083d2 TG |
3290 | } |
3291 | ||
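/*
 * Sync task check function for dmu_recv_end(). For a receive into an
 * existing filesystem, verify that the received clone can be swapped with
 * the origin head (destroying any intervening snapshots when drc_force is
 * set) and that the new snapshot can be created; for a new filesystem, only
 * the snapshot check is needed.
 */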
3292 | static int | |
3293 | dmu_recv_end_check(void *arg, dmu_tx_t *tx) | |
3294 | { | |
3295 | dmu_recv_cookie_t *drc = arg; | |
3296 | dsl_pool_t *dp = dmu_tx_pool(tx); | |
3297 | int error; | |
3298 | ||
3299 | ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag); | |
3300 | ||
3301 | if (!drc->drc_newfs) { | |
3302 | dsl_dataset_t *origin_head; | |
3303 | ||
3304 | error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head); | |
3305 | if (error != 0) | |
3306 | return (error); | |
3307 | if (drc->drc_force) { | |
3308 | /* | |
3309 | * We will destroy any snapshots in tofs (i.e. before | |
3310 | * origin_head) that are after the origin (which is | |
3311 | * the snap before drc_ds, because drc_ds cannot | |
3312 | * have any snaps of its own). | |
3313 | */ | |
3314 | uint64_t obj; | |
3315 | ||
3316 | obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj; | |
3317 | while (obj != | |
3318 | dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) { | |
3319 | dsl_dataset_t *snap; | |
3320 | error = dsl_dataset_hold_obj(dp, obj, FTAG, | |
3321 | &snap); | |
3322 | if (error != 0) | |
3323 | break; | |
3324 | if (snap->ds_dir != origin_head->ds_dir) | |
3325 | error = SET_ERROR(EINVAL); | |
3326 | if (error == 0) { | |
3327 | error = dsl_destroy_snapshot_check_impl( | |
3328 | snap, B_FALSE); | |
3329 | } | |
3330 | obj = dsl_dataset_phys(snap)->ds_prev_snap_obj; | |
3331 | dsl_dataset_rele(snap, FTAG); | |
3332 | if (error != 0) | |
3333 | break; | |
3334 | } | |
3335 | if (error != 0) { | |
3336 | dsl_dataset_rele(origin_head, FTAG); | |
3337 | return (error); | |
3338 | } | |
3339 | } | |
3340 | error = dsl_dataset_clone_swap_check_impl(drc->drc_ds, | |
3341 | origin_head, drc->drc_force, drc->drc_owner, tx); | |
3342 | if (error != 0) { | |
3343 | dsl_dataset_rele(origin_head, FTAG); | |
3344 | return (error); | |
3345 | } | |
3346 | error = dsl_dataset_snapshot_check_impl(origin_head, | |
3347 | drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred); | |
3348 | dsl_dataset_rele(origin_head, FTAG); | |
3349 | if (error != 0) | |
3350 | return (error); | |
3351 | ||
3352 | error = dsl_destroy_head_check_impl(drc->drc_ds, 1); | |
3353 | } else { | |
3354 | error = dsl_dataset_snapshot_check_impl(drc->drc_ds, | |
3355 | drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred); | |
3356 | } | |
3357 | return (error); | |
3358 | } | |
3359 | ||
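/*
 * Sync task apply function for dmu_recv_end(). For an existing filesystem,
 * destroy obsolete snapshots when forced, swap the received clone with the
 * origin head, and snapshot it; for a new filesystem, simply snapshot it.
 * Either way, DS_FLAG_INCONSISTENT is cleared, the new snapshot object is
 * recorded in drc_newsnapobj, and the hold from dmu_recv_begin() is
 * released.
 */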
3360 | static void | |
3361 | dmu_recv_end_sync(void *arg, dmu_tx_t *tx) | |
3362 | { | |
3363 | dmu_recv_cookie_t *drc = arg; | |
3364 | dsl_pool_t *dp = dmu_tx_pool(tx); | |
3365 | ||
3366 | spa_history_log_internal_ds(drc->drc_ds, "finish receiving", | |
3367 | tx, "snap=%s", drc->drc_tosnap); | |
3368 | ||
3369 | if (!drc->drc_newfs) { | |
3370 | dsl_dataset_t *origin_head; | |
3371 | ||
3372 | VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG, | |
3373 | &origin_head)); | |
3374 | ||
3375 | if (drc->drc_force) { | |
3376 | /* | |
3377 | * Destroy any snapshots of drc_tofs (origin_head) | |
3378 | * after the origin (the snap before drc_ds). | |
3379 | */ | |
3380 | uint64_t obj; | |
3381 | ||
3382 | obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj; | |
3383 | while (obj != | |
3384 | dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) { | |
3385 | dsl_dataset_t *snap; | |
3386 | VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG, | |
3387 | &snap)); | |
3388 | ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir); | |
3389 | obj = dsl_dataset_phys(snap)->ds_prev_snap_obj; | |
3390 | dsl_destroy_snapshot_sync_impl(snap, | |
3391 | B_FALSE, tx); | |
3392 | dsl_dataset_rele(snap, FTAG); | |
3393 | } | |
3394 | } | |
3395 | VERIFY3P(drc->drc_ds->ds_prev, ==, | |
3396 | origin_head->ds_prev); | |
3397 | ||
3398 | dsl_dataset_clone_swap_sync_impl(drc->drc_ds, | |
3399 | origin_head, tx); | |
3400 | dsl_dataset_snapshot_sync_impl(origin_head, | |
3401 | drc->drc_tosnap, tx); | |
3402 | ||
3403 | /* set snapshot's creation time and guid */ | |
3404 | dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx); | |
3405 | dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time = | |
3406 | drc->drc_drrb->drr_creation_time; | |
3407 | dsl_dataset_phys(origin_head->ds_prev)->ds_guid = | |
3408 | drc->drc_drrb->drr_toguid; | |
3409 | dsl_dataset_phys(origin_head->ds_prev)->ds_flags &= | |
3410 | ~DS_FLAG_INCONSISTENT; | |
3411 | ||
3412 | dmu_buf_will_dirty(origin_head->ds_dbuf, tx); | |
3413 | dsl_dataset_phys(origin_head)->ds_flags &= | |
3414 | ~DS_FLAG_INCONSISTENT; | |
3415 | ||
86e3c28a CIK |
3416 | drc->drc_newsnapobj = |
3417 | dsl_dataset_phys(origin_head)->ds_prev_snap_obj; | |
3418 | ||
70e083d2 TG |
3419 | dsl_dataset_rele(origin_head, FTAG); |
3420 | dsl_destroy_head_sync_impl(drc->drc_ds, tx); | |
3421 | ||
3422 | if (drc->drc_owner != NULL) | |
3423 | VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner); | |
3424 | } else { | |
3425 | dsl_dataset_t *ds = drc->drc_ds; | |
3426 | ||
3427 | dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx); | |
3428 | ||
3429 | /* set snapshot's creation time and guid */ | |
3430 | dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx); | |
3431 | dsl_dataset_phys(ds->ds_prev)->ds_creation_time = | |
3432 | drc->drc_drrb->drr_creation_time; | |
3433 | dsl_dataset_phys(ds->ds_prev)->ds_guid = | |
3434 | drc->drc_drrb->drr_toguid; | |
3435 | dsl_dataset_phys(ds->ds_prev)->ds_flags &= | |
3436 | ~DS_FLAG_INCONSISTENT; | |
3437 | ||
3438 | dmu_buf_will_dirty(ds->ds_dbuf, tx); | |
3439 | dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT; | |
86e3c28a CIK |
3440 | if (dsl_dataset_has_resume_receive_state(ds)) { |
3441 | (void) zap_remove(dp->dp_meta_objset, ds->ds_object, | |
3442 | DS_FIELD_RESUME_FROMGUID, tx); | |
3443 | (void) zap_remove(dp->dp_meta_objset, ds->ds_object, | |
3444 | DS_FIELD_RESUME_OBJECT, tx); | |
3445 | (void) zap_remove(dp->dp_meta_objset, ds->ds_object, | |
3446 | DS_FIELD_RESUME_OFFSET, tx); | |
3447 | (void) zap_remove(dp->dp_meta_objset, ds->ds_object, | |
3448 | DS_FIELD_RESUME_BYTES, tx); | |
3449 | (void) zap_remove(dp->dp_meta_objset, ds->ds_object, | |
3450 | DS_FIELD_RESUME_TOGUID, tx); | |
3451 | (void) zap_remove(dp->dp_meta_objset, ds->ds_object, | |
3452 | DS_FIELD_RESUME_TONAME, tx); | |
3453 | } | |
3454 | drc->drc_newsnapobj = | |
3455 | dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj; | |
70e083d2 | 3456 | } |
70e083d2 TG |
3457 | zvol_create_minors(dp->dp_spa, drc->drc_tofs, B_TRUE); |
3458 | /* | |
3459 | * Release the hold from dmu_recv_begin. This must be done before | |
3460 | * we return to open context, so that when we free the dataset's dnode, | |
3461 | * we can evict its bonus buffer. | |
3462 | */ | |
3463 | dsl_dataset_disown(drc->drc_ds, dmu_recv_tag); | |
3464 | drc->drc_ds = NULL; | |
3465 | } | |
3466 | ||
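/*
 * Add the snapshot identified by snapobj to the dedup stream's
 * GUID->dataset map, taking a long hold on it so it remains available for
 * later by-reference lookups.
 */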
3467 | static int | |
3468 | add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj) | |
3469 | { | |
3470 | dsl_pool_t *dp; | |
3471 | dsl_dataset_t *snapds; | |
3472 | guid_map_entry_t *gmep; | |
3473 | int err; | |
3474 | ||
3475 | ASSERT(guid_map != NULL); | |
3476 | ||
3477 | err = dsl_pool_hold(name, FTAG, &dp); | |
3478 | if (err != 0) | |
3479 | return (err); | |
3480 | gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP); | |
3481 | err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds); | |
3482 | if (err == 0) { | |
3483 | gmep->guid = dsl_dataset_phys(snapds)->ds_guid; | |
3484 | gmep->gme_ds = snapds; | |
3485 | avl_add(guid_map, gmep); | |
3486 | dsl_dataset_long_hold(snapds, gmep); | |
3487 | } else { | |
3488 | kmem_free(gmep, sizeof (*gmep)); | |
3489 | } | |
3490 | ||
3491 | dsl_pool_rele(dp, FTAG); | |
3492 | return (err); | |
3493 | } | |
3494 | ||
3495 | static int dmu_recv_end_modified_blocks = 3; | |
3496 | ||
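/*
 * Finish a receive into an existing filesystem: unmount the origin if
 * necessary, then run dmu_recv_end_check/_sync as a sync task.
 */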
3497 | static int | |
3498 | dmu_recv_existing_end(dmu_recv_cookie_t *drc) | |
3499 | { | |
70e083d2 | 3500 | #ifdef _KERNEL |
70e083d2 TG |
3501 | /* |
3502 | * We will be destroying the ds; make sure its origin is unmounted if | |
3503 | * necessary. | |
3504 | */ | |
86e3c28a | 3505 | char name[ZFS_MAX_DATASET_NAME_LEN]; |
70e083d2 TG |
3506 | dsl_dataset_name(drc->drc_ds, name); |
3507 | zfs_destroy_unmount_origin(name); | |
70e083d2 TG |
3508 | #endif |
3509 | ||
86e3c28a | 3510 | return (dsl_sync_task(drc->drc_tofs, |
70e083d2 | 3511 | dmu_recv_end_check, dmu_recv_end_sync, drc, |
86e3c28a | 3512 | dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL)); |
70e083d2 TG |
3513 | } |
3514 | ||
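/* Finish a receive that created a brand new filesystem. */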
3515 | static int | |
3516 | dmu_recv_new_end(dmu_recv_cookie_t *drc) | |
86e3c28a CIK |
3517 | { |
3518 | return (dsl_sync_task(drc->drc_tofs, | |
3519 | dmu_recv_end_check, dmu_recv_end_sync, drc, | |
3520 | dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL)); | |
3521 | } | |
3522 | ||
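/*
 * Finalize the receive started by dmu_recv_begin()/dmu_recv_stream(). On
 * failure the temporary receive dataset is cleaned up; on success, the new
 * snapshot is added to the dedup GUID map if one is in use.
 */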
3523 | int | |
3524 | dmu_recv_end(dmu_recv_cookie_t *drc, void *owner) | |
70e083d2 TG |
3525 | { |
3526 | int error; | |
3527 | ||
86e3c28a CIK |
3528 | drc->drc_owner = owner; |
3529 | ||
3530 | if (drc->drc_newfs) | |
3531 | error = dmu_recv_new_end(drc); | |
3532 | else | |
3533 | error = dmu_recv_existing_end(drc); | |
70e083d2 TG |
3534 | |
3535 | if (error != 0) { | |
3536 | dmu_recv_cleanup_ds(drc); | |
3537 | } else if (drc->drc_guid_to_ds_map != NULL) { | |
3538 | (void) add_ds_to_guidmap(drc->drc_tofs, | |
3539 | drc->drc_guid_to_ds_map, | |
3540 | drc->drc_newsnapobj); | |
3541 | } | |
3542 | return (error); | |
3543 | } | |
3544 | ||
70e083d2 TG |
3545 | /* |
3546 | * Return TRUE if this objset is currently being received into. | |
3547 | */ | |
3548 | boolean_t | |
3549 | dmu_objset_is_receiving(objset_t *os) | |
3550 | { | |
3551 | return (os->os_dsl_dataset != NULL && | |
3552 | os->os_dsl_dataset->ds_owner == dmu_recv_tag); | |
3553 | } | |
3554 | ||
3555 | #if defined(_KERNEL) | |
3556 | module_param(zfs_send_corrupt_data, int, 0644); | |
3557 | MODULE_PARM_DESC(zfs_send_corrupt_data, "Allow sending corrupt data"); | |
3558 | #endif |