5056 ZFS deadlock on db_mtx and dn_holds
Reviewed by: Will Andrews <willa@spectralogic.com>
Reviewed by: Matt Ahrens <mahrens@delphix.com>
Reviewed by: George Wilson <george.wilson@delphix.com>
Approved by: Dan McDonald <danmcd@omniti.com>
--- old/usr/src/uts/common/fs/zfs/dmu_send.c
+++ new/usr/src/uts/common/fs/zfs/dmu_send.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 24 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
25 25 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
26 26 * Copyright 2014 HybridCluster. All rights reserved.
27 27 */
28 28
29 29 #include <sys/dmu.h>
30 30 #include <sys/dmu_impl.h>
31 31 #include <sys/dmu_tx.h>
32 32 #include <sys/dbuf.h>
33 33 #include <sys/dnode.h>
34 34 #include <sys/zfs_context.h>
35 35 #include <sys/dmu_objset.h>
36 36 #include <sys/dmu_traverse.h>
37 37 #include <sys/dsl_dataset.h>
38 38 #include <sys/dsl_dir.h>
39 39 #include <sys/dsl_prop.h>
40 40 #include <sys/dsl_pool.h>
41 41 #include <sys/dsl_synctask.h>
42 42 #include <sys/zfs_ioctl.h>
43 43 #include <sys/zap.h>
44 44 #include <sys/zio_checksum.h>
45 45 #include <sys/zfs_znode.h>
46 46 #include <zfs_fletcher.h>
47 47 #include <sys/avl.h>
48 48 #include <sys/ddt.h>
49 49 #include <sys/zfs_onexit.h>
50 50 #include <sys/dmu_send.h>
51 51 #include <sys/dsl_destroy.h>
52 52 #include <sys/blkptr.h>
53 53 #include <sys/dsl_bookmark.h>
54 54 #include <sys/zfeature.h>
55 55
56 56 /* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
57 57 int zfs_send_corrupt_data = B_FALSE;
58 58
59 59 static char *dmu_recv_tag = "dmu_recv_tag";
60 60 static const char *recv_clone_name = "%recv";
61 61
62 62 static int
63 63 dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
64 64 {
65 65 dsl_dataset_t *ds = dsp->dsa_os->os_dsl_dataset;
66 66 ssize_t resid; /* have to get resid to get detailed errno */
67 67 ASSERT0(len % 8);
68 68
69 69 fletcher_4_incremental_native(buf, len, &dsp->dsa_zc);
70 70 dsp->dsa_err = vn_rdwr(UIO_WRITE, dsp->dsa_vp,
71 71 (caddr_t)buf, len,
72 72 0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);
73 73
74 74 mutex_enter(&ds->ds_sendstream_lock);
75 75 *dsp->dsa_off += len;
76 76 mutex_exit(&ds->ds_sendstream_lock);
77 77
78 78 return (dsp->dsa_err);
79 79 }
80 80
81 81 static int
82 82 dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
83 83 uint64_t length)
84 84 {
85 85 struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);
86 86
87 87 /*
88 88 * When we receive a free record, dbuf_free_range() assumes
89 89 * that the receiving system doesn't have any dbufs in the range
90 90 * being freed. This is always true because there is a one-record
91 91 * constraint: we only send one WRITE record for any given
92 92 * object+offset. We know that the one-record constraint is
93 93 * true because we always send data in increasing order by
94 94 * object,offset.
95 95 *
96 96 * If the increasing-order constraint ever changes, we should find
97 97 * another way to assert that the one-record constraint is still
98 98 * satisfied.
99 99 */
100 100 ASSERT(object > dsp->dsa_last_data_object ||
101 101 (object == dsp->dsa_last_data_object &&
102 102 offset > dsp->dsa_last_data_offset));
103 103
104 104 /*
105 105 * If we are doing a non-incremental send, then there can't
106 106 * be any data in the dataset we're receiving into. Therefore
107 107 * a free record would simply be a no-op. Save space by not
108 108 * sending it to begin with.
109 109 */
110 110 if (!dsp->dsa_incremental)
111 111 return (0);
112 112
113 113 if (length != -1ULL && offset + length < offset)
114 114 length = -1ULL;
115 115
116 116 /*
117 117 * If there is a pending op, but it's not PENDING_FREE, push it out,
118 118 * since free block aggregation can only be done for blocks of the
119 119 * same type (i.e., DRR_FREE records can only be aggregated with
120 120 * other DRR_FREE records. DRR_FREEOBJECTS records can only be
121 121	 * aggregated with other DRR_FREEOBJECTS records).
122 122 */
123 123 if (dsp->dsa_pending_op != PENDING_NONE &&
124 124 dsp->dsa_pending_op != PENDING_FREE) {
125 125 if (dump_bytes(dsp, dsp->dsa_drr,
126 126 sizeof (dmu_replay_record_t)) != 0)
127 127 return (SET_ERROR(EINTR));
128 128 dsp->dsa_pending_op = PENDING_NONE;
129 129 }
130 130
131 131 if (dsp->dsa_pending_op == PENDING_FREE) {
132 132 /*
133 133 * There should never be a PENDING_FREE if length is -1
134 134 * (because dump_dnode is the only place where this
135 135 * function is called with a -1, and only after flushing
136 136 * any pending record).
137 137 */
138 138 ASSERT(length != -1ULL);
139 139 /*
140 140 * Check to see whether this free block can be aggregated
141 141	 * with the pending one.
142 142 */
143 143 if (drrf->drr_object == object && drrf->drr_offset +
144 144 drrf->drr_length == offset) {
145 145 drrf->drr_length += length;
146 146 return (0);
147 147 } else {
148 148 /* not a continuation. Push out pending record */
149 149 if (dump_bytes(dsp, dsp->dsa_drr,
150 150 sizeof (dmu_replay_record_t)) != 0)
151 151 return (SET_ERROR(EINTR));
152 152 dsp->dsa_pending_op = PENDING_NONE;
153 153 }
154 154 }
155 155 /* create a FREE record and make it pending */
156 156 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
157 157 dsp->dsa_drr->drr_type = DRR_FREE;
158 158 drrf->drr_object = object;
159 159 drrf->drr_offset = offset;
160 160 drrf->drr_length = length;
161 161 drrf->drr_toguid = dsp->dsa_toguid;
162 162 if (length == -1ULL) {
163 163 if (dump_bytes(dsp, dsp->dsa_drr,
164 164 sizeof (dmu_replay_record_t)) != 0)
165 165 return (SET_ERROR(EINTR));
166 166 } else {
167 167 dsp->dsa_pending_op = PENDING_FREE;
168 168 }
169 169
170 170 return (0);
171 171 }
172 172
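The PENDING_FREE path above coalesces physically adjacent frees into a single record. A minimal standalone sketch of that merge rule, using a hypothetical simplified type rather than the kernel's dmu_sendarg_t:

#include <stdint.h>

/* Hypothetical, simplified stand-in for the pending DRR_FREE state. */
typedef struct pending_free {
	uint64_t pf_object;
	uint64_t pf_offset;
	uint64_t pf_length;
} pending_free_t;

/*
 * Mirrors the aggregation test in dump_free(): extend the pending
 * record only when the new free begins exactly where it ends.
 * Returns 1 when merged, 0 when the caller must flush *pf first.
 */
static int
try_merge_free(pending_free_t *pf, uint64_t object,
    uint64_t offset, uint64_t length)
{
	if (pf->pf_object == object &&
	    pf->pf_offset + pf->pf_length == offset) {
		pf->pf_length += length;
		return (1);
	}
	return (0);
}
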
173 173 static int
174 174 dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type,
175 175 uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
176 176 {
177 177 struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);
178 178
179 179 /*
180 180 * We send data in increasing object, offset order.
181 181 * See comment in dump_free() for details.
182 182 */
183 183 ASSERT(object > dsp->dsa_last_data_object ||
184 184 (object == dsp->dsa_last_data_object &&
185 185 offset > dsp->dsa_last_data_offset));
186 186 dsp->dsa_last_data_object = object;
187 187 dsp->dsa_last_data_offset = offset + blksz - 1;
188 188
189 189 /*
190 190 * If there is any kind of pending aggregation (currently either
191 191 * a grouping of free objects or free blocks), push it out to
192 192 * the stream, since aggregation can't be done across operations
193 193 * of different types.
194 194 */
195 195 if (dsp->dsa_pending_op != PENDING_NONE) {
196 196 if (dump_bytes(dsp, dsp->dsa_drr,
197 197 sizeof (dmu_replay_record_t)) != 0)
198 198 return (SET_ERROR(EINTR));
199 199 dsp->dsa_pending_op = PENDING_NONE;
200 200 }
201 201 /* write a DATA record */
202 202 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
203 203 dsp->dsa_drr->drr_type = DRR_WRITE;
204 204 drrw->drr_object = object;
205 205 drrw->drr_type = type;
206 206 drrw->drr_offset = offset;
207 207 drrw->drr_length = blksz;
208 208 drrw->drr_toguid = dsp->dsa_toguid;
209 209 if (bp == NULL || BP_IS_EMBEDDED(bp)) {
210 210 /*
211 211 * There's no pre-computed checksum for partial-block
212 212 * writes or embedded BP's, so (like
213 213	 * fletcher4-checksummed blocks) userland will have to
214 214 * compute a dedup-capable checksum itself.
215 215 */
216 216 drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
217 217 } else {
218 218 drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
219 219 if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
220 220 drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
221 221 DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
222 222 DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
223 223 DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
224 224 drrw->drr_key.ddk_cksum = bp->blk_cksum;
225 225 }
226 226
227 227 if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
228 228 return (SET_ERROR(EINTR));
229 229 if (dump_bytes(dsp, data, blksz) != 0)
230 230 return (SET_ERROR(EINTR));
231 231 return (0);
232 232 }
233 233
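The assertion at the top of dump_write() (and dump_free()) encodes the stream's one-record constraint. As a sketch, the invariant reduces to this predicate over the previously sent (object, offset) pair:

/*
 * Records must be emitted in strictly increasing (object, offset)
 * order; repeating both values would violate the one-record rule.
 */
static int
in_send_order(uint64_t last_obj, uint64_t last_off,
    uint64_t obj, uint64_t off)
{
	return (obj > last_obj || (obj == last_obj && off > last_off));
}
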
234 234 static int
235 235 dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
236 236 int blksz, const blkptr_t *bp)
237 237 {
238 238 char buf[BPE_PAYLOAD_SIZE];
239 239 struct drr_write_embedded *drrw =
240 240 &(dsp->dsa_drr->drr_u.drr_write_embedded);
241 241
242 242 if (dsp->dsa_pending_op != PENDING_NONE) {
243 243 if (dump_bytes(dsp, dsp->dsa_drr,
244 244 sizeof (dmu_replay_record_t)) != 0)
245 245 return (EINTR);
246 246 dsp->dsa_pending_op = PENDING_NONE;
247 247 }
248 248
249 249 ASSERT(BP_IS_EMBEDDED(bp));
250 250
251 251 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
252 252 dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED;
253 253 drrw->drr_object = object;
254 254 drrw->drr_offset = offset;
255 255 drrw->drr_length = blksz;
256 256 drrw->drr_toguid = dsp->dsa_toguid;
257 257 drrw->drr_compression = BP_GET_COMPRESS(bp);
258 258 drrw->drr_etype = BPE_GET_ETYPE(bp);
259 259 drrw->drr_lsize = BPE_GET_LSIZE(bp);
260 260 drrw->drr_psize = BPE_GET_PSIZE(bp);
261 261
262 262 decode_embedded_bp_compressed(bp, buf);
263 263
264 264 if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
265 265 return (EINTR);
266 266 if (dump_bytes(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
267 267 return (EINTR);
268 268 return (0);
269 269 }
270 270
271 271 static int
272 272 dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
273 273 {
274 274 struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);
275 275
276 276 if (dsp->dsa_pending_op != PENDING_NONE) {
277 277 if (dump_bytes(dsp, dsp->dsa_drr,
278 278 sizeof (dmu_replay_record_t)) != 0)
279 279 return (SET_ERROR(EINTR));
280 280 dsp->dsa_pending_op = PENDING_NONE;
281 281 }
282 282
283 283 /* write a SPILL record */
284 284 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
285 285 dsp->dsa_drr->drr_type = DRR_SPILL;
286 286 drrs->drr_object = object;
287 287 drrs->drr_length = blksz;
288 288 drrs->drr_toguid = dsp->dsa_toguid;
289 289
290 290 if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)))
291 291 return (SET_ERROR(EINTR));
292 292 if (dump_bytes(dsp, data, blksz))
293 293 return (SET_ERROR(EINTR));
294 294 return (0);
295 295 }
296 296
297 297 static int
298 298 dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
299 299 {
300 300 struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);
301 301
302 302 /* See comment in dump_free(). */
303 303 if (!dsp->dsa_incremental)
304 304 return (0);
305 305
306 306 /*
307 307 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
308 308 * push it out, since free block aggregation can only be done for
309 309 * blocks of the same type (i.e., DRR_FREE records can only be
310 310 * aggregated with other DRR_FREE records. DRR_FREEOBJECTS records
311 311 * can only be aggregated with other DRR_FREEOBJECTS records.
312 312 */
313 313 if (dsp->dsa_pending_op != PENDING_NONE &&
314 314 dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
315 315 if (dump_bytes(dsp, dsp->dsa_drr,
316 316 sizeof (dmu_replay_record_t)) != 0)
317 317 return (SET_ERROR(EINTR));
318 318 dsp->dsa_pending_op = PENDING_NONE;
319 319 }
320 320 if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
321 321 /*
322 322 * See whether this free object array can be aggregated
323 323	 * with the pending one.
324 324 */
325 325 if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
326 326 drrfo->drr_numobjs += numobjs;
327 327 return (0);
328 328 } else {
329 329 /* can't be aggregated. Push out pending record */
330 330 if (dump_bytes(dsp, dsp->dsa_drr,
331 331 sizeof (dmu_replay_record_t)) != 0)
332 332 return (SET_ERROR(EINTR));
333 333 dsp->dsa_pending_op = PENDING_NONE;
334 334 }
335 335 }
336 336
337 337 /* write a FREEOBJECTS record */
338 338 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
339 339 dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
340 340 drrfo->drr_firstobj = firstobj;
341 341 drrfo->drr_numobjs = numobjs;
342 342 drrfo->drr_toguid = dsp->dsa_toguid;
343 343
344 344 dsp->dsa_pending_op = PENDING_FREEOBJECTS;
345 345
346 346 return (0);
347 347 }
348 348
349 349 static int
350 350 dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
351 351 {
352 352 struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);
353 353
354 354 if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
355 355 return (dump_freeobjects(dsp, object, 1));
356 356
357 357 if (dsp->dsa_pending_op != PENDING_NONE) {
358 358 if (dump_bytes(dsp, dsp->dsa_drr,
359 359 sizeof (dmu_replay_record_t)) != 0)
360 360 return (SET_ERROR(EINTR));
361 361 dsp->dsa_pending_op = PENDING_NONE;
362 362 }
363 363
364 364 /* write an OBJECT record */
365 365 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
366 366 dsp->dsa_drr->drr_type = DRR_OBJECT;
367 367 drro->drr_object = object;
368 368 drro->drr_type = dnp->dn_type;
369 369 drro->drr_bonustype = dnp->dn_bonustype;
370 370 drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
371 371 drro->drr_bonuslen = dnp->dn_bonuslen;
372 372 drro->drr_checksumtype = dnp->dn_checksum;
373 373 drro->drr_compress = dnp->dn_compress;
374 374 drro->drr_toguid = dsp->dsa_toguid;
375 375
376 376 if (!(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
377 377 drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
378 378 drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;
379 379
380 380 if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
381 381 return (SET_ERROR(EINTR));
382 382
383 383 if (dump_bytes(dsp, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0)
384 384 return (SET_ERROR(EINTR));
385 385
386 386 /* Free anything past the end of the file. */
387 387 if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
388 388 (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
389 389 return (SET_ERROR(EINTR));
390 390 if (dsp->dsa_err != 0)
391 391 return (SET_ERROR(EINTR));
392 392 return (0);
393 393 }
394 394
395 395 static boolean_t
396 396 backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp)
397 397 {
398 398 if (!BP_IS_EMBEDDED(bp))
399 399 return (B_FALSE);
400 400
401 401 /*
402 402 * Compression function must be legacy, or explicitly enabled.
403 403 */
404 404 if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
405 405 !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4)))
406 406 return (B_FALSE);
407 407
408 408 /*
409 409 * Embed type must be explicitly enabled.
410 410 */
411 411 switch (BPE_GET_ETYPE(bp)) {
412 412 case BP_EMBEDDED_TYPE_DATA:
413 413 if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
414 414 return (B_TRUE);
415 415 break;
416 416 default:
417 417 return (B_FALSE);
418 418 }
419 419 return (B_FALSE);
420 420 }
421 421
422 422 #define BP_SPAN(dnp, level) \
423 423 (((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
424 424 (level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))
425 425
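A worked example of the span BP_SPAN computes, with illustrative on-disk parameters:

/*
 * Illustrative values: a dnode with 128K data blocks has
 * dn_datablkszsec == 256 (256 x 512-byte sectors); with
 * dn_indblkshift == 14, each 16K indirect block holds
 * 2^(14 - SPA_BLKPTRSHIFT) == 2^7 == 128 block pointers.  Then:
 *
 *	BP_SPAN(dnp, 0) == 256 << 9        == 128K
 *	BP_SPAN(dnp, 1) == 256 << (9 + 7)  == 16M
 *	BP_SPAN(dnp, 2) == 256 << (9 + 14) == 2G
 */
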
426 426 /* ARGSUSED */
427 427 static int
428 428 backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
429 429 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
430 430 {
431 431 dmu_sendarg_t *dsp = arg;
432 432 dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
433 433 int err = 0;
434 434
435 435 if (issig(JUSTLOOKING) && issig(FORREAL))
436 436 return (SET_ERROR(EINTR));
437 437
438 438 if (zb->zb_object != DMU_META_DNODE_OBJECT &&
439 439 DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
440 440 return (0);
441 441 } else if (zb->zb_level == ZB_ZIL_LEVEL) {
442 442 /*
443 443 * If we are sending a non-snapshot (which is allowed on
444 444 * read-only pools), it may have a ZIL, which must be ignored.
445 445 */
446 446 return (0);
447 447 } else if (BP_IS_HOLE(bp) &&
448 448 zb->zb_object == DMU_META_DNODE_OBJECT) {
449 449 uint64_t span = BP_SPAN(dnp, zb->zb_level);
450 450 uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
451 451 err = dump_freeobjects(dsp, dnobj, span >> DNODE_SHIFT);
452 452 } else if (BP_IS_HOLE(bp)) {
453 453 uint64_t span = BP_SPAN(dnp, zb->zb_level);
454 454 err = dump_free(dsp, zb->zb_object, zb->zb_blkid * span, span);
455 455 } else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
456 456 return (0);
457 457 } else if (type == DMU_OT_DNODE) {
458 458 dnode_phys_t *blk;
459 459 int i;
460 460 int blksz = BP_GET_LSIZE(bp);
461 461 arc_flags_t aflags = ARC_FLAG_WAIT;
462 462 arc_buf_t *abuf;
463 463
464 464 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
465 465 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
466 466 &aflags, zb) != 0)
467 467 return (SET_ERROR(EIO));
468 468
469 469 blk = abuf->b_data;
470 470 for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
471 471 uint64_t dnobj = (zb->zb_blkid <<
472 472 (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
473 473 err = dump_dnode(dsp, dnobj, blk+i);
474 474 if (err != 0)
475 475 break;
476 476 }
477 477 (void) arc_buf_remove_ref(abuf, &abuf);
478 478 } else if (type == DMU_OT_SA) {
479 479 arc_flags_t aflags = ARC_FLAG_WAIT;
480 480 arc_buf_t *abuf;
481 481 int blksz = BP_GET_LSIZE(bp);
482 482
483 483 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
484 484 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
485 485 &aflags, zb) != 0)
486 486 return (SET_ERROR(EIO));
487 487
488 488 err = dump_spill(dsp, zb->zb_object, blksz, abuf->b_data);
489 489 (void) arc_buf_remove_ref(abuf, &abuf);
490 490 } else if (backup_do_embed(dsp, bp)) {
491 491 /* it's an embedded level-0 block of a regular object */
492 492 int blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
493 493 err = dump_write_embedded(dsp, zb->zb_object,
494 494 zb->zb_blkid * blksz, blksz, bp);
495 495 } else { /* it's a level-0 block of a regular object */
496 496 arc_flags_t aflags = ARC_FLAG_WAIT;
497 497 arc_buf_t *abuf;
498 498 int blksz = BP_GET_LSIZE(bp);
499 499 uint64_t offset;
500 500
501 501 ASSERT3U(blksz, ==, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
502 502 ASSERT0(zb->zb_level);
503 503 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
504 504 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
505 505 &aflags, zb) != 0) {
506 506 if (zfs_send_corrupt_data) {
507 507 /* Send a block filled with 0x"zfs badd bloc" */
508 508 abuf = arc_buf_alloc(spa, blksz, &abuf,
509 509 ARC_BUFC_DATA);
510 510 uint64_t *ptr;
511 511 for (ptr = abuf->b_data;
512 512 (char *)ptr < (char *)abuf->b_data + blksz;
513 513 ptr++)
514 514 *ptr = 0x2f5baddb10c;
515 515 } else {
516 516 return (SET_ERROR(EIO));
517 517 }
518 518 }
519 519
520 520 offset = zb->zb_blkid * blksz;
521 521
522 522 if (!(dsp->dsa_featureflags &
523 523 DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
524 524 blksz > SPA_OLD_MAXBLOCKSIZE) {
525 525 char *buf = abuf->b_data;
526 526 while (blksz > 0 && err == 0) {
527 527 int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE);
528 528 err = dump_write(dsp, type, zb->zb_object,
529 529 offset, n, NULL, buf);
530 530 offset += n;
531 531 buf += n;
532 532 blksz -= n;
533 533 }
534 534 } else {
535 535 err = dump_write(dsp, type, zb->zb_object,
536 536 offset, blksz, bp, abuf->b_data);
537 537 }
538 538 (void) arc_buf_remove_ref(abuf, &abuf);
539 539 }
540 540
541 541 ASSERT(err == 0 || err == EINTR);
542 542 return (err);
543 543 }
544 544
545 545 /*
546 546 * Releases dp using the specified tag.
547 547 */
548 548 static int
549 549 dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *ds,
550 550 zfs_bookmark_phys_t *fromzb, boolean_t is_clone, boolean_t embedok,
551 551 boolean_t large_block_ok, int outfd, vnode_t *vp, offset_t *off)
552 552 {
553 553 objset_t *os;
554 554 dmu_replay_record_t *drr;
555 555 dmu_sendarg_t *dsp;
556 556 int err;
557 557 uint64_t fromtxg = 0;
558 558 uint64_t featureflags = 0;
559 559
560 560 err = dmu_objset_from_ds(ds, &os);
561 561 if (err != 0) {
562 562 dsl_pool_rele(dp, tag);
563 563 return (err);
564 564 }
565 565
566 566 drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
567 567 drr->drr_type = DRR_BEGIN;
568 568 drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
569 569 DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
570 570 DMU_SUBSTREAM);
571 571
572 572 #ifdef _KERNEL
573 573 if (dmu_objset_type(os) == DMU_OST_ZFS) {
574 574 uint64_t version;
575 575 if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
576 576 kmem_free(drr, sizeof (dmu_replay_record_t));
577 577 dsl_pool_rele(dp, tag);
578 578 return (SET_ERROR(EINVAL));
579 579 }
580 580 if (version >= ZPL_VERSION_SA) {
581 581 featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
582 582 }
583 583 }
584 584 #endif
585 585
586 586 if (large_block_ok && ds->ds_large_blocks)
587 587 featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
588 588 if (embedok &&
589 589 spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
590 590 featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
591 591 if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
592 592 featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA_LZ4;
593 593 } else {
594 594 embedok = B_FALSE;
595 595 }
596 596
597 597 DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo,
598 598 featureflags);
599 599
600 600 drr->drr_u.drr_begin.drr_creation_time =
601 601 dsl_dataset_phys(ds)->ds_creation_time;
602 602 drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
603 603 if (is_clone)
604 604 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
605 605 drr->drr_u.drr_begin.drr_toguid = dsl_dataset_phys(ds)->ds_guid;
606 606 if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
607 607 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
608 608
609 609 if (fromzb != NULL) {
610 610 drr->drr_u.drr_begin.drr_fromguid = fromzb->zbm_guid;
611 611 fromtxg = fromzb->zbm_creation_txg;
612 612 }
613 613 dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);
614 - if (!dsl_dataset_is_snapshot(ds)) {
614 + if (!ds->ds_is_snapshot) {
615 615 (void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--",
616 616 sizeof (drr->drr_u.drr_begin.drr_toname));
617 617 }
618 618
619 619 dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);
620 620
621 621 dsp->dsa_drr = drr;
622 622 dsp->dsa_vp = vp;
623 623 dsp->dsa_outfd = outfd;
624 624 dsp->dsa_proc = curproc;
625 625 dsp->dsa_os = os;
626 626 dsp->dsa_off = off;
627 627 dsp->dsa_toguid = dsl_dataset_phys(ds)->ds_guid;
628 628 ZIO_SET_CHECKSUM(&dsp->dsa_zc, 0, 0, 0, 0);
629 629 dsp->dsa_pending_op = PENDING_NONE;
630 630 dsp->dsa_incremental = (fromzb != NULL);
631 631 dsp->dsa_featureflags = featureflags;
632 632
633 633 mutex_enter(&ds->ds_sendstream_lock);
634 634 list_insert_head(&ds->ds_sendstreams, dsp);
635 635 mutex_exit(&ds->ds_sendstream_lock);
636 636
637 637 dsl_dataset_long_hold(ds, FTAG);
638 638 dsl_pool_rele(dp, tag);
639 639
640 640 if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
641 641 err = dsp->dsa_err;
642 642 goto out;
643 643 }
644 644
645 645 err = traverse_dataset(ds, fromtxg, TRAVERSE_PRE | TRAVERSE_PREFETCH,
646 646 backup_cb, dsp);
647 647
648 648 if (dsp->dsa_pending_op != PENDING_NONE)
649 649 if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0)
650 650 err = SET_ERROR(EINTR);
651 651
652 652 if (err != 0) {
653 653 if (err == EINTR && dsp->dsa_err != 0)
654 654 err = dsp->dsa_err;
655 655 goto out;
656 656 }
657 657
658 658 bzero(drr, sizeof (dmu_replay_record_t));
659 659 drr->drr_type = DRR_END;
660 660 drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
661 661 drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;
662 662
663 663 if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
664 664 err = dsp->dsa_err;
665 665 goto out;
666 666 }
667 667
668 668 out:
669 669 mutex_enter(&ds->ds_sendstream_lock);
670 670 list_remove(&ds->ds_sendstreams, dsp);
671 671 mutex_exit(&ds->ds_sendstream_lock);
672 672
673 673 kmem_free(drr, sizeof (dmu_replay_record_t));
674 674 kmem_free(dsp, sizeof (dmu_sendarg_t));
675 675
676 676 dsl_dataset_long_rele(ds, FTAG);
677 677
678 678 return (err);
679 679 }
680 680
681 681 int
682 682 dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
683 683 boolean_t embedok, boolean_t large_block_ok,
684 684 int outfd, vnode_t *vp, offset_t *off)
685 685 {
686 686 dsl_pool_t *dp;
687 687 dsl_dataset_t *ds;
688 688 dsl_dataset_t *fromds = NULL;
689 689 int err;
690 690
691 691 err = dsl_pool_hold(pool, FTAG, &dp);
692 692 if (err != 0)
693 693 return (err);
694 694
695 695 err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
696 696 if (err != 0) {
697 697 dsl_pool_rele(dp, FTAG);
698 698 return (err);
699 699 }
700 700
701 701 if (fromsnap != 0) {
702 702 zfs_bookmark_phys_t zb;
703 703 boolean_t is_clone;
704 704
705 705 err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
706 706 if (err != 0) {
707 707 dsl_dataset_rele(ds, FTAG);
708 708 dsl_pool_rele(dp, FTAG);
709 709 return (err);
710 710 }
711 711 if (!dsl_dataset_is_before(ds, fromds, 0))
712 712 err = SET_ERROR(EXDEV);
713 713 zb.zbm_creation_time =
714 714 dsl_dataset_phys(fromds)->ds_creation_time;
715 715 zb.zbm_creation_txg = dsl_dataset_phys(fromds)->ds_creation_txg;
716 716 zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
717 717 is_clone = (fromds->ds_dir != ds->ds_dir);
718 718 dsl_dataset_rele(fromds, FTAG);
719 719 err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
720 720 embedok, large_block_ok, outfd, vp, off);
721 721 } else {
722 722 err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
723 723 embedok, large_block_ok, outfd, vp, off);
724 724 }
725 725 dsl_dataset_rele(ds, FTAG);
726 726 return (err);
727 727 }
728 728
729 729 int
730 730 dmu_send(const char *tosnap, const char *fromsnap,
731 731 boolean_t embedok, boolean_t large_block_ok,
732 732 int outfd, vnode_t *vp, offset_t *off)
733 733 {
734 734 dsl_pool_t *dp;
735 735 dsl_dataset_t *ds;
736 736 int err;
737 737 boolean_t owned = B_FALSE;
738 738
739 739 if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
740 740 return (SET_ERROR(EINVAL));
741 741
742 742 err = dsl_pool_hold(tosnap, FTAG, &dp);
743 743 if (err != 0)
744 744 return (err);
745 745
746 746 if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) {
747 747 /*
748 748 * We are sending a filesystem or volume. Ensure
749 749 * that it doesn't change by owning the dataset.
750 750 */
751 751 err = dsl_dataset_own(dp, tosnap, FTAG, &ds);
752 752 owned = B_TRUE;
753 753 } else {
754 754 err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
755 755 }
756 756 if (err != 0) {
757 757 dsl_pool_rele(dp, FTAG);
758 758 return (err);
759 759 }
760 760
761 761 if (fromsnap != NULL) {
762 762 zfs_bookmark_phys_t zb;
763 763 boolean_t is_clone = B_FALSE;
764 764 int fsnamelen = strchr(tosnap, '@') - tosnap;
765 765
766 766 /*
767 767 * If the fromsnap is in a different filesystem, then
768 768 * mark the send stream as a clone.
769 769 */
770 770 if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
771 771 (fromsnap[fsnamelen] != '@' &&
772 772 fromsnap[fsnamelen] != '#')) {
773 773 is_clone = B_TRUE;
774 774 }
775 775
776 776 if (strchr(fromsnap, '@')) {
777 777 dsl_dataset_t *fromds;
778 778 err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
779 779 if (err == 0) {
780 780 if (!dsl_dataset_is_before(ds, fromds, 0))
781 781 err = SET_ERROR(EXDEV);
782 782 zb.zbm_creation_time =
783 783 dsl_dataset_phys(fromds)->ds_creation_time;
784 784 zb.zbm_creation_txg =
785 785 dsl_dataset_phys(fromds)->ds_creation_txg;
786 786 zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
787 787 is_clone = (ds->ds_dir != fromds->ds_dir);
788 788 dsl_dataset_rele(fromds, FTAG);
789 789 }
790 790 } else {
791 791 err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb);
792 792 }
793 793 if (err != 0) {
794 794 dsl_dataset_rele(ds, FTAG);
795 795 dsl_pool_rele(dp, FTAG);
796 796 return (err);
797 797 }
798 798 err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
799 799 embedok, large_block_ok, outfd, vp, off);
800 800 } else {
801 801 err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
802 802 embedok, large_block_ok, outfd, vp, off);
803 803 }
804 804 if (owned)
805 805 dsl_dataset_disown(ds, FTAG);
806 806 else
807 807 dsl_dataset_rele(ds, FTAG);
808 808 return (err);
809 809 }
810 810
811 811 int
812 812 dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep)
813 813 {
814 814 dsl_pool_t *dp = ds->ds_dir->dd_pool;
815 815 int err;
816 816 uint64_t size;
817 817
818 818 ASSERT(dsl_pool_config_held(dp));
819 819
820 820 /* tosnap must be a snapshot */
821 - if (!dsl_dataset_is_snapshot(ds))
821 + if (!ds->ds_is_snapshot)
822 822 return (SET_ERROR(EINVAL));
823 823
824 824 /*
825 825 * fromsnap must be an earlier snapshot from the same fs as tosnap,
826 826 * or the origin's fs.
827 827 */
828 828 if (fromds != NULL && !dsl_dataset_is_before(ds, fromds, 0))
829 829 return (SET_ERROR(EXDEV));
830 830
831 831 /* Get uncompressed size estimate of changed data. */
832 832 if (fromds == NULL) {
833 833 size = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
834 834 } else {
835 835 uint64_t used, comp;
836 836 err = dsl_dataset_space_written(fromds, ds,
837 837 &used, &comp, &size);
838 838 if (err != 0)
839 839 return (err);
840 840 }
841 841
842 842 /*
843 843 * Assume that space (both on-disk and in-stream) is dominated by
844 844 * data. We will adjust for indirect blocks and the copies property,
845 845 * but ignore per-object space used (eg, dnodes and DRR_OBJECT records).
846 846 */
847 847
848 848 /*
849 849 * Subtract out approximate space used by indirect blocks.
850 850 * Assume most space is used by data blocks (non-indirect, non-dnode).
851 851 * Assume all blocks are recordsize. Assume ditto blocks and
852 852	 * internal fragmentation cancel out compression.
853 853 *
854 854 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
855 855 * block, which we observe in practice.
856 856 */
857 857 uint64_t recordsize;
858 858 err = dsl_prop_get_int_ds(ds, "recordsize", &recordsize);
859 859 if (err != 0)
860 860 return (err);
861 861 size -= size / recordsize * sizeof (blkptr_t);
862 862
863 863 /* Add in the space for the record associated with each block. */
864 864 size += size / recordsize * sizeof (dmu_replay_record_t);
865 865
866 866 *sizep = size;
867 867
868 868 return (0);
869 869 }
870 870
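To make the two adjustments concrete, a rough worked example (the struct sizes here are illustrative and build-dependent):

/*
 * 1 GiB of changed data at recordsize == 128K is 8192 blocks.
 * Taking sizeof (blkptr_t) == 128 and, illustratively,
 * sizeof (dmu_replay_record_t) == 312:
 *
 *	size -= 8192 * 128;	subtracts ~1 MiB of indirect overhead
 *	size += 8192 * 312;	adds ~2.4 MiB of per-block DRR headers
 *
 * so the estimate stays within a fraction of a percent of the
 * uncompressed data size for large-recordsize datasets.
 */
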
871 871 typedef struct dmu_recv_begin_arg {
872 872 const char *drba_origin;
873 873 dmu_recv_cookie_t *drba_cookie;
874 874 cred_t *drba_cred;
875 875 uint64_t drba_snapobj;
876 876 } dmu_recv_begin_arg_t;
877 877
878 878 static int
879 879 recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
880 880 uint64_t fromguid)
881 881 {
882 882 uint64_t val;
883 883 int error;
884 884 dsl_pool_t *dp = ds->ds_dir->dd_pool;
885 885
886 886 /* temporary clone name must not exist */
887 887 error = zap_lookup(dp->dp_meta_objset,
888 888 dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
889 889 8, 1, &val);
890 890 if (error != ENOENT)
891 891 return (error == 0 ? EBUSY : error);
892 892
893 893 /* new snapshot name must not exist */
894 894 error = zap_lookup(dp->dp_meta_objset,
895 895 dsl_dataset_phys(ds)->ds_snapnames_zapobj,
896 896 drba->drba_cookie->drc_tosnap, 8, 1, &val);
897 897 if (error != ENOENT)
898 898 return (error == 0 ? EEXIST : error);
899 899
900 900 /*
901 901 * Check snapshot limit before receiving. We'll recheck again at the
902 902 * end, but might as well abort before receiving if we're already over
903 903 * the limit.
904 904 *
905 905 * Note that we do not check the file system limit with
906 906 * dsl_dir_fscount_check because the temporary %clones don't count
907 907 * against that limit.
908 908 */
909 909 error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
910 910 NULL, drba->drba_cred);
911 911 if (error != 0)
912 912 return (error);
913 913
914 914 if (fromguid != 0) {
915 915 dsl_dataset_t *snap;
916 916 uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
917 917
918 918 /* Find snapshot in this dir that matches fromguid. */
919 919 while (obj != 0) {
920 920 error = dsl_dataset_hold_obj(dp, obj, FTAG,
921 921 &snap);
922 922 if (error != 0)
923 923 return (SET_ERROR(ENODEV));
924 924 if (snap->ds_dir != ds->ds_dir) {
925 925 dsl_dataset_rele(snap, FTAG);
926 926 return (SET_ERROR(ENODEV));
927 927 }
928 928 if (dsl_dataset_phys(snap)->ds_guid == fromguid)
929 929 break;
930 930 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
931 931 dsl_dataset_rele(snap, FTAG);
932 932 }
933 933 if (obj == 0)
934 934 return (SET_ERROR(ENODEV));
935 935
936 936 if (drba->drba_cookie->drc_force) {
937 937 drba->drba_snapobj = obj;
938 938 } else {
939 939 /*
940 940 * If we are not forcing, there must be no
941 941 * changes since fromsnap.
942 942 */
943 943 if (dsl_dataset_modified_since_snap(ds, snap)) {
944 944 dsl_dataset_rele(snap, FTAG);
945 945 return (SET_ERROR(ETXTBSY));
946 946 }
947 947 drba->drba_snapobj = ds->ds_prev->ds_object;
948 948 }
949 949
950 950 dsl_dataset_rele(snap, FTAG);
951 951 } else {
952 952 /* if full, most recent snapshot must be $ORIGIN */
953 953 if (dsl_dataset_phys(ds)->ds_prev_snap_txg >= TXG_INITIAL)
954 954 return (SET_ERROR(ENODEV));
955 955 drba->drba_snapobj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
956 956 }
957 957
958 958 return (0);
959 959
960 960 }
961 961
962 962 static int
963 963 dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
964 964 {
965 965 dmu_recv_begin_arg_t *drba = arg;
966 966 dsl_pool_t *dp = dmu_tx_pool(tx);
967 967 struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
968 968 uint64_t fromguid = drrb->drr_fromguid;
969 969 int flags = drrb->drr_flags;
970 970 int error;
971 971 uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
972 972 dsl_dataset_t *ds;
973 973 const char *tofs = drba->drba_cookie->drc_tofs;
974 974
975 975 /* already checked */
976 976 ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
977 977
978 978 if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
979 979 DMU_COMPOUNDSTREAM ||
980 980 drrb->drr_type >= DMU_OST_NUMTYPES ||
981 981 ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
982 982 return (SET_ERROR(EINVAL));
983 983
984 984 /* Verify pool version supports SA if SA_SPILL feature set */
985 985 if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
986 986 spa_version(dp->dp_spa) < SPA_VERSION_SA)
987 987 return (SET_ERROR(ENOTSUP));
988 988
989 989 /*
990 990 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
991 991	 * record to a plain WRITE record, so the pool must have the
992 992 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
993 993 * records. Same with WRITE_EMBEDDED records that use LZ4 compression.
994 994 */
995 995 if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
996 996 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
997 997 return (SET_ERROR(ENOTSUP));
998 998 if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4) &&
999 999 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
1000 1000 return (SET_ERROR(ENOTSUP));
1001 1001
1002 1002 /*
1003 1003 * The receiving code doesn't know how to translate large blocks
1004 1004 * to smaller ones, so the pool must have the LARGE_BLOCKS
1005 1005 * feature enabled if the stream has LARGE_BLOCKS.
1006 1006 */
1007 1007 if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
1008 1008 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
1009 1009 return (SET_ERROR(ENOTSUP));
1010 1010
1011 1011 error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
1012 1012 if (error == 0) {
1013 1013 /* target fs already exists; recv into temp clone */
1014 1014
1015 1015 /* Can't recv a clone into an existing fs */
1016 1016 if (flags & DRR_FLAG_CLONE) {
1017 1017 dsl_dataset_rele(ds, FTAG);
1018 1018 return (SET_ERROR(EINVAL));
1019 1019 }
1020 1020
1021 1021 error = recv_begin_check_existing_impl(drba, ds, fromguid);
1022 1022 dsl_dataset_rele(ds, FTAG);
1023 1023 } else if (error == ENOENT) {
1024 1024 /* target fs does not exist; must be a full backup or clone */
1025 1025 char buf[MAXNAMELEN];
1026 1026
1027 1027 /*
1028 1028 * If it's a non-clone incremental, we are missing the
1029 1029 * target fs, so fail the recv.
1030 1030 */
1031 1031 if (fromguid != 0 && !(flags & DRR_FLAG_CLONE))
1032 1032 return (SET_ERROR(ENOENT));
1033 1033
1034 1034 /* Open the parent of tofs */
1035 1035 ASSERT3U(strlen(tofs), <, MAXNAMELEN);
1036 1036 (void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
1037 1037 error = dsl_dataset_hold(dp, buf, FTAG, &ds);
1038 1038 if (error != 0)
1039 1039 return (error);
1040 1040
1041 1041 /*
1042 1042 * Check filesystem and snapshot limits before receiving. We'll
1043 1043 * recheck snapshot limits again at the end (we create the
1044 1044 * filesystems and increment those counts during begin_sync).
1045 1045 */
1046 1046 error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
1047 1047 ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
1048 1048 if (error != 0) {
1049 1049 dsl_dataset_rele(ds, FTAG);
1050 1050 return (error);
1051 1051 }
1052 1052
1053 1053 error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
1054 1054 ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
1055 1055 if (error != 0) {
1056 1056 dsl_dataset_rele(ds, FTAG);
1057 1057 return (error);
1058 1058 }
1059 1059
1060 1060 if (drba->drba_origin != NULL) {
1061 1061 dsl_dataset_t *origin;
1062 1062 error = dsl_dataset_hold(dp, drba->drba_origin,
1063 1063 FTAG, &origin);
1064 1064 if (error != 0) {
1065 1065 dsl_dataset_rele(ds, FTAG);
1066 1066 return (error);
1067 1067 }
1068 - if (!dsl_dataset_is_snapshot(origin)) {
1068 + if (!origin->ds_is_snapshot) {
1069 1069 dsl_dataset_rele(origin, FTAG);
1070 1070 dsl_dataset_rele(ds, FTAG);
1071 1071 return (SET_ERROR(EINVAL));
1072 1072 }
1073 1073 if (dsl_dataset_phys(origin)->ds_guid != fromguid) {
1074 1074 dsl_dataset_rele(origin, FTAG);
1075 1075 dsl_dataset_rele(ds, FTAG);
1076 1076 return (SET_ERROR(ENODEV));
1077 1077 }
1078 1078 dsl_dataset_rele(origin, FTAG);
1079 1079 }
1080 1080 dsl_dataset_rele(ds, FTAG);
1081 1081 error = 0;
1082 1082 }
1083 1083 return (error);
1084 1084 }
1085 1085
1086 1086 static void
1087 1087 dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
1088 1088 {
1089 1089 dmu_recv_begin_arg_t *drba = arg;
1090 1090 dsl_pool_t *dp = dmu_tx_pool(tx);
1091 1091 struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
1092 1092 const char *tofs = drba->drba_cookie->drc_tofs;
1093 1093 dsl_dataset_t *ds, *newds;
1094 1094 uint64_t dsobj;
1095 1095 int error;
1096 1096 uint64_t crflags;
1097 1097
1098 1098 crflags = (drrb->drr_flags & DRR_FLAG_CI_DATA) ?
1099 1099 DS_FLAG_CI_DATASET : 0;
1100 1100
1101 1101 error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
1102 1102 if (error == 0) {
1103 1103 /* create temporary clone */
1104 1104 dsl_dataset_t *snap = NULL;
1105 1105 if (drba->drba_snapobj != 0) {
1106 1106 VERIFY0(dsl_dataset_hold_obj(dp,
1107 1107 drba->drba_snapobj, FTAG, &snap));
1108 1108 }
1109 1109 dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
1110 1110 snap, crflags, drba->drba_cred, tx);
1111 1111 dsl_dataset_rele(snap, FTAG);
1112 1112 dsl_dataset_rele(ds, FTAG);
1113 1113 } else {
1114 1114 dsl_dir_t *dd;
1115 1115 const char *tail;
1116 1116 dsl_dataset_t *origin = NULL;
1117 1117
1118 1118 VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));
1119 1119
1120 1120 if (drba->drba_origin != NULL) {
1121 1121 VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
1122 1122 FTAG, &origin));
1123 1123 }
1124 1124
1125 1125 /* Create new dataset. */
1126 1126 dsobj = dsl_dataset_create_sync(dd,
1127 1127 strrchr(tofs, '/') + 1,
1128 1128 origin, crflags, drba->drba_cred, tx);
1129 1129 if (origin != NULL)
1130 1130 dsl_dataset_rele(origin, FTAG);
1131 1131 dsl_dir_rele(dd, FTAG);
1132 1132 drba->drba_cookie->drc_newfs = B_TRUE;
1133 1133 }
1134 1134 VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));
1135 1135
1136 1136 if ((DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
1137 1137 DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
1138 1138 !newds->ds_large_blocks) {
1139 1139 dsl_dataset_activate_large_blocks_sync_impl(dsobj, tx);
1140 1140 newds->ds_large_blocks = B_TRUE;
1141 1141 }
1142 1142
1143 1143 dmu_buf_will_dirty(newds->ds_dbuf, tx);
1144 1144 dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;
1145 1145
1146 1146 /*
1147 1147 * If we actually created a non-clone, we need to create the
1148 1148 * objset in our new dataset.
1149 1149 */
1150 1150 if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
1151 1151 (void) dmu_objset_create_impl(dp->dp_spa,
1152 1152 newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
1153 1153 }
1154 1154
1155 1155 drba->drba_cookie->drc_ds = newds;
1156 1156
1157 1157 spa_history_log_internal_ds(newds, "receive", tx, "");
1158 1158 }
1159 1159
1160 1160 /*
1161 1161 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
1162 1162 * succeeds; otherwise we will leak the holds on the datasets.
1163 1163 */
1164 1164 int
1165 1165 dmu_recv_begin(char *tofs, char *tosnap, struct drr_begin *drrb,
1166 1166 boolean_t force, char *origin, dmu_recv_cookie_t *drc)
1167 1167 {
1168 1168 dmu_recv_begin_arg_t drba = { 0 };
1169 1169 dmu_replay_record_t *drr;
1170 1170
1171 1171 bzero(drc, sizeof (dmu_recv_cookie_t));
1172 1172 drc->drc_drrb = drrb;
1173 1173 drc->drc_tosnap = tosnap;
1174 1174 drc->drc_tofs = tofs;
1175 1175 drc->drc_force = force;
1176 1176 drc->drc_cred = CRED();
1177 1177
1178 1178 if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
1179 1179 drc->drc_byteswap = B_TRUE;
1180 1180 else if (drrb->drr_magic != DMU_BACKUP_MAGIC)
1181 1181 return (SET_ERROR(EINVAL));
1182 1182
1183 1183 drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
1184 1184 drr->drr_type = DRR_BEGIN;
1185 1185 drr->drr_u.drr_begin = *drc->drc_drrb;
1186 1186 if (drc->drc_byteswap) {
1187 1187 fletcher_4_incremental_byteswap(drr,
1188 1188 sizeof (dmu_replay_record_t), &drc->drc_cksum);
1189 1189 } else {
1190 1190 fletcher_4_incremental_native(drr,
1191 1191 sizeof (dmu_replay_record_t), &drc->drc_cksum);
1192 1192 }
1193 1193 kmem_free(drr, sizeof (dmu_replay_record_t));
1194 1194
1195 1195 if (drc->drc_byteswap) {
1196 1196 drrb->drr_magic = BSWAP_64(drrb->drr_magic);
1197 1197 drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
1198 1198 drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
1199 1199 drrb->drr_type = BSWAP_32(drrb->drr_type);
1200 1200 drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
1201 1201 drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
1202 1202 }
1203 1203
1204 1204 drba.drba_origin = origin;
1205 1205 drba.drba_cookie = drc;
1206 1206 drba.drba_cred = CRED();
1207 1207
1208 1208 return (dsl_sync_task(tofs, dmu_recv_begin_check, dmu_recv_begin_sync,
1209 1209 &drba, 5, ZFS_SPACE_CHECK_NORMAL));
1210 1210 }
1211 1211
1212 1212 struct restorearg {
1213 1213 int err;
1214 1214 boolean_t byteswap;
1215 1215 vnode_t *vp;
1216 1216 char *buf;
1217 1217 uint64_t voff;
1218 1218 int bufsize; /* amount of memory allocated for buf */
1219 1219 zio_cksum_t cksum;
1220 1220 avl_tree_t *guid_to_ds_map;
1221 1221 };
1222 1222
1223 1223 typedef struct guid_map_entry {
1224 1224 uint64_t guid;
1225 1225 dsl_dataset_t *gme_ds;
1226 1226 avl_node_t avlnode;
1227 1227 } guid_map_entry_t;
1228 1228
1229 1229 static int
1230 1230 guid_compare(const void *arg1, const void *arg2)
1231 1231 {
1232 1232 const guid_map_entry_t *gmep1 = arg1;
1233 1233 const guid_map_entry_t *gmep2 = arg2;
1234 1234
1235 1235 if (gmep1->guid < gmep2->guid)
1236 1236 return (-1);
1237 1237 else if (gmep1->guid > gmep2->guid)
1238 1238 return (1);
1239 1239 return (0);
1240 1240 }
1241 1241
1242 1242 static void
1243 1243 free_guid_map_onexit(void *arg)
1244 1244 {
1245 1245 avl_tree_t *ca = arg;
1246 1246 void *cookie = NULL;
1247 1247 guid_map_entry_t *gmep;
1248 1248
1249 1249 while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
1250 1250 dsl_dataset_long_rele(gmep->gme_ds, gmep);
1251 1251 dsl_dataset_rele(gmep->gme_ds, gmep);
1252 1252 kmem_free(gmep, sizeof (guid_map_entry_t));
1253 1253 }
1254 1254 avl_destroy(ca);
1255 1255 kmem_free(ca, sizeof (avl_tree_t));
1256 1256 }
1257 1257
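For context, a hedged sketch (assumed, not part of this file) of how entries would enter the map that free_guid_map_onexit() tears down, assuming the tree was created with avl_create() over guid_compare() and the avlnode offset:

static void
guid_map_add(avl_tree_t *map, uint64_t guid, dsl_dataset_t *ds)
{
	guid_map_entry_t *gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);

	gmep->guid = guid;
	gmep->gme_ds = ds;
	avl_add(map, gmep);	/* ordered by guid via guid_compare() */
}
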
1258 1258 static void *
1259 1259 restore_read(struct restorearg *ra, int len, char *buf)
1260 1260 {
1261 1261 int done = 0;
1262 1262
1263 1263 if (buf == NULL)
1264 1264 buf = ra->buf;
1265 1265
1266 1266	/* some things will require 8-byte alignment, so everything must be */
1267 1267 ASSERT0(len % 8);
1268 1268 ASSERT3U(len, <=, ra->bufsize);
1269 1269
1270 1270 while (done < len) {
1271 1271 ssize_t resid;
1272 1272
1273 1273 ra->err = vn_rdwr(UIO_READ, ra->vp,
1274 1274 buf + done, len - done,
1275 1275 ra->voff, UIO_SYSSPACE, FAPPEND,
1276 1276 RLIM64_INFINITY, CRED(), &resid);
1277 1277
1278 1278 if (resid == len - done)
1279 1279 ra->err = SET_ERROR(EINVAL);
1280 1280 ra->voff += len - done - resid;
1281 1281 done = len - resid;
1282 1282 if (ra->err != 0)
1283 1283 return (NULL);
1284 1284 }
1285 1285
1286 1286 ASSERT3U(done, ==, len);
1287 1287 if (ra->byteswap)
1288 1288 fletcher_4_incremental_byteswap(buf, len, &ra->cksum);
1289 1289 else
1290 1290 fletcher_4_incremental_native(buf, len, &ra->cksum);
1291 1291 return (buf);
1292 1292 }
1293 1293
1294 1294 static void
1295 1295 backup_byteswap(dmu_replay_record_t *drr)
1296 1296 {
1297 1297 #define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
1298 1298 #define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
1299 1299 drr->drr_type = BSWAP_32(drr->drr_type);
1300 1300 drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
1301 1301 switch (drr->drr_type) {
1302 1302 case DRR_BEGIN:
1303 1303 DO64(drr_begin.drr_magic);
1304 1304 DO64(drr_begin.drr_versioninfo);
1305 1305 DO64(drr_begin.drr_creation_time);
1306 1306 DO32(drr_begin.drr_type);
1307 1307 DO32(drr_begin.drr_flags);
1308 1308 DO64(drr_begin.drr_toguid);
1309 1309 DO64(drr_begin.drr_fromguid);
1310 1310 break;
1311 1311 case DRR_OBJECT:
1312 1312 DO64(drr_object.drr_object);
1313 1313 DO32(drr_object.drr_type);
1314 1314 DO32(drr_object.drr_bonustype);
1315 1315 DO32(drr_object.drr_blksz);
1316 1316 DO32(drr_object.drr_bonuslen);
1317 1317 DO64(drr_object.drr_toguid);
1318 1318 break;
1319 1319 case DRR_FREEOBJECTS:
1320 1320 DO64(drr_freeobjects.drr_firstobj);
1321 1321 DO64(drr_freeobjects.drr_numobjs);
1322 1322 DO64(drr_freeobjects.drr_toguid);
1323 1323 break;
1324 1324 case DRR_WRITE:
1325 1325 DO64(drr_write.drr_object);
1326 1326 DO32(drr_write.drr_type);
1327 1327 DO64(drr_write.drr_offset);
1328 1328 DO64(drr_write.drr_length);
1329 1329 DO64(drr_write.drr_toguid);
1330 1330 DO64(drr_write.drr_key.ddk_cksum.zc_word[0]);
1331 1331 DO64(drr_write.drr_key.ddk_cksum.zc_word[1]);
1332 1332 DO64(drr_write.drr_key.ddk_cksum.zc_word[2]);
1333 1333 DO64(drr_write.drr_key.ddk_cksum.zc_word[3]);
1334 1334 DO64(drr_write.drr_key.ddk_prop);
1335 1335 break;
1336 1336 case DRR_WRITE_BYREF:
1337 1337 DO64(drr_write_byref.drr_object);
1338 1338 DO64(drr_write_byref.drr_offset);
1339 1339 DO64(drr_write_byref.drr_length);
1340 1340 DO64(drr_write_byref.drr_toguid);
1341 1341 DO64(drr_write_byref.drr_refguid);
1342 1342 DO64(drr_write_byref.drr_refobject);
1343 1343 DO64(drr_write_byref.drr_refoffset);
1344 1344 DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[0]);
1345 1345 DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[1]);
1346 1346 DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[2]);
1347 1347 DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[3]);
1348 1348 DO64(drr_write_byref.drr_key.ddk_prop);
1349 1349 break;
1350 1350 case DRR_WRITE_EMBEDDED:
1351 1351 DO64(drr_write_embedded.drr_object);
1352 1352 DO64(drr_write_embedded.drr_offset);
1353 1353 DO64(drr_write_embedded.drr_length);
1354 1354 DO64(drr_write_embedded.drr_toguid);
1355 1355 DO32(drr_write_embedded.drr_lsize);
1356 1356 DO32(drr_write_embedded.drr_psize);
1357 1357 break;
1358 1358 case DRR_FREE:
1359 1359 DO64(drr_free.drr_object);
1360 1360 DO64(drr_free.drr_offset);
1361 1361 DO64(drr_free.drr_length);
1362 1362 DO64(drr_free.drr_toguid);
1363 1363 break;
1364 1364 case DRR_SPILL:
1365 1365 DO64(drr_spill.drr_object);
1366 1366 DO64(drr_spill.drr_length);
1367 1367 DO64(drr_spill.drr_toguid);
1368 1368 break;
1369 1369 case DRR_END:
1370 1370 DO64(drr_end.drr_checksum.zc_word[0]);
1371 1371 DO64(drr_end.drr_checksum.zc_word[1]);
1372 1372 DO64(drr_end.drr_checksum.zc_word[2]);
1373 1373 DO64(drr_end.drr_checksum.zc_word[3]);
1374 1374 DO64(drr_end.drr_toguid);
1375 1375 break;
1376 1376 }
1377 1377 #undef DO64
1378 1378 #undef DO32
1379 1379 }
1380 1380
1381 1381 static inline uint8_t
1382 1382 deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
1383 1383 {
1384 1384 if (bonus_type == DMU_OT_SA) {
1385 1385 return (1);
1386 1386 } else {
1387 1387 return (1 +
1388 1388 ((DN_MAX_BONUSLEN - bonus_size) >> SPA_BLKPTRSHIFT));
1389 1389 }
1390 1390 }
1391 1391
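A quick worked example of deduce_nblkptr(), assuming the classic dnode layout:

/*
 * With DN_MAX_BONUSLEN == 320 and SPA_BLKPTRSHIFT == 7 (128-byte
 * block pointers): a full 320-byte bonus leaves 1 + (0 >> 7) == 1
 * blkptr, while a 64-byte bonus leaves 1 + (256 >> 7) == 3.  A
 * DMU_OT_SA bonus always yields 1, since overflow can spill out of
 * the dnode into a spill block.
 */
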
1392 1392 static int
1393 1393 restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
1394 1394 {
1395 1395 dmu_object_info_t doi;
1396 1396 dmu_tx_t *tx;
1397 1397 void *data = NULL;
1398 1398 uint64_t object;
1399 1399 int err;
1400 1400
1401 1401 if (drro->drr_type == DMU_OT_NONE ||
1402 1402 !DMU_OT_IS_VALID(drro->drr_type) ||
1403 1403 !DMU_OT_IS_VALID(drro->drr_bonustype) ||
1404 1404 drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
1405 1405 drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
1406 1406 P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
1407 1407 drro->drr_blksz < SPA_MINBLOCKSIZE ||
1408 1408 drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(os)) ||
1409 1409 drro->drr_bonuslen > DN_MAX_BONUSLEN) {
1410 1410 return (SET_ERROR(EINVAL));
1411 1411 }
1412 1412
1413 1413 err = dmu_object_info(os, drro->drr_object, &doi);
1414 1414
1415 1415 if (err != 0 && err != ENOENT)
1416 1416 return (SET_ERROR(EINVAL));
1417 1417 object = err == 0 ? drro->drr_object : DMU_NEW_OBJECT;
1418 1418
1419 1419 if (drro->drr_bonuslen) {
1420 1420 data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8), NULL);
1421 1421 if (ra->err != 0)
1422 1422 return (ra->err);
1423 1423 }
1424 1424
1425 1425 /*
1426 1426 * If we are losing blkptrs or changing the block size this must
1427 1427 * be a new file instance. We must clear out the previous file
1428 1428 * contents before we can change this type of metadata in the dnode.
1429 1429 */
1430 1430 if (err == 0) {
1431 1431 int nblkptr;
1432 1432
1433 1433 nblkptr = deduce_nblkptr(drro->drr_bonustype,
1434 1434 drro->drr_bonuslen);
1435 1435
1436 1436 if (drro->drr_blksz != doi.doi_data_block_size ||
1437 1437 nblkptr < doi.doi_nblkptr) {
1438 1438 err = dmu_free_long_range(os, drro->drr_object,
1439 1439 0, DMU_OBJECT_END);
1440 1440 if (err != 0)
1441 1441 return (SET_ERROR(EINVAL));
1442 1442 }
1443 1443 }
1444 1444
1445 1445 tx = dmu_tx_create(os);
1446 1446 dmu_tx_hold_bonus(tx, object);
1447 1447 err = dmu_tx_assign(tx, TXG_WAIT);
1448 1448 if (err != 0) {
1449 1449 dmu_tx_abort(tx);
1450 1450 return (err);
1451 1451 }
1452 1452
1453 1453 if (object == DMU_NEW_OBJECT) {
1454 1454 /* currently free, want to be allocated */
1455 1455 err = dmu_object_claim(os, drro->drr_object,
1456 1456 drro->drr_type, drro->drr_blksz,
1457 1457 drro->drr_bonustype, drro->drr_bonuslen, tx);
1458 1458 } else if (drro->drr_type != doi.doi_type ||
1459 1459 drro->drr_blksz != doi.doi_data_block_size ||
1460 1460 drro->drr_bonustype != doi.doi_bonus_type ||
1461 1461 drro->drr_bonuslen != doi.doi_bonus_size) {
1462 1462 /* currently allocated, but with different properties */
1463 1463 err = dmu_object_reclaim(os, drro->drr_object,
1464 1464 drro->drr_type, drro->drr_blksz,
1465 1465 drro->drr_bonustype, drro->drr_bonuslen, tx);
1466 1466 }
1467 1467 if (err != 0) {
1468 1468 dmu_tx_commit(tx);
1469 1469 return (SET_ERROR(EINVAL));
1470 1470 }
1471 1471
1472 1472 dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksumtype,
1473 1473 tx);
1474 1474 dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx);
1475 1475
1476 1476 if (data != NULL) {
1477 1477 dmu_buf_t *db;
1478 1478
1479 1479 VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db));
1480 1480 dmu_buf_will_dirty(db, tx);
1481 1481
1482 1482 ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
1483 1483 bcopy(data, db->db_data, drro->drr_bonuslen);
1484 1484 if (ra->byteswap) {
1485 1485 dmu_object_byteswap_t byteswap =
1486 1486 DMU_OT_BYTESWAP(drro->drr_bonustype);
1487 1487 dmu_ot_byteswap[byteswap].ob_func(db->db_data,
1488 1488 drro->drr_bonuslen);
1489 1489 }
1490 1490 dmu_buf_rele(db, FTAG);
1491 1491 }
1492 1492 dmu_tx_commit(tx);
1493 1493 return (0);
1494 1494 }
1495 1495
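One concrete case of the clear-out rule above:

/*
 * Example: the target already holds the object with 128K data blocks,
 * but the incoming DRR_OBJECT carries drr_blksz == 8K.  Since
 * drro->drr_blksz != doi.doi_data_block_size, restore_object() frees
 * 0..DMU_OBJECT_END first, and only then lets dmu_object_reclaim()
 * change the block size on the now-empty object.
 */
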
1496 1496 /* ARGSUSED */
1497 1497 static int
1498 1498 restore_freeobjects(struct restorearg *ra, objset_t *os,
1499 1499 struct drr_freeobjects *drrfo)
1500 1500 {
1501 1501 uint64_t obj;
1502 1502
1503 1503 if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
1504 1504 return (SET_ERROR(EINVAL));
1505 1505
1506 1506 for (obj = drrfo->drr_firstobj;
1507 1507 obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
1508 1508 (void) dmu_object_next(os, &obj, FALSE, 0)) {
1509 1509 int err;
1510 1510
1511 1511 if (dmu_object_info(os, obj, NULL) != 0)
1512 1512 continue;
1513 1513
1514 1514 err = dmu_free_long_object(os, obj);
1515 1515 if (err != 0)
1516 1516 return (err);
1517 1517 }
1518 1518 return (0);
1519 1519 }
1520 1520
1521 1521 static int
1522 1522 restore_write(struct restorearg *ra, objset_t *os,
1523 1523 struct drr_write *drrw)
1524 1524 {
1525 1525 dmu_tx_t *tx;
1526 1526 void *data;
1527 1527 int err;
1528 1528
1529 1529 if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
1530 1530 !DMU_OT_IS_VALID(drrw->drr_type))
1531 1531 return (SET_ERROR(EINVAL));
1532 1532
1533 1533 if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
1534 1534 return (SET_ERROR(EINVAL));
1535 1535
1536 1536 dmu_buf_t *bonus;
1537 1537 if (dmu_bonus_hold(os, drrw->drr_object, FTAG, &bonus) != 0)
1538 1538 return (SET_ERROR(EINVAL));
1539 1539
1540 1540 arc_buf_t *abuf = dmu_request_arcbuf(bonus, drrw->drr_length);
1541 1541
1542 1542 data = restore_read(ra, drrw->drr_length, abuf->b_data);
1543 1543 if (data == NULL) {
1544 1544 dmu_return_arcbuf(abuf);
1545 1545 dmu_buf_rele(bonus, FTAG);
1546 1546 return (ra->err);
1547 1547 }
1548 1548
1549 1549 tx = dmu_tx_create(os);
1550 1550
1551 1551 dmu_tx_hold_write(tx, drrw->drr_object,
1552 1552 drrw->drr_offset, drrw->drr_length);
1553 1553 err = dmu_tx_assign(tx, TXG_WAIT);
1554 1554 if (err != 0) {
1555 1555 dmu_return_arcbuf(abuf);
1556 1556 dmu_buf_rele(bonus, FTAG);
1557 1557 dmu_tx_abort(tx);
1558 1558 return (err);
1559 1559 }
1560 1560 if (ra->byteswap) {
1561 1561 dmu_object_byteswap_t byteswap =
1562 1562 DMU_OT_BYTESWAP(drrw->drr_type);
1563 1563 dmu_ot_byteswap[byteswap].ob_func(data, drrw->drr_length);
1564 1564 }
1565 1565 dmu_assign_arcbuf(bonus, drrw->drr_offset, abuf, tx);
1566 1566 dmu_tx_commit(tx);
1567 1567 dmu_buf_rele(bonus, FTAG);
1568 1568 return (0);
1569 1569 }
1570 1570
1571 1571 /*
1572 1572 * Handle a DRR_WRITE_BYREF record. This record is used in dedup'ed
1573 1573 * streams to refer to a copy of the data that is already on the
1574 1574 * system because it came in earlier in the stream. This function
1575 1575 * finds the earlier copy of the data, and uses that copy instead of
1576 1576 * data from the stream to fulfill this write.
1577 1577 */
1578 1578 static int
1579 1579 restore_write_byref(struct restorearg *ra, objset_t *os,
1580 1580 struct drr_write_byref *drrwbr)
1581 1581 {
1582 1582 dmu_tx_t *tx;
1583 1583 int err;
1584 1584 guid_map_entry_t gmesrch;
1585 1585 guid_map_entry_t *gmep;
1586 1586 avl_index_t where;
1587 1587 objset_t *ref_os = NULL;
1588 1588 dmu_buf_t *dbp;
1589 1589
1590 1590 if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
1591 1591 return (SET_ERROR(EINVAL));
1592 1592
1593 1593 /*
1594 1594 * If the GUID of the referenced dataset is different from the
1595 1595 * GUID of the target dataset, find the referenced dataset.
1596 1596 */
1597 1597 if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
1598 1598 gmesrch.guid = drrwbr->drr_refguid;
1599 1599 if ((gmep = avl_find(ra->guid_to_ds_map, &gmesrch,
1600 1600 &where)) == NULL) {
1601 1601 return (SET_ERROR(EINVAL));
1602 1602 }
1603 1603 if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
1604 1604 return (SET_ERROR(EINVAL));
1605 1605 } else {
1606 1606 ref_os = os;
1607 1607 }
1608 1608
1609 1609 err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
1610 1610 drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH);
1611 1611 if (err != 0)
1612 1612 return (err);
1613 1613
1614 1614 tx = dmu_tx_create(os);
1615 1615
1616 1616 dmu_tx_hold_write(tx, drrwbr->drr_object,
1617 1617 drrwbr->drr_offset, drrwbr->drr_length);
1618 1618 err = dmu_tx_assign(tx, TXG_WAIT);
1619 1619 if (err != 0) {
1620 1620 dmu_tx_abort(tx);
1621 1621 return (err);
1622 1622 }
1623 1623 dmu_write(os, drrwbr->drr_object,
1624 1624 drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
1625 1625 dmu_buf_rele(dbp, FTAG);
1626 1626 dmu_tx_commit(tx);
1627 1627 return (0);
1628 1628 }
1629 1629
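/*
 * Reviewer note: restore_write_byref() above resolves drr_refguid
 * through ra->guid_to_ds_map, an AVL tree of guid_map_entry_t nodes
 * searched with avl_find().  The standalone sketch below models the
 * same keyed indirection with a sorted array and bsearch(3C); the
 * struct layout and dataset names are illustrative, not the kernel's.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
	uint64_t guid;
	const char *dataset;	/* stands in for gme_ds */
} guid_map_entry;

static int
guid_cmp(const void *a, const void *b)
{
	uint64_t ga = ((const guid_map_entry *)a)->guid;
	uint64_t gb = ((const guid_map_entry *)b)->guid;

	return ((ga > gb) - (ga < gb));
}

int
main(void)
{
	guid_map_entry map[] = {	/* must stay sorted by guid */
		{ 0x1111, "pool/earlier@snap1" },
		{ 0x2222, "pool/earlier@snap2" },
	};
	guid_map_entry key = { 0x2222, NULL };
	guid_map_entry *gmep = bsearch(&key, map, 2, sizeof (key), guid_cmp);

	/* A NULL result corresponds to restore_write_byref()'s EINVAL. */
	(void) printf("%s\n", gmep != NULL ? gmep->dataset : "EINVAL");
	return (0);
}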
1630 1630 static int
1631 1631 restore_write_embedded(struct restorearg *ra, objset_t *os,
1632 1632 struct drr_write_embedded *drrwnp)
1633 1633 {
1634 1634 dmu_tx_t *tx;
1635 1635 int err;
1636 1636 void *data;
1637 1637
1638 1638 if (drrwnp->drr_offset + drrwnp->drr_length < drrwnp->drr_offset)
1639 1639 		return (SET_ERROR(EINVAL));
1640 1640
1641 1641 if (drrwnp->drr_psize > BPE_PAYLOAD_SIZE)
1642 1642 		return (SET_ERROR(EINVAL));
1643 1643
1644 1644 if (drrwnp->drr_etype >= NUM_BP_EMBEDDED_TYPES)
1645 1645 		return (SET_ERROR(EINVAL));
1646 1646 if (drrwnp->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
1647 1647 		return (SET_ERROR(EINVAL));
1648 1648
1649 1649 data = restore_read(ra, P2ROUNDUP(drrwnp->drr_psize, 8), NULL);
1650 1650 if (data == NULL)
1651 1651 return (ra->err);
1652 1652
1653 1653 tx = dmu_tx_create(os);
1654 1654
1655 1655 dmu_tx_hold_write(tx, drrwnp->drr_object,
1656 1656 drrwnp->drr_offset, drrwnp->drr_length);
1657 1657 err = dmu_tx_assign(tx, TXG_WAIT);
1658 1658 if (err != 0) {
1659 1659 dmu_tx_abort(tx);
1660 1660 return (err);
1661 1661 }
1662 1662
1663 1663 dmu_write_embedded(os, drrwnp->drr_object,
1664 1664 drrwnp->drr_offset, data, drrwnp->drr_etype,
1665 1665 drrwnp->drr_compression, drrwnp->drr_lsize, drrwnp->drr_psize,
1666 1666 ra->byteswap ^ ZFS_HOST_BYTEORDER, tx);
1667 1667
1668 1668 dmu_tx_commit(tx);
1669 1669 return (0);
1670 1670 }
1671 1671
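/*
 * Reviewer note: the byteorder argument to dmu_write_embedded() above
 * is computed as ra->byteswap ^ ZFS_HOST_BYTEORDER -- as I read it,
 * when the stream had to be byteswapped, the embedded payload's
 * native order is the opposite of the host's, so the flag is flipped.
 * Standalone truth-table sketch of that flag-flip (the 0/1 flag
 * values here are illustrative, not the real macros):
 */
#include <stdio.h>

int
main(void)
{
	for (int byteswap = 0; byteswap <= 1; byteswap++)
		for (int host_order = 0; host_order <= 1; host_order++)
			(void) printf("byteswap=%d host_order=%d -> "
			    "payload order flag %d\n",
			    byteswap, host_order, byteswap ^ host_order);
	return (0);
}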
1672 1672 static int
1673 1673 restore_spill(struct restorearg *ra, objset_t *os, struct drr_spill *drrs)
1674 1674 {
1675 1675 dmu_tx_t *tx;
1676 1676 void *data;
1677 1677 dmu_buf_t *db, *db_spill;
1678 1678 int err;
1679 1679
1680 1680 if (drrs->drr_length < SPA_MINBLOCKSIZE ||
1681 1681 drrs->drr_length > spa_maxblocksize(dmu_objset_spa(os)))
1682 1682 return (SET_ERROR(EINVAL));
1683 1683
1684 1684 data = restore_read(ra, drrs->drr_length, NULL);
1685 1685 if (data == NULL)
1686 1686 return (ra->err);
1687 1687
1688 1688 if (dmu_object_info(os, drrs->drr_object, NULL) != 0)
1689 1689 return (SET_ERROR(EINVAL));
1690 1690
1691 1691 	VERIFY0(dmu_bonus_hold(os, drrs->drr_object, FTAG, &db));
1692 1692 if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
1693 1693 dmu_buf_rele(db, FTAG);
1694 1694 return (err);
1695 1695 }
1696 1696
1697 1697 tx = dmu_tx_create(os);
1698 1698
1699 1699 dmu_tx_hold_spill(tx, db->db_object);
1700 1700
1701 1701 err = dmu_tx_assign(tx, TXG_WAIT);
1702 1702 if (err != 0) {
1703 1703 dmu_buf_rele(db, FTAG);
1704 1704 dmu_buf_rele(db_spill, FTAG);
1705 1705 dmu_tx_abort(tx);
1706 1706 return (err);
1707 1707 }
1708 1708 dmu_buf_will_dirty(db_spill, tx);
1709 1709
1710 1710 if (db_spill->db_size < drrs->drr_length)
1711 1711 		VERIFY0(dbuf_spill_set_blksz(db_spill,
1712 1712 		    drrs->drr_length, tx));
1713 1713 bcopy(data, db_spill->db_data, drrs->drr_length);
1714 1714
1715 1715 dmu_buf_rele(db, FTAG);
1716 1716 dmu_buf_rele(db_spill, FTAG);
1717 1717
1718 1718 dmu_tx_commit(tx);
1719 1719 return (0);
1720 1720 }
1721 1721
1722 1722 /* ARGSUSED */
1723 1723 static int
1724 1724 restore_free(struct restorearg *ra, objset_t *os,
1725 1725 struct drr_free *drrf)
1726 1726 {
1727 1727 int err;
1728 1728
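	/* A drr_length of -1ULL means "free to the end of the object". */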
1729 1729 if (drrf->drr_length != -1ULL &&
1730 1730 drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
1731 1731 return (SET_ERROR(EINVAL));
1732 1732
1733 1733 if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
1734 1734 return (SET_ERROR(EINVAL));
1735 1735
1736 1736 err = dmu_free_long_range(os, drrf->drr_object,
1737 1737 drrf->drr_offset, drrf->drr_length);
1738 1738 return (err);
1739 1739 }
1740 1740
1741 1741 /* used to destroy the drc_ds on error */
1742 1742 static void
1743 1743 dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
1744 1744 {
1745 1745 char name[MAXNAMELEN];
1746 1746 dsl_dataset_name(drc->drc_ds, name);
1747 1747 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
1748 1748 (void) dsl_destroy_head(name);
1749 1749 }
1750 1750
1751 1751 /*
1752 1752 * NB: callers *must* call dmu_recv_end() if this succeeds.
1753 1753 */
1754 1754 int
1755 1755 dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
1756 1756 int cleanup_fd, uint64_t *action_handlep)
1757 1757 {
1758 1758 struct restorearg ra = { 0 };
1759 1759 dmu_replay_record_t *drr;
1760 1760 objset_t *os;
1761 1761 zio_cksum_t pcksum;
1762 1762 int featureflags;
1763 1763
1764 1764 ra.byteswap = drc->drc_byteswap;
1765 1765 ra.cksum = drc->drc_cksum;
1766 1766 ra.vp = vp;
1767 1767 ra.voff = *voffp;
1768 1768 ra.bufsize = SPA_MAXBLOCKSIZE;
1769 1769 ra.buf = kmem_alloc(ra.bufsize, KM_SLEEP);
1770 1770
1771 1771 /* these were verified in dmu_recv_begin */
1772 1772 ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
1773 1773 DMU_SUBSTREAM);
1774 1774 ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
1775 1775
1776 1776 /*
1777 1777 * Open the objset we are modifying.
1778 1778 */
1779 1779 VERIFY0(dmu_objset_from_ds(drc->drc_ds, &os));
1780 1780
1781 1781 ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
1782 1782
1783 1783 featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
1784 1784
1785 1785 /* if this stream is dedup'ed, set up the avl tree for guid mapping */
1786 1786 if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
1787 1787 minor_t minor;
1788 1788
1789 1789 if (cleanup_fd == -1) {
1790 1790 ra.err = SET_ERROR(EBADF);
1791 1791 goto out;
1792 1792 }
1793 1793 ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
1794 1794 if (ra.err != 0) {
1795 1795 cleanup_fd = -1;
1796 1796 goto out;
1797 1797 }
1798 1798
1799 1799 if (*action_handlep == 0) {
1800 1800 ra.guid_to_ds_map =
1801 1801 kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
1802 1802 avl_create(ra.guid_to_ds_map, guid_compare,
1803 1803 sizeof (guid_map_entry_t),
1804 1804 offsetof(guid_map_entry_t, avlnode));
1805 1805 ra.err = zfs_onexit_add_cb(minor,
1806 1806 free_guid_map_onexit, ra.guid_to_ds_map,
1807 1807 action_handlep);
1808 1808 if (ra.err != 0)
1809 1809 goto out;
1810 1810 } else {
1811 1811 ra.err = zfs_onexit_cb_data(minor, *action_handlep,
1812 1812 (void **)&ra.guid_to_ds_map);
1813 1813 if (ra.err != 0)
1814 1814 goto out;
1815 1815 }
1816 1816
1817 1817 drc->drc_guid_to_ds_map = ra.guid_to_ds_map;
1818 1818 }
1819 1819
1820 1820 /*
1821 1821 * Read records and process them.
1822 1822 */
1823 1823 pcksum = ra.cksum;
1824 1824 while (ra.err == 0 &&
1825 1825 NULL != (drr = restore_read(&ra, sizeof (*drr), NULL))) {
1826 1826 if (issig(JUSTLOOKING) && issig(FORREAL)) {
1827 1827 ra.err = SET_ERROR(EINTR);
1828 1828 goto out;
1829 1829 }
1830 1830
1831 1831 if (ra.byteswap)
1832 1832 backup_byteswap(drr);
1833 1833
1834 1834 switch (drr->drr_type) {
1835 1835 case DRR_OBJECT:
1836 1836 {
1837 1837 /*
1838 1838 * We need to make a copy of the record header,
1839 1839 			 * because restore_{object,write} may need to call
1840 1840 * restore_read(), which will invalidate drr.
1841 1841 */
1842 1842 struct drr_object drro = drr->drr_u.drr_object;
1843 1843 ra.err = restore_object(&ra, os, &drro);
1844 1844 break;
1845 1845 }
1846 1846 case DRR_FREEOBJECTS:
1847 1847 {
1848 1848 struct drr_freeobjects drrfo =
1849 1849 drr->drr_u.drr_freeobjects;
1850 1850 ra.err = restore_freeobjects(&ra, os, &drrfo);
1851 1851 break;
1852 1852 }
1853 1853 case DRR_WRITE:
1854 1854 {
1855 1855 struct drr_write drrw = drr->drr_u.drr_write;
1856 1856 ra.err = restore_write(&ra, os, &drrw);
1857 1857 break;
1858 1858 }
1859 1859 case DRR_WRITE_BYREF:
1860 1860 {
1861 1861 struct drr_write_byref drrwbr =
1862 1862 drr->drr_u.drr_write_byref;
1863 1863 ra.err = restore_write_byref(&ra, os, &drrwbr);
1864 1864 break;
1865 1865 }
1866 1866 case DRR_WRITE_EMBEDDED:
1867 1867 {
1868 1868 struct drr_write_embedded drrwe =
1869 1869 drr->drr_u.drr_write_embedded;
1870 1870 ra.err = restore_write_embedded(&ra, os, &drrwe);
1871 1871 break;
1872 1872 }
1873 1873 case DRR_FREE:
1874 1874 {
1875 1875 struct drr_free drrf = drr->drr_u.drr_free;
1876 1876 ra.err = restore_free(&ra, os, &drrf);
1877 1877 break;
1878 1878 }
1879 1879 case DRR_END:
1880 1880 {
1881 1881 struct drr_end drre = drr->drr_u.drr_end;
1882 1882 /*
1883 1883 * We compare against the *previous* checksum
1884 1884 * value, because the stored checksum is of
1885 1885 * everything before the DRR_END record.
1886 1886 */
1887 1887 if (!ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum))
1888 1888 ra.err = SET_ERROR(ECKSUM);
1889 1889 goto out;
1890 1890 }
1891 1891 case DRR_SPILL:
1892 1892 {
1893 1893 struct drr_spill drrs = drr->drr_u.drr_spill;
1894 1894 ra.err = restore_spill(&ra, os, &drrs);
1895 1895 break;
1896 1896 }
1897 1897 default:
1898 1898 ra.err = SET_ERROR(EINVAL);
1899 1899 goto out;
1900 1900 }
1901 1901 pcksum = ra.cksum;
1902 1902 }
1903 1903 ASSERT(ra.err != 0);
1904 1904
1905 1905 out:
1906 1906 if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
1907 1907 zfs_onexit_fd_rele(cleanup_fd);
1908 1908
1909 1909 if (ra.err != 0) {
1910 1910 /*
1911 1911 		 * Destroy what we created, so we don't leave it in an
1912 1912 		 * inconsistent, partially restored state.
1913 1913 */
1914 1914 dmu_recv_cleanup_ds(drc);
1915 1915 }
1916 1916
1917 1917 kmem_free(ra.buf, ra.bufsize);
1918 1918 *voffp = ra.voff;
1919 1919 return (ra.err);
1920 1920 }
1921 1921
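/*
 * Reviewer note: the DRR_END handling in dmu_recv_stream() above
 * compares the stored checksum against pcksum, the running value
 * saved *before* the END record was folded in, because the sender's
 * checksum covers everything up to (not including) DRR_END.  The
 * standalone sketch below models that one-record lag with a trivial
 * additive checksum standing in for the incremental fletcher-4.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t
cksum_fold(uint64_t c, const void *buf, size_t len)
{
	const unsigned char *p = buf;

	while (len-- > 0)
		c += *p++;
	return (c);
}

int
main(void)
{
	const char *records[] = { "OBJECT", "WRITE", "END" };
	uint64_t cksum = 0, pcksum = 0;
	uint64_t stored_in_end;

	/* Sender side: END carries the checksum of everything before it. */
	stored_in_end = cksum_fold(cksum_fold(0, "OBJECT",
	    strlen("OBJECT")), "WRITE", strlen("WRITE"));

	/* Receiver side: fold each record, remembering the prior value. */
	for (int i = 0; i < 3; i++) {
		pcksum = cksum;	/* running value before this record */
		cksum = cksum_fold(cksum, records[i], strlen(records[i]));
		if (strcmp(records[i], "END") == 0)
			(void) printf("%s\n", stored_in_end == pcksum ?
			    "stream ok" : "ECKSUM");
	}
	return (0);
}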
1922 1922 static int
1923 1923 dmu_recv_end_check(void *arg, dmu_tx_t *tx)
1924 1924 {
1925 1925 dmu_recv_cookie_t *drc = arg;
1926 1926 dsl_pool_t *dp = dmu_tx_pool(tx);
1927 1927 int error;
1928 1928
1929 1929 ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
1930 1930
1931 1931 if (!drc->drc_newfs) {
1932 1932 dsl_dataset_t *origin_head;
1933 1933
1934 1934 error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
1935 1935 if (error != 0)
1936 1936 return (error);
1937 1937 if (drc->drc_force) {
1938 1938 /*
1939 1939 * We will destroy any snapshots in tofs (i.e. before
1940 1940 * origin_head) that are after the origin (which is
1941 1941 			 * the snap before drc_ds, because drc_ds cannot
1942 1942 * have any snaps of its own).
1943 1943 */
1944 1944 uint64_t obj;
1945 1945
1946 1946 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
1947 1947 while (obj !=
1948 1948 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
1949 1949 dsl_dataset_t *snap;
1950 1950 error = dsl_dataset_hold_obj(dp, obj, FTAG,
1951 1951 &snap);
1952 1952 if (error != 0)
1953 1953 return (error);
1954 1954 if (snap->ds_dir != origin_head->ds_dir)
1955 1955 error = SET_ERROR(EINVAL);
1956 1956 if (error == 0) {
1957 1957 error = dsl_destroy_snapshot_check_impl(
1958 1958 snap, B_FALSE);
1959 1959 }
1960 1960 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
1961 1961 dsl_dataset_rele(snap, FTAG);
1962 1962 if (error != 0)
1963 1963 return (error);
1964 1964 }
1965 1965 }
1966 1966 error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
1967 1967 origin_head, drc->drc_force, drc->drc_owner, tx);
1968 1968 if (error != 0) {
1969 1969 dsl_dataset_rele(origin_head, FTAG);
1970 1970 return (error);
1971 1971 }
1972 1972 error = dsl_dataset_snapshot_check_impl(origin_head,
1973 1973 drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
1974 1974 dsl_dataset_rele(origin_head, FTAG);
1975 1975 if (error != 0)
1976 1976 return (error);
1977 1977
1978 1978 error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
1979 1979 } else {
1980 1980 error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
1981 1981 drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
1982 1982 }
1983 1983 return (error);
1984 1984 }
1985 1985
1986 1986 static void
1987 1987 dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
1988 1988 {
1989 1989 dmu_recv_cookie_t *drc = arg;
1990 1990 dsl_pool_t *dp = dmu_tx_pool(tx);
1991 1991
1992 1992 spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
1993 1993 tx, "snap=%s", drc->drc_tosnap);
1994 1994
1995 1995 if (!drc->drc_newfs) {
1996 1996 dsl_dataset_t *origin_head;
1997 1997
1998 1998 VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
1999 1999 &origin_head));
2000 2000
2001 2001 if (drc->drc_force) {
2002 2002 /*
2003 2003 * Destroy any snapshots of drc_tofs (origin_head)
2004 2004 * after the origin (the snap before drc_ds).
2005 2005 */
2006 2006 uint64_t obj;
2007 2007
2008 2008 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
2009 2009 while (obj !=
2010 2010 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
2011 2011 dsl_dataset_t *snap;
2012 2012 VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
2013 2013 &snap));
2014 2014 ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
2015 2015 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
2016 2016 dsl_destroy_snapshot_sync_impl(snap,
2017 2017 B_FALSE, tx);
2018 2018 dsl_dataset_rele(snap, FTAG);
2019 2019 }
2020 2020 }
2021 2021 VERIFY3P(drc->drc_ds->ds_prev, ==,
2022 2022 origin_head->ds_prev);
2023 2023
2024 2024 dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
2025 2025 origin_head, tx);
2026 2026 dsl_dataset_snapshot_sync_impl(origin_head,
2027 2027 drc->drc_tosnap, tx);
2028 2028
2029 2029 /* set snapshot's creation time and guid */
2030 2030 dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
2031 2031 dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
2032 2032 drc->drc_drrb->drr_creation_time;
2033 2033 dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
2034 2034 drc->drc_drrb->drr_toguid;
2035 2035 dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
2036 2036 ~DS_FLAG_INCONSISTENT;
2037 2037
2038 2038 dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
2039 2039 dsl_dataset_phys(origin_head)->ds_flags &=
2040 2040 ~DS_FLAG_INCONSISTENT;
2041 2041
2042 2042 dsl_dataset_rele(origin_head, FTAG);
2043 2043 dsl_destroy_head_sync_impl(drc->drc_ds, tx);
2044 2044
2045 2045 if (drc->drc_owner != NULL)
2046 2046 VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
2047 2047 } else {
2048 2048 dsl_dataset_t *ds = drc->drc_ds;
2049 2049
2050 2050 dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
2051 2051
2052 2052 /* set snapshot's creation time and guid */
2053 2053 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
2054 2054 dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
2055 2055 drc->drc_drrb->drr_creation_time;
2056 2056 dsl_dataset_phys(ds->ds_prev)->ds_guid =
2057 2057 drc->drc_drrb->drr_toguid;
2058 2058 dsl_dataset_phys(ds->ds_prev)->ds_flags &=
2059 2059 ~DS_FLAG_INCONSISTENT;
2060 2060
2061 2061 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2062 2062 dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
2063 2063 }
2064 2064 drc->drc_newsnapobj = dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
2065 2065 /*
2066 2066 * Release the hold from dmu_recv_begin. This must be done before
2067 2067 * we return to open context, so that when we free the dataset's dnode,
2068 2068 * we can evict its bonus buffer.
2069 2069 */
2070 2070 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
2071 2071 drc->drc_ds = NULL;
2072 2072 }
2073 2073
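/*
 * Reviewer note: the force-receive path in dmu_recv_end_{check,sync}
 * above walks the snapshot chain via ds_prev_snap_obj, from
 * origin_head's newest snapshot back to the origin (the snap before
 * drc_ds), destroying everything in between.  The standalone sketch
 * below models that walk with a singly linked list standing in for
 * the object chain; names are illustrative.
 */
#include <stdio.h>

typedef struct snap {
	const char *name;
	struct snap *prev;	/* ds_prev_snap_obj analogue */
} snap_t;

int
main(void)
{
	snap_t origin = { "origin", NULL };
	snap_t mid = { "mid", &origin };
	snap_t newest = { "newest", &mid };
	snap_t *origin_head_prev = &newest;	/* head's newest snapshot */
	snap_t *stop = &origin;			/* drc_ds's previous snap */

	for (snap_t *s = origin_head_prev; s != stop; s = s->prev)
		(void) printf("destroy %s\n", s->name);	/* newest, mid */
	return (0);
}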
2074 2074 static int
2075 2075 add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj)
2076 2076 {
2077 2077 dsl_pool_t *dp;
2078 2078 dsl_dataset_t *snapds;
2079 2079 guid_map_entry_t *gmep;
2080 2080 int err;
2081 2081
2082 2082 ASSERT(guid_map != NULL);
2083 2083
2084 2084 err = dsl_pool_hold(name, FTAG, &dp);
2085 2085 if (err != 0)
2086 2086 return (err);
2087 2087 gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
2088 2088 err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds);
2089 2089 if (err == 0) {
2090 2090 gmep->guid = dsl_dataset_phys(snapds)->ds_guid;
2091 2091 gmep->gme_ds = snapds;
2092 2092 avl_add(guid_map, gmep);
2093 2093 dsl_dataset_long_hold(snapds, gmep);
2094 2094 } else {
2095 2095 kmem_free(gmep, sizeof (*gmep));
2096 2096 }
2097 2097
2098 2098 dsl_pool_rele(dp, FTAG);
2099 2099 return (err);
2100 2100 }
2101 2101
2102 2102 static int dmu_recv_end_modified_blocks = 3;
2103 2103
2104 2104 static int
2105 2105 dmu_recv_existing_end(dmu_recv_cookie_t *drc)
2106 2106 {
2107 2107 int error;
2108 2108 char name[MAXNAMELEN];
2109 2109
2110 2110 #ifdef _KERNEL
2111 2111 /*
2112 2112 * We will be destroying the ds; make sure its origin is unmounted if
2113 2113 * necessary.
2114 2114 */
2115 2115 dsl_dataset_name(drc->drc_ds, name);
2116 2116 zfs_destroy_unmount_origin(name);
2117 2117 #endif
2118 2118
2119 2119 error = dsl_sync_task(drc->drc_tofs,
2120 2120 dmu_recv_end_check, dmu_recv_end_sync, drc,
2121 2121 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);
2122 2122
2123 2123 if (error != 0)
2124 2124 dmu_recv_cleanup_ds(drc);
2125 2125 return (error);
2126 2126 }
2127 2127
2128 2128 static int
2129 2129 dmu_recv_new_end(dmu_recv_cookie_t *drc)
2130 2130 {
2131 2131 int error;
2132 2132
2133 2133 error = dsl_sync_task(drc->drc_tofs,
2134 2134 dmu_recv_end_check, dmu_recv_end_sync, drc,
2135 2135 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);
2136 2136
2137 2137 if (error != 0) {
2138 2138 dmu_recv_cleanup_ds(drc);
2139 2139 } else if (drc->drc_guid_to_ds_map != NULL) {
2140 2140 (void) add_ds_to_guidmap(drc->drc_tofs,
2141 2141 drc->drc_guid_to_ds_map,
2142 2142 drc->drc_newsnapobj);
2143 2143 }
2144 2144 return (error);
2145 2145 }
2146 2146
2147 2147 int
2148 2148 dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
2149 2149 {
2150 2150 drc->drc_owner = owner;
2151 2151
2152 2152 if (drc->drc_newfs)
2153 2153 return (dmu_recv_new_end(drc));
2154 2154 else
2155 2155 return (dmu_recv_existing_end(drc));
2156 2156 }
2157 2157
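/*
 * Reviewer note: per the NB above dmu_recv_stream(), callers must
 * call dmu_recv_end() whenever the stream succeeds, while on failure
 * the stream code destroys the half-received dataset itself (via
 * dmu_recv_cleanup_ds()).  The toy below sketches only the shape of
 * that contract; none of these functions are the kernel's.
 */
#include <stdio.h>

static int toy_recv_stream(int fail) { return (fail ? -1 : 0); }
static int toy_recv_end(void) { (void) printf("committed\n"); return (0); }

int
main(void)
{
	if (toy_recv_stream(0) == 0) {
		/* success: the end call commits (or cleans up) */
		return (toy_recv_end());
	}
	/* failure: the stream call already cleaned up after itself */
	return (1);
}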
2158 2158 /*
2159 2159 * Return TRUE if this objset is currently being received into.
2160 2160 */
2161 2161 boolean_t
2162 2162 dmu_objset_is_receiving(objset_t *os)
2163 2163 {
2164 2164 return (os->os_dsl_dataset != NULL &&
2165 2165 os->os_dsl_dataset->ds_owner == dmu_recv_tag);
2166 2166 }
1088 lines elided