Print this page
2619 asynchronous destruction of ZFS file systems
2747 SPA versioning with zfs feature flags
Reviewed by: Matt Ahrens <mahrens@delphix.com>
Reviewed by: George Wilson <gwilson@delphix.com>
Reviewed by: Richard Lowe <richlowe@richlowe.net>
Reviewed by: Dan Kruchinin <dan.kruchinin@gmail.com>
Approved by: Dan McDonald <danmcd@nexenta.com>
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/fs/zfs/dsl_dataset.c
+++ new/usr/src/uts/common/fs/zfs/dsl_dataset.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
|
↓ open down ↓ |
12 lines elided |
↑ open up ↑ |
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 - * Copyright (c) 2011 by Delphix. All rights reserved.
23 + * Copyright (c) 2012 by Delphix. All rights reserved.
24 24 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
25 25 */
26 26
27 27 #include <sys/dmu_objset.h>
28 28 #include <sys/dsl_dataset.h>
29 29 #include <sys/dsl_dir.h>
30 30 #include <sys/dsl_prop.h>
31 31 #include <sys/dsl_synctask.h>
32 32 #include <sys/dmu_traverse.h>
33 33 #include <sys/dmu_impl.h>
34 34 #include <sys/dmu_tx.h>
35 35 #include <sys/arc.h>
36 36 #include <sys/zio.h>
37 37 #include <sys/zap.h>
38 +#include <sys/zfeature.h>
38 39 #include <sys/unique.h>
39 40 #include <sys/zfs_context.h>
40 41 #include <sys/zfs_ioctl.h>
41 42 #include <sys/spa.h>
42 43 #include <sys/zfs_znode.h>
43 44 #include <sys/zfs_onexit.h>
44 45 #include <sys/zvol.h>
45 46 #include <sys/dsl_scan.h>
46 47 #include <sys/dsl_deadlist.h>
47 48
/*
 * Sentinel "owner" installed on a dataset once its destruction is assured;
 * tested by DSL_DATASET_IS_DESTROYED() below.
 */
static char *dsl_reaper = "the grim reaper";

static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
static dsl_syncfunc_t dsl_dataset_set_reservation_sync;

/* Swap two uint64_t lvalues in place. */
#define	SWITCH64(x, y) \
	{ \
		uint64_t __tmp = (x); \
		(x) = (y); \
		(y) = __tmp; \
	}

/* Upper bound on dataset reference counts. */
#define	DS_REF_MAX	(1ULL << 62)

#define	DSL_DEADLIST_BLOCKSIZE	SPA_MAXBLOCKSIZE

/* True once the destroy sync task has claimed the dataset. */
#define	DSL_DATASET_IS_DESTROYED(ds)	((ds)->ds_owner == dsl_reaper)
67 68
68 69 /*
69 70 * Figure out how much of this delta should be propogated to the dsl_dir
70 71 * layer. If there's a refreservation, that space has already been
71 72 * partially accounted for in our ancestors.
72 73 */
73 74 static int64_t
74 75 parent_delta(dsl_dataset_t *ds, int64_t delta)
75 76 {
76 77 uint64_t old_bytes, new_bytes;
77 78
78 79 if (ds->ds_reserved == 0)
79 80 return (delta);
80 81
81 82 old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
82 83 new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
83 84
84 85 ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
85 86 return (new_bytes - old_bytes);
86 87 }
87 88
/*
 * Update on-disk space accounting for a newly written block bp.
 * Syncing context only.  For the meta-objset (ds == NULL) the space is
 * charged to the MOS placeholder dsl_dir; otherwise the dataset's
 * referenced/compressed/uncompressed/unique byte counts are bumped and
 * the appropriate delta is propagated to the dsl_dir layer.
 */
void
dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
{
	int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
	int compressed = BP_GET_PSIZE(bp);
	int uncompressed = BP_GET_UCSIZE(bp);
	int64_t delta;

	dprintf_bp(bp, "ds=%p", ds);

	ASSERT(dmu_tx_is_syncing(tx));
	/* It could have been compressed away to nothing */
	if (BP_IS_HOLE(bp))
		return;
	ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
	ASSERT(DMU_OT_IS_VALID(BP_GET_TYPE(bp)));
	if (ds == NULL) {
		/*
		 * Account for the meta-objset space in its placeholder
		 * dsl_dir.
		 */
		ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */
		dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
		    used, compressed, uncompressed, tx);
		dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
		return;
	}
	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	/* lock order on this path: dd_lock before ds_lock */
	mutex_enter(&ds->ds_dir->dd_lock);
	mutex_enter(&ds->ds_lock);
	delta = parent_delta(ds, used);
	ds->ds_phys->ds_referenced_bytes += used;
	ds->ds_phys->ds_compressed_bytes += compressed;
	ds->ds_phys->ds_uncompressed_bytes += uncompressed;
	ds->ds_phys->ds_unique_bytes += used;
	mutex_exit(&ds->ds_lock);
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
	    compressed, uncompressed, tx);
	/* any part of `used` absorbed by the refreservation changes category */
	dsl_dir_transfer_space(ds->ds_dir, used - delta,
	    DD_USED_REFRSRV, DD_USED_HEAD, tx);
	mutex_exit(&ds->ds_dir->dd_lock);
}
131 132
/*
 * Account for freeing block bp from dataset ds (syncing context).
 * Blocks born after the most recent snapshot are freed immediately;
 * older blocks go on the dataset's deadlist (via the pending list when
 * called from zio interrupt context with async set).  Returns the
 * on-disk size that was, or will be, freed.
 */
int
dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
    boolean_t async)
{
	if (BP_IS_HOLE(bp))
		return (0);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(bp->blk_birth <= tx->tx_txg);

	int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
	int compressed = BP_GET_PSIZE(bp);
	int uncompressed = BP_GET_UCSIZE(bp);

	ASSERT(used > 0);
	if (ds == NULL) {
		/*
		 * Account for the meta-objset space in its placeholder
		 * dataset.
		 */
		dsl_free(tx->tx_pool, tx->tx_txg, bp);

		dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
		    -used, -compressed, -uncompressed, tx);
		dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
		return (used);
	}
	ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);

	ASSERT(!dsl_dataset_is_snapshot(ds));
	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
		int64_t delta;

		/* born since the last snapshot: nothing references it; free */
		dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
		dsl_free(tx->tx_pool, tx->tx_txg, bp);

		mutex_enter(&ds->ds_dir->dd_lock);
		mutex_enter(&ds->ds_lock);
		ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
		    !DS_UNIQUE_IS_ACCURATE(ds));
		delta = parent_delta(ds, -used);
		ds->ds_phys->ds_unique_bytes -= used;
		mutex_exit(&ds->ds_lock);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
		    delta, -compressed, -uncompressed, tx);
		dsl_dir_transfer_space(ds->ds_dir, -used - delta,
		    DD_USED_REFRSRV, DD_USED_HEAD, tx);
		mutex_exit(&ds->ds_dir->dd_lock);
	} else {
		dprintf_bp(bp, "putting on dead list: %s", "");
		if (async) {
			/*
			 * We are here as part of zio's write done callback,
			 * which means we're a zio interrupt thread.  We can't
			 * call dsl_deadlist_insert() now because it may block
			 * waiting for I/O.  Instead, put bp on the deferred
			 * queue and let dsl_pool_sync() finish the job.
			 */
			bplist_append(&ds->ds_pending_deadlist, bp);
		} else {
			dsl_deadlist_insert(&ds->ds_deadlist, bp, tx);
		}
		ASSERT3U(ds->ds_prev->ds_object, ==,
		    ds->ds_phys->ds_prev_snap_obj);
		ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
		/* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
		if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
		    ds->ds_object && bp->blk_birth >
		    ds->ds_prev->ds_phys->ds_prev_snap_txg) {
			/* block is now unique to the previous snapshot */
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			mutex_enter(&ds->ds_prev->ds_lock);
			ds->ds_prev->ds_phys->ds_unique_bytes += used;
			mutex_exit(&ds->ds_prev->ds_lock);
		}
		if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
			/* born after the clone origin: now snapshot space */
			dsl_dir_transfer_space(ds->ds_dir, used,
			    DD_USED_HEAD, DD_USED_SNAP, tx);
		}
	}
	mutex_enter(&ds->ds_lock);
	ASSERT3U(ds->ds_phys->ds_referenced_bytes, >=, used);
	ds->ds_phys->ds_referenced_bytes -= used;
	ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
	ds->ds_phys->ds_compressed_bytes -= compressed;
	ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
	ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
	mutex_exit(&ds->ds_lock);

	return (used);
}
224 225
225 226 uint64_t
226 227 dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
227 228 {
228 229 uint64_t trysnap = 0;
229 230
230 231 if (ds == NULL)
231 232 return (0);
232 233 /*
233 234 * The snapshot creation could fail, but that would cause an
234 235 * incorrect FALSE return, which would only result in an
235 236 * overestimation of the amount of space that an operation would
236 237 * consume, which is OK.
237 238 *
238 239 * There's also a small window where we could miss a pending
239 240 * snapshot, because we could set the sync task in the quiescing
240 241 * phase. So this should only be used as a guess.
241 242 */
242 243 if (ds->ds_trysnap_txg >
243 244 spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
244 245 trysnap = ds->ds_trysnap_txg;
245 246 return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
246 247 }
247 248
248 249 boolean_t
249 250 dsl_dataset_block_freeable(dsl_dataset_t *ds, const blkptr_t *bp,
250 251 uint64_t blk_birth)
251 252 {
252 253 if (blk_birth <= dsl_dataset_prev_snap_txg(ds))
253 254 return (B_FALSE);
254 255
255 256 ddt_prefetch(dsl_dataset_get_spa(ds), bp);
256 257
257 258 return (B_TRUE);
258 259 }
259 260
/* ARGSUSED */
/*
 * dmu_buf user-eviction callback: tear down the in-core dsl_dataset_t.
 * db is NULL when called directly from dsl_dataset_disown() for a
 * destroyed dataset whose dbuf is already gone.
 */
static void
dsl_dataset_evict(dmu_buf_t *db, void *dsv)
{
	dsl_dataset_t *ds = dsv;

	ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));

	unique_remove(ds->ds_fsid_guid);

	if (ds->ds_objset != NULL)
		dmu_objset_evict(ds->ds_objset);

	if (ds->ds_prev) {
		dsl_dataset_drop_ref(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	bplist_destroy(&ds->ds_pending_deadlist);
	if (db != NULL) {
		dsl_deadlist_close(&ds->ds_deadlist);
	} else {
		/* destroyed dataset: its deadlist was closed earlier */
		ASSERT(ds->ds_deadlist.dl_dbuf == NULL);
		ASSERT(!ds->ds_deadlist.dl_oldfmt);
	}
	if (ds->ds_dir)
		dsl_dir_close(ds->ds_dir, ds);

	ASSERT(!list_link_active(&ds->ds_synced_link));

	/*
	 * NOTE(review): ds_sendstream_lock and the ds_sendstreams list
	 * are initialized in dsl_dataset_get_ref() but not torn down
	 * here -- confirm whether mutex_destroy()/list_destroy() for
	 * them belong in this function.
	 */
	mutex_destroy(&ds->ds_lock);
	mutex_destroy(&ds->ds_recvlock);
	mutex_destroy(&ds->ds_opening_lock);
	rw_destroy(&ds->ds_rwlock);
	cv_destroy(&ds->ds_exclusive_cv);

	kmem_free(ds, sizeof (dsl_dataset_t));
}
298 299
299 300 static int
300 301 dsl_dataset_get_snapname(dsl_dataset_t *ds)
301 302 {
302 303 dsl_dataset_phys_t *headphys;
303 304 int err;
304 305 dmu_buf_t *headdbuf;
305 306 dsl_pool_t *dp = ds->ds_dir->dd_pool;
306 307 objset_t *mos = dp->dp_meta_objset;
307 308
308 309 if (ds->ds_snapname[0])
309 310 return (0);
310 311 if (ds->ds_phys->ds_next_snap_obj == 0)
311 312 return (0);
312 313
313 314 err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
314 315 FTAG, &headdbuf);
315 316 if (err)
316 317 return (err);
317 318 headphys = headdbuf->db_data;
318 319 err = zap_value_search(dp->dp_meta_objset,
319 320 headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
320 321 dmu_buf_rele(headdbuf, FTAG);
321 322 return (err);
322 323 }
323 324
324 325 static int
325 326 dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
326 327 {
327 328 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
328 329 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
329 330 matchtype_t mt;
330 331 int err;
331 332
332 333 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
333 334 mt = MT_FIRST;
334 335 else
335 336 mt = MT_EXACT;
336 337
337 338 err = zap_lookup_norm(mos, snapobj, name, 8, 1,
338 339 value, mt, NULL, 0, NULL);
339 340 if (err == ENOTSUP && mt == MT_FIRST)
340 341 err = zap_lookup(mos, snapobj, name, 8, 1, value);
341 342 return (err);
342 343 }
343 344
344 345 static int
345 346 dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
346 347 {
347 348 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
348 349 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
349 350 matchtype_t mt;
350 351 int err;
351 352
352 353 dsl_dir_snap_cmtime_update(ds->ds_dir);
353 354
354 355 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
355 356 mt = MT_FIRST;
356 357 else
357 358 mt = MT_EXACT;
358 359
359 360 err = zap_remove_norm(mos, snapobj, name, mt, tx);
360 361 if (err == ENOTSUP && mt == MT_FIRST)
361 362 err = zap_remove(mos, snapobj, name, tx);
362 363 return (err);
363 364 }
364 365
365 366 static int
366 367 dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
367 368 dsl_dataset_t **dsp)
368 369 {
369 370 objset_t *mos = dp->dp_meta_objset;
370 371 dmu_buf_t *dbuf;
371 372 dsl_dataset_t *ds;
372 373 int err;
373 374 dmu_object_info_t doi;
374 375
375 376 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
376 377 dsl_pool_sync_context(dp));
377 378
378 379 err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
379 380 if (err)
380 381 return (err);
381 382
382 383 /* Make sure dsobj has the correct object type. */
383 384 dmu_object_info_from_db(dbuf, &doi);
384 385 if (doi.doi_type != DMU_OT_DSL_DATASET)
385 386 return (EINVAL);
386 387
387 388 ds = dmu_buf_get_user(dbuf);
388 389 if (ds == NULL) {
389 390 dsl_dataset_t *winner;
390 391
391 392 ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
392 393 ds->ds_dbuf = dbuf;
393 394 ds->ds_object = dsobj;
394 395 ds->ds_phys = dbuf->db_data;
395 396
396 397 mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
397 398 mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL);
398 399 mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
399 400 mutex_init(&ds->ds_sendstream_lock, NULL, MUTEX_DEFAULT, NULL);
400 401
401 402 rw_init(&ds->ds_rwlock, 0, 0, 0);
402 403 cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);
403 404
404 405 bplist_create(&ds->ds_pending_deadlist);
405 406 dsl_deadlist_open(&ds->ds_deadlist,
406 407 mos, ds->ds_phys->ds_deadlist_obj);
407 408
408 409 list_create(&ds->ds_sendstreams, sizeof (dmu_sendarg_t),
409 410 offsetof(dmu_sendarg_t, dsa_link));
410 411
411 412 if (err == 0) {
412 413 err = dsl_dir_open_obj(dp,
413 414 ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
414 415 }
415 416 if (err) {
416 417 mutex_destroy(&ds->ds_lock);
417 418 mutex_destroy(&ds->ds_recvlock);
418 419 mutex_destroy(&ds->ds_opening_lock);
419 420 rw_destroy(&ds->ds_rwlock);
420 421 cv_destroy(&ds->ds_exclusive_cv);
421 422 bplist_destroy(&ds->ds_pending_deadlist);
422 423 dsl_deadlist_close(&ds->ds_deadlist);
423 424 kmem_free(ds, sizeof (dsl_dataset_t));
424 425 dmu_buf_rele(dbuf, tag);
425 426 return (err);
426 427 }
427 428
428 429 if (!dsl_dataset_is_snapshot(ds)) {
429 430 ds->ds_snapname[0] = '\0';
430 431 if (ds->ds_phys->ds_prev_snap_obj) {
431 432 err = dsl_dataset_get_ref(dp,
432 433 ds->ds_phys->ds_prev_snap_obj,
433 434 ds, &ds->ds_prev);
434 435 }
435 436 } else {
436 437 if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
437 438 err = dsl_dataset_get_snapname(ds);
438 439 if (err == 0 && ds->ds_phys->ds_userrefs_obj != 0) {
439 440 err = zap_count(
440 441 ds->ds_dir->dd_pool->dp_meta_objset,
441 442 ds->ds_phys->ds_userrefs_obj,
442 443 &ds->ds_userrefs);
443 444 }
444 445 }
445 446
446 447 if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
447 448 /*
448 449 * In sync context, we're called with either no lock
449 450 * or with the write lock. If we're not syncing,
450 451 * we're always called with the read lock held.
451 452 */
452 453 boolean_t need_lock =
453 454 !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
454 455 dsl_pool_sync_context(dp);
455 456
456 457 if (need_lock)
457 458 rw_enter(&dp->dp_config_rwlock, RW_READER);
458 459
459 460 err = dsl_prop_get_ds(ds,
460 461 "refreservation", sizeof (uint64_t), 1,
461 462 &ds->ds_reserved, NULL);
462 463 if (err == 0) {
463 464 err = dsl_prop_get_ds(ds,
464 465 "refquota", sizeof (uint64_t), 1,
465 466 &ds->ds_quota, NULL);
466 467 }
467 468
468 469 if (need_lock)
469 470 rw_exit(&dp->dp_config_rwlock);
470 471 } else {
471 472 ds->ds_reserved = ds->ds_quota = 0;
472 473 }
473 474
474 475 if (err == 0) {
475 476 winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
476 477 dsl_dataset_evict);
477 478 }
478 479 if (err || winner) {
479 480 bplist_destroy(&ds->ds_pending_deadlist);
480 481 dsl_deadlist_close(&ds->ds_deadlist);
481 482 if (ds->ds_prev)
482 483 dsl_dataset_drop_ref(ds->ds_prev, ds);
483 484 dsl_dir_close(ds->ds_dir, ds);
484 485 mutex_destroy(&ds->ds_lock);
485 486 mutex_destroy(&ds->ds_recvlock);
486 487 mutex_destroy(&ds->ds_opening_lock);
487 488 rw_destroy(&ds->ds_rwlock);
488 489 cv_destroy(&ds->ds_exclusive_cv);
489 490 kmem_free(ds, sizeof (dsl_dataset_t));
490 491 if (err) {
491 492 dmu_buf_rele(dbuf, tag);
492 493 return (err);
493 494 }
494 495 ds = winner;
495 496 } else {
496 497 ds->ds_fsid_guid =
497 498 unique_insert(ds->ds_phys->ds_fsid_guid);
498 499 }
499 500 }
500 501 ASSERT3P(ds->ds_dbuf, ==, dbuf);
501 502 ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
502 503 ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
503 504 spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
504 505 dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
505 506 mutex_enter(&ds->ds_lock);
506 507 if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
507 508 mutex_exit(&ds->ds_lock);
508 509 dmu_buf_rele(ds->ds_dbuf, tag);
509 510 return (ENOENT);
510 511 }
511 512 mutex_exit(&ds->ds_lock);
512 513 *dsp = ds;
513 514 return (0);
514 515 }
515 516
/*
 * Convert a bare reference (from dsl_dataset_get_ref()) into a normal
 * hold by acquiring ds_rwlock as READER.  Returns ENOENT -- having
 * dropped the reference -- if the dataset is destroyed while we wait.
 */
static int
dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/*
	 * In syncing context we don't want the rwlock lock: there
	 * may be an existing writer waiting for sync phase to
	 * finish.  We don't need to worry about such writers, since
	 * sync phase is single-threaded, so the writer can't be
	 * doing anything while we are active.
	 */
	if (dsl_pool_sync_context(dp)) {
		ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
		return (0);
	}

	/*
	 * Normal users will hold the ds_rwlock as a READER until they
	 * are finished (i.e., call dsl_dataset_rele()).  "Owners" will
	 * drop their READER lock after they set the ds_owner field.
	 *
	 * If the dataset is being destroyed, the destroy thread will
	 * obtain a WRITER lock for exclusive access after it's done its
	 * open-context work and then change the ds_owner to
	 * dsl_reaper once destruction is assured.  So threads
	 * may block here temporarily, until the "destructability" of
	 * the dataset is determined.
	 */
	ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
	mutex_enter(&ds->ds_lock);
	while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
		/* a would-be destroyer holds the rwlock: wait it out */
		rw_exit(&dp->dp_config_rwlock);
		cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
		if (DSL_DATASET_IS_DESTROYED(ds)) {
			mutex_exit(&ds->ds_lock);
			dsl_dataset_drop_ref(ds, tag);
			rw_enter(&dp->dp_config_rwlock, RW_READER);
			return (ENOENT);
		}
		/*
		 * The dp_config_rwlock lives above the ds_lock.  And
		 * we need to check DSL_DATASET_IS_DESTROYED() while
		 * holding the ds_lock, so we have to drop and reacquire
		 * the ds_lock here.
		 */
		mutex_exit(&ds->ds_lock);
		rw_enter(&dp->dp_config_rwlock, RW_READER);
		mutex_enter(&ds->ds_lock);
	}
	mutex_exit(&ds->ds_lock);
	return (0);
}
569 570
570 571 int
571 572 dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
572 573 dsl_dataset_t **dsp)
573 574 {
574 575 int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);
575 576
576 577 if (err)
577 578 return (err);
578 579 return (dsl_dataset_hold_ref(*dsp, tag));
579 580 }
580 581
581 582 int
582 583 dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, boolean_t inconsistentok,
583 584 void *tag, dsl_dataset_t **dsp)
584 585 {
585 586 int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
586 587 if (err)
587 588 return (err);
588 589 if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
589 590 dsl_dataset_rele(*dsp, tag);
590 591 *dsp = NULL;
591 592 return (EBUSY);
592 593 }
593 594 return (0);
594 595 }
595 596
/*
 * Hold the dataset named by `name` (filesystem, volume, or -- when the
 * name contains '@' -- a snapshot of that head dataset).  On success
 * *dsp is held; release with dsl_dataset_rele().
 */
int
dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	const char *snapname;
	uint64_t obj;
	int err = 0;

	err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
	if (err)
		return (err);

	dp = dd->dd_pool;
	obj = dd->dd_phys->dd_head_dataset_obj;
	rw_enter(&dp->dp_config_rwlock, RW_READER);
	if (obj)
		err = dsl_dataset_get_ref(dp, obj, tag, dsp);
	else
		err = ENOENT;
	if (err)
		goto out;

	err = dsl_dataset_hold_ref(*dsp, tag);

	/* we may be looking for a snapshot */
	if (err == 0 && snapname != NULL) {
		dsl_dataset_t *ds = NULL;

		if (*snapname++ != '@') {
			/* trailing component isn't a snapshot name */
			dsl_dataset_rele(*dsp, tag);
			err = ENOENT;
			goto out;
		}

		dprintf("looking for snapshot '%s'\n", snapname);
		err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
		if (err == 0)
			err = dsl_dataset_get_ref(dp, obj, tag, &ds);
		/* the head dataset's hold is no longer needed */
		dsl_dataset_rele(*dsp, tag);

		ASSERT3U((err == 0), ==, (ds != NULL));

		if (ds) {
			mutex_enter(&ds->ds_lock);
			/* cache the snapshot name while we know it */
			if (ds->ds_snapname[0] == 0)
				(void) strlcpy(ds->ds_snapname, snapname,
				    sizeof (ds->ds_snapname));
			mutex_exit(&ds->ds_lock);
			err = dsl_dataset_hold_ref(ds, tag);
			*dsp = err ? NULL : ds;
		}
	}
out:
	rw_exit(&dp->dp_config_rwlock);
	dsl_dir_close(dd, FTAG);
	return (err);
}
654 655
655 656 int
656 657 dsl_dataset_own(const char *name, boolean_t inconsistentok,
657 658 void *tag, dsl_dataset_t **dsp)
658 659 {
659 660 int err = dsl_dataset_hold(name, tag, dsp);
660 661 if (err)
661 662 return (err);
662 663 if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
663 664 dsl_dataset_rele(*dsp, tag);
664 665 return (EBUSY);
665 666 }
666 667 return (0);
667 668 }
668 669
669 670 void
670 671 dsl_dataset_name(dsl_dataset_t *ds, char *name)
671 672 {
672 673 if (ds == NULL) {
673 674 (void) strcpy(name, "mos");
674 675 } else {
675 676 dsl_dir_name(ds->ds_dir, name);
676 677 VERIFY(0 == dsl_dataset_get_snapname(ds));
677 678 if (ds->ds_snapname[0]) {
678 679 (void) strcat(name, "@");
679 680 /*
680 681 * We use a "recursive" mutex so that we
681 682 * can call dprintf_ds() with ds_lock held.
682 683 */
683 684 if (!MUTEX_HELD(&ds->ds_lock)) {
684 685 mutex_enter(&ds->ds_lock);
685 686 (void) strcat(name, ds->ds_snapname);
686 687 mutex_exit(&ds->ds_lock);
687 688 } else {
688 689 (void) strcat(name, ds->ds_snapname);
689 690 }
690 691 }
691 692 }
692 693 }
693 694
694 695 static int
695 696 dsl_dataset_namelen(dsl_dataset_t *ds)
696 697 {
697 698 int result;
698 699
699 700 if (ds == NULL) {
700 701 result = 3; /* "mos" */
701 702 } else {
702 703 result = dsl_dir_namelen(ds->ds_dir);
703 704 VERIFY(0 == dsl_dataset_get_snapname(ds));
704 705 if (ds->ds_snapname[0]) {
705 706 ++result; /* adding one for the @-sign */
706 707 if (!MUTEX_HELD(&ds->ds_lock)) {
707 708 mutex_enter(&ds->ds_lock);
708 709 result += strlen(ds->ds_snapname);
709 710 mutex_exit(&ds->ds_lock);
710 711 } else {
711 712 result += strlen(ds->ds_snapname);
712 713 }
713 714 }
714 715 }
715 716
716 717 return (result);
717 718 }
718 719
719 720 void
720 721 dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
721 722 {
722 723 dmu_buf_rele(ds->ds_dbuf, tag);
723 724 }
724 725
725 726 void
726 727 dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
727 728 {
728 729 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
729 730 rw_exit(&ds->ds_rwlock);
730 731 }
731 732 dsl_dataset_drop_ref(ds, tag);
732 733 }
733 734
/*
 * Give up ownership of ds (the reverse of dsl_dataset_tryown()) and
 * release the underlying reference -- or, for a destroyed dataset that
 * no longer has a dbuf, tear down the in-core state directly.
 */
void
dsl_dataset_disown(dsl_dataset_t *ds, void *tag)
{
	ASSERT((ds->ds_owner == tag && ds->ds_dbuf) ||
	    (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));

	mutex_enter(&ds->ds_lock);
	ds->ds_owner = NULL;
	if (RW_WRITE_HELD(&ds->ds_rwlock)) {
		/* we held it exclusively; wake threads blocked in hold_ref */
		rw_exit(&ds->ds_rwlock);
		cv_broadcast(&ds->ds_exclusive_cv);
	}
	mutex_exit(&ds->ds_lock);
	if (ds->ds_dbuf)
		dsl_dataset_drop_ref(ds, tag);
	else
		dsl_dataset_evict(NULL, ds);
}
752 753
753 754 boolean_t
754 755 dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag)
755 756 {
756 757 boolean_t gotit = FALSE;
757 758
758 759 mutex_enter(&ds->ds_lock);
759 760 if (ds->ds_owner == NULL &&
760 761 (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
761 762 ds->ds_owner = tag;
762 763 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
763 764 rw_exit(&ds->ds_rwlock);
764 765 gotit = TRUE;
765 766 }
766 767 mutex_exit(&ds->ds_lock);
767 768 return (gotit);
768 769 }
769 770
770 771 void
771 772 dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
772 773 {
773 774 ASSERT3P(owner, ==, ds->ds_owner);
774 775 if (!RW_WRITE_HELD(&ds->ds_rwlock))
775 776 rw_enter(&ds->ds_rwlock, RW_WRITER);
776 777 }
777 778
/*
 * Allocate and initialize a new dsl_dataset object for dir `dd` in
 * syncing context.  If `origin` (or dp_origin_snap) is set, the new
 * dataset is a clone of that snapshot and inherits its block pointer
 * and space accounting.  Returns the new object number.
 */
uint64_t
dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
    uint64_t flags, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;
	dmu_buf_t *dbuf;
	dsl_dataset_phys_t *dsphys;
	uint64_t dsobj;
	objset_t *mos = dp->dp_meta_objset;

	if (origin == NULL)
		origin = dp->dp_origin_snap;

	ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
	ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);

	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;
	bzero(dsphys, sizeof (dsl_dataset_phys_t));
	dsphys->ds_dir_obj = dd->dd_object;
	dsphys->ds_flags = flags;
	dsphys->ds_fsid_guid = unique_create();
	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
	    sizeof (dsphys->ds_guid));
	/* snapshot names are case-normalized to support CI datasets */
	dsphys->ds_snapnames_zapobj =
	    zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
	    DMU_OT_NONE, 0, tx);
	dsphys->ds_creation_time = gethrestime_sec();
	dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;

	if (origin == NULL) {
		dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
	} else {
		dsl_dataset_t *ohds;

		/* clone: start from the origin snapshot's state */
		dsphys->ds_prev_snap_obj = origin->ds_object;
		dsphys->ds_prev_snap_txg =
		    origin->ds_phys->ds_creation_txg;
		dsphys->ds_referenced_bytes =
		    origin->ds_phys->ds_referenced_bytes;
		dsphys->ds_compressed_bytes =
		    origin->ds_phys->ds_compressed_bytes;
		dsphys->ds_uncompressed_bytes =
		    origin->ds_phys->ds_uncompressed_bytes;
		dsphys->ds_bp = origin->ds_phys->ds_bp;
		dsphys->ds_flags |= origin->ds_phys->ds_flags;

		dmu_buf_will_dirty(origin->ds_dbuf, tx);
		origin->ds_phys->ds_num_children++;

		/* the clone's deadlist begins as a copy of the head's */
		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
		    origin->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ohds));
		dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
		    dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
		dsl_dataset_rele(ohds, FTAG);

		if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
			/* record us in the origin snapshot's next-clones zap */
			if (origin->ds_phys->ds_next_clones_obj == 0) {
				origin->ds_phys->ds_next_clones_obj =
				    zap_create(mos,
				    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
			}
			VERIFY(0 == zap_add_int(mos,
			    origin->ds_phys->ds_next_clones_obj,
			    dsobj, tx));
		}

		dmu_buf_will_dirty(dd->dd_dbuf, tx);
		dd->dd_phys->dd_origin_obj = origin->ds_object;
		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			/* record us in the origin dir's clones zap */
			if (origin->ds_dir->dd_phys->dd_clones == 0) {
				dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
				origin->ds_dir->dd_phys->dd_clones =
				    zap_create(mos,
				    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
			}
			VERIFY3U(0, ==, zap_add_int(mos,
			    origin->ds_dir->dd_phys->dd_clones, dsobj, tx));
		}
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
		dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

	dmu_buf_rele(dbuf, FTAG);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	dd->dd_phys->dd_head_dataset_obj = dsobj;

	return (dsobj);
}
874 875
/*
 * Create a new dataset (and its dsl_dir) named `lastname` under `pdd`
 * in syncing context, granting create-time permissions to `cr`.
 * Returns the new dataset's object number.
 */
uint64_t
dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
    dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
{
	dsl_pool_t *dp = pdd->dd_pool;
	uint64_t dsobj, ddobj;
	dsl_dir_t *dd;

	ASSERT(lastname[0] != '@');

	ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
	VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));

	dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);

	dsl_deleg_set_create_perms(dd, tx, cr);

	dsl_dir_close(dd, FTAG);

	/*
	 * If we are creating a clone, make sure we zero out any stale
	 * data from the origin snapshot's zil header.
	 */
	if (origin != NULL) {
		dsl_dataset_t *ds;
		objset_t *os;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
		VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os));
		bzero(&os->os_zil_header, sizeof (os->os_zil_header));
		dsl_dataset_dirty(ds, tx);
		dsl_dataset_rele(ds, FTAG);
	}

	return (dsobj);
}
911 912
/*
 * The snapshots must all be in the same pool.
 *
 * Destroy (or, with `defer`, defer-destroy) every snapshot named in
 * `snaps` as a single sync task group.  On failure, `failed` receives
 * the name of the dataset that triggered the error.
 */
int
dmu_snapshots_destroy_nvl(nvlist_t *snaps, boolean_t defer, char *failed)
{
	int err;
	dsl_sync_task_t *dst;
	spa_t *spa;
	nvpair_t *pair;
	dsl_sync_task_group_t *dstg;

	pair = nvlist_next_nvpair(snaps, NULL);
	if (pair == NULL)
		return (0);

	/* all snapshots share one pool; open the spa via the first name */
	err = spa_open(nvpair_name(pair), &spa, FTAG);
	if (err)
		return (err);
	dstg = dsl_sync_task_group_create(spa_get_dsl(spa));

	for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
	    pair = nvlist_next_nvpair(snaps, pair)) {
		dsl_dataset_t *ds;

		err = dsl_dataset_own(nvpair_name(pair), B_TRUE, dstg, &ds);
		if (err == 0) {
			struct dsl_ds_destroyarg *dsda;

			dsl_dataset_make_exclusive(ds, dstg);
			dsda = kmem_zalloc(sizeof (struct dsl_ds_destroyarg),
			    KM_SLEEP);
			dsda->ds = ds;
			dsda->defer = defer;
			dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
			    dsl_dataset_destroy_sync, dsda, dstg, 0);
		} else if (err == ENOENT) {
			/* already gone: not an error for a destroy */
			err = 0;
		} else {
			(void) strcpy(failed, nvpair_name(pair));
			break;
		}
	}

	if (err == 0)
		err = dsl_sync_task_group_wait(dstg);

	for (dst = list_head(&dstg->dstg_tasks); dst;
	    dst = list_next(&dstg->dstg_tasks, dst)) {
		struct dsl_ds_destroyarg *dsda = dst->dst_arg1;
		dsl_dataset_t *ds = dsda->ds;

		/*
		 * Return the file system name that triggered the error
		 */
		if (dst->dst_err) {
			dsl_dataset_name(ds, failed);
		}
		ASSERT3P(dsda->rm_origin, ==, NULL);
		dsl_dataset_disown(ds, dstg);
		kmem_free(dsda, sizeof (struct dsl_ds_destroyarg));
	}

	dsl_sync_task_group_destroy(dstg);
	spa_close(spa, FTAG);
	return (err);

}
981 981
982 982 static boolean_t
983 983 dsl_dataset_might_destroy_origin(dsl_dataset_t *ds)
984 984 {
985 985 boolean_t might_destroy = B_FALSE;
986 986
987 987 mutex_enter(&ds->ds_lock);
988 988 if (ds->ds_phys->ds_num_children == 2 && ds->ds_userrefs == 0 &&
989 989 DS_IS_DEFER_DESTROY(ds))
990 990 might_destroy = B_TRUE;
991 991 mutex_exit(&ds->ds_lock);
992 992
993 993 return (might_destroy);
994 994 }
995 995
996 996 /*
997 997 * If we're removing a clone, and these three conditions are true:
998 998 * 1) the clone's origin has no other children
999 999 * 2) the clone's origin has no user references
1000 1000 * 3) the clone's origin has been marked for deferred destruction
1001 1001 * Then, prepare to remove the origin as part of this sync task group.
1002 1002 */
1003 1003 static int
1004 1004 dsl_dataset_origin_rm_prep(struct dsl_ds_destroyarg *dsda, void *tag)
1005 1005 {
1006 1006 dsl_dataset_t *ds = dsda->ds;
1007 1007 dsl_dataset_t *origin = ds->ds_prev;
1008 1008
1009 1009 if (dsl_dataset_might_destroy_origin(origin)) {
1010 1010 char *name;
1011 1011 int namelen;
1012 1012 int error;
1013 1013
1014 1014 namelen = dsl_dataset_namelen(origin) + 1;
1015 1015 name = kmem_alloc(namelen, KM_SLEEP);
1016 1016 dsl_dataset_name(origin, name);
1017 1017 #ifdef _KERNEL
1018 1018 error = zfs_unmount_snap(name, NULL);
1019 1019 if (error) {
1020 1020 kmem_free(name, namelen);
1021 1021 return (error);
1022 1022 }
1023 1023 #endif
1024 1024 error = dsl_dataset_own(name, B_TRUE, tag, &origin);
1025 1025 kmem_free(name, namelen);
1026 1026 if (error)
1027 1027 return (error);
1028 1028 dsda->rm_origin = origin;
1029 1029 dsl_dataset_make_exclusive(origin, tag);
1030 1030 }
1031 1031
1032 1032 return (0);
1033 1033 }
1034 1034
/*
 * ds must be opened as OWNER.  On return (whether successful or not),
 * ds will be closed and caller can no longer dereference it.
 */
int
dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer)
{
	int err;
	dsl_sync_task_group_t *dstg;
	objset_t *os;
	dsl_dir_t *dd;
	uint64_t obj;
	struct dsl_ds_destroyarg dsda = { 0 };
	dsl_dataset_t dummy_ds = { 0 };

	dsda.ds = ds;

	if (dsl_dataset_is_snapshot(ds)) {
		/* Destroying a snapshot is simpler */
		dsl_dataset_make_exclusive(ds, tag);

		dsda.defer = defer;
		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
		    &dsda, tag, 0);
		ASSERT3P(dsda.rm_origin, ==, NULL);
		goto out;
	} else if (defer) {
		/* Deferred destroy is only supported on snapshots. */
		err = EINVAL;
		goto out;
	}

	dd = ds->ds_dir;
	/* dummy_ds carries the dsl_dir into the dir-destroy sync task. */
	dummy_ds.ds_dir = dd;
	dummy_ds.ds_object = ds->ds_object;

	/*
	 * Check for errors and mark this ds as inconsistent, in
	 * case we crash while freeing the objects.
	 */
	err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check,
	    dsl_dataset_destroy_begin_sync, ds, NULL, 0);
	if (err)
		goto out;

	err = dmu_objset_from_ds(ds, &os);
	if (err)
		goto out;

	/*
	 * If async destruction is not enabled try to remove all objects
	 * while in the open context so that there is less work to do in
	 * the syncing context.
	 */
	if (!spa_feature_is_enabled(dsl_dataset_get_spa(ds),
	    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY])) {
		for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
		    ds->ds_phys->ds_prev_snap_txg)) {
			/*
			 * Ignore errors, if there is not enough disk space
			 * we will deal with it in dsl_dataset_destroy_sync().
			 */
			(void) dmu_free_object(os, obj);
		}
		/* dmu_object_next() returns ESRCH once no objects remain. */
		if (err != ESRCH)
			goto out;
	}

	/*
	 * Only the ZIL knows how to free log blocks.
	 */
	zil_destroy(dmu_objset_zil(os), B_FALSE);

	/*
	 * Sync out all in-flight IO.
	 */
	txg_wait_synced(dd->dd_pool, 0);

	/*
	 * If we managed to free all the objects in open
	 * context, the user space accounting should be zero.
	 */
	if (ds->ds_phys->ds_bp.blk_fill == 0 &&
	    dmu_objset_userused_enabled(os)) {
		uint64_t count;

		ASSERT(zap_count(os, DMU_USERUSED_OBJECT, &count) != 0 ||
		    count == 0);
		ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT, &count) != 0 ||
		    count == 0);
	}

	/* Re-open the dsl_dir under FTAG for the destroy sync tasks. */
	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
	err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
	rw_exit(&dd->dd_pool->dp_config_rwlock);

	if (err)
		goto out;

	/*
	 * Blow away the dsl_dir + head dataset.
	 */
	dsl_dataset_make_exclusive(ds, tag);
	/*
	 * If we're removing a clone, we might also need to remove its
	 * origin.
	 */
	do {
		dsda.need_prep = B_FALSE;
		if (dsl_dir_is_clone(dd)) {
			err = dsl_dataset_origin_rm_prep(&dsda, tag);
			if (err) {
				dsl_dir_close(dd, FTAG);
				goto out;
			}
		}

		dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
		dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
		    dsl_dataset_destroy_sync, &dsda, tag, 0);
		dsl_sync_task_create(dstg, dsl_dir_destroy_check,
		    dsl_dir_destroy_sync, &dummy_ds, FTAG, 0);
		err = dsl_sync_task_group_wait(dstg);
		dsl_sync_task_group_destroy(dstg);

		/*
		 * We could be racing against 'zfs release' or 'zfs destroy -d'
		 * on the origin snap, in which case we can get EBUSY if we
		 * needed to destroy the origin snap but were not ready to
		 * do so.
		 */
		if (dsda.need_prep) {
			ASSERT(err == EBUSY);
			ASSERT(dsl_dir_is_clone(dd));
			ASSERT(dsda.rm_origin == NULL);
		}
	} while (dsda.need_prep);

	if (dsda.rm_origin != NULL)
		dsl_dataset_disown(dsda.rm_origin, tag);

	/* if it is successful, dsl_dir_destroy_sync will close the dd */
	if (err)
		dsl_dir_close(dd, FTAG);
out:
	dsl_dataset_disown(ds, tag);
	return (err);
}
1179 1183
1180 1184 blkptr_t *
1181 1185 dsl_dataset_get_blkptr(dsl_dataset_t *ds)
1182 1186 {
1183 1187 return (&ds->ds_phys->ds_bp);
1184 1188 }
1185 1189
1186 1190 void
1187 1191 dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
1188 1192 {
1189 1193 ASSERT(dmu_tx_is_syncing(tx));
1190 1194 /* If it's the meta-objset, set dp_meta_rootbp */
1191 1195 if (ds == NULL) {
1192 1196 tx->tx_pool->dp_meta_rootbp = *bp;
1193 1197 } else {
1194 1198 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1195 1199 ds->ds_phys->ds_bp = *bp;
1196 1200 }
1197 1201 }
1198 1202
1199 1203 spa_t *
1200 1204 dsl_dataset_get_spa(dsl_dataset_t *ds)
1201 1205 {
1202 1206 return (ds->ds_dir->dd_pool->dp_spa);
1203 1207 }
1204 1208
1205 1209 void
1206 1210 dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
1207 1211 {
1208 1212 dsl_pool_t *dp;
1209 1213
1210 1214 if (ds == NULL) /* this is the meta-objset */
1211 1215 return;
1212 1216
1213 1217 ASSERT(ds->ds_objset != NULL);
1214 1218
1215 1219 if (ds->ds_phys->ds_next_snap_obj != 0)
1216 1220 panic("dirtying snapshot!");
1217 1221
1218 1222 dp = ds->ds_dir->dd_pool;
1219 1223
1220 1224 if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
1221 1225 /* up the hold count until we can be written out */
1222 1226 dmu_buf_add_ref(ds->ds_dbuf, ds);
1223 1227 }
1224 1228 }
1225 1229
1226 1230 /*
1227 1231 * The unique space in the head dataset can be calculated by subtracting
1228 1232 * the space used in the most recent snapshot, that is still being used
1229 1233 * in this file system, from the space currently in use. To figure out
1230 1234 * the space in the most recent snapshot still in use, we need to take
1231 1235 * the total space used in the snapshot and subtract out the space that
1232 1236 * has been freed up since the snapshot was taken.
|
↓ open down ↓ |
125 lines elided |
↑ open up ↑ |
1233 1237 */
1234 1238 static void
1235 1239 dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
1236 1240 {
1237 1241 uint64_t mrs_used;
1238 1242 uint64_t dlused, dlcomp, dluncomp;
1239 1243
1240 1244 ASSERT(!dsl_dataset_is_snapshot(ds));
1241 1245
1242 1246 if (ds->ds_phys->ds_prev_snap_obj != 0)
1243 - mrs_used = ds->ds_prev->ds_phys->ds_used_bytes;
1247 + mrs_used = ds->ds_prev->ds_phys->ds_referenced_bytes;
1244 1248 else
1245 1249 mrs_used = 0;
1246 1250
1247 1251 dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);
1248 1252
1249 1253 ASSERT3U(dlused, <=, mrs_used);
1250 1254 ds->ds_phys->ds_unique_bytes =
1251 - ds->ds_phys->ds_used_bytes - (mrs_used - dlused);
1255 + ds->ds_phys->ds_referenced_bytes - (mrs_used - dlused);
1252 1256
1253 1257 if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
1254 1258 SPA_VERSION_UNIQUE_ACCURATE)
1255 1259 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
1256 1260 }
1257 1261
1258 1262 struct killarg {
1259 1263 dsl_dataset_t *ds;
1260 1264 dmu_tx_t *tx;
1261 1265 };
1262 1266
1263 1267 /* ARGSUSED */
1264 1268 static int
1265 1269 kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
1266 1270 const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
1267 1271 {
1268 1272 struct killarg *ka = arg;
1269 1273 dmu_tx_t *tx = ka->tx;
1270 1274
1271 1275 if (bp == NULL)
1272 1276 return (0);
1273 1277
1274 1278 if (zb->zb_level == ZB_ZIL_LEVEL) {
1275 1279 ASSERT(zilog != NULL);
1276 1280 /*
1277 1281 * It's a block in the intent log. It has no
1278 1282 * accounting, so just free it.
1279 1283 */
1280 1284 dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
1281 1285 } else {
1282 1286 ASSERT(zilog == NULL);
1283 1287 ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
1284 1288 (void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
1285 1289 }
1286 1290
1287 1291 return (0);
1288 1292 }
1289 1293
1290 1294 /* ARGSUSED */
1291 1295 static int
1292 1296 dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
1293 1297 {
1294 1298 dsl_dataset_t *ds = arg1;
1295 1299 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1296 1300 uint64_t count;
1297 1301 int err;
1298 1302
1299 1303 /*
1300 1304 * Can't delete a head dataset if there are snapshots of it.
1301 1305 * (Except if the only snapshots are from the branch we cloned
1302 1306 * from.)
1303 1307 */
1304 1308 if (ds->ds_prev != NULL &&
1305 1309 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1306 1310 return (EBUSY);
1307 1311
1308 1312 /*
1309 1313 * This is really a dsl_dir thing, but check it here so that
1310 1314 * we'll be less likely to leave this dataset inconsistent &
1311 1315 * nearly destroyed.
1312 1316 */
1313 1317 err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
1314 1318 if (err)
1315 1319 return (err);
1316 1320 if (count != 0)
1317 1321 return (EEXIST);
1318 1322
1319 1323 return (0);
1320 1324 }
1321 1325
1322 1326 /* ARGSUSED */
1323 1327 static void
1324 1328 dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, dmu_tx_t *tx)
1325 1329 {
1326 1330 dsl_dataset_t *ds = arg1;
1327 1331 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1328 1332
1329 1333 /* Mark it as inconsistent on-disk, in case we crash */
1330 1334 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1331 1335 ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
1332 1336
1333 1337 spa_history_log_internal(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
1334 1338 "dataset = %llu", ds->ds_object);
1335 1339 }
1336 1340
1337 1341 static int
1338 1342 dsl_dataset_origin_check(struct dsl_ds_destroyarg *dsda, void *tag,
1339 1343 dmu_tx_t *tx)
1340 1344 {
1341 1345 dsl_dataset_t *ds = dsda->ds;
1342 1346 dsl_dataset_t *ds_prev = ds->ds_prev;
1343 1347
1344 1348 if (dsl_dataset_might_destroy_origin(ds_prev)) {
1345 1349 struct dsl_ds_destroyarg ndsda = {0};
1346 1350
1347 1351 /*
1348 1352 * If we're not prepared to remove the origin, don't remove
1349 1353 * the clone either.
1350 1354 */
1351 1355 if (dsda->rm_origin == NULL) {
1352 1356 dsda->need_prep = B_TRUE;
1353 1357 return (EBUSY);
1354 1358 }
1355 1359
1356 1360 ndsda.ds = ds_prev;
1357 1361 ndsda.is_origin_rm = B_TRUE;
1358 1362 return (dsl_dataset_destroy_check(&ndsda, tag, tx));
1359 1363 }
1360 1364
1361 1365 /*
1362 1366 * If we're not going to remove the origin after all,
1363 1367 * undo the open context setup.
1364 1368 */
1365 1369 if (dsda->rm_origin != NULL) {
1366 1370 dsl_dataset_disown(dsda->rm_origin, tag);
1367 1371 dsda->rm_origin = NULL;
1368 1372 }
1369 1373
1370 1374 return (0);
1371 1375 }
1372 1376
1373 1377 /*
1374 1378 * If you add new checks here, you may need to add
1375 1379 * additional checks to the "temporary" case in
1376 1380 * snapshot_check() in dmu_objset.c.
1377 1381 */
1378 1382 /* ARGSUSED */
1379 1383 int
1380 1384 dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
1381 1385 {
1382 1386 struct dsl_ds_destroyarg *dsda = arg1;
1383 1387 dsl_dataset_t *ds = dsda->ds;
1384 1388
1385 1389 /* we have an owner hold, so noone else can destroy us */
1386 1390 ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
1387 1391
1388 1392 /*
1389 1393 * Only allow deferred destroy on pools that support it.
1390 1394 * NOTE: deferred destroy is only supported on snapshots.
1391 1395 */
1392 1396 if (dsda->defer) {
1393 1397 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
1394 1398 SPA_VERSION_USERREFS)
1395 1399 return (ENOTSUP);
1396 1400 ASSERT(dsl_dataset_is_snapshot(ds));
1397 1401 return (0);
1398 1402 }
1399 1403
1400 1404 /*
1401 1405 * Can't delete a head dataset if there are snapshots of it.
1402 1406 * (Except if the only snapshots are from the branch we cloned
1403 1407 * from.)
1404 1408 */
1405 1409 if (ds->ds_prev != NULL &&
1406 1410 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1407 1411 return (EBUSY);
1408 1412
1409 1413 /*
1410 1414 * If we made changes this txg, traverse_dsl_dataset won't find
1411 1415 * them. Try again.
1412 1416 */
1413 1417 if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
1414 1418 return (EAGAIN);
1415 1419
1416 1420 if (dsl_dataset_is_snapshot(ds)) {
1417 1421 /*
1418 1422 * If this snapshot has an elevated user reference count,
1419 1423 * we can't destroy it yet.
1420 1424 */
1421 1425 if (ds->ds_userrefs > 0 && !dsda->releasing)
1422 1426 return (EBUSY);
1423 1427
1424 1428 mutex_enter(&ds->ds_lock);
1425 1429 /*
1426 1430 * Can't delete a branch point. However, if we're destroying
1427 1431 * a clone and removing its origin due to it having a user
1428 1432 * hold count of 0 and having been marked for deferred destroy,
1429 1433 * it's OK for the origin to have a single clone.
1430 1434 */
1431 1435 if (ds->ds_phys->ds_num_children >
1432 1436 (dsda->is_origin_rm ? 2 : 1)) {
1433 1437 mutex_exit(&ds->ds_lock);
1434 1438 return (EEXIST);
1435 1439 }
1436 1440 mutex_exit(&ds->ds_lock);
1437 1441 } else if (dsl_dir_is_clone(ds->ds_dir)) {
1438 1442 return (dsl_dataset_origin_check(dsda, arg2, tx));
1439 1443 }
1440 1444
1441 1445 /* XXX we should do some i/o error checking... */
1442 1446 return (0);
1443 1447 }
1444 1448
1445 1449 struct refsarg {
1446 1450 kmutex_t lock;
1447 1451 boolean_t gone;
1448 1452 kcondvar_t cv;
1449 1453 };
1450 1454
1451 1455 /* ARGSUSED */
1452 1456 static void
1453 1457 dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
1454 1458 {
1455 1459 struct refsarg *arg = argv;
1456 1460
1457 1461 mutex_enter(&arg->lock);
1458 1462 arg->gone = TRUE;
1459 1463 cv_signal(&arg->cv);
1460 1464 mutex_exit(&arg->lock);
1461 1465 }
1462 1466
/*
 * Release our hold on the dataset's dbuf and block until every other
 * hold is gone, i.e. until the eviction callback dsl_dataset_refs_gone()
 * has fired.  On return ds_dbuf/ds_phys are cleared and must not be
 * dereferenced.
 */
static void
dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
{
	struct refsarg arg;

	mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
	arg.gone = FALSE;
	/* Arrange for dsl_dataset_refs_gone() to run on eviction. */
	(void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
	    dsl_dataset_refs_gone);
	dmu_buf_rele(ds->ds_dbuf, tag);
	mutex_enter(&arg.lock);
	while (!arg.gone)
		cv_wait(&arg.cv, &arg.lock);
	ASSERT(arg.gone);
	mutex_exit(&arg.lock);
	ds->ds_dbuf = NULL;
	ds->ds_phys = NULL;
	mutex_destroy(&arg.lock);
	cv_destroy(&arg.cv);
}
1484 1488
1485 1489 static void
1486 1490 remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx)
1487 1491 {
1488 1492 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1489 1493 uint64_t count;
1490 1494 int err;
1491 1495
1492 1496 ASSERT(ds->ds_phys->ds_num_children >= 2);
1493 1497 err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx);
1494 1498 /*
1495 1499 * The err should not be ENOENT, but a bug in a previous version
1496 1500 * of the code could cause upgrade_clones_cb() to not set
1497 1501 * ds_next_snap_obj when it should, leading to a missing entry.
1498 1502 * If we knew that the pool was created after
1499 1503 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
1500 1504 * ENOENT. However, at least we can check that we don't have
1501 1505 * too many entries in the next_clones_obj even after failing to
1502 1506 * remove this one.
1503 1507 */
1504 1508 if (err != ENOENT) {
1505 1509 VERIFY3U(err, ==, 0);
1506 1510 }
1507 1511 ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
1508 1512 &count));
1509 1513 ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2);
1510 1514 }
1511 1515
/*
 * Recursively remove the deadlist key "mintxg" from every clone of this
 * dataset that branched off after that txg.
 */
static void
dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;

	/*
	 * If it is the old version, dd_clones doesn't exist so we can't
	 * find the clones, but deadlist_remove_key() is a no-op so it
	 * doesn't matter.
	 */
	if (ds->ds_dir->dd_phys->dd_clones == 0)
		return;

	/* Walk the dd_clones ZAP; each entry is a clone's object number. */
	for (zap_cursor_init(&zc, mos, ds->ds_dir->dd_phys->dd_clones);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_dataset_t *clone;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
		    za.za_first_integer, FTAG, &clone));
		if (clone->ds_dir->dd_origin_txg > mintxg) {
			dsl_deadlist_remove_key(&clone->ds_deadlist,
			    mintxg, tx);
			dsl_dataset_remove_clones_key(clone, mintxg, tx);
		}
		dsl_dataset_rele(clone, FTAG);
	}
	zap_cursor_fini(&zc);
}
1543 1547
1544 1548 struct process_old_arg {
1545 1549 dsl_dataset_t *ds;
1546 1550 dsl_dataset_t *ds_prev;
1547 1551 boolean_t after_branch_point;
1548 1552 zio_t *pio;
1549 1553 uint64_t used, comp, uncomp;
1550 1554 };
1551 1555
1552 1556 static int
1553 1557 process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
1554 1558 {
1555 1559 struct process_old_arg *poa = arg;
1556 1560 dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;
1557 1561
1558 1562 if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) {
1559 1563 dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
1560 1564 if (poa->ds_prev && !poa->after_branch_point &&
1561 1565 bp->blk_birth >
1562 1566 poa->ds_prev->ds_phys->ds_prev_snap_txg) {
1563 1567 poa->ds_prev->ds_phys->ds_unique_bytes +=
1564 1568 bp_get_dsize_sync(dp->dp_spa, bp);
1565 1569 }
1566 1570 } else {
1567 1571 poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
1568 1572 poa->comp += BP_GET_PSIZE(bp);
1569 1573 poa->uncomp += BP_GET_UCSIZE(bp);
1570 1574 dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
1571 1575 }
1572 1576 return (0);
1573 1577 }
1574 1578
/*
 * Handle old-format (pre-deadlist-object) deadlists when destroying a
 * snapshot: walk next's deadlist freeing what died after us, adjust
 * snapused accounting, then exchange deadlists so next inherits ours.
 */
static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
    dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
	struct process_old_arg poa = { 0 };
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;

	/* Only old-format deadlists are handled by this path. */
	ASSERT(ds->ds_deadlist.dl_oldfmt);
	ASSERT(ds_next->ds_deadlist.dl_oldfmt);

	poa.ds = ds;
	poa.ds_prev = ds_prev;
	poa.after_branch_point = after_branch_point;
	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	VERIFY3U(0, ==, bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
	    process_old_cb, &poa, tx));
	VERIFY3U(zio_wait(poa.pio), ==, 0);
	/* Everything freed should equal this snapshot's unique space. */
	ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes);

	/* change snapused */
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
	    -poa.used, -poa.comp, -poa.uncomp, tx);

	/* swap next's deadlist to our deadlist */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_close(&ds_next->ds_deadlist);
	SWITCH64(ds_next->ds_phys->ds_deadlist_obj,
	    ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
	    ds_next->ds_phys->ds_deadlist_obj);
}
1608 1612
1613 +static int
1614 +old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
1615 +{
1616 + int err;
1617 + struct killarg ka;
1618 +
1619 + /*
1620 + * Free everything that we point to (that's born after
1621 + * the previous snapshot, if we are a clone)
1622 + *
1623 + * NB: this should be very quick, because we already
1624 + * freed all the objects in open context.
1625 + */
1626 + ka.ds = ds;
1627 + ka.tx = tx;
1628 + err = traverse_dataset(ds,
1629 + ds->ds_phys->ds_prev_snap_txg, TRAVERSE_POST,
1630 + kill_blkptr, &ka);
1631 + ASSERT3U(err, ==, 0);
1632 + ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) || ds->ds_phys->ds_unique_bytes == 0);
1633 +
1634 + return (err);
1635 +}
1636 +
1609 1637 void
1610 1638 dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
1611 1639 {
1612 1640 struct dsl_ds_destroyarg *dsda = arg1;
1613 1641 dsl_dataset_t *ds = dsda->ds;
1614 1642 int err;
1615 1643 int after_branch_point = FALSE;
1616 1644 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1617 1645 objset_t *mos = dp->dp_meta_objset;
1618 1646 dsl_dataset_t *ds_prev = NULL;
1619 1647 boolean_t wont_destroy;
1620 1648 uint64_t obj;
1621 1649
1622 1650 wont_destroy = (dsda->defer &&
1623 1651 (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1));
1624 1652
1625 1653 ASSERT(ds->ds_owner || wont_destroy);
1626 1654 ASSERT(dsda->defer || ds->ds_phys->ds_num_children <= 1);
1627 1655 ASSERT(ds->ds_prev == NULL ||
1628 1656 ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
1629 1657 ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
1630 1658
1631 1659 if (wont_destroy) {
1632 1660 ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
1633 1661 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1634 1662 ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
1635 1663 return;
1636 1664 }
1637 1665
1638 1666 /* signal any waiters that this dataset is going away */
1639 1667 mutex_enter(&ds->ds_lock);
1640 1668 ds->ds_owner = dsl_reaper;
1641 1669 cv_broadcast(&ds->ds_exclusive_cv);
1642 1670 mutex_exit(&ds->ds_lock);
1643 1671
1644 1672 /* Remove our reservation */
1645 1673 if (ds->ds_reserved != 0) {
1646 1674 dsl_prop_setarg_t psa;
1647 1675 uint64_t value = 0;
1648 1676
1649 1677 dsl_prop_setarg_init_uint64(&psa, "refreservation",
1650 1678 (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
1651 1679 &value);
1652 1680 psa.psa_effective_value = 0; /* predict default value */
1653 1681
1654 1682 dsl_dataset_set_reservation_sync(ds, &psa, tx);
1655 1683 ASSERT3U(ds->ds_reserved, ==, 0);
1656 1684 }
1657 1685
1658 1686 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
1659 1687
1660 1688 dsl_scan_ds_destroyed(ds, tx);
1661 1689
1662 1690 obj = ds->ds_object;
1663 1691
1664 1692 if (ds->ds_phys->ds_prev_snap_obj != 0) {
1665 1693 if (ds->ds_prev) {
1666 1694 ds_prev = ds->ds_prev;
1667 1695 } else {
1668 1696 VERIFY(0 == dsl_dataset_hold_obj(dp,
1669 1697 ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
1670 1698 }
1671 1699 after_branch_point =
1672 1700 (ds_prev->ds_phys->ds_next_snap_obj != obj);
1673 1701
1674 1702 dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
1675 1703 if (after_branch_point &&
1676 1704 ds_prev->ds_phys->ds_next_clones_obj != 0) {
1677 1705 remove_from_next_clones(ds_prev, obj, tx);
1678 1706 if (ds->ds_phys->ds_next_snap_obj != 0) {
1679 1707 VERIFY(0 == zap_add_int(mos,
1680 1708 ds_prev->ds_phys->ds_next_clones_obj,
1681 1709 ds->ds_phys->ds_next_snap_obj, tx));
1682 1710 }
1683 1711 }
1684 1712 if (after_branch_point &&
1685 1713 ds->ds_phys->ds_next_snap_obj == 0) {
1686 1714 /* This clone is toast. */
1687 1715 ASSERT(ds_prev->ds_phys->ds_num_children > 1);
1688 1716 ds_prev->ds_phys->ds_num_children--;
1689 1717
1690 1718 /*
1691 1719 * If the clone's origin has no other clones, no
1692 1720 * user holds, and has been marked for deferred
1693 1721 * deletion, then we should have done the necessary
1694 1722 * destroy setup for it.
1695 1723 */
1696 1724 if (ds_prev->ds_phys->ds_num_children == 1 &&
1697 1725 ds_prev->ds_userrefs == 0 &&
1698 1726 DS_IS_DEFER_DESTROY(ds_prev)) {
1699 1727 ASSERT3P(dsda->rm_origin, !=, NULL);
1700 1728 } else {
1701 1729 ASSERT3P(dsda->rm_origin, ==, NULL);
1702 1730 }
1703 1731 } else if (!after_branch_point) {
1704 1732 ds_prev->ds_phys->ds_next_snap_obj =
1705 1733 ds->ds_phys->ds_next_snap_obj;
1706 1734 }
1707 1735 }
1708 1736
1709 1737 if (dsl_dataset_is_snapshot(ds)) {
1710 1738 dsl_dataset_t *ds_next;
1711 1739 uint64_t old_unique;
1712 1740 uint64_t used = 0, comp = 0, uncomp = 0;
1713 1741
1714 1742 VERIFY(0 == dsl_dataset_hold_obj(dp,
1715 1743 ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
1716 1744 ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);
1717 1745
1718 1746 old_unique = ds_next->ds_phys->ds_unique_bytes;
1719 1747
1720 1748 dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
1721 1749 ds_next->ds_phys->ds_prev_snap_obj =
1722 1750 ds->ds_phys->ds_prev_snap_obj;
1723 1751 ds_next->ds_phys->ds_prev_snap_txg =
1724 1752 ds->ds_phys->ds_prev_snap_txg;
1725 1753 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
1726 1754 ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);
1727 1755
1728 1756
1729 1757 if (ds_next->ds_deadlist.dl_oldfmt) {
1730 1758 process_old_deadlist(ds, ds_prev, ds_next,
1731 1759 after_branch_point, tx);
1732 1760 } else {
1733 1761 /* Adjust prev's unique space. */
1734 1762 if (ds_prev && !after_branch_point) {
1735 1763 dsl_deadlist_space_range(&ds_next->ds_deadlist,
1736 1764 ds_prev->ds_phys->ds_prev_snap_txg,
1737 1765 ds->ds_phys->ds_prev_snap_txg,
1738 1766 &used, &comp, &uncomp);
1739 1767 ds_prev->ds_phys->ds_unique_bytes += used;
1740 1768 }
1741 1769
1742 1770 /* Adjust snapused. */
1743 1771 dsl_deadlist_space_range(&ds_next->ds_deadlist,
1744 1772 ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
|
↓ open down ↓ |
126 lines elided |
↑ open up ↑ |
1745 1773 &used, &comp, &uncomp);
1746 1774 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1747 1775 -used, -comp, -uncomp, tx);
1748 1776
1749 1777 /* Move blocks to be freed to pool's free list. */
1750 1778 dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
1751 1779 &dp->dp_free_bpobj, ds->ds_phys->ds_prev_snap_txg,
1752 1780 tx);
1753 1781 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
1754 1782 DD_USED_HEAD, used, comp, uncomp, tx);
1755 - dsl_dir_dirty(tx->tx_pool->dp_free_dir, tx);
1756 1783
1757 1784 /* Merge our deadlist into next's and free it. */
1758 1785 dsl_deadlist_merge(&ds_next->ds_deadlist,
1759 1786 ds->ds_phys->ds_deadlist_obj, tx);
1760 1787 }
1761 1788 dsl_deadlist_close(&ds->ds_deadlist);
1762 1789 dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
1763 1790
1764 1791 /* Collapse range in clone heads */
1765 1792 dsl_dataset_remove_clones_key(ds,
1766 1793 ds->ds_phys->ds_creation_txg, tx);
1767 1794
1768 1795 if (dsl_dataset_is_snapshot(ds_next)) {
1769 1796 dsl_dataset_t *ds_nextnext;
1770 1797
1771 1798 /*
1772 1799 * Update next's unique to include blocks which
1773 1800 * were previously shared by only this snapshot
1774 1801 * and it. Those blocks will be born after the
1775 1802 * prev snap and before this snap, and will have
1776 1803 * died after the next snap and before the one
1777 1804 * after that (ie. be on the snap after next's
1778 1805 * deadlist).
1779 1806 */
1780 1807 VERIFY(0 == dsl_dataset_hold_obj(dp,
1781 1808 ds_next->ds_phys->ds_next_snap_obj,
1782 1809 FTAG, &ds_nextnext));
1783 1810 dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
1784 1811 ds->ds_phys->ds_prev_snap_txg,
1785 1812 ds->ds_phys->ds_creation_txg,
1786 1813 &used, &comp, &uncomp);
1787 1814 ds_next->ds_phys->ds_unique_bytes += used;
1788 1815 dsl_dataset_rele(ds_nextnext, FTAG);
1789 1816 ASSERT3P(ds_next->ds_prev, ==, NULL);
1790 1817
1791 1818 /* Collapse range in this head. */
1792 1819 dsl_dataset_t *hds;
1793 1820 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
1794 1821 ds->ds_dir->dd_phys->dd_head_dataset_obj,
1795 1822 FTAG, &hds));
1796 1823 dsl_deadlist_remove_key(&hds->ds_deadlist,
1797 1824 ds->ds_phys->ds_creation_txg, tx);
1798 1825 dsl_dataset_rele(hds, FTAG);
1799 1826
1800 1827 } else {
1801 1828 ASSERT3P(ds_next->ds_prev, ==, ds);
1802 1829 dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
1803 1830 ds_next->ds_prev = NULL;
1804 1831 if (ds_prev) {
1805 1832 VERIFY(0 == dsl_dataset_get_ref(dp,
1806 1833 ds->ds_phys->ds_prev_snap_obj,
1807 1834 ds_next, &ds_next->ds_prev));
1808 1835 }
1809 1836
1810 1837 dsl_dataset_recalc_head_uniq(ds_next);
1811 1838
1812 1839 /*
1813 1840 			 * Reduce the amount of our unconsumed refreservation
1814 1841 * being charged to our parent by the amount of
1815 1842 * new unique data we have gained.
1816 1843 */
1817 1844 if (old_unique < ds_next->ds_reserved) {
1818 1845 int64_t mrsdelta;
1819 1846 uint64_t new_unique =
1820 1847 ds_next->ds_phys->ds_unique_bytes;
|
↓ open down ↓ |
55 lines elided |
↑ open up ↑ |
1821 1848
1822 1849 ASSERT(old_unique <= new_unique);
1823 1850 mrsdelta = MIN(new_unique - old_unique,
1824 1851 ds_next->ds_reserved - old_unique);
1825 1852 dsl_dir_diduse_space(ds->ds_dir,
1826 1853 DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
1827 1854 }
1828 1855 }
1829 1856 dsl_dataset_rele(ds_next, FTAG);
1830 1857 } else {
1858 + zfeature_info_t *async_destroy =
1859 + &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY];
1860 +
1831 1861 /*
1832 1862 * There's no next snapshot, so this is a head dataset.
1833 1863 * Destroy the deadlist. Unless it's a clone, the
1834 1864 * deadlist should be empty. (If it's a clone, it's
1835 1865 * safe to ignore the deadlist contents.)
1836 1866 */
1837 - struct killarg ka;
1838 -
1839 1867 dsl_deadlist_close(&ds->ds_deadlist);
1840 1868 dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
1841 1869 ds->ds_phys->ds_deadlist_obj = 0;
1842 1870
1843 - /*
1844 - * Free everything that we point to (that's born after
1845 - * the previous snapshot, if we are a clone)
1846 - *
1847 - * NB: this should be very quick, because we already
1848 - * freed all the objects in open context.
1849 - */
1850 - ka.ds = ds;
1851 - ka.tx = tx;
1852 - err = traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
1853 - TRAVERSE_POST, kill_blkptr, &ka);
1854 - ASSERT3U(err, ==, 0);
1855 - ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
1856 - ds->ds_phys->ds_unique_bytes == 0);
1871 + if (!spa_feature_is_enabled(dp->dp_spa, async_destroy)) {
1872 + err = old_synchronous_dataset_destroy(ds, tx);
1873 + } else {
1874 + /*
1875 + * Move the bptree into the pool's list of trees to
1876 + * clean up and update space accounting information.
1877 + */
1878 + uint64_t used, comp, uncomp;
1857 1879
1880 + ASSERT(err == 0 || err == EBUSY);
1881 + if (!spa_feature_is_active(dp->dp_spa, async_destroy)) {
1882 + spa_feature_incr(dp->dp_spa, async_destroy, tx);
1883 + dp->dp_bptree_obj = bptree_alloc(
1884 + dp->dp_meta_objset, tx);
1885 + VERIFY(zap_add(dp->dp_meta_objset,
1886 + DMU_POOL_DIRECTORY_OBJECT,
1887 + DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
1888 + &dp->dp_bptree_obj, tx) == 0);
1889 + }
1890 +
1891 + used = ds->ds_dir->dd_phys->dd_used_bytes;
1892 + comp = ds->ds_dir->dd_phys->dd_compressed_bytes;
1893 + uncomp = ds->ds_dir->dd_phys->dd_uncompressed_bytes;
1894 +
1895 + ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
1896 + ds->ds_phys->ds_unique_bytes == used);
1897 +
1898 + bptree_add(dp->dp_meta_objset, dp->dp_bptree_obj,
1899 + &ds->ds_phys->ds_bp, ds->ds_phys->ds_prev_snap_txg,
1900 + used, comp, uncomp, tx);
1901 + dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
1902 + -used, -comp, -uncomp, tx);
1903 + dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
1904 + used, comp, uncomp, tx);
1905 + }
1906 +
1858 1907 if (ds->ds_prev != NULL) {
1859 1908 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
1860 1909 VERIFY3U(0, ==, zap_remove_int(mos,
1861 1910 ds->ds_prev->ds_dir->dd_phys->dd_clones,
1862 1911 ds->ds_object, tx));
1863 1912 }
1864 1913 dsl_dataset_rele(ds->ds_prev, ds);
1865 1914 ds->ds_prev = ds_prev = NULL;
1866 1915 }
1867 1916 }
1868 1917
1869 1918 /*
1870 1919 * This must be done after the dsl_traverse(), because it will
1871 1920 * re-open the objset.
1872 1921 */
1873 1922 if (ds->ds_objset) {
1874 1923 dmu_objset_evict(ds->ds_objset);
1875 1924 ds->ds_objset = NULL;
1876 1925 }
1877 1926
1878 1927 if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
1879 1928 /* Erase the link in the dir */
1880 1929 dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
1881 1930 ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
1882 1931 ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
1883 1932 err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
1884 1933 ASSERT(err == 0);
1885 1934 } else {
1886 1935 /* remove from snapshot namespace */
1887 1936 dsl_dataset_t *ds_head;
1888 1937 ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
1889 1938 VERIFY(0 == dsl_dataset_hold_obj(dp,
1890 1939 ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
1891 1940 VERIFY(0 == dsl_dataset_get_snapname(ds));
1892 1941 #ifdef ZFS_DEBUG
1893 1942 {
1894 1943 uint64_t val;
1895 1944
1896 1945 err = dsl_dataset_snap_lookup(ds_head,
1897 1946 ds->ds_snapname, &val);
1898 1947 ASSERT3U(err, ==, 0);
1899 1948 ASSERT3U(val, ==, obj);
1900 1949 }
1901 1950 #endif
1902 1951 err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
1903 1952 ASSERT(err == 0);
1904 1953 dsl_dataset_rele(ds_head, FTAG);
1905 1954 }
1906 1955
1907 1956 if (ds_prev && ds->ds_prev != ds_prev)
1908 1957 dsl_dataset_rele(ds_prev, FTAG);
1909 1958
1910 1959 spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
1911 1960 spa_history_log_internal(LOG_DS_DESTROY, dp->dp_spa, tx,
1912 1961 "dataset = %llu", ds->ds_object);
1913 1962
1914 1963 if (ds->ds_phys->ds_next_clones_obj != 0) {
1915 1964 uint64_t count;
1916 1965 ASSERT(0 == zap_count(mos,
1917 1966 ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
1918 1967 VERIFY(0 == dmu_object_free(mos,
1919 1968 ds->ds_phys->ds_next_clones_obj, tx));
1920 1969 }
1921 1970 if (ds->ds_phys->ds_props_obj != 0)
1922 1971 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
1923 1972 if (ds->ds_phys->ds_userrefs_obj != 0)
1924 1973 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
1925 1974 dsl_dir_close(ds->ds_dir, ds);
1926 1975 ds->ds_dir = NULL;
1927 1976 dsl_dataset_drain_refs(ds, tag);
1928 1977 VERIFY(0 == dmu_object_free(mos, obj, tx));
1929 1978
1930 1979 if (dsda->rm_origin) {
1931 1980 /*
1932 1981 * Remove the origin of the clone we just destroyed.
1933 1982 */
1934 1983 struct dsl_ds_destroyarg ndsda = {0};
1935 1984
1936 1985 ndsda.ds = dsda->rm_origin;
1937 1986 dsl_dataset_destroy_sync(&ndsda, tag, tx);
1938 1987 }
1939 1988 }
1940 1989
1941 1990 static int
1942 1991 dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
1943 1992 {
1944 1993 uint64_t asize;
1945 1994
1946 1995 if (!dmu_tx_is_syncing(tx))
1947 1996 return (0);
1948 1997
1949 1998 /*
1950 1999 * If there's an fs-only reservation, any blocks that might become
1951 2000 * owned by the snapshot dataset must be accommodated by space
1952 2001 * outside of the reservation.
1953 2002 */
1954 2003 ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
1955 2004 asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
1956 2005 if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
1957 2006 return (ENOSPC);
1958 2007
1959 2008 /*
1960 2009 	 * Propagate any reserved space for this snapshot to other
1961 2010 * snapshot checks in this sync group.
1962 2011 */
1963 2012 if (asize > 0)
1964 2013 dsl_dir_willuse_space(ds->ds_dir, asize, tx);
1965 2014
1966 2015 return (0);
1967 2016 }
1968 2017
1969 2018 int
1970 2019 dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
1971 2020 {
1972 2021 dsl_dataset_t *ds = arg1;
1973 2022 const char *snapname = arg2;
1974 2023 int err;
1975 2024 uint64_t value;
1976 2025
1977 2026 /*
1978 2027 * We don't allow multiple snapshots of the same txg. If there
1979 2028 * is already one, try again.
1980 2029 */
1981 2030 if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
1982 2031 return (EAGAIN);
1983 2032
1984 2033 /*
1985 2034 	 * Check for a conflicting snapshot name.
1986 2035 */
1987 2036 err = dsl_dataset_snap_lookup(ds, snapname, &value);
1988 2037 if (err == 0)
1989 2038 return (EEXIST);
1990 2039 if (err != ENOENT)
1991 2040 return (err);
1992 2041
1993 2042 /*
1994 2043 * Check that the dataset's name is not too long. Name consists
1995 2044 * of the dataset's length + 1 for the @-sign + snapshot name's length
1996 2045 */
1997 2046 if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
1998 2047 return (ENAMETOOLONG);
1999 2048
2000 2049 err = dsl_dataset_snapshot_reserve_space(ds, tx);
2001 2050 if (err)
2002 2051 return (err);
2003 2052
2004 2053 ds->ds_trysnap_txg = tx->tx_txg;
2005 2054 return (0);
2006 2055 }
2007 2056
2008 2057 void
2009 2058 dsl_dataset_snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2010 2059 {
2011 2060 dsl_dataset_t *ds = arg1;
2012 2061 const char *snapname = arg2;
2013 2062 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2014 2063 dmu_buf_t *dbuf;
2015 2064 dsl_dataset_phys_t *dsphys;
2016 2065 uint64_t dsobj, crtxg;
2017 2066 objset_t *mos = dp->dp_meta_objset;
2018 2067 int err;
2019 2068
2020 2069 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
2021 2070
2022 2071 /*
2023 2072 * The origin's ds_creation_txg has to be < TXG_INITIAL
2024 2073 */
2025 2074 if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
2026 2075 crtxg = 1;
2027 2076 else
2028 2077 crtxg = tx->tx_txg;
2029 2078
2030 2079 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
2031 2080 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
2032 2081 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
2033 2082 dmu_buf_will_dirty(dbuf, tx);
2034 2083 dsphys = dbuf->db_data;
2035 2084 bzero(dsphys, sizeof (dsl_dataset_phys_t));
2036 2085 dsphys->ds_dir_obj = ds->ds_dir->dd_object;
|
↓ open down ↓ |
169 lines elided |
↑ open up ↑ |
2037 2086 dsphys->ds_fsid_guid = unique_create();
2038 2087 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
2039 2088 sizeof (dsphys->ds_guid));
2040 2089 dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
2041 2090 dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
2042 2091 dsphys->ds_next_snap_obj = ds->ds_object;
2043 2092 dsphys->ds_num_children = 1;
2044 2093 dsphys->ds_creation_time = gethrestime_sec();
2045 2094 dsphys->ds_creation_txg = crtxg;
2046 2095 dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
2047 - dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes;
2096 + dsphys->ds_referenced_bytes = ds->ds_phys->ds_referenced_bytes;
2048 2097 dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
2049 2098 dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
2050 2099 dsphys->ds_flags = ds->ds_phys->ds_flags;
2051 2100 dsphys->ds_bp = ds->ds_phys->ds_bp;
2052 2101 dmu_buf_rele(dbuf, FTAG);
2053 2102
2054 2103 ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
2055 2104 if (ds->ds_prev) {
2056 2105 uint64_t next_clones_obj =
2057 2106 ds->ds_prev->ds_phys->ds_next_clones_obj;
2058 2107 ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
2059 2108 ds->ds_object ||
2060 2109 ds->ds_prev->ds_phys->ds_num_children > 1);
2061 2110 if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
2062 2111 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
2063 2112 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
2064 2113 ds->ds_prev->ds_phys->ds_creation_txg);
2065 2114 ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
2066 2115 } else if (next_clones_obj != 0) {
2067 2116 remove_from_next_clones(ds->ds_prev,
2068 2117 dsphys->ds_next_snap_obj, tx);
2069 2118 VERIFY3U(0, ==, zap_add_int(mos,
2070 2119 next_clones_obj, dsobj, tx));
2071 2120 }
2072 2121 }
2073 2122
2074 2123 /*
2075 2124 * If we have a reference-reservation on this dataset, we will
2076 2125 * need to increase the amount of refreservation being charged
2077 2126 * since our unique space is going to zero.
2078 2127 */
2079 2128 if (ds->ds_reserved) {
2080 2129 int64_t delta;
2081 2130 ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
2082 2131 delta = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
2083 2132 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
2084 2133 delta, 0, 0, tx);
2085 2134 }
2086 2135
2087 2136 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2088 2137 zfs_dbgmsg("taking snapshot %s@%s/%llu; newkey=%llu",
2089 2138 ds->ds_dir->dd_myname, snapname, dsobj,
2090 2139 ds->ds_phys->ds_prev_snap_txg);
2091 2140 ds->ds_phys->ds_deadlist_obj = dsl_deadlist_clone(&ds->ds_deadlist,
2092 2141 UINT64_MAX, ds->ds_phys->ds_prev_snap_obj, tx);
2093 2142 dsl_deadlist_close(&ds->ds_deadlist);
2094 2143 dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
2095 2144 dsl_deadlist_add_key(&ds->ds_deadlist,
2096 2145 ds->ds_phys->ds_prev_snap_txg, tx);
2097 2146
2098 2147 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
2099 2148 ds->ds_phys->ds_prev_snap_obj = dsobj;
2100 2149 ds->ds_phys->ds_prev_snap_txg = crtxg;
2101 2150 ds->ds_phys->ds_unique_bytes = 0;
2102 2151 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
2103 2152 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
2104 2153
2105 2154 err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
2106 2155 snapname, 8, 1, &dsobj, tx);
2107 2156 ASSERT(err == 0);
2108 2157
2109 2158 if (ds->ds_prev)
2110 2159 dsl_dataset_drop_ref(ds->ds_prev, ds);
2111 2160 VERIFY(0 == dsl_dataset_get_ref(dp,
2112 2161 ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
2113 2162
2114 2163 dsl_scan_ds_snapshotted(ds, tx);
2115 2164
2116 2165 dsl_dir_snap_cmtime_update(ds->ds_dir);
2117 2166
2118 2167 spa_history_log_internal(LOG_DS_SNAPSHOT, dp->dp_spa, tx,
2119 2168 "dataset = %llu", dsobj);
2120 2169 }
2121 2170
2122 2171 void
2123 2172 dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
2124 2173 {
2125 2174 ASSERT(dmu_tx_is_syncing(tx));
2126 2175 ASSERT(ds->ds_objset != NULL);
2127 2176 ASSERT(ds->ds_phys->ds_next_snap_obj == 0);
2128 2177
2129 2178 /*
2130 2179 * in case we had to change ds_fsid_guid when we opened it,
2131 2180 * sync it out now.
2132 2181 */
2133 2182 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2134 2183 ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;
2135 2184
2136 2185 dsl_dir_dirty(ds->ds_dir, tx);
2137 2186 dmu_objset_sync(ds->ds_objset, zio, tx);
2138 2187 }
2139 2188
2140 2189 static void
2141 2190 get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv)
2142 2191 {
2143 2192 uint64_t count = 0;
2144 2193 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
2145 2194 zap_cursor_t zc;
2146 2195 zap_attribute_t za;
2147 2196 nvlist_t *propval;
2148 2197 nvlist_t *val;
2149 2198
2150 2199 rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
2151 2200 VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2152 2201 VERIFY(nvlist_alloc(&val, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2153 2202
2154 2203 /*
2155 2204 	 * There may be missing entries in ds_next_clones_obj
2156 2205 * due to a bug in a previous version of the code.
2157 2206 * Only trust it if it has the right number of entries.
2158 2207 */
2159 2208 if (ds->ds_phys->ds_next_clones_obj != 0) {
2160 2209 ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
|
↓ open down ↓ |
103 lines elided |
↑ open up ↑ |
2161 2210 &count));
2162 2211 }
2163 2212 if (count != ds->ds_phys->ds_num_children - 1) {
2164 2213 goto fail;
2165 2214 }
2166 2215 for (zap_cursor_init(&zc, mos, ds->ds_phys->ds_next_clones_obj);
2167 2216 zap_cursor_retrieve(&zc, &za) == 0;
2168 2217 zap_cursor_advance(&zc)) {
2169 2218 dsl_dataset_t *clone;
2170 2219 char buf[ZFS_MAXNAMELEN];
2220 + /*
2221 + * Even though we hold the dp_config_rwlock, the dataset
2222 + * may fail to open, returning ENOENT. If there is a
2223 + * thread concurrently attempting to destroy this
2224 + * dataset, it will have the ds_rwlock held for
2225 + * RW_WRITER. Our call to dsl_dataset_hold_obj() ->
2226 + * dsl_dataset_hold_ref() will fail its
2227 + * rw_tryenter(&ds->ds_rwlock, RW_READER), drop the
2228 + * dp_config_rwlock, and wait for the destroy progress
2229 + * and signal ds_exclusive_cv. If the destroy was
2230 + * successful, we will see that
2231 + * DSL_DATASET_IS_DESTROYED(), and return ENOENT.
2232 + */
2171 2233 if (dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
2172 - za.za_first_integer, FTAG, &clone) != 0) {
2173 - goto fail;
2174 - }
2234 + za.za_first_integer, FTAG, &clone) != 0)
2235 + continue;
2175 2236 dsl_dir_name(clone->ds_dir, buf);
2176 2237 VERIFY(nvlist_add_boolean(val, buf) == 0);
2177 2238 dsl_dataset_rele(clone, FTAG);
2178 2239 }
2179 2240 zap_cursor_fini(&zc);
2180 2241 VERIFY(nvlist_add_nvlist(propval, ZPROP_VALUE, val) == 0);
2181 2242 VERIFY(nvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_CLONES),
2182 2243 propval) == 0);
2183 2244 fail:
2184 2245 nvlist_free(val);
2185 2246 nvlist_free(propval);
2186 2247 rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
2187 2248 }
2188 2249
2189 2250 void
2190 2251 dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
2191 2252 {
2192 2253 uint64_t refd, avail, uobjs, aobjs, ratio;
2193 2254
2194 2255 dsl_dir_stats(ds->ds_dir, nv);
2195 2256
2196 2257 dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
2197 2258 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
2198 2259 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);
2199 2260
2200 2261 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
2201 2262 ds->ds_phys->ds_creation_time);
2202 2263 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
2203 2264 ds->ds_phys->ds_creation_txg);
2204 2265 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
2205 2266 ds->ds_quota);
2206 2267 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
2207 2268 ds->ds_reserved);
2208 2269 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
2209 2270 ds->ds_phys->ds_guid);
2210 2271 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
2211 2272 ds->ds_phys->ds_unique_bytes);
2212 2273 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
2213 2274 ds->ds_object);
2214 2275 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
2215 2276 ds->ds_userrefs);
2216 2277 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
2217 2278 DS_IS_DEFER_DESTROY(ds) ? 1 : 0);
2218 2279
2219 2280 if (ds->ds_phys->ds_prev_snap_obj != 0) {
2220 2281 uint64_t written, comp, uncomp;
2221 2282 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2222 2283 dsl_dataset_t *prev;
2223 2284
2224 2285 rw_enter(&dp->dp_config_rwlock, RW_READER);
2225 2286 int err = dsl_dataset_hold_obj(dp,
2226 2287 ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);
2227 2288 rw_exit(&dp->dp_config_rwlock);
2228 2289 if (err == 0) {
2229 2290 err = dsl_dataset_space_written(prev, ds, &written,
2230 2291 &comp, &uncomp);
2231 2292 dsl_dataset_rele(prev, FTAG);
2232 2293 if (err == 0) {
2233 2294 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_WRITTEN,
2234 2295 written);
2235 2296 }
2236 2297 }
2237 2298 }
2238 2299
2239 2300 ratio = ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
2240 2301 (ds->ds_phys->ds_uncompressed_bytes * 100 /
2241 2302 ds->ds_phys->ds_compressed_bytes);
2242 2303 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRATIO, ratio);
2243 2304
2244 2305 if (ds->ds_phys->ds_next_snap_obj) {
2245 2306 /*
2246 2307 * This is a snapshot; override the dd's space used with
2247 2308 * our unique space and compression ratio.
2248 2309 */
2249 2310 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
2250 2311 ds->ds_phys->ds_unique_bytes);
2251 2312 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO, ratio);
2252 2313
2253 2314 get_clones_stat(ds, nv);
2254 2315 }
2255 2316 }
2256 2317
2257 2318 void
2258 2319 dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
2259 2320 {
2260 2321 stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
2261 2322 stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
2262 2323 stat->dds_guid = ds->ds_phys->ds_guid;
2263 2324 if (ds->ds_phys->ds_next_snap_obj) {
2264 2325 stat->dds_is_snapshot = B_TRUE;
2265 2326 stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
2266 2327 } else {
2267 2328 stat->dds_is_snapshot = B_FALSE;
2268 2329 stat->dds_num_clones = 0;
2269 2330 }
2270 2331
2271 2332 /* clone origin is really a dsl_dir thing... */
2272 2333 rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
2273 2334 if (dsl_dir_is_clone(ds->ds_dir)) {
2274 2335 dsl_dataset_t *ods;
2275 2336
2276 2337 VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
2277 2338 ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
2278 2339 dsl_dataset_name(ods, stat->dds_origin);
2279 2340 dsl_dataset_drop_ref(ods, FTAG);
2280 2341 } else {
2281 2342 stat->dds_origin[0] = '\0';
2282 2343 }
2283 2344 rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
2284 2345 }
2285 2346
2286 2347 uint64_t
|
↓ open down ↓ |
102 lines elided |
↑ open up ↑ |
2287 2348 dsl_dataset_fsid_guid(dsl_dataset_t *ds)
2288 2349 {
2289 2350 return (ds->ds_fsid_guid);
2290 2351 }
2291 2352
2292 2353 void
2293 2354 dsl_dataset_space(dsl_dataset_t *ds,
2294 2355 uint64_t *refdbytesp, uint64_t *availbytesp,
2295 2356 uint64_t *usedobjsp, uint64_t *availobjsp)
2296 2357 {
2297 - *refdbytesp = ds->ds_phys->ds_used_bytes;
2358 + *refdbytesp = ds->ds_phys->ds_referenced_bytes;
2298 2359 *availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
2299 2360 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
2300 2361 *availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
2301 2362 if (ds->ds_quota != 0) {
2302 2363 /*
2303 2364 * Adjust available bytes according to refquota
2304 2365 */
2305 2366 if (*refdbytesp < ds->ds_quota)
2306 2367 *availbytesp = MIN(*availbytesp,
2307 2368 ds->ds_quota - *refdbytesp);
2308 2369 else
2309 2370 *availbytesp = 0;
2310 2371 }
2311 2372 *usedobjsp = ds->ds_phys->ds_bp.blk_fill;
2312 2373 *availobjsp = DN_MAX_OBJECT - *usedobjsp;
2313 2374 }
2314 2375
2315 2376 boolean_t
2316 2377 dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
2317 2378 {
2318 2379 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2319 2380
2320 2381 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
2321 2382 dsl_pool_sync_context(dp));
2322 2383 if (ds->ds_prev == NULL)
2323 2384 return (B_FALSE);
2324 2385 if (ds->ds_phys->ds_bp.blk_birth >
2325 2386 ds->ds_prev->ds_phys->ds_creation_txg) {
2326 2387 objset_t *os, *os_prev;
2327 2388 /*
2328 2389 * It may be that only the ZIL differs, because it was
2329 2390 * reset in the head. Don't count that as being
2330 2391 * modified.
2331 2392 */
2332 2393 if (dmu_objset_from_ds(ds, &os) != 0)
2333 2394 return (B_TRUE);
2334 2395 if (dmu_objset_from_ds(ds->ds_prev, &os_prev) != 0)
2335 2396 return (B_TRUE);
2336 2397 return (bcmp(&os->os_phys->os_meta_dnode,
2337 2398 &os_prev->os_phys->os_meta_dnode,
2338 2399 sizeof (os->os_phys->os_meta_dnode)) != 0);
2339 2400 }
2340 2401 return (B_FALSE);
2341 2402 }
2342 2403
2343 2404 /* ARGSUSED */
2344 2405 static int
2345 2406 dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
2346 2407 {
2347 2408 dsl_dataset_t *ds = arg1;
2348 2409 char *newsnapname = arg2;
2349 2410 dsl_dir_t *dd = ds->ds_dir;
2350 2411 dsl_dataset_t *hds;
2351 2412 uint64_t val;
2352 2413 int err;
2353 2414
2354 2415 err = dsl_dataset_hold_obj(dd->dd_pool,
2355 2416 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
2356 2417 if (err)
2357 2418 return (err);
2358 2419
2359 2420 /* new name better not be in use */
2360 2421 err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
2361 2422 dsl_dataset_rele(hds, FTAG);
2362 2423
2363 2424 if (err == 0)
2364 2425 err = EEXIST;
2365 2426 else if (err == ENOENT)
2366 2427 err = 0;
2367 2428
2368 2429 /* dataset name + 1 for the "@" + the new snapshot name must fit */
2369 2430 if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
2370 2431 err = ENAMETOOLONG;
2371 2432
2372 2433 return (err);
2373 2434 }
2374 2435
2375 2436 static void
2376 2437 dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2377 2438 {
2378 2439 dsl_dataset_t *ds = arg1;
2379 2440 const char *newsnapname = arg2;
2380 2441 dsl_dir_t *dd = ds->ds_dir;
2381 2442 objset_t *mos = dd->dd_pool->dp_meta_objset;
2382 2443 dsl_dataset_t *hds;
2383 2444 int err;
2384 2445
2385 2446 ASSERT(ds->ds_phys->ds_next_snap_obj != 0);
2386 2447
2387 2448 VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
2388 2449 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));
2389 2450
2390 2451 VERIFY(0 == dsl_dataset_get_snapname(ds));
2391 2452 err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
2392 2453 ASSERT3U(err, ==, 0);
2393 2454 mutex_enter(&ds->ds_lock);
2394 2455 (void) strcpy(ds->ds_snapname, newsnapname);
2395 2456 mutex_exit(&ds->ds_lock);
2396 2457 err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
2397 2458 ds->ds_snapname, 8, 1, &ds->ds_object, tx);
2398 2459 ASSERT3U(err, ==, 0);
2399 2460
2400 2461 spa_history_log_internal(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
2401 2462 "dataset = %llu", ds->ds_object);
2402 2463 dsl_dataset_rele(hds, FTAG);
2403 2464 }
2404 2465
2405 2466 struct renamesnaparg {
2406 2467 dsl_sync_task_group_t *dstg;
2407 2468 char failed[MAXPATHLEN];
2408 2469 char *oldsnap;
2409 2470 char *newsnap;
2410 2471 };
2411 2472
2412 2473 static int
2413 2474 dsl_snapshot_rename_one(const char *name, void *arg)
2414 2475 {
2415 2476 struct renamesnaparg *ra = arg;
2416 2477 dsl_dataset_t *ds = NULL;
2417 2478 char *snapname;
2418 2479 int err;
2419 2480
2420 2481 snapname = kmem_asprintf("%s@%s", name, ra->oldsnap);
2421 2482 (void) strlcpy(ra->failed, snapname, sizeof (ra->failed));
2422 2483
2423 2484 /*
2424 2485 * For recursive snapshot renames the parent won't be changing
2425 2486 	 * so we just pass name for both the to/from arguments.
2426 2487 */
2427 2488 err = zfs_secpolicy_rename_perms(snapname, snapname, CRED());
2428 2489 if (err != 0) {
2429 2490 strfree(snapname);
2430 2491 return (err == ENOENT ? 0 : err);
2431 2492 }
2432 2493
2433 2494 #ifdef _KERNEL
2434 2495 /*
2435 2496 	 * For all filesystems undergoing rename, we'll need to unmount them.
2436 2497 */
2437 2498 (void) zfs_unmount_snap(snapname, NULL);
2438 2499 #endif
2439 2500 err = dsl_dataset_hold(snapname, ra->dstg, &ds);
2440 2501 strfree(snapname);
2441 2502 if (err != 0)
2442 2503 return (err == ENOENT ? 0 : err);
2443 2504
2444 2505 dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
2445 2506 dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);
2446 2507
2447 2508 return (0);
2448 2509 }
2449 2510
2450 2511 static int
2451 2512 dsl_recursive_rename(char *oldname, const char *newname)
2452 2513 {
2453 2514 int err;
2454 2515 struct renamesnaparg *ra;
2455 2516 dsl_sync_task_t *dst;
2456 2517 spa_t *spa;
2457 2518 char *cp, *fsname = spa_strdup(oldname);
2458 2519 int len = strlen(oldname) + 1;
2459 2520
2460 2521 /* truncate the snapshot name to get the fsname */
2461 2522 cp = strchr(fsname, '@');
2462 2523 *cp = '\0';
2463 2524
2464 2525 err = spa_open(fsname, &spa, FTAG);
2465 2526 if (err) {
2466 2527 kmem_free(fsname, len);
2467 2528 return (err);
2468 2529 }
2469 2530 ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
2470 2531 ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
2471 2532
2472 2533 ra->oldsnap = strchr(oldname, '@') + 1;
2473 2534 ra->newsnap = strchr(newname, '@') + 1;
2474 2535 *ra->failed = '\0';
2475 2536
2476 2537 err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
2477 2538 DS_FIND_CHILDREN);
2478 2539 kmem_free(fsname, len);
2479 2540
2480 2541 if (err == 0) {
2481 2542 err = dsl_sync_task_group_wait(ra->dstg);
2482 2543 }
2483 2544
2484 2545 for (dst = list_head(&ra->dstg->dstg_tasks); dst;
2485 2546 dst = list_next(&ra->dstg->dstg_tasks, dst)) {
2486 2547 dsl_dataset_t *ds = dst->dst_arg1;
2487 2548 if (dst->dst_err) {
2488 2549 dsl_dir_name(ds->ds_dir, ra->failed);
2489 2550 (void) strlcat(ra->failed, "@", sizeof (ra->failed));
2490 2551 (void) strlcat(ra->failed, ra->newsnap,
2491 2552 sizeof (ra->failed));
2492 2553 }
2493 2554 dsl_dataset_rele(ds, ra->dstg);
2494 2555 }
2495 2556
2496 2557 if (err)
2497 2558 (void) strlcpy(oldname, ra->failed, sizeof (ra->failed));
2498 2559
2499 2560 dsl_sync_task_group_destroy(ra->dstg);
2500 2561 kmem_free(ra, sizeof (struct renamesnaparg));
2501 2562 spa_close(spa, FTAG);
2502 2563 return (err);
2503 2564 }
2504 2565
2505 2566 static int
2506 2567 dsl_valid_rename(const char *oldname, void *arg)
2507 2568 {
2508 2569 int delta = *(int *)arg;
2509 2570
2510 2571 if (strlen(oldname) + delta >= MAXNAMELEN)
2511 2572 return (ENAMETOOLONG);
2512 2573
2513 2574 return (0);
2514 2575 }
2515 2576
2516 2577 #pragma weak dmu_objset_rename = dsl_dataset_rename
2517 2578 int
2518 2579 dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive)
2519 2580 {
2520 2581 dsl_dir_t *dd;
2521 2582 dsl_dataset_t *ds;
2522 2583 const char *tail;
2523 2584 int err;
2524 2585
2525 2586 err = dsl_dir_open(oldname, FTAG, &dd, &tail);
2526 2587 if (err)
2527 2588 return (err);
2528 2589
2529 2590 if (tail == NULL) {
2530 2591 int delta = strlen(newname) - strlen(oldname);
2531 2592
2532 2593 /* if we're growing, validate child name lengths */
2533 2594 if (delta > 0)
2534 2595 err = dmu_objset_find(oldname, dsl_valid_rename,
2535 2596 &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
2536 2597
2537 2598 if (err == 0)
2538 2599 err = dsl_dir_rename(dd, newname);
2539 2600 dsl_dir_close(dd, FTAG);
2540 2601 return (err);
2541 2602 }
2542 2603
2543 2604 if (tail[0] != '@') {
2544 2605 /* the name ended in a nonexistent component */
2545 2606 dsl_dir_close(dd, FTAG);
2546 2607 return (ENOENT);
2547 2608 }
2548 2609
2549 2610 dsl_dir_close(dd, FTAG);
2550 2611
2551 2612 /* new name must be snapshot in same filesystem */
2552 2613 tail = strchr(newname, '@');
2553 2614 if (tail == NULL)
2554 2615 return (EINVAL);
2555 2616 tail++;
2556 2617 if (strncmp(oldname, newname, tail - newname) != 0)
2557 2618 return (EXDEV);
2558 2619
2559 2620 if (recursive) {
2560 2621 err = dsl_recursive_rename(oldname, newname);
2561 2622 } else {
2562 2623 err = dsl_dataset_hold(oldname, FTAG, &ds);
2563 2624 if (err)
2564 2625 return (err);
2565 2626
2566 2627 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
2567 2628 dsl_dataset_snapshot_rename_check,
2568 2629 dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);
2569 2630
2570 2631 dsl_dataset_rele(ds, FTAG);
2571 2632 }
2572 2633
2573 2634 return (err);
2574 2635 }
2575 2636
2576 2637 struct promotenode {
2577 2638 list_node_t link;
2578 2639 dsl_dataset_t *ds;
2579 2640 };
2580 2641
2581 2642 struct promotearg {
2582 2643 list_t shared_snaps, origin_snaps, clone_snaps;
2583 2644 dsl_dataset_t *origin_origin;
2584 2645 uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
2585 2646 char *err_ds;
2586 2647 };
2587 2648
2588 2649 static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
2589 2650 static boolean_t snaplist_unstable(list_t *l);
2590 2651
/*
 * Sync-task "check" half of clone promotion.  Verifies that hds (the
 * clone being promoted) really is a clone, is allowed to be promoted,
 * has no snapshot-name conflicts with the snapshots it will inherit,
 * and has room for the space transfer; precomputes the accounting
 * deltas into *pa for dsl_dataset_promote_sync().  Returns 0 or errno
 * (EINVAL, EXDEV, EEXIST, ENOSPC via dsl_dir_transfer_possible(), ...).
 */
static int
dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *hds = arg1;
	struct promotearg *pa = arg2;
	struct promotenode *snap = list_head(&pa->shared_snaps);
	dsl_dataset_t *origin_ds = snap->ds;
	int err;
	uint64_t unused;

	/* Check that it is a real clone */
	if (!dsl_dir_is_clone(hds->ds_dir))
		return (EINVAL);

	/* Since this is so expensive, don't do the preliminary check */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
		return (EXDEV);

	/* compute origin's new unique space */
	snap = list_tail(&pa->clone_snaps);
	ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
	dsl_deadlist_space_range(&snap->ds->ds_deadlist,
	    origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
	    &pa->unique, &unused, &unused);

	/*
	 * Walk the snapshots that we are moving
	 *
	 * Compute space to transfer.  Consider the incremental changes
	 * to used for each snapshot:
	 * (my used) = (prev's used) + (blocks born) - (blocks killed)
	 * So each snapshot gave birth to:
	 * (blocks born) = (my used) - (prev's used) + (blocks killed)
	 * So a sequence would look like:
	 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
	 * Which simplifies to:
	 * uN + kN + kN-1 + ... + k1 + k0
	 * Note however, if we stop before we reach the ORIGIN we get:
	 * uN + kN + kN-1 + ... + kM - uM-1
	 */
	pa->used = origin_ds->ds_phys->ds_referenced_bytes;
	pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
	pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
	for (snap = list_head(&pa->shared_snaps); snap;
	    snap = list_next(&pa->shared_snaps, snap)) {
		uint64_t val, dlused, dlcomp, dluncomp;
		dsl_dataset_t *ds = snap->ds;

		/* Check that the snapshot name does not conflict */
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
		if (err == 0) {
			/* a snapshot of that name already exists in hds */
			err = EEXIST;
			goto out;
		}
		if (err != ENOENT)
			goto out;

		/* The very first snapshot does not have a deadlist */
		if (ds->ds_phys->ds_prev_snap_obj == 0)
			continue;

		dsl_deadlist_space(&ds->ds_deadlist,
		    &dlused, &dlcomp, &dluncomp);
		pa->used += dlused;
		pa->comp += dlcomp;
		pa->uncomp += dluncomp;
	}

	/*
	 * If we are a clone of a clone then we never reached ORIGIN,
	 * so we need to subtract out the clone origin's used space.
	 */
	if (pa->origin_origin) {
		pa->used -= pa->origin_origin->ds_phys->ds_referenced_bytes;
		pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
		pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
	}

	/* Check that there is enough space here */
	err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
	    pa->used);
	if (err)
		return (err);

	/*
	 * Compute the amounts of space that will be used by snapshots
	 * after the promotion (for both origin and clone).  For each,
	 * it is the amount of space that will be on all of their
	 * deadlists (that was not born before their new origin).
	 */
	if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		uint64_t space;

		/*
		 * Note, typically this will not be a clone of a clone,
		 * so dd_origin_txg will be < TXG_INITIAL, so
		 * these snaplist_space() -> dsl_deadlist_space_range()
		 * calls will be fast because they do not have to
		 * iterate over all bps.
		 */
		snap = list_head(&pa->origin_snaps);
		err = snaplist_space(&pa->shared_snaps,
		    snap->ds->ds_dir->dd_origin_txg, &pa->cloneusedsnap);
		if (err)
			return (err);

		err = snaplist_space(&pa->clone_snaps,
		    snap->ds->ds_dir->dd_origin_txg, &space);
		if (err)
			return (err);
		pa->cloneusedsnap += space;
	}
	if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		err = snaplist_space(&pa->origin_snaps,
		    origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
		if (err)
			return (err);
	}

	return (0);
out:
	/* report the conflicting/failing snapshot name to the caller */
	pa->err_ds = snap->ds->ds_snapname;
	return (err);
}
2719 2780
/*
 * Sync-task "sync" half of clone promotion: rewires hds (the promoted
 * clone) to use its origin's origin as its new origin, moves the shared
 * snapshots (and their clone references) into hds's dsl_dir, and applies
 * the space-accounting deltas precomputed by dsl_dataset_promote_check()
 * in *pa.
 */
static void
dsl_dataset_promote_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *hds = arg1;
	struct promotearg *pa = arg2;
	struct promotenode *snap = list_head(&pa->shared_snaps);
	dsl_dataset_t *origin_ds = snap->ds;
	dsl_dataset_t *origin_head;
	dsl_dir_t *dd = hds->ds_dir;
	dsl_pool_t *dp = hds->ds_dir->dd_pool;
	dsl_dir_t *odd = NULL;
	uint64_t oldnext_obj;
	int64_t delta;

	ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));

	snap = list_head(&pa->origin_snaps);
	origin_head = snap->ds;

	/*
	 * We need to explicitly open odd, since origin_ds's dd will be
	 * changing.
	 */
	VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
	    NULL, FTAG, &odd));

	/* change origin's next snap */
	dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
	oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
	snap = list_tail(&pa->clone_snaps);
	ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
	origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;

	/* change the origin's next clone */
	if (origin_ds->ds_phys->ds_next_clones_obj) {
		remove_from_next_clones(origin_ds, snap->ds->ds_object, tx);
		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
		    origin_ds->ds_phys->ds_next_clones_obj,
		    oldnext_obj, tx));
	}

	/* change origin */
	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
	dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
	dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
	dmu_buf_will_dirty(odd->dd_dbuf, tx);
	odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
	origin_head->ds_dir->dd_origin_txg =
	    origin_ds->ds_phys->ds_creation_txg;

	/*
	 * Change dd_clone entries.
	 * NOTE(review): pa->origin_origin is dereferenced unconditionally
	 * below.  Presumably on pools >= SPA_VERSION_DIR_CLONES every
	 * filesystem is a clone of $ORIGIN so origin_origin is never
	 * NULL here -- confirm.
	 */
	if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    odd->dd_phys->dd_clones, hds->ds_object, tx));
		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
		    pa->origin_origin->ds_dir->dd_phys->dd_clones,
		    hds->ds_object, tx));

		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    pa->origin_origin->ds_dir->dd_phys->dd_clones,
		    origin_head->ds_object, tx));
		if (dd->dd_phys->dd_clones == 0) {
			/* lazily create the clones ZAP on first use */
			dd->dd_phys->dd_clones = zap_create(dp->dp_meta_objset,
			    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
		}
		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
		    dd->dd_phys->dd_clones, origin_head->ds_object, tx));

	}

	/* move snapshots to this dir */
	for (snap = list_head(&pa->shared_snaps); snap;
	    snap = list_next(&pa->shared_snaps, snap)) {
		dsl_dataset_t *ds = snap->ds;

		/* unregister props as dsl_dir is changing */
		if (ds->ds_objset) {
			dmu_objset_evict(ds->ds_objset);
			ds->ds_objset = NULL;
		}
		/* move snap name entry */
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		VERIFY(0 == dsl_dataset_snap_remove(origin_head,
		    ds->ds_snapname, tx));
		VERIFY(0 == zap_add(dp->dp_meta_objset,
		    hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
		    8, 1, &ds->ds_object, tx));

		/* change containing dsl_dir */
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
		ds->ds_phys->ds_dir_obj = dd->dd_object;
		ASSERT3P(ds->ds_dir, ==, odd);
		dsl_dir_close(ds->ds_dir, ds);
		VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
		    NULL, ds, &ds->ds_dir));

		/* move any clone references */
		if (ds->ds_phys->ds_next_clones_obj &&
		    spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			zap_cursor_t zc;
			zap_attribute_t za;

			for (zap_cursor_init(&zc, dp->dp_meta_objset,
			    ds->ds_phys->ds_next_clones_obj);
			    zap_cursor_retrieve(&zc, &za) == 0;
			    zap_cursor_advance(&zc)) {
				dsl_dataset_t *cnds;
				uint64_t o;

				if (za.za_first_integer == oldnext_obj) {
					/*
					 * We've already moved the
					 * origin's reference.
					 */
					continue;
				}

				VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &cnds));
				o = cnds->ds_dir->dd_phys->dd_head_dataset_obj;

				VERIFY3U(zap_remove_int(dp->dp_meta_objset,
				    odd->dd_phys->dd_clones, o, tx), ==, 0);
				VERIFY3U(zap_add_int(dp->dp_meta_objset,
				    dd->dd_phys->dd_clones, o, tx), ==, 0);
				dsl_dataset_rele(cnds, FTAG);
			}
			zap_cursor_fini(&zc);
		}

		ASSERT3U(dsl_prop_numcb(ds), ==, 0);
	}

	/*
	 * Change space accounting.
	 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
	 * both be valid, or both be 0 (resulting in delta == 0).  This
	 * is true for each of {clone,origin} independently.
	 */

	delta = pa->cloneusedsnap -
	    dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
	ASSERT3S(delta, >=, 0);
	ASSERT3U(pa->used, >=, delta);
	dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
	dsl_dir_diduse_space(dd, DD_USED_HEAD,
	    pa->used - delta, pa->comp, pa->uncomp, tx);

	delta = pa->originusedsnap -
	    odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
	ASSERT3S(delta, <=, 0);
	ASSERT3U(pa->used, >=, -delta);
	dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
	dsl_dir_diduse_space(odd, DD_USED_HEAD,
	    -pa->used - delta, -pa->comp, -pa->uncomp, tx);

	origin_ds->ds_phys->ds_unique_bytes = pa->unique;

	/* log history record */
	spa_history_log_internal(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
	    "dataset = %llu", hds->ds_object);

	dsl_dir_close(odd, FTAG);
}
2886 2947
2887 2948 static char *snaplist_tag = "snaplist";
/*
 * Make a list of dsl_dataset_t's for the snapshots between first_obj
 * (exclusive) and last_obj (inclusive).  The list will be in reverse
 * order (last_obj will be the list_head()).  If first_obj == 0, do all
 * snapshots back to this dataset's origin.
 *
 * When own is B_TRUE the datasets are owned (dsl_dataset_own_obj) and
 * made exclusive rather than merely held; snaplist_destroy() must then
 * be called with the same own flag.  Returns 0 or an errno from
 * holding/owning a snapshot (ENOENT races are handled internally).
 */
static int
snaplist_make(dsl_pool_t *dp, boolean_t own,
    uint64_t first_obj, uint64_t last_obj, list_t *l)
{
	uint64_t obj = last_obj;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));

	list_create(l, sizeof (struct promotenode),
	    offsetof(struct promotenode, link));

	/* walk backward along the ds_prev_snap_obj chain */
	while (obj != first_obj) {
		dsl_dataset_t *ds;
		struct promotenode *snap;
		int err;

		if (own) {
			err = dsl_dataset_own_obj(dp, obj,
			    0, snaplist_tag, &ds);
			if (err == 0)
				dsl_dataset_make_exclusive(ds, snaplist_tag);
		} else {
			err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
		}
		if (err == ENOENT) {
			/* lost race with snapshot destroy */
			struct promotenode *last = list_tail(l);
			ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
			obj = last->ds->ds_phys->ds_prev_snap_obj;
			continue;
		} else if (err) {
			return (err);
		}

		if (first_obj == 0)
			first_obj = ds->ds_dir->dd_phys->dd_origin_obj;

		snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
		snap->ds = ds;
		list_insert_tail(l, snap);
		obj = ds->ds_phys->ds_prev_snap_obj;
	}

	return (0);
}
2939 3000
2940 3001 static int
2941 3002 snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
2942 3003 {
2943 3004 struct promotenode *snap;
2944 3005
2945 3006 *spacep = 0;
2946 3007 for (snap = list_head(l); snap; snap = list_next(l, snap)) {
2947 3008 uint64_t used, comp, uncomp;
2948 3009 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
2949 3010 mintxg, UINT64_MAX, &used, &comp, &uncomp);
2950 3011 *spacep += used;
2951 3012 }
2952 3013 return (0);
2953 3014 }
2954 3015
2955 3016 static void
2956 3017 snaplist_destroy(list_t *l, boolean_t own)
2957 3018 {
2958 3019 struct promotenode *snap;
2959 3020
2960 3021 if (!l || !list_link_active(&l->list_head))
2961 3022 return;
2962 3023
2963 3024 while ((snap = list_tail(l)) != NULL) {
2964 3025 list_remove(l, snap);
2965 3026 if (own)
2966 3027 dsl_dataset_disown(snap->ds, snaplist_tag);
2967 3028 else
2968 3029 dsl_dataset_rele(snap->ds, snaplist_tag);
2969 3030 kmem_free(snap, sizeof (struct promotenode));
2970 3031 }
2971 3032 list_destroy(l);
2972 3033 }
2973 3034
/*
 * Promote a clone.  Nomenclature note:
 * "clone" or "cds": the original clone which is being promoted
 * "origin" or "ods": the snapshot which is originally clone's origin
 * "origin head" or "ohds": the dataset which is the head
 * (filesystem/volume) for the origin
 * "origin origin": the origin of the origin's filesystem (typically
 * NULL, indicating that the clone is not a clone of a clone).
 *
 * On EEXIST, the conflicting snapshot name is copied into conflsnap
 * (if non-NULL).  Returns 0 or errno.
 */
int
dsl_dataset_promote(const char *name, char *conflsnap)
{
	dsl_dataset_t *ds;
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	dmu_object_info_t doi;
	struct promotearg pa = { 0 };
	struct promotenode *snap;
	int err;

	err = dsl_dataset_hold(name, FTAG, &ds);
	if (err)
		return (err);
	dd = ds->ds_dir;
	dp = dd->dd_pool;

	err = dmu_object_info(dp->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, &doi);
	if (err) {
		dsl_dataset_rele(ds, FTAG);
		return (err);
	}

	/* only a head dataset that actually is a clone can be promoted */
	if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
		dsl_dataset_rele(ds, FTAG);
		return (EINVAL);
	}

	/*
	 * We are going to inherit all the snapshots taken before our
	 * origin (i.e., our new origin will be our parent's origin).
	 * Take ownership of them so that we can rename them into our
	 * namespace.
	 */
	rw_enter(&dp->dp_config_rwlock, RW_READER);

	err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
	    &pa.shared_snaps);
	if (err != 0)
		goto out;

	err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
	if (err != 0)
		goto out;

	snap = list_head(&pa.shared_snaps);
	ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
	err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
	    snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
	if (err != 0)
		goto out;

	if (snap->ds->ds_dir->dd_phys->dd_origin_obj != 0) {
		/* clone of a clone: also hold the origin's origin */
		err = dsl_dataset_hold_obj(dp,
		    snap->ds->ds_dir->dd_phys->dd_origin_obj,
		    FTAG, &pa.origin_origin);
		if (err != 0)
			goto out;
	}

out:
	rw_exit(&dp->dp_config_rwlock);

	/*
	 * Add in 128x the snapnames zapobj size, since we will be moving
	 * a bunch of snapnames to the promoted ds, and dirtying their
	 * bonus buffers.
	 * NOTE(review): the "128x" above does not obviously match the
	 * "2 + 2 * doi_physical_blocks_512" passed below -- possibly a
	 * stale comment; confirm against dsl_sync_task_do()'s units.
	 */
	if (err == 0) {
		err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
		    dsl_dataset_promote_sync, ds, &pa,
		    2 + 2 * doi.doi_physical_blocks_512);
		if (err && pa.err_ds && conflsnap)
			(void) strncpy(conflsnap, pa.err_ds, MAXNAMELEN);
	}

	/* snaplist_destroy() tolerates lists that were never created */
	snaplist_destroy(&pa.shared_snaps, B_TRUE);
	snaplist_destroy(&pa.clone_snaps, B_FALSE);
	snaplist_destroy(&pa.origin_snaps, B_FALSE);
	if (pa.origin_origin)
		dsl_dataset_rele(pa.origin_origin, FTAG);
	dsl_dataset_rele(ds, FTAG);
	return (err);
}
3068 3129
/*
 * Arguments shared by the clone-swap check and sync callbacks.
 */
struct cloneswaparg {
	dsl_dataset_t *cds; /* clone dataset */
	dsl_dataset_t *ohds; /* origin's head dataset */
	boolean_t force;    /* allow swap even if ohds was modified */
	int64_t unused_refres_delta; /* change in unconsumed refreservation */
};
3075 3136
/*
 * Sync-task "check" half of dsl_dataset_clone_swap(): verify that the
 * clone (cds) and the origin head (ohds) may be swapped, and compute
 * the resulting change in unconsumed refreservation into
 * csa->unused_refres_delta.  Returns 0 or errno (EINVAL, ETXTBSY,
 * ENOSPC, EDQUOT).
 */
/* ARGSUSED */
static int
dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	struct cloneswaparg *csa = arg1;

	/* they should both be heads */
	if (dsl_dataset_is_snapshot(csa->cds) ||
	    dsl_dataset_is_snapshot(csa->ohds))
		return (EINVAL);

	/* the branch point should be just before them */
	if (csa->cds->ds_prev != csa->ohds->ds_prev)
		return (EINVAL);

	/* cds should be the clone (unless they are unrelated) */
	if (csa->cds->ds_prev != NULL &&
	    csa->cds->ds_prev != csa->cds->ds_dir->dd_pool->dp_origin_snap &&
	    csa->ohds->ds_object !=
	    csa->cds->ds_prev->ds_phys->ds_next_snap_obj)
		return (EINVAL);

	/* the clone should be a child of the origin */
	if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
		return (EINVAL);

	/* ohds shouldn't be modified unless 'force' */
	if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
		return (ETXTBSY);

	/* adjust amount of any unconsumed refreservation */
	csa->unused_refres_delta =
	    (int64_t)MIN(csa->ohds->ds_reserved,
	    csa->ohds->ds_phys->ds_unique_bytes) -
	    (int64_t)MIN(csa->ohds->ds_reserved,
	    csa->cds->ds_phys->ds_unique_bytes);

	if (csa->unused_refres_delta > 0 &&
	    csa->unused_refres_delta >
	    dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
		return (ENOSPC);

	if (csa->ohds->ds_quota != 0 &&
	    csa->cds->ds_phys->ds_unique_bytes > csa->ohds->ds_quota)
		return (EDQUOT);

	return (0);
}
3124 3185
/*
 * Sync-task "sync" half of dsl_dataset_clone_swap(): exchanges the
 * block pointers, byte counters, and deadlists of the clone (cds) and
 * the origin head (ohds), and moves the corresponding dsl_dir space
 * accounting between them.  The order of operations below matters:
 * evict objsets, reset origin's unique bytes, swap bp, adjust dir
 * accounting, swap per-dataset counters, then swap deadlists.
 */
/* ARGSUSED */
static void
dsl_dataset_clone_swap_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	struct cloneswaparg *csa = arg1;
	dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;

	ASSERT(csa->cds->ds_reserved == 0);
	ASSERT(csa->ohds->ds_quota == 0 ||
	    csa->cds->ds_phys->ds_unique_bytes <= csa->ohds->ds_quota);

	dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
	dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);

	/* evict cached objsets; they would reference the old block trees */
	if (csa->cds->ds_objset != NULL) {
		dmu_objset_evict(csa->cds->ds_objset);
		csa->cds->ds_objset = NULL;
	}

	if (csa->ohds->ds_objset != NULL) {
		dmu_objset_evict(csa->ohds->ds_objset);
		csa->ohds->ds_objset = NULL;
	}

	/*
	 * Reset origin's unique bytes, if it exists.
	 */
	if (csa->cds->ds_prev) {
		dsl_dataset_t *origin = csa->cds->ds_prev;
		uint64_t comp, uncomp;

		dmu_buf_will_dirty(origin->ds_dbuf, tx);
		dsl_deadlist_space_range(&csa->cds->ds_deadlist,
		    origin->ds_phys->ds_prev_snap_txg, UINT64_MAX,
		    &origin->ds_phys->ds_unique_bytes, &comp, &uncomp);
	}

	/* swap blkptrs */
	{
		blkptr_t tmp;
		tmp = csa->ohds->ds_phys->ds_bp;
		csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
		csa->cds->ds_phys->ds_bp = tmp;
	}

	/* set dd_*_bytes */
	{
		int64_t dused, dcomp, duncomp;
		uint64_t cdl_used, cdl_comp, cdl_uncomp;
		uint64_t odl_used, odl_comp, odl_uncomp;

		ASSERT3U(csa->cds->ds_dir->dd_phys->
		    dd_used_breakdown[DD_USED_SNAP], ==, 0);

		dsl_deadlist_space(&csa->cds->ds_deadlist,
		    &cdl_used, &cdl_comp, &cdl_uncomp);
		dsl_deadlist_space(&csa->ohds->ds_deadlist,
		    &odl_used, &odl_comp, &odl_uncomp);

		dused = csa->cds->ds_phys->ds_referenced_bytes + cdl_used -
		    (csa->ohds->ds_phys->ds_referenced_bytes + odl_used);
		dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
		    (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
		duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
		    cdl_uncomp -
		    (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);

		dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
		    dused, dcomp, duncomp, tx);
		dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
		    -dused, -dcomp, -duncomp, tx);

		/*
		 * The difference in the space used by snapshots is the
		 * difference in snapshot space due to the head's
		 * deadlist (since that's the only thing that's
		 * changing that affects the snapused).
		 */
		dsl_deadlist_space_range(&csa->cds->ds_deadlist,
		    csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
		    &cdl_used, &cdl_comp, &cdl_uncomp);
		dsl_deadlist_space_range(&csa->ohds->ds_deadlist,
		    csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
		    &odl_used, &odl_comp, &odl_uncomp);
		dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
		    DD_USED_HEAD, DD_USED_SNAP, tx);
	}

	/* swap ds_*_bytes */
	SWITCH64(csa->ohds->ds_phys->ds_referenced_bytes,
	    csa->cds->ds_phys->ds_referenced_bytes);
	SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
	    csa->cds->ds_phys->ds_compressed_bytes);
	SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
	    csa->cds->ds_phys->ds_uncompressed_bytes);
	SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
	    csa->cds->ds_phys->ds_unique_bytes);

	/* apply any parent delta for change in unconsumed refreservation */
	dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
	    csa->unused_refres_delta, 0, 0, tx);

	/*
	 * Swap deadlists.
	 */
	dsl_deadlist_close(&csa->cds->ds_deadlist);
	dsl_deadlist_close(&csa->ohds->ds_deadlist);
	SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
	    csa->cds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
	    csa->cds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
	    csa->ohds->ds_phys->ds_deadlist_obj);

	/* let the scrub/resilver code know the two heads traded places */
	dsl_scan_ds_clone_swapped(csa->ohds, csa->cds, tx);
}
3241 3302
/*
 * Swap 'clone' with its origin head datasets.  Used at the end of "zfs
 * recv" into an existing fs to swizzle the file system to the new
 * version, and by "zfs rollback".  Can also be used to swap two
 * independent head datasets if neither has any snapshots.
 *
 * Both datasets must already be owned by the caller.
 * NOTE(review): this function returns with both ds_rwlock write locks
 * still held; callers appear responsible for dropping them (e.g. via
 * disown) -- confirm against the callers.
 */
int
dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
    boolean_t force)
{
	struct cloneswaparg csa;
	int error;

	ASSERT(clone->ds_owner);
	ASSERT(origin_head->ds_owner);
retry:
	/*
	 * Need exclusive access for the swap.  If we're swapping these
	 * datasets back after an error, we already hold the locks.
	 */
	if (!RW_WRITE_HELD(&clone->ds_rwlock))
		rw_enter(&clone->ds_rwlock, RW_WRITER);
	if (!RW_WRITE_HELD(&origin_head->ds_rwlock) &&
	    !rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
		/* drop and re-take both locks to avoid deadlock */
		rw_exit(&clone->ds_rwlock);
		rw_enter(&origin_head->ds_rwlock, RW_WRITER);
		if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
			rw_exit(&origin_head->ds_rwlock);
			goto retry;
		}
	}
	csa.cds = clone;
	csa.ohds = origin_head;
	csa.force = force;
	error = dsl_sync_task_do(clone->ds_dir->dd_pool,
	    dsl_dataset_clone_swap_check,
	    dsl_dataset_clone_swap_sync, &csa, NULL, 9);
	return (error);
}
3281 3342
3282 3343 /*
3283 3344 * Given a pool name and a dataset object number in that pool,
3284 3345 * return the name of that dataset.
3285 3346 */
3286 3347 int
3287 3348 dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
3288 3349 {
3289 3350 spa_t *spa;
3290 3351 dsl_pool_t *dp;
3291 3352 dsl_dataset_t *ds;
3292 3353 int error;
3293 3354
3294 3355 if ((error = spa_open(pname, &spa, FTAG)) != 0)
3295 3356 return (error);
3296 3357 dp = spa_get_dsl(spa);
3297 3358 rw_enter(&dp->dp_config_rwlock, RW_READER);
3298 3359 if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
3299 3360 dsl_dataset_name(ds, buf);
3300 3361 dsl_dataset_rele(ds, FTAG);
3301 3362 }
3302 3363 rw_exit(&dp->dp_config_rwlock);
3303 3364 spa_close(spa, FTAG);
3304 3365
3305 3366 return (error);
3306 3367 }
3307 3368
/*
 * Check whether a write of asize bytes (with inflight bytes already in
 * flight) would exceed the dataset's refquota.  On entry *used is the
 * caller's space estimate; it is adjusted downward for any unconsumed
 * refreservation, and *ref_rsrv is set to the portion of asize covered
 * by that refreservation.  Returns 0, ERESTART (estimate over quota but
 * pending frees may help), or EDQUOT (definitely over quota).
 */
int
dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
    uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
{
	int error = 0;

	ASSERT3S(asize, >, 0);

	/*
	 * *ref_rsrv is the portion of asize that will come from any
	 * unconsumed refreservation space.
	 */
	*ref_rsrv = 0;

	mutex_enter(&ds->ds_lock);
	/*
	 * Make a space adjustment for reserved bytes.
	 */
	if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
		ASSERT3U(*used, >=,
		    ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
		*used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
		*ref_rsrv =
		    asize - MIN(asize, parent_delta(ds, asize + inflight));
	}

	if (!check_quota || ds->ds_quota == 0) {
		mutex_exit(&ds->ds_lock);
		return (0);
	}
	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk is over quota and there are no pending changes (which
	 * may free up space for us).
	 */
	if (ds->ds_phys->ds_referenced_bytes + inflight >= ds->ds_quota) {
		if (inflight > 0 ||
		    ds->ds_phys->ds_referenced_bytes < ds->ds_quota)
			error = ERESTART;
		else
			error = EDQUOT;
	}
	mutex_exit(&ds->ds_lock);

	return (error);
}
3354 3416
/*
 * Sync-task "check" half for setting refquota: the pool must support
 * the property, and a nonzero quota must not be below the dataset's
 * current referenced bytes or its refreservation.  Returns 0 or errno
 * (ENOTSUP, ENOSPC, or a prediction error).
 */
/* ARGSUSED */
static int
dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_prop_setarg_t *psa = arg2;
	int err;

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
		return (ENOTSUP);

	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
		return (err);

	/* a quota of 0 means "none" and is always acceptable */
	if (psa->psa_effective_value == 0)
		return (0);

	if (psa->psa_effective_value < ds->ds_phys->ds_referenced_bytes ||
	    psa->psa_effective_value < ds->ds_reserved)
		return (ENOSPC);

	return (0);
}
3378 3440
3379 3441 extern void dsl_prop_set_sync(void *, void *, dmu_tx_t *);
3380 3442
/*
 * Sync-task "sync" half for setting refquota: persist the property,
 * update the in-core ds_quota, and log the change to pool history.
 */
void
dsl_dataset_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value = psa->psa_effective_value;

	dsl_prop_set_sync(ds, psa, tx);
	DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);

	if (ds->ds_quota != effective_value) {
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_quota = effective_value;

		spa_history_log_internal(LOG_DS_REFQUOTA,
		    ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu ",
		    (longlong_t)ds->ds_quota, ds->ds_object);
	}
}
3400 3462
3401 3463 int
3402 3464 dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota)
3403 3465 {
3404 3466 dsl_dataset_t *ds;
3405 3467 dsl_prop_setarg_t psa;
3406 3468 int err;
3407 3469
3408 3470 dsl_prop_setarg_init_uint64(&psa, "refquota", source, "a);
3409 3471
3410 3472 err = dsl_dataset_hold(dsname, FTAG, &ds);
3411 3473 if (err)
3412 3474 return (err);
3413 3475
3414 3476 /*
3415 3477 * If someone removes a file, then tries to set the quota, we
3416 3478 * want to make sure the file freeing takes effect.
3417 3479 */
3418 3480 txg_wait_open(ds->ds_dir->dd_pool, 0);
3419 3481
3420 3482 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3421 3483 dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
3422 3484 ds, &psa, 0);
3423 3485
3424 3486 dsl_dataset_rele(ds, FTAG);
3425 3487 return (err);
3426 3488 }
3427 3489
/*
 * Sync-task "check" half for setting refreservation: the pool must
 * support the property, the target must be a head dataset, and any
 * increase in reserved space must fit within the available space and
 * (if set) the refquota.  Returns 0 or errno (ENOTSUP, EINVAL, ENOSPC,
 * or a prediction error).
 */
static int
dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value;
	uint64_t unique;
	int err;

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
	    SPA_VERSION_REFRESERVATION)
		return (ENOTSUP);

	if (dsl_dataset_is_snapshot(ds))
		return (EINVAL);

	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
		return (err);

	effective_value = psa->psa_effective_value;

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	mutex_enter(&ds->ds_lock);
	if (!DS_UNIQUE_IS_ACCURATE(ds))
		dsl_dataset_recalc_head_uniq(ds);
	unique = ds->ds_phys->ds_unique_bytes;
	mutex_exit(&ds->ds_lock);

	/* only the increase beyond what unique bytes already cover counts */
	if (MAX(unique, effective_value) > MAX(unique, ds->ds_reserved)) {
		uint64_t delta = MAX(unique, effective_value) -
		    MAX(unique, ds->ds_reserved);

		if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
			return (ENOSPC);
		if (ds->ds_quota > 0 &&
		    effective_value > ds->ds_quota)
			return (ENOSPC);
	}

	return (0);
}
3475 3537
/*
 * Sync-task "sync" half for setting refreservation: persist the
 * property, update the in-core ds_reserved, charge the change in
 * unconsumed reservation to the dsl_dir, and log to pool history.
 * Note the lock order: dd_lock is taken before ds_lock.
 */
static void
dsl_dataset_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value = psa->psa_effective_value;
	uint64_t unique;
	int64_t delta;

	dsl_prop_set_sync(ds, psa, tx);
	DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);

	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	mutex_enter(&ds->ds_dir->dd_lock);
	mutex_enter(&ds->ds_lock);
	ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
	unique = ds->ds_phys->ds_unique_bytes;
	/* change in reservation not already consumed by unique bytes */
	delta = MAX(0, (int64_t)(effective_value - unique)) -
	    MAX(0, (int64_t)(ds->ds_reserved - unique));
	ds->ds_reserved = effective_value;
	mutex_exit(&ds->ds_lock);

	dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
	mutex_exit(&ds->ds_dir->dd_lock);

	spa_history_log_internal(LOG_DS_REFRESERV,
	    ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu",
	    (longlong_t)effective_value, ds->ds_object);
}
3506 3568
/*
 * Public entry point for setting refreservation on a dataset: holds the
 * dataset by name and runs the check/sync callbacks above as a DSL sync
 * task.  Returns 0 or an errno from the hold or the sync task.
 */
3507 3569 int
3508 3570 dsl_dataset_set_reservation(const char *dsname, zprop_source_t source,
3509 3571 uint64_t reservation)
3510 3572 {
3511 3573 dsl_dataset_t *ds;
3512 3574 dsl_prop_setarg_t psa;
3513 3575 int err;
3514 3576
3515 3577 dsl_prop_setarg_init_uint64(&psa, "refreservation", source,
3516 3578 &reservation);
3517 3579
3518 3580 err = dsl_dataset_hold(dsname, FTAG, &ds);
3519 3581 if (err)
3520 3582 return (err);
3521 3583
3522 3584 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3523 3585 dsl_dataset_set_reservation_check,
3524 3586 dsl_dataset_set_reservation_sync, ds, &psa, 0);
3525 3587
3526 3588 dsl_dataset_rele(ds, FTAG);
3527 3589 return (err);
3528 3590 }
3529 3591
/*
 * State captured for the process-exit cleanup callback registered by
 * dsl_register_onexit_hold_cleanup(); identifies the pool, the snapshot
 * object, and the hold tag to release.  Freed by the callback itself.
 */
3530 3592 typedef struct zfs_hold_cleanup_arg {
3531 3593 dsl_pool_t *dp;
3532 3594 uint64_t dsobj;
3533 3595 char htag[MAXNAMELEN];
3534 3596 } zfs_hold_cleanup_arg_t;
3535 3597
/*
 * zfs_onexit callback: release a temporary user hold when the holding
 * process exits.  Retries on ENOENT (retry == B_TRUE) since the snapshot
 * may have been renamed; the cleanup arg is freed here in all cases.
 */
3536 3598 static void
3537 3599 dsl_dataset_user_release_onexit(void *arg)
3538 3600 {
3539 3601 zfs_hold_cleanup_arg_t *ca = arg;
3540 3602
3541 3603 (void) dsl_dataset_user_release_tmp(ca->dp, ca->dsobj, ca->htag,
3542 3604 B_TRUE);
3543 3605 kmem_free(ca, sizeof (zfs_hold_cleanup_arg_t));
3544 3606 }
3545 3607
/*
 * Arrange for the temporary hold 'htag' on 'ds' to be released when the
 * process associated with 'minor' (a zfs_onexit minor) exits.  The
 * callback owns and frees the allocated cleanup arg.
 */
3546 3608 void
3547 3609 dsl_register_onexit_hold_cleanup(dsl_dataset_t *ds, const char *htag,
3548 3610 minor_t minor)
3549 3611 {
3550 3612 zfs_hold_cleanup_arg_t *ca;
3551 3613
3552 3614 ca = kmem_alloc(sizeof (zfs_hold_cleanup_arg_t), KM_SLEEP);
3553 3615 ca->dp = ds->ds_dir->dd_pool;
3554 3616 ca->dsobj = ds->ds_object;
3555 3617 (void) strlcpy(ca->htag, htag, sizeof (ca->htag));
3556 3618 VERIFY3U(0, ==, zfs_onexit_add_cb(minor,
3557 3619 dsl_dataset_user_release_onexit, ca, NULL));
3558 3620 }
3559 3621
3560 3622 /*
3561 3623 * If you add new checks here, you may need to add
3562 3624 * additional checks to the "temporary" case in
3563 3625 * snapshot_check() in dmu_objset.c.
 *
 * Sync-task check for `zfs hold`: requires SPA_VERSION_USERREFS, the
 * target must be a snapshot, and the tag must not already exist in the
 * snapshot's userrefs ZAP.  Temporary holds additionally reserve room
 * for the MAX_TAG_PREFIX_LEN prefix (E2BIG otherwise).
3564 3626 */
3565 3627 static int
3566 3628 dsl_dataset_user_hold_check(void *arg1, void *arg2, dmu_tx_t *tx)
3567 3629 {
3568 3630 dsl_dataset_t *ds = arg1;
3569 3631 struct dsl_ds_holdarg *ha = arg2;
3570 3632 char *htag = ha->htag;
3571 3633 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3572 3634 int error = 0;
3573 3635
3574 3636 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3575 3637 return (ENOTSUP);
3576 3638
3577 3639 if (!dsl_dataset_is_snapshot(ds))
3578 3640 return (EINVAL);
3579 3641
3580 3642 /* tags must be unique */
3581 3643 mutex_enter(&ds->ds_lock);
3582 3644 if (ds->ds_phys->ds_userrefs_obj) {
/*
 * NOTE(review): 'tx' appears to be passed as the zap_lookup output
 * buffer; only the existence result is used here — confirm intent.
 */
3583 3645 error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj, htag,
3584 3646 8, 1, tx);
3585 3647 if (error == 0)
3586 3648 error = EEXIST;
3587 3649 else if (error == ENOENT)
3588 3650 error = 0;
3589 3651 }
3590 3652 mutex_exit(&ds->ds_lock);
3591 3653
3592 3654 if (error == 0 && ha->temphold &&
3593 3655 strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
3594 3656 error = E2BIG;
3595 3657
3596 3658 return (error);
3597 3659 }
3598 3660
/*
 * Sync-task sync for `zfs hold`: creates the snapshot's userrefs ZAP on
 * first use, bumps the in-core ds_userrefs count, records the tag with
 * the current time, and — for temporary holds — also registers the hold
 * in the pool-wide temp-hold list so stale holds can be cleaned up at
 * spa_load time.
 */
3599 3661 void
3600 3662 dsl_dataset_user_hold_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3601 3663 {
3602 3664 dsl_dataset_t *ds = arg1;
3603 3665 struct dsl_ds_holdarg *ha = arg2;
3604 3666 char *htag = ha->htag;
3605 3667 dsl_pool_t *dp = ds->ds_dir->dd_pool;
3606 3668 objset_t *mos = dp->dp_meta_objset;
3607 3669 uint64_t now = gethrestime_sec();
3608 3670 uint64_t zapobj;
3609 3671
3610 3672 mutex_enter(&ds->ds_lock);
3611 3673 if (ds->ds_phys->ds_userrefs_obj == 0) {
3612 3674 /*
3613 3675 * This is the first user hold for this dataset. Create
3614 3676 * the userrefs zap object.
3615 3677 */
3616 3678 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3617 3679 zapobj = ds->ds_phys->ds_userrefs_obj =
3618 3680 zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
3619 3681 } else {
3620 3682 zapobj = ds->ds_phys->ds_userrefs_obj;
3621 3683 }
3622 3684 ds->ds_userrefs++;
3623 3685 mutex_exit(&ds->ds_lock);
3624 3686
/* Record tag -> hold time; the check callback guaranteed uniqueness. */
3625 3687 VERIFY(0 == zap_add(mos, zapobj, htag, 8, 1, &now, tx));
3626 3688
3627 3689 if (ha->temphold) {
3628 3690 VERIFY(0 == dsl_pool_user_hold(dp, ds->ds_object,
3629 3691 htag, &now, tx));
3630 3692 }
3631 3693
3632 3694 spa_history_log_internal(LOG_DS_USER_HOLD,
3633 3695 dp->dp_spa, tx, "<%s> temp = %d dataset = %llu", htag,
3634 3696 (int)ha->temphold, ds->ds_object);
3635 3697 }
3636 3698
/*
 * dmu_objset_find() callback for `zfs hold`: holds dsname@snapname and
 * queues a hold sync task for it in ha->dstg.  A missing snapshot is
 * tolerated (returns 0) during a recursive hold; any other failure
 * records the failing dataset name in ha->failed for error reporting.
 */
3637 3699 static int
3638 3700 dsl_dataset_user_hold_one(const char *dsname, void *arg)
3639 3701 {
3640 3702 struct dsl_ds_holdarg *ha = arg;
3641 3703 dsl_dataset_t *ds;
3642 3704 int error;
3643 3705 char *name;
3644 3706
3645 3707 /* alloc a buffer to hold dsname@snapname plus terminating NULL */
3646 3708 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3647 3709 error = dsl_dataset_hold(name, ha->dstg, &ds);
3648 3710 strfree(name);
3649 3711 if (error == 0) {
3650 3712 ha->gotone = B_TRUE;
3651 3713 dsl_sync_task_create(ha->dstg, dsl_dataset_user_hold_check,
3652 3714 dsl_dataset_user_hold_sync, ds, ha, 0);
3653 3715 } else if (error == ENOENT && ha->recursive) {
3654 3716 error = 0;
3655 3717 } else {
3656 3718 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3657 3719 }
3658 3720 return (error);
3659 3721 }
3660 3722
/*
 * Place a user hold on an already-held snapshot (used by the send code
 * path).  Runs the standard hold check/sync pair as a single sync task
 * rather than building a task group.
 */
3661 3723 int
3662 3724 dsl_dataset_user_hold_for_send(dsl_dataset_t *ds, char *htag,
3663 3725 boolean_t temphold)
3664 3726 {
3665 3727 struct dsl_ds_holdarg *ha;
3666 3728 int error;
3667 3729
3668 3730 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3669 3731 ha->htag = htag;
3670 3732 ha->temphold = temphold;
3671 3733 error = dsl_sync_task_do(ds->ds_dir->dd_pool,
3672 3734 dsl_dataset_user_hold_check, dsl_dataset_user_hold_sync,
3673 3735 ds, ha, 0);
3674 3736 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3675 3737
3676 3738 return (error);
3677 3739 }
3678 3740
/*
 * Top-level entry for `zfs hold` (ioctl path).  Builds a sync task
 * group holding every targeted snapshot (all descendants when
 * 'recursive'), waits for the group, then optionally registers
 * exit-time cleanup for temporary holds via 'cleanup_fd'.  On error the
 * failing dataset name is copied back into 'dsname' for the caller's
 * error message.
 */
3679 3741 int
3680 3742 dsl_dataset_user_hold(char *dsname, char *snapname, char *htag,
3681 3743 boolean_t recursive, boolean_t temphold, int cleanup_fd)
3682 3744 {
3683 3745 struct dsl_ds_holdarg *ha;
3684 3746 dsl_sync_task_t *dst;
3685 3747 spa_t *spa;
3686 3748 int error;
3687 3749 minor_t minor = 0;
3688 3750
3689 3751 if (cleanup_fd != -1) {
3690 3752 /* Currently we only support cleanup-on-exit of tempholds. */
3691 3753 if (!temphold)
3692 3754 return (EINVAL);
3693 3755 error = zfs_onexit_fd_hold(cleanup_fd, &minor);
3694 3756 if (error)
3695 3757 return (error);
3696 3758 }
3697 3759
3698 3760 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3699 3761
3700 3762 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3701 3763
3702 3764 error = spa_open(dsname, &spa, FTAG);
3703 3765 if (error) {
3704 3766 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3705 3767 if (cleanup_fd != -1)
3706 3768 zfs_onexit_fd_rele(cleanup_fd);
3707 3769 return (error);
3708 3770 }
3709 3771
3710 3772 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3711 3773 ha->htag = htag;
3712 3774 ha->snapname = snapname;
3713 3775 ha->recursive = recursive;
3714 3776 ha->temphold = temphold;
3715 3777
3716 3778 if (recursive) {
3717 3779 error = dmu_objset_find(dsname, dsl_dataset_user_hold_one,
3718 3780 ha, DS_FIND_CHILDREN);
3719 3781 } else {
3720 3782 error = dsl_dataset_user_hold_one(dsname, ha);
3721 3783 }
3722 3784 if (error == 0)
3723 3785 error = dsl_sync_task_group_wait(ha->dstg);
3724 3786
/*
 * Walk every queued task: record the first failure's dataset name
 * (snapshot part stripped), register exit cleanup for successful
 * temporary holds, and drop the per-task dataset holds.
 */
3725 3787 for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3726 3788 dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3727 3789 dsl_dataset_t *ds = dst->dst_arg1;
3728 3790
3729 3791 if (dst->dst_err) {
3730 3792 dsl_dataset_name(ds, ha->failed);
3731 3793 *strchr(ha->failed, '@') = '\0';
3732 3794 } else if (error == 0 && minor != 0 && temphold) {
3733 3795 /*
3734 3796 * If this hold is to be released upon process exit,
3735 3797 * register that action now.
3736 3798 */
3737 3799 dsl_register_onexit_hold_cleanup(ds, htag, minor);
3738 3800 }
3739 3801 dsl_dataset_rele(ds, ha->dstg);
3740 3802 }
3741 3803
3742 3804 if (error == 0 && recursive && !ha->gotone)
3743 3805 error = ENOENT;
3744 3806
3745 3807 if (error)
3746 3808 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3747 3809
3748 3810 dsl_sync_task_group_destroy(ha->dstg);
3749 3811
3750 3812 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3751 3813 spa_close(spa, FTAG);
3752 3814 if (cleanup_fd != -1)
3753 3815 zfs_onexit_fd_rele(cleanup_fd);
3754 3816 return (error);
3755 3817 }
3756 3818
/* Per-snapshot argument for the user-release sync task. */
3757 3819 struct dsl_ds_releasearg {
3758 3820 dsl_dataset_t *ds;
3759 3821 const char *htag;
3760 3822 boolean_t own; /* do we own or just hold ds? */
3761 3823 };
3762 3824
/*
 * Determine whether releasing 'htag' could trigger destruction of 'ds':
 * true when this is the last user hold, the snapshot has no clones
 * (ds_num_children == 1), and deferred destroy is pending.  Returns
 * ESRCH if the tag does not exist.  Performed under ds_lock so the
 * answer is consistent with the ref counts it reads.
 */
3763 3825 static int
3764 3826 dsl_dataset_release_might_destroy(dsl_dataset_t *ds, const char *htag,
3765 3827 boolean_t *might_destroy)
3766 3828 {
3767 3829 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3768 3830 uint64_t zapobj;
3769 3831 uint64_t tmp;
3770 3832 int error;
3771 3833
3772 3834 *might_destroy = B_FALSE;
3773 3835
3774 3836 mutex_enter(&ds->ds_lock);
3775 3837 zapobj = ds->ds_phys->ds_userrefs_obj;
3776 3838 if (zapobj == 0) {
3777 3839 /* The tag can't possibly exist */
3778 3840 mutex_exit(&ds->ds_lock);
3779 3841 return (ESRCH);
3780 3842 }
3781 3843
3782 3844 /* Make sure the tag exists */
3783 3845 error = zap_lookup(mos, zapobj, htag, 8, 1, &tmp);
3784 3846 if (error) {
3785 3847 mutex_exit(&ds->ds_lock);
3786 3848 if (error == ENOENT)
3787 3849 error = ESRCH;
3788 3850 return (error);
3789 3851 }
3790 3852
3791 3853 if (ds->ds_userrefs == 1 && ds->ds_phys->ds_num_children == 1 &&
3792 3854 DS_IS_DEFER_DESTROY(ds))
3793 3855 *might_destroy = B_TRUE;
3794 3856
3795 3857 mutex_exit(&ds->ds_lock);
3796 3858 return (0);
3797 3859 }
3798 3860
/*
 * Sync-task check for `zfs release`.  If dropping this hold would
 * destroy the snapshot (deferred destroy pending), the release can only
 * proceed in syncing context when the caller prepared for destruction
 * (ra->own); otherwise return EBUSY so the top-level code retries after
 * taking ownership.  Delegates the destroy validation to
 * dsl_dataset_destroy_check().
 */
3799 3861 static int
3800 3862 dsl_dataset_user_release_check(void *arg1, void *tag, dmu_tx_t *tx)
3801 3863 {
3802 3864 struct dsl_ds_releasearg *ra = arg1;
3803 3865 dsl_dataset_t *ds = ra->ds;
3804 3866 boolean_t might_destroy;
3805 3867 int error;
3806 3868
3807 3869 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3808 3870 return (ENOTSUP);
3809 3871
3810 3872 error = dsl_dataset_release_might_destroy(ds, ra->htag, &might_destroy);
3811 3873 if (error)
3812 3874 return (error);
3813 3875
3814 3876 if (might_destroy) {
3815 3877 struct dsl_ds_destroyarg dsda = {0};
3816 3878
3817 3879 if (dmu_tx_is_syncing(tx)) {
3818 3880 /*
3819 3881 * If we're not prepared to remove the snapshot,
3820 3882 * we can't allow the release to happen right now.
3821 3883 */
3822 3884 if (!ra->own)
3823 3885 return (EBUSY);
3824 3886 }
3825 3887 dsda.ds = ds;
3826 3888 dsda.releasing = B_TRUE;
3827 3889 return (dsl_dataset_destroy_check(&dsda, tag, tx));
3828 3890 }
3829 3891
3830 3892 return (0);
3831 3893 }
3832 3894
/*
 * Sync-task sync for `zfs release`: drops the in-core ref, removes the
 * tag from both the pool-wide temp-hold list (ENOENT tolerated — the
 * hold may not have been temporary) and the snapshot's userrefs ZAP,
 * and, when this was the last hold on a defer-destroyed snapshot with
 * no clones, performs the actual destroy (the check callback already
 * validated it).  dsobj/refs are captured before a potential destroy so
 * the history record stays valid.
 */
3833 3895 static void
3834 3896 dsl_dataset_user_release_sync(void *arg1, void *tag, dmu_tx_t *tx)
3835 3897 {
3836 3898 struct dsl_ds_releasearg *ra = arg1;
3837 3899 dsl_dataset_t *ds = ra->ds;
3838 3900 dsl_pool_t *dp = ds->ds_dir->dd_pool;
3839 3901 objset_t *mos = dp->dp_meta_objset;
3840 3902 uint64_t zapobj;
3841 3903 uint64_t dsobj = ds->ds_object;
3842 3904 uint64_t refs;
3843 3905 int error;
3844 3906
3845 3907 mutex_enter(&ds->ds_lock);
3846 3908 ds->ds_userrefs--;
3847 3909 refs = ds->ds_userrefs;
3848 3910 mutex_exit(&ds->ds_lock);
3849 3911 error = dsl_pool_user_release(dp, ds->ds_object, ra->htag, tx);
3850 3912 VERIFY(error == 0 || error == ENOENT);
3851 3913 zapobj = ds->ds_phys->ds_userrefs_obj;
3852 3914 VERIFY(0 == zap_remove(mos, zapobj, ra->htag, tx));
3853 3915 if (ds->ds_userrefs == 0 && ds->ds_phys->ds_num_children == 1 &&
3854 3916 DS_IS_DEFER_DESTROY(ds)) {
3855 3917 struct dsl_ds_destroyarg dsda = {0};
3856 3918
3857 3919 ASSERT(ra->own);
3858 3920 dsda.ds = ds;
3859 3921 dsda.releasing = B_TRUE;
3860 3922 /* We already did the destroy_check */
3861 3923 dsl_dataset_destroy_sync(&dsda, tag, tx);
3862 3924 }
3863 3925
3864 3926 spa_history_log_internal(LOG_DS_USER_RELEASE,
3865 3927 dp->dp_spa, tx, "<%s> %lld dataset = %llu",
3866 3928 ra->htag, (longlong_t)refs, dsobj);
3867 3929 }
3868 3930
/*
 * dmu_objset_find() callback for `zfs release`: holds dsname@snapname
 * and queues a release sync task.  If dropping the hold might destroy
 * the snapshot, unmount it (kernel only) and take exclusive ownership
 * first, so the sync task is allowed to destroy it; EBUSY here is
 * retried by the top-level dsl_dataset_user_release().  The releasearg
 * is freed by the caller after the task group completes.
 */
3869 3931 static int
3870 3932 dsl_dataset_user_release_one(const char *dsname, void *arg)
3871 3933 {
3872 3934 struct dsl_ds_holdarg *ha = arg;
3873 3935 struct dsl_ds_releasearg *ra;
3874 3936 dsl_dataset_t *ds;
3875 3937 int error;
3876 3938 void *dtag = ha->dstg;
3877 3939 char *name;
3878 3940 boolean_t own = B_FALSE;
3879 3941 boolean_t might_destroy;
3880 3942
3881 3943 /* alloc a buffer to hold dsname@snapname, plus the terminating NULL */
3882 3944 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3883 3945 error = dsl_dataset_hold(name, dtag, &ds);
3884 3946 strfree(name);
3885 3947 if (error == ENOENT && ha->recursive)
3886 3948 return (0);
3887 3949 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3888 3950 if (error)
3889 3951 return (error);
3890 3952
3891 3953 ha->gotone = B_TRUE;
3892 3954
3893 3955 ASSERT(dsl_dataset_is_snapshot(ds));
3894 3956
3895 3957 error = dsl_dataset_release_might_destroy(ds, ha->htag, &might_destroy);
3896 3958 if (error) {
3897 3959 dsl_dataset_rele(ds, dtag);
3898 3960 return (error);
3899 3961 }
3900 3962
3901 3963 if (might_destroy) {
3902 3964 #ifdef _KERNEL
3903 3965 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3904 3966 error = zfs_unmount_snap(name, NULL);
3905 3967 strfree(name);
3906 3968 if (error) {
3907 3969 dsl_dataset_rele(ds, dtag);
3908 3970 return (error);
3909 3971 }
3910 3972 #endif
3911 3973 if (!dsl_dataset_tryown(ds, B_TRUE, dtag)) {
3912 3974 dsl_dataset_rele(ds, dtag);
3913 3975 return (EBUSY);
3914 3976 } else {
3915 3977 own = B_TRUE;
3916 3978 dsl_dataset_make_exclusive(ds, dtag);
3917 3979 }
3918 3980 }
3919 3981
3920 3982 ra = kmem_alloc(sizeof (struct dsl_ds_releasearg), KM_SLEEP);
3921 3983 ra->ds = ds;
3922 3984 ra->htag = ha->htag;
3923 3985 ra->own = own;
3924 3986 dsl_sync_task_create(ha->dstg, dsl_dataset_user_release_check,
3925 3987 dsl_dataset_user_release_sync, ra, dtag, 0);
3926 3988
3927 3989 return (0);
3928 3990 }
3929 3991
/*
 * Top-level entry for `zfs release`.  Builds and waits on a sync task
 * group over all targeted snapshots (descendants too when 'recursive'),
 * then disowns/releases each dataset and frees per-task state.  The
 * whole operation is restarted from 'top:' on EBUSY — see the comment
 * before the goto.  On other errors the failing dataset name is copied
 * back into 'dsname'.
 */
3930 3992 int
3931 3993 dsl_dataset_user_release(char *dsname, char *snapname, char *htag,
3932 3994 boolean_t recursive)
3933 3995 {
3934 3996 struct dsl_ds_holdarg *ha;
3935 3997 dsl_sync_task_t *dst;
3936 3998 spa_t *spa;
3937 3999 int error;
3938 4000
3939 4001 top:
3940 4002 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3941 4003
3942 4004 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3943 4005
3944 4006 error = spa_open(dsname, &spa, FTAG);
3945 4007 if (error) {
3946 4008 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3947 4009 return (error);
3948 4010 }
3949 4011
3950 4012 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3951 4013 ha->htag = htag;
3952 4014 ha->snapname = snapname;
3953 4015 ha->recursive = recursive;
3954 4016 if (recursive) {
3955 4017 error = dmu_objset_find(dsname, dsl_dataset_user_release_one,
3956 4018 ha, DS_FIND_CHILDREN);
3957 4019 } else {
3958 4020 error = dsl_dataset_user_release_one(dsname, ha);
3959 4021 }
3960 4022 if (error == 0)
3961 4023 error = dsl_sync_task_group_wait(ha->dstg);
3962 4024
/* Tear down per-task state; disown datasets we took ownership of. */
3963 4025 for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3964 4026 dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3965 4027 struct dsl_ds_releasearg *ra = dst->dst_arg1;
3966 4028 dsl_dataset_t *ds = ra->ds;
3967 4029
3968 4030 if (dst->dst_err)
3969 4031 dsl_dataset_name(ds, ha->failed);
3970 4032
3971 4033 if (ra->own)
3972 4034 dsl_dataset_disown(ds, ha->dstg);
3973 4035 else
3974 4036 dsl_dataset_rele(ds, ha->dstg);
3975 4037
3976 4038 kmem_free(ra, sizeof (struct dsl_ds_releasearg));
3977 4039 }
3978 4040
3979 4041 if (error == 0 && recursive && !ha->gotone)
3980 4042 error = ENOENT;
3981 4043
3982 4044 if (error && error != EBUSY)
3983 4045 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3984 4046
3985 4047 dsl_sync_task_group_destroy(ha->dstg);
3986 4048 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3987 4049 spa_close(spa, FTAG);
3988 4050
3989 4051 /*
3990 4052 * We can get EBUSY if we were racing with deferred destroy and
3991 4053 * dsl_dataset_user_release_check() hadn't done the necessary
3992 4054 * open context setup. We can also get EBUSY if we're racing
3993 4055 * with destroy and that thread is the ds_owner. Either way
3994 4056 * the busy condition should be transient, and we should retry
3995 4057 * the release operation.
3996 4058 */
3997 4059 if (error == EBUSY)
3998 4060 goto top;
3999 4061
4000 4062 return (error);
4001 4063 }
4002 4064
4003 4065 /*
4004 4066 * Called at spa_load time (with retry == B_FALSE) to release a stale
4005 4067 * temporary user hold. Also called by the onexit code (with retry == B_TRUE).
 *
 * Translates the dataset object number into a "fs@snap" name (releasing
 * the hold in between, so the name may go stale) and performs a
 * non-recursive release of 'htag'.
4006 4068 */
4007 4069 int
4008 4070 dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag,
4009 4071 boolean_t retry)
4010 4072 {
4011 4073 dsl_dataset_t *ds;
4012 4074 char *snap;
4013 4075 char *name;
4014 4076 int namelen;
4015 4077 int error;
4016 4078
4017 4079 do {
/* Resolve dsobj to its current name under the config lock. */
4018 4080 rw_enter(&dp->dp_config_rwlock, RW_READER);
4019 4081 error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
4020 4082 rw_exit(&dp->dp_config_rwlock);
4021 4083 if (error)
4022 4084 return (error);
4023 4085 namelen = dsl_dataset_namelen(ds)+1;
4024 4086 name = kmem_alloc(namelen, KM_SLEEP);
4025 4087 dsl_dataset_name(ds, name);
4026 4088 dsl_dataset_rele(ds, FTAG);
4027 4089
4028 4090 snap = strchr(name, '@');
4029 4091 *snap = '\0';
4030 4092 ++snap;
4031 4093 error = dsl_dataset_user_release(name, snap, htag, B_FALSE);
4032 4094 kmem_free(name, namelen);
4033 4095
4034 4096 /*
4035 4097 * The object can't have been destroyed because we have a hold,
4036 4098 * but it might have been renamed, resulting in ENOENT. Retry
4037 4099 * if we've been requested to do so.
4038 4100 *
4039 4101 * It would be nice if we could use the dsobj all the way
4040 4102 * through and avoid ENOENT entirely. But we might need to
4041 4103 * unmount the snapshot, and there's currently no way to lookup
4042 4104 * a vfsp using a ZFS object id.
4043 4105 */
4044 4106 } while ((error == ENOENT) && retry);
4045 4107
4046 4108 return (error);
4047 4109 }
4048 4110
/*
 * Build (in *nvp, allocated here; caller frees) an nvlist of the user
 * holds on snapshot 'dsname': one uint64 entry per hold tag.  The value
 * is the first integer stored in the userrefs ZAP for that tag —
 * presumably the hold creation time; verify against the hold path.
 */
4049 4111 int
4050 4112 dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp)
4051 4113 {
4052 4114 dsl_dataset_t *ds;
4053 4115 int err;
4054 4116
4055 4117 err = dsl_dataset_hold(dsname, FTAG, &ds);
4056 4118 if (err)
4057 4119 return (err);
4058 4120
4059 4121 VERIFY(0 == nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
4060 4122 if (ds->ds_phys->ds_userrefs_obj != 0) {
4061 4123 zap_attribute_t *za;
4062 4124 zap_cursor_t zc;
4063 4125
4064 4126 za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
4065 4127 for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset,
4066 4128 ds->ds_phys->ds_userrefs_obj);
4067 4129 zap_cursor_retrieve(&zc, za) == 0;
4068 4130 zap_cursor_advance(&zc)) {
4069 4131 VERIFY(0 == nvlist_add_uint64(*nvp, za->za_name,
4070 4132 za->za_first_integer));
4071 4133 }
4072 4134 zap_cursor_fini(&zc);
4073 4135 kmem_free(za, sizeof (zap_attribute_t));
4074 4136 }
4075 4137 dsl_dataset_rele(ds, FTAG);
4076 4138 return (0);
4077 4139 }
4078 4140
4079 4141 /*
4080 4142 * Note, this function is used as the callback for dmu_objset_find(). We
4081 4143 * always return 0 so that we will continue to find and process
4082 4144 * inconsistent datasets, even if we encounter an error trying to
4083 4145 * process one of them.
 *
 * Takes exclusive ownership; destroys the dataset only if it is marked
 * inconsistent (e.g. an interrupted receive), otherwise disowns it.
4084 4146 */
4085 4147 /* ARGSUSED */
4086 4148 int
4087 4149 dsl_destroy_inconsistent(const char *dsname, void *arg)
4088 4150 {
4089 4151 dsl_dataset_t *ds;
4090 4152
4091 4153 if (dsl_dataset_own(dsname, B_TRUE, FTAG, &ds) == 0) {
4092 4154 if (DS_IS_INCONSISTENT(ds))
4093 4155 (void) dsl_dataset_destroy(ds, FTAG, B_FALSE);
4094 4156 else
4095 4157 dsl_dataset_disown(ds, FTAG);
4096 4158 }
4097 4159 return (0);
4098 4160 }
4099 4161
4100 4162 /*
4101 4163 * Return (in *usedp) the amount of space written in new that is not
4102 4164 * present in oldsnap. New may be a snapshot or the head. Old must be
4103 4165 * a snapshot before new, in new's filesystem (or its origin). If not then
4104 4166 * fail and return EINVAL.
4105 4167 *
4106 4168 * The written space is calculated by considering two components: First, we
4107 4169 * ignore any freed space, and calculate the written as new's used space
4108 4170 * minus old's used space. Next, we add in the amount of space that was freed
4109 4171 * between the two snapshots, thus reducing new's used space relative to old's.
4110 4172 * Specifically, this is the space that was born before old->ds_creation_txg,
4111 4173 * and freed before new (ie. on new's deadlist or a previous deadlist).
4112 4174 *
4113 4175 * space freed [---------------------]
4114 4176 * snapshots ---O-------O--------O-------O------
4115 4177 * oldsnap new
|
↓ open down ↓ |
733 lines elided |
↑ open up ↑ |
4116 4178 */
4117 4179 int
4118 4180 dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new,
4119 4181 uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
4120 4182 {
4121 4183 int err = 0;
4122 4184 uint64_t snapobj;
4123 4185 dsl_pool_t *dp = new->ds_dir->dd_pool;
4124 4186
/*
 * This diff renames ds_used_bytes to ds_referenced_bytes (feature-flags
 * on-disk format change); the delta is otherwise equivalent.
 */
4125 4187 *usedp = 0;
4126 - *usedp += new->ds_phys->ds_used_bytes;
4127 - *usedp -= oldsnap->ds_phys->ds_used_bytes;
4188 + *usedp += new->ds_phys->ds_referenced_bytes;
4189 + *usedp -= oldsnap->ds_phys->ds_referenced_bytes;
4128 4190
4129 4191 *compp = 0;
4130 4192 *compp += new->ds_phys->ds_compressed_bytes;
4131 4193 *compp -= oldsnap->ds_phys->ds_compressed_bytes;
4132 4194
4133 4195 *uncompp = 0;
4134 4196 *uncompp += new->ds_phys->ds_uncompressed_bytes;
4135 4197 *uncompp -= oldsnap->ds_phys->ds_uncompressed_bytes;
4136 4198
/*
 * Walk the snapshot chain backward from 'new' toward 'oldsnap',
 * accumulating deadlist space born before oldsnap's creation txg.
 * The new hunk avoids re-holding 'new' itself (it is already held by
 * the caller) — releases below are symmetric with that.
 */
4137 4199 rw_enter(&dp->dp_config_rwlock, RW_READER);
4138 4200 snapobj = new->ds_object;
4139 4201 while (snapobj != oldsnap->ds_object) {
4140 4202 dsl_dataset_t *snap;
4141 4203 uint64_t used, comp, uncomp;
4142 4204
4143 - err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snap);
4144 - if (err != 0)
4145 - break;
4205 + if (snapobj == new->ds_object) {
4206 + snap = new;
4207 + } else {
4208 + err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snap);
4209 + if (err != 0)
4210 + break;
4211 + }
4146 4212
4147 4213 if (snap->ds_phys->ds_prev_snap_txg ==
4148 4214 oldsnap->ds_phys->ds_creation_txg) {
4149 4215 /*
4150 4216 * The blocks in the deadlist can not be born after
4151 4217 * ds_prev_snap_txg, so get the whole deadlist space,
4152 4218 * which is more efficient (especially for old-format
4153 4219 * deadlists). Unfortunately the deadlist code
4154 4220 * doesn't have enough information to make this
4155 4221 * optimization itself.
4156 4222 */
4157 4223 dsl_deadlist_space(&snap->ds_deadlist,
4158 4224 &used, &comp, &uncomp);
4159 4225 } else {
4160 4226 dsl_deadlist_space_range(&snap->ds_deadlist,
4161 4227 0, oldsnap->ds_phys->ds_creation_txg,
4162 4228 &used, &comp, &uncomp);
4163 4229 }
|
↓ open down ↓ |
8 lines elided |
↑ open up ↑ |
4164 4230 *usedp += used;
4165 4231 *compp += comp;
4166 4232 *uncompp += uncomp;
4167 4233
4168 4234 /*
4169 4235 * If we get to the beginning of the chain of snapshots
4170 4236 * (ds_prev_snap_obj == 0) before oldsnap, then oldsnap
4171 4237 * was not a snapshot of/before new.
4172 4238 */
4173 4239 snapobj = snap->ds_phys->ds_prev_snap_obj;
4174 - dsl_dataset_rele(snap, FTAG);
4240 + if (snap != new)
4241 + dsl_dataset_rele(snap, FTAG);
4175 4242 if (snapobj == 0) {
4176 4243 err = EINVAL;
4177 4244 break;
4178 4245 }
4179 4246
4180 4247 }
4181 4248 rw_exit(&dp->dp_config_rwlock);
4182 4249 return (err);
4183 4250 }
4184 4251
4185 4252 /*
4186 4253 * Return (in *usedp) the amount of space that will be reclaimed if firstsnap,
4187 4254 * lastsnap, and all snapshots in between are deleted.
4188 4255 *
4189 4256 * blocks that would be freed [---------------------------]
4190 4257 * snapshots ---O-------O--------O-------O--------O
4191 4258 * firstsnap lastsnap
4192 4259 *
4193 4260 * This is the set of blocks that were born after the snap before firstsnap,
4194 4261 * (birth > firstsnap->prev_snap_txg) and died before the snap after the
4195 4262 * last snap (ie, is on lastsnap->ds_next->ds_deadlist or an earlier deadlist).
4196 4263 * We calculate this by iterating over the relevant deadlists (from the snap
4197 4264 * after lastsnap, backward to the snap after firstsnap), summing up the
4198 4265 * space on the deadlist that was born after the snap before firstsnap.
4199 4266 */
4200 4267 int
4201 4268 dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap,
4202 4269 dsl_dataset_t *lastsnap,
4203 4270 uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
4204 4271 {
4205 4272 int err = 0;
4206 4273 uint64_t snapobj;
4207 4274 dsl_pool_t *dp = firstsnap->ds_dir->dd_pool;
4208 4275
4209 4276 ASSERT(dsl_dataset_is_snapshot(firstsnap));
4210 4277 ASSERT(dsl_dataset_is_snapshot(lastsnap));
4211 4278
4212 4279 /*
4213 4280 * Check that the snapshots are in the same dsl_dir, and firstsnap
4214 4281 * is before lastsnap.
4215 4282 */
4216 4283 if (firstsnap->ds_dir != lastsnap->ds_dir ||
4217 4284 firstsnap->ds_phys->ds_creation_txg >
4218 4285 lastsnap->ds_phys->ds_creation_txg)
4219 4286 return (EINVAL);
4220 4287
4221 4288 *usedp = *compp = *uncompp = 0;
4222 4289
/*
 * Walk backward from the snapshot after lastsnap to firstsnap,
 * summing per-deadlist space born after firstsnap's predecessor.
 * snapobj == 0 before reaching firstsnap would violate the ordering
 * check above, hence the ASSERT rather than an error return.
 */
4223 4290 rw_enter(&dp->dp_config_rwlock, RW_READER);
4224 4291 snapobj = lastsnap->ds_phys->ds_next_snap_obj;
4225 4292 while (snapobj != firstsnap->ds_object) {
4226 4293 dsl_dataset_t *ds;
4227 4294 uint64_t used, comp, uncomp;
4228 4295
4229 4296 err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &ds);
4230 4297 if (err != 0)
4231 4298 break;
4232 4299
4233 4300 dsl_deadlist_space_range(&ds->ds_deadlist,
4234 4301 firstsnap->ds_phys->ds_prev_snap_txg, UINT64_MAX,
4235 4302 &used, &comp, &uncomp);
4236 4303 *usedp += used;
4237 4304 *compp += comp;
4238 4305 *uncompp += uncomp;
4239 4306
4240 4307 snapobj = ds->ds_phys->ds_prev_snap_obj;
4241 4308 ASSERT3U(snapobj, !=, 0);
4242 4309 dsl_dataset_rele(ds, FTAG);
4243 4310 }
4244 4311 rw_exit(&dp->dp_config_rwlock);
4245 4312 return (err);
4246 4313 }
|
↓ open down ↓ |
62 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX