2619 asynchronous destruction of ZFS file systems
2747 SPA versioning with zfs feature flags
Reviewed by: Matt Ahrens <mahrens@delphix.com>
Reviewed by: George Wilson <gwilson@delphix.com>
Reviewed by: Richard Lowe <richlowe@richlowe.net>
Reviewed by: Dan Kruchinin <dan.kruchinin@gmail.com>
Approved by: Dan McDonald <danmcd@nexenta.com>
--- old/usr/src/uts/common/fs/zfs/dbuf.c
+++ new/usr/src/uts/common/fs/zfs/dbuf.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 + * Copyright (c) 2012 by Delphix. All rights reserved.
24 25 */
25 26
26 27 #include <sys/zfs_context.h>
27 28 #include <sys/dmu.h>
28 29 #include <sys/dmu_impl.h>
29 30 #include <sys/dbuf.h>
30 31 #include <sys/dmu_objset.h>
31 32 #include <sys/dsl_dataset.h>
32 33 #include <sys/dsl_dir.h>
33 34 #include <sys/dmu_tx.h>
34 35 #include <sys/spa.h>
35 36 #include <sys/zio.h>
36 37 #include <sys/dmu_zfetch.h>
37 38 #include <sys/sa.h>
38 39 #include <sys/sa_impl.h>
39 40
40 41 static void dbuf_destroy(dmu_buf_impl_t *db);
41 42 static int dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
42 43 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
43 44
44 45 /*
45 46 * Global data structures and functions for the dbuf cache.
46 47 */
47 48 static kmem_cache_t *dbuf_cache;
48 49
49 50 /* ARGSUSED */
50 51 static int
51 52 dbuf_cons(void *vdb, void *unused, int kmflag)
52 53 {
53 54 dmu_buf_impl_t *db = vdb;
54 55 bzero(db, sizeof (dmu_buf_impl_t));
55 56
56 57 mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
57 58 cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
58 59 refcount_create(&db->db_holds);
59 60 return (0);
60 61 }
61 62
62 63 /* ARGSUSED */
63 64 static void
64 65 dbuf_dest(void *vdb, void *unused)
65 66 {
66 67 dmu_buf_impl_t *db = vdb;
67 68 mutex_destroy(&db->db_mtx);
68 69 cv_destroy(&db->db_changed);
69 70 refcount_destroy(&db->db_holds);
70 71 }
71 72
72 73 /*
73 74 * dbuf hash table routines
74 75 */
75 76 static dbuf_hash_table_t dbuf_hash_table;
76 77
77 78 static uint64_t dbuf_hash_count;
78 79
79 80 static uint64_t
80 81 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
81 82 {
82 83 uintptr_t osv = (uintptr_t)os;
83 84 uint64_t crc = -1ULL;
84 85
85 86 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
86 87 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
87 88 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
88 89 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
89 90 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
90 91 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
91 92 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];
92 93
93 94 crc ^= (osv>>14) ^ (obj>>16) ^ (blkid>>16);
94 95
95 96 return (crc);
96 97 }
97 98
98 99 #define DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid);
99 100
100 101 #define DBUF_EQUAL(dbuf, os, obj, level, blkid) \
101 102 ((dbuf)->db.db_object == (obj) && \
102 103 (dbuf)->db_objset == (os) && \
103 104 (dbuf)->db_level == (level) && \
104 105 (dbuf)->db_blkid == (blkid))
105 106
106 107 dmu_buf_impl_t *
107 108 dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid)
108 109 {
109 110 dbuf_hash_table_t *h = &dbuf_hash_table;
110 111 objset_t *os = dn->dn_objset;
111 112 uint64_t obj = dn->dn_object;
112 113 uint64_t hv = DBUF_HASH(os, obj, level, blkid);
113 114 uint64_t idx = hv & h->hash_table_mask;
114 115 dmu_buf_impl_t *db;
115 116
116 117 mutex_enter(DBUF_HASH_MUTEX(h, idx));
117 118 for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
118 119 if (DBUF_EQUAL(db, os, obj, level, blkid)) {
119 120 mutex_enter(&db->db_mtx);
120 121 if (db->db_state != DB_EVICTING) {
121 122 mutex_exit(DBUF_HASH_MUTEX(h, idx));
122 123 return (db);
123 124 }
124 125 mutex_exit(&db->db_mtx);
125 126 }
126 127 }
127 128 mutex_exit(DBUF_HASH_MUTEX(h, idx));
128 129 return (NULL);
129 130 }
130 131
131 132 /*
132 133 * Insert an entry into the hash table. If there is already an element
133 134 * equal to elem in the hash table, then the already existing element
134 135 * will be returned and the new element will not be inserted.
135 136 * Otherwise returns NULL.
136 137 */
137 138 static dmu_buf_impl_t *
138 139 dbuf_hash_insert(dmu_buf_impl_t *db)
139 140 {
140 141 dbuf_hash_table_t *h = &dbuf_hash_table;
141 142 objset_t *os = db->db_objset;
142 143 uint64_t obj = db->db.db_object;
143 144 int level = db->db_level;
144 145 uint64_t blkid = db->db_blkid;
145 146 uint64_t hv = DBUF_HASH(os, obj, level, blkid);
146 147 uint64_t idx = hv & h->hash_table_mask;
147 148 dmu_buf_impl_t *dbf;
148 149
149 150 mutex_enter(DBUF_HASH_MUTEX(h, idx));
150 151 for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
151 152 if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
152 153 mutex_enter(&dbf->db_mtx);
153 154 if (dbf->db_state != DB_EVICTING) {
154 155 mutex_exit(DBUF_HASH_MUTEX(h, idx));
155 156 return (dbf);
156 157 }
157 158 mutex_exit(&dbf->db_mtx);
158 159 }
159 160 }
160 161
161 162 mutex_enter(&db->db_mtx);
162 163 db->db_hash_next = h->hash_table[idx];
163 164 h->hash_table[idx] = db;
164 165 mutex_exit(DBUF_HASH_MUTEX(h, idx));
165 166 atomic_add_64(&dbuf_hash_count, 1);
166 167
167 168 return (NULL);
168 169 }
169 170
170 171 /*
171 172 * Remove an entry from the hash table. This operation will
172 173 * fail if there are any existing holds on the db.
173 174 */
174 175 static void
175 176 dbuf_hash_remove(dmu_buf_impl_t *db)
176 177 {
177 178 dbuf_hash_table_t *h = &dbuf_hash_table;
178 179 uint64_t hv = DBUF_HASH(db->db_objset, db->db.db_object,
179 180 db->db_level, db->db_blkid);
180 181 uint64_t idx = hv & h->hash_table_mask;
181 182 dmu_buf_impl_t *dbf, **dbp;
182 183
183 184 /*
184 185 * We mustn't hold db_mtx to maintain lock ordering:
185 186 * DBUF_HASH_MUTEX > db_mtx.
186 187 */
187 188 ASSERT(refcount_is_zero(&db->db_holds));
188 189 ASSERT(db->db_state == DB_EVICTING);
189 190 ASSERT(!MUTEX_HELD(&db->db_mtx));
190 191
191 192 mutex_enter(DBUF_HASH_MUTEX(h, idx));
192 193 dbp = &h->hash_table[idx];
193 194 while ((dbf = *dbp) != db) {
194 195 dbp = &dbf->db_hash_next;
195 196 ASSERT(dbf != NULL);
196 197 }
197 198 *dbp = db->db_hash_next;
198 199 db->db_hash_next = NULL;
199 200 mutex_exit(DBUF_HASH_MUTEX(h, idx));
200 201 atomic_add_64(&dbuf_hash_count, -1);
201 202 }
202 203
203 204 static arc_evict_func_t dbuf_do_evict;
204 205
205 206 static void
206 207 dbuf_evict_user(dmu_buf_impl_t *db)
207 208 {
208 209 ASSERT(MUTEX_HELD(&db->db_mtx));
209 210
210 211 if (db->db_level != 0 || db->db_evict_func == NULL)
211 212 return;
212 213
213 214 if (db->db_user_data_ptr_ptr)
214 215 *db->db_user_data_ptr_ptr = db->db.db_data;
215 216 db->db_evict_func(&db->db, db->db_user_ptr);
216 217 db->db_user_ptr = NULL;
217 218 db->db_user_data_ptr_ptr = NULL;
218 219 db->db_evict_func = NULL;
219 220 }
220 221
221 222 boolean_t
222 223 dbuf_is_metadata(dmu_buf_impl_t *db)
223 224 {
224 225 if (db->db_level > 0) {
225 226 return (B_TRUE);
226 227 } else {
227 228 boolean_t is_metadata;
228 229
229 230 DB_DNODE_ENTER(db);
230 - is_metadata = dmu_ot[DB_DNODE(db)->dn_type].ot_metadata;
231 + is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
231 232 DB_DNODE_EXIT(db);
232 233
233 234 return (is_metadata);
234 235 }
235 236 }
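
The one-line change above (dmu_ot[...].ot_metadata -> DMU_OT_IS_METADATA(...)) is part of the feature-flags work (2747): with feature flags, an object type may be encoded with flag bits rather than being a plain index into the dmu_ot[] table, so callers must go through a macro that understands both encodings. A hedged sketch of the idea only, not the authoritative definition (which this change adds to sys/dmu.h; the DMU_OT_NEWTYPE/DMU_OT_METADATA names are assumed here):

    /*
     * Illustrative only: a flag-encoded type carries its metadata bit
     * directly; a legacy type still consults the dmu_ot[] table.
     */
    #define EXAMPLE_OT_IS_METADATA(ot) \
            (((ot) & DMU_OT_NEWTYPE) ? \
            (((ot) & DMU_OT_METADATA) != 0) : \
            dmu_ot[(ot)].ot_metadata)
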
236 237
237 238 void
238 239 dbuf_evict(dmu_buf_impl_t *db)
239 240 {
240 241 ASSERT(MUTEX_HELD(&db->db_mtx));
241 242 ASSERT(db->db_buf == NULL);
242 243 ASSERT(db->db_data_pending == NULL);
243 244
244 245 dbuf_clear(db);
245 246 dbuf_destroy(db);
246 247 }
247 248
248 249 void
249 250 dbuf_init(void)
250 251 {
251 252 uint64_t hsize = 1ULL << 16;
252 253 dbuf_hash_table_t *h = &dbuf_hash_table;
253 254 int i;
254 255
255 256 /*
256 257 * The hash table is big enough to fill all of physical memory
257 258 * with an average 4K block size. The table will take up
258 259 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
259 260 */
260 261 while (hsize * 4096 < physmem * PAGESIZE)
261 262 hsize <<= 1;
262 263
263 264 retry:
264 265 h->hash_table_mask = hsize - 1;
265 266 h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
266 267 if (h->hash_table == NULL) {
267 268 /* XXX - we should really return an error instead of assert */
268 269 ASSERT(hsize > (1ULL << 10));
269 270 hsize >>= 1;
270 271 goto retry;
271 272 }
272 273
273 274 dbuf_cache = kmem_cache_create("dmu_buf_impl_t",
274 275 sizeof (dmu_buf_impl_t),
275 276 0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
276 277
277 278 for (i = 0; i < DBUF_MUTEXES; i++)
278 279 mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
279 280 }
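
A rough worked example of the table sizing in dbuf_init() above, assuming a hypothetical machine with 16 GiB of physical memory and 8-byte pointers: the loop grows hsize until hsize * 4096 covers physmem * PAGESIZE, so

    hsize      = 16 GiB / 4 KiB  = 4,194,304 buckets   (1ULL << 22)
    table size = 4,194,304 * 8 B = 32 MiB

which matches the "2MB/GB with 8-byte pointers" estimate in the comment (the retry path halves hsize if that allocation fails).
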
280 281
281 282 void
282 283 dbuf_fini(void)
283 284 {
284 285 dbuf_hash_table_t *h = &dbuf_hash_table;
285 286 int i;
286 287
287 288 for (i = 0; i < DBUF_MUTEXES; i++)
288 289 mutex_destroy(&h->hash_mutexes[i]);
289 290 kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
290 291 kmem_cache_destroy(dbuf_cache);
291 292 }
292 293
293 294 /*
294 295 * Other stuff.
295 296 */
296 297
297 298 #ifdef ZFS_DEBUG
298 299 static void
299 300 dbuf_verify(dmu_buf_impl_t *db)
300 301 {
301 302 dnode_t *dn;
302 303 dbuf_dirty_record_t *dr;
303 304
304 305 ASSERT(MUTEX_HELD(&db->db_mtx));
305 306
306 307 if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
307 308 return;
308 309
309 310 ASSERT(db->db_objset != NULL);
310 311 DB_DNODE_ENTER(db);
311 312 dn = DB_DNODE(db);
312 313 if (dn == NULL) {
313 314 ASSERT(db->db_parent == NULL);
314 315 ASSERT(db->db_blkptr == NULL);
315 316 } else {
316 317 ASSERT3U(db->db.db_object, ==, dn->dn_object);
317 318 ASSERT3P(db->db_objset, ==, dn->dn_objset);
318 319 ASSERT3U(db->db_level, <, dn->dn_nlevels);
319 320 ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
320 321 db->db_blkid == DMU_SPILL_BLKID ||
321 322 !list_is_empty(&dn->dn_dbufs));
322 323 }
323 324 if (db->db_blkid == DMU_BONUS_BLKID) {
324 325 ASSERT(dn != NULL);
325 326 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
326 327 ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
327 328 } else if (db->db_blkid == DMU_SPILL_BLKID) {
328 329 ASSERT(dn != NULL);
329 330 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
330 331 ASSERT3U(db->db.db_offset, ==, 0);
331 332 } else {
332 333 ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
333 334 }
334 335
335 336 for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
336 337 ASSERT(dr->dr_dbuf == db);
337 338
338 339 for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
339 340 ASSERT(dr->dr_dbuf == db);
340 341
341 342 /*
342 343 * We can't assert that db_size matches dn_datablksz because it
343 344 * can be momentarily different when another thread is doing
344 345 * dnode_set_blksz().
345 346 */
346 347 if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
347 348 dr = db->db_data_pending;
348 349 /*
349 350 * It should only be modified in syncing context, so
350 351 * make sure we only have one copy of the data.
351 352 */
352 353 ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
353 354 }
354 355
355 356 /* verify db->db_blkptr */
356 357 if (db->db_blkptr) {
357 358 if (db->db_parent == dn->dn_dbuf) {
358 359 /* db is pointed to by the dnode */
359 360 /* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
360 361 if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
361 362 ASSERT(db->db_parent == NULL);
362 363 else
363 364 ASSERT(db->db_parent != NULL);
364 365 if (db->db_blkid != DMU_SPILL_BLKID)
365 366 ASSERT3P(db->db_blkptr, ==,
366 367 &dn->dn_phys->dn_blkptr[db->db_blkid]);
367 368 } else {
368 369 /* db is pointed to by an indirect block */
369 370 int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT;
370 371 ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
371 372 ASSERT3U(db->db_parent->db.db_object, ==,
372 373 db->db.db_object);
373 374 /*
374 375 * dnode_grow_indblksz() can make this fail if we don't
375 376 * have the struct_rwlock. XXX indblksz no longer
376 377 * grows. safe to do this now?
377 378 */
378 379 if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
379 380 ASSERT3P(db->db_blkptr, ==,
380 381 ((blkptr_t *)db->db_parent->db.db_data +
381 382 db->db_blkid % epb));
382 383 }
383 384 }
384 385 }
385 386 if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
386 387 (db->db_buf == NULL || db->db_buf->b_data) &&
387 388 db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
388 389 db->db_state != DB_FILL && !dn->dn_free_txg) {
389 390 /*
390 391 * If the blkptr isn't set but they have nonzero data,
391 392 * it had better be dirty, otherwise we'll lose that
392 393 * data when we evict this buffer.
393 394 */
394 395 if (db->db_dirtycnt == 0) {
395 396 uint64_t *buf = db->db.db_data;
396 397 int i;
397 398
398 399 for (i = 0; i < db->db.db_size >> 3; i++) {
399 400 ASSERT(buf[i] == 0);
400 401 }
401 402 }
402 403 }
403 404 DB_DNODE_EXIT(db);
404 405 }
405 406 #endif
406 407
407 408 static void
408 409 dbuf_update_data(dmu_buf_impl_t *db)
409 410 {
410 411 ASSERT(MUTEX_HELD(&db->db_mtx));
411 412 if (db->db_level == 0 && db->db_user_data_ptr_ptr) {
412 413 ASSERT(!refcount_is_zero(&db->db_holds));
413 414 *db->db_user_data_ptr_ptr = db->db.db_data;
414 415 }
415 416 }
416 417
417 418 static void
418 419 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
419 420 {
420 421 ASSERT(MUTEX_HELD(&db->db_mtx));
421 422 ASSERT(db->db_buf == NULL || !arc_has_callback(db->db_buf));
422 423 db->db_buf = buf;
423 424 if (buf != NULL) {
424 425 ASSERT(buf->b_data != NULL);
425 426 db->db.db_data = buf->b_data;
426 427 if (!arc_released(buf))
427 428 arc_set_callback(buf, dbuf_do_evict, db);
428 429 dbuf_update_data(db);
429 430 } else {
430 431 dbuf_evict_user(db);
431 432 db->db.db_data = NULL;
432 433 if (db->db_state != DB_NOFILL)
433 434 db->db_state = DB_UNCACHED;
434 435 }
435 436 }
436 437
437 438 /*
438 439 * Loan out an arc_buf for read. Return the loaned arc_buf.
439 440 */
440 441 arc_buf_t *
441 442 dbuf_loan_arcbuf(dmu_buf_impl_t *db)
442 443 {
443 444 arc_buf_t *abuf;
444 445
445 446 mutex_enter(&db->db_mtx);
446 447 if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
447 448 int blksz = db->db.db_size;
448 449 spa_t *spa;
449 450
450 451 mutex_exit(&db->db_mtx);
451 452 DB_GET_SPA(&spa, db);
452 453 abuf = arc_loan_buf(spa, blksz);
453 454 bcopy(db->db.db_data, abuf->b_data, blksz);
454 455 } else {
455 456 abuf = db->db_buf;
456 457 arc_loan_inuse_buf(abuf, db);
457 458 dbuf_set_data(db, NULL);
458 459 mutex_exit(&db->db_mtx);
459 460 }
460 461 return (abuf);
461 462 }
462 463
463 464 uint64_t
464 465 dbuf_whichblock(dnode_t *dn, uint64_t offset)
465 466 {
466 467 if (dn->dn_datablkshift) {
467 468 return (offset >> dn->dn_datablkshift);
468 469 } else {
469 470 ASSERT3U(offset, <, dn->dn_datablksz);
470 471 return (0);
471 472 }
472 473 }
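
A quick worked example for dbuf_whichblock(), with hypothetical numbers: a dnode using 128 KiB data blocks has dn_datablkshift = 17, so offset 300000 maps to block 300000 >> 17 = 2. An object whose data fits in one (possibly non-power-of-two sized) block has dn_datablkshift = 0, and every in-range offset maps to block 0, which is what the else branch returns.
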
473 474
474 475 static void
475 476 dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
476 477 {
477 478 dmu_buf_impl_t *db = vdb;
478 479
479 480 mutex_enter(&db->db_mtx);
480 481 ASSERT3U(db->db_state, ==, DB_READ);
481 482 /*
482 483 * All reads are synchronous, so we must have a hold on the dbuf
483 484 */
484 485 ASSERT(refcount_count(&db->db_holds) > 0);
485 486 ASSERT(db->db_buf == NULL);
486 487 ASSERT(db->db.db_data == NULL);
487 488 if (db->db_level == 0 && db->db_freed_in_flight) {
488 489 /* we were freed in flight; disregard any error */
489 490 arc_release(buf, db);
490 491 bzero(buf->b_data, db->db.db_size);
491 492 arc_buf_freeze(buf);
492 493 db->db_freed_in_flight = FALSE;
493 494 dbuf_set_data(db, buf);
494 495 db->db_state = DB_CACHED;
495 496 } else if (zio == NULL || zio->io_error == 0) {
496 497 dbuf_set_data(db, buf);
497 498 db->db_state = DB_CACHED;
498 499 } else {
499 500 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
500 501 ASSERT3P(db->db_buf, ==, NULL);
501 502 VERIFY(arc_buf_remove_ref(buf, db) == 1);
502 503 db->db_state = DB_UNCACHED;
503 504 }
504 505 cv_broadcast(&db->db_changed);
505 506 dbuf_rele_and_unlock(db, NULL);
506 507 }
507 508
508 509 static void
509 510 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
510 511 {
511 512 dnode_t *dn;
512 513 spa_t *spa;
513 514 zbookmark_t zb;
514 515 uint32_t aflags = ARC_NOWAIT;
515 516 arc_buf_t *pbuf;
516 517
517 518 DB_DNODE_ENTER(db);
518 519 dn = DB_DNODE(db);
519 520 ASSERT(!refcount_is_zero(&db->db_holds));
520 521 /* We need the struct_rwlock to prevent db_blkptr from changing. */
521 522 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
522 523 ASSERT(MUTEX_HELD(&db->db_mtx));
523 524 ASSERT(db->db_state == DB_UNCACHED);
524 525 ASSERT(db->db_buf == NULL);
525 526
526 527 if (db->db_blkid == DMU_BONUS_BLKID) {
527 528 int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
528 529
529 530 ASSERT3U(bonuslen, <=, db->db.db_size);
530 531 db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
531 532 arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
532 533 if (bonuslen < DN_MAX_BONUSLEN)
533 534 bzero(db->db.db_data, DN_MAX_BONUSLEN);
534 535 if (bonuslen)
535 536 bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
536 537 DB_DNODE_EXIT(db);
537 538 dbuf_update_data(db);
538 539 db->db_state = DB_CACHED;
539 540 mutex_exit(&db->db_mtx);
540 541 return;
541 542 }
542 543
543 544 /*
544 545 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
545 546 * processes the delete record and clears the bp while we are waiting
546 547 * for the dn_mtx (resulting in a "no" from block_freed).
547 548 */
548 549 if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
549 550 (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
550 551 BP_IS_HOLE(db->db_blkptr)))) {
551 552 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
552 553
553 554 dbuf_set_data(db, arc_buf_alloc(dn->dn_objset->os_spa,
554 555 db->db.db_size, db, type));
555 556 DB_DNODE_EXIT(db);
556 557 bzero(db->db.db_data, db->db.db_size);
557 558 db->db_state = DB_CACHED;
558 559 *flags |= DB_RF_CACHED;
559 560 mutex_exit(&db->db_mtx);
560 561 return;
561 562 }
562 563
563 564 spa = dn->dn_objset->os_spa;
564 565 DB_DNODE_EXIT(db);
565 566
566 567 db->db_state = DB_READ;
567 568 mutex_exit(&db->db_mtx);
568 569
569 570 if (DBUF_IS_L2CACHEABLE(db))
570 571 aflags |= ARC_L2CACHE;
571 572
572 573 SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
573 574 db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
574 575 db->db.db_object, db->db_level, db->db_blkid);
575 576
576 577 dbuf_add_ref(db, NULL);
577 578 /* ZIO_FLAG_CANFAIL callers have to check the parent zio's error */
578 579
579 580 if (db->db_parent)
580 581 pbuf = db->db_parent->db_buf;
581 582 else
582 583 pbuf = db->db_objset->os_phys_buf;
583 584
584 585 (void) dsl_read(zio, spa, db->db_blkptr, pbuf,
585 586 dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
586 587 (*flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
587 588 &aflags, &zb);
588 589 if (aflags & ARC_CACHED)
589 590 *flags |= DB_RF_CACHED;
590 591 }
591 592
592 593 int
593 594 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
594 595 {
595 596 int err = 0;
596 597 int havepzio = (zio != NULL);
597 598 int prefetch;
598 599 dnode_t *dn;
599 600
600 601 /*
601 602 * We don't have to hold the mutex to check db_state because it
602 603 * can't be freed while we have a hold on the buffer.
603 604 */
604 605 ASSERT(!refcount_is_zero(&db->db_holds));
605 606
606 607 if (db->db_state == DB_NOFILL)
607 608 return (EIO);
608 609
609 610 DB_DNODE_ENTER(db);
610 611 dn = DB_DNODE(db);
611 612 if ((flags & DB_RF_HAVESTRUCT) == 0)
612 613 rw_enter(&dn->dn_struct_rwlock, RW_READER);
613 614
614 615 prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
615 616 (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
616 617 DBUF_IS_CACHEABLE(db);
617 618
618 619 mutex_enter(&db->db_mtx);
619 620 if (db->db_state == DB_CACHED) {
620 621 mutex_exit(&db->db_mtx);
621 622 if (prefetch)
622 623 dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
623 624 db->db.db_size, TRUE);
624 625 if ((flags & DB_RF_HAVESTRUCT) == 0)
625 626 rw_exit(&dn->dn_struct_rwlock);
626 627 DB_DNODE_EXIT(db);
627 628 } else if (db->db_state == DB_UNCACHED) {
628 629 spa_t *spa = dn->dn_objset->os_spa;
629 630
630 631 if (zio == NULL)
631 632 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
632 633 dbuf_read_impl(db, zio, &flags);
633 634
634 635 /* dbuf_read_impl has dropped db_mtx for us */
635 636
636 637 if (prefetch)
637 638 dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
638 639 db->db.db_size, flags & DB_RF_CACHED);
639 640
640 641 if ((flags & DB_RF_HAVESTRUCT) == 0)
641 642 rw_exit(&dn->dn_struct_rwlock);
642 643 DB_DNODE_EXIT(db);
643 644
644 645 if (!havepzio)
645 646 err = zio_wait(zio);
646 647 } else {
647 648 mutex_exit(&db->db_mtx);
648 649 if (prefetch)
649 650 dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
650 651 db->db.db_size, TRUE);
651 652 if ((flags & DB_RF_HAVESTRUCT) == 0)
652 653 rw_exit(&dn->dn_struct_rwlock);
653 654 DB_DNODE_EXIT(db);
654 655
655 656 mutex_enter(&db->db_mtx);
656 657 if ((flags & DB_RF_NEVERWAIT) == 0) {
657 658 while (db->db_state == DB_READ ||
658 659 db->db_state == DB_FILL) {
659 660 ASSERT(db->db_state == DB_READ ||
660 661 (flags & DB_RF_HAVESTRUCT) == 0);
661 662 cv_wait(&db->db_changed, &db->db_mtx);
662 663 }
663 664 if (db->db_state == DB_UNCACHED)
664 665 err = EIO;
665 666 }
666 667 mutex_exit(&db->db_mtx);
667 668 }
668 669
669 670 ASSERT(err || havepzio || db->db_state == DB_CACHED);
670 671 return (err);
671 672 }
672 673
673 674 static void
674 675 dbuf_noread(dmu_buf_impl_t *db)
675 676 {
676 677 ASSERT(!refcount_is_zero(&db->db_holds));
677 678 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
678 679 mutex_enter(&db->db_mtx);
679 680 while (db->db_state == DB_READ || db->db_state == DB_FILL)
680 681 cv_wait(&db->db_changed, &db->db_mtx);
681 682 if (db->db_state == DB_UNCACHED) {
682 683 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
683 684 spa_t *spa;
684 685
685 686 ASSERT(db->db_buf == NULL);
686 687 ASSERT(db->db.db_data == NULL);
687 688 DB_GET_SPA(&spa, db);
688 689 dbuf_set_data(db, arc_buf_alloc(spa, db->db.db_size, db, type));
689 690 db->db_state = DB_FILL;
690 691 } else if (db->db_state == DB_NOFILL) {
691 692 dbuf_set_data(db, NULL);
692 693 } else {
693 694 ASSERT3U(db->db_state, ==, DB_CACHED);
694 695 }
695 696 mutex_exit(&db->db_mtx);
696 697 }
697 698
698 699 /*
699 700 * This is our just-in-time copy function. It makes a copy of
700 701 * buffers that have been modified in a previous transaction
701 702 * group, before we modify them in the current active group.
702 703 *
703 704 * This function is used in two places: when we are dirtying a
704 705 * buffer for the first time in a txg, and when we are freeing
705 706 * a range in a dnode that includes this buffer.
706 707 *
707 708 * Note that when we are called from dbuf_free_range() we do
708 709 * not put a hold on the buffer, we just traverse the active
709 710 * dbuf list for the dnode.
710 711 */
711 712 static void
712 713 dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
713 714 {
714 715 dbuf_dirty_record_t *dr = db->db_last_dirty;
715 716
716 717 ASSERT(MUTEX_HELD(&db->db_mtx));
717 718 ASSERT(db->db.db_data != NULL);
718 719 ASSERT(db->db_level == 0);
719 720 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
720 721
721 722 if (dr == NULL ||
722 723 (dr->dt.dl.dr_data !=
723 724 ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
724 725 return;
725 726
726 727 /*
727 728 * If the last dirty record for this dbuf has not yet synced
728 729 * and it's referencing the dbuf data, either:
729 730 * reset the reference to point to a new copy,
730 731 * or (if there are no active holders)
731 732 * just null out the current db_data pointer.
732 733 */
733 734 ASSERT(dr->dr_txg >= txg - 2);
734 735 if (db->db_blkid == DMU_BONUS_BLKID) {
735 736 /* Note that the data bufs here are zio_bufs */
736 737 dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
737 738 arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
738 739 bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
739 740 } else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
740 741 int size = db->db.db_size;
741 742 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
742 743 spa_t *spa;
743 744
744 745 DB_GET_SPA(&spa, db);
745 746 dr->dt.dl.dr_data = arc_buf_alloc(spa, size, db, type);
746 747 bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
747 748 } else {
748 749 dbuf_set_data(db, NULL);
749 750 }
750 751 }
751 752
752 753 void
753 754 dbuf_unoverride(dbuf_dirty_record_t *dr)
754 755 {
755 756 dmu_buf_impl_t *db = dr->dr_dbuf;
756 757 blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
757 758 uint64_t txg = dr->dr_txg;
758 759
759 760 ASSERT(MUTEX_HELD(&db->db_mtx));
760 761 ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
761 762 ASSERT(db->db_level == 0);
762 763
763 764 if (db->db_blkid == DMU_BONUS_BLKID ||
764 765 dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
765 766 return;
766 767
767 768 ASSERT(db->db_data_pending != dr);
768 769
769 770 /* free this block */
770 771 if (!BP_IS_HOLE(bp)) {
771 772 spa_t *spa;
772 773
773 774 DB_GET_SPA(&spa, db);
774 775 zio_free(spa, txg, bp);
775 776 }
776 777 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
777 778 /*
778 779 * Release the already-written buffer, so we leave it in
779 780 * a consistent dirty state. Note that all callers are
780 781 * modifying the buffer, so they will immediately do
781 782 * another (redundant) arc_release(). Therefore, leave
782 783 * the buf thawed to save the effort of freezing &
783 784 * immediately re-thawing it.
784 785 */
785 786 arc_release(dr->dt.dl.dr_data, db);
786 787 }
787 788
788 789 /*
789 790 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
790 791 * data blocks in the free range, so that any future readers will find
791 792 * empty blocks. Also, if we happen across any level-1 dbufs in the
792 793 * range that have not already been marked dirty, mark them dirty so
793 794 * they stay in memory.
794 795 */
795 796 void
796 797 dbuf_free_range(dnode_t *dn, uint64_t start, uint64_t end, dmu_tx_t *tx)
797 798 {
798 799 dmu_buf_impl_t *db, *db_next;
799 800 uint64_t txg = tx->tx_txg;
800 801 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
801 802 uint64_t first_l1 = start >> epbs;
802 803 uint64_t last_l1 = end >> epbs;
803 804
804 805 if (end > dn->dn_maxblkid && (end != DMU_SPILL_BLKID)) {
805 806 end = dn->dn_maxblkid;
806 807 last_l1 = end >> epbs;
807 808 }
808 809 dprintf_dnode(dn, "start=%llu end=%llu\n", start, end);
809 810 mutex_enter(&dn->dn_dbufs_mtx);
810 811 for (db = list_head(&dn->dn_dbufs); db; db = db_next) {
811 812 db_next = list_next(&dn->dn_dbufs, db);
812 813 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
813 814
814 815 if (db->db_level == 1 &&
815 816 db->db_blkid >= first_l1 && db->db_blkid <= last_l1) {
816 817 mutex_enter(&db->db_mtx);
817 818 if (db->db_last_dirty &&
818 819 db->db_last_dirty->dr_txg < txg) {
819 820 dbuf_add_ref(db, FTAG);
820 821 mutex_exit(&db->db_mtx);
821 822 dbuf_will_dirty(db, tx);
822 823 dbuf_rele(db, FTAG);
823 824 } else {
824 825 mutex_exit(&db->db_mtx);
825 826 }
826 827 }
827 828
828 829 if (db->db_level != 0)
829 830 continue;
830 831 dprintf_dbuf(db, "found buf %s\n", "");
831 832 if (db->db_blkid < start || db->db_blkid > end)
832 833 continue;
833 834
834 835 /* found a level 0 buffer in the range */
835 836 if (dbuf_undirty(db, tx))
836 837 continue;
837 838
838 839 mutex_enter(&db->db_mtx);
839 840 if (db->db_state == DB_UNCACHED ||
840 841 db->db_state == DB_NOFILL ||
841 842 db->db_state == DB_EVICTING) {
842 843 ASSERT(db->db.db_data == NULL);
843 844 mutex_exit(&db->db_mtx);
844 845 continue;
845 846 }
846 847 if (db->db_state == DB_READ || db->db_state == DB_FILL) {
847 848 /* will be handled in dbuf_read_done or dbuf_rele */
848 849 db->db_freed_in_flight = TRUE;
849 850 mutex_exit(&db->db_mtx);
850 851 continue;
851 852 }
852 853 if (refcount_count(&db->db_holds) == 0) {
853 854 ASSERT(db->db_buf);
854 855 dbuf_clear(db);
855 856 continue;
856 857 }
857 858 /* The dbuf is referenced */
858 859
859 860 if (db->db_last_dirty != NULL) {
860 861 dbuf_dirty_record_t *dr = db->db_last_dirty;
861 862
862 863 if (dr->dr_txg == txg) {
863 864 /*
864 865 * This buffer is "in-use", re-adjust the file
865 866 * size to reflect that this buffer may
866 867 * contain new data when we sync.
867 868 */
868 869 if (db->db_blkid != DMU_SPILL_BLKID &&
869 870 db->db_blkid > dn->dn_maxblkid)
870 871 dn->dn_maxblkid = db->db_blkid;
871 872 dbuf_unoverride(dr);
872 873 } else {
873 874 /*
874 875 * This dbuf is not dirty in the open context.
875 876 * Either uncache it (if it's not referenced in
876 877 * the open context) or reset its contents to
877 878 * empty.
878 879 */
879 880 dbuf_fix_old_data(db, txg);
880 881 }
881 882 }
882 883 /* clear the contents if it's cached */
883 884 if (db->db_state == DB_CACHED) {
884 885 ASSERT(db->db.db_data != NULL);
885 886 arc_release(db->db_buf, db);
886 887 bzero(db->db.db_data, db->db.db_size);
887 888 arc_buf_freeze(db->db_buf);
888 889 }
889 890
890 891 mutex_exit(&db->db_mtx);
891 892 }
892 893 mutex_exit(&dn->dn_dbufs_mtx);
893 894 }
894 895
895 896 static int
896 897 dbuf_block_freeable(dmu_buf_impl_t *db)
897 898 {
898 899 dsl_dataset_t *ds = db->db_objset->os_dsl_dataset;
899 900 uint64_t birth_txg = 0;
900 901
901 902 /*
902 903 * We don't need any locking to protect db_blkptr:
903 904 * If it's syncing, then db_last_dirty will be set
904 905 * so we'll ignore db_blkptr.
905 906 */
906 907 ASSERT(MUTEX_HELD(&db->db_mtx));
907 908 if (db->db_last_dirty)
908 909 birth_txg = db->db_last_dirty->dr_txg;
909 910 else if (db->db_blkptr)
910 911 birth_txg = db->db_blkptr->blk_birth;
911 912
912 913 /*
913 914 * If we don't exist or are in a snapshot, we can't be freed.
914 915 * Don't pass the bp to dsl_dataset_block_freeable() since we
915 916 * are holding the db_mtx lock and might deadlock if we are
916 917 * prefetching a dedup-ed block.
917 918 */
918 919 if (birth_txg)
919 920 return (ds == NULL ||
920 921 dsl_dataset_block_freeable(ds, NULL, birth_txg));
921 922 else
922 923 return (FALSE);
923 924 }
924 925
925 926 void
926 927 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
927 928 {
928 929 arc_buf_t *buf, *obuf;
929 930 int osize = db->db.db_size;
930 931 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
931 932 dnode_t *dn;
932 933
933 934 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
934 935
935 936 DB_DNODE_ENTER(db);
936 937 dn = DB_DNODE(db);
937 938
938 939 /* XXX does *this* func really need the lock? */
939 940 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
940 941
941 942 /*
942 943 * This call to dbuf_will_dirty() with the dn_struct_rwlock held
943 944 * is OK, because there can be no other references to the db
944 945 * when we are changing its size, so no concurrent DB_FILL can
945 946 * be happening.
946 947 */
947 948 /*
948 949 * XXX we should be doing a dbuf_read, checking the return
949 950 * value and returning that up to our callers
950 951 */
951 952 dbuf_will_dirty(db, tx);
952 953
953 954 /* create the data buffer for the new block */
954 955 buf = arc_buf_alloc(dn->dn_objset->os_spa, size, db, type);
955 956
956 957 /* copy old block data to the new block */
957 958 obuf = db->db_buf;
958 959 bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
959 960 /* zero the remainder */
960 961 if (size > osize)
961 962 bzero((uint8_t *)buf->b_data + osize, size - osize);
962 963
963 964 mutex_enter(&db->db_mtx);
964 965 dbuf_set_data(db, buf);
965 966 VERIFY(arc_buf_remove_ref(obuf, db) == 1);
966 967 db->db.db_size = size;
967 968
968 969 if (db->db_level == 0) {
969 970 ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
970 971 db->db_last_dirty->dt.dl.dr_data = buf;
971 972 }
972 973 mutex_exit(&db->db_mtx);
973 974
974 975 dnode_willuse_space(dn, size-osize, tx);
975 976 DB_DNODE_EXIT(db);
976 977 }
977 978
978 979 void
979 980 dbuf_release_bp(dmu_buf_impl_t *db)
980 981 {
981 982 objset_t *os;
982 983 zbookmark_t zb;
983 984
984 985 DB_GET_OBJSET(&os, db);
985 986 ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
986 987 ASSERT(arc_released(os->os_phys_buf) ||
987 988 list_link_active(&os->os_dsl_dataset->ds_synced_link));
988 989 ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
989 990
990 991 zb.zb_objset = os->os_dsl_dataset ?
991 992 os->os_dsl_dataset->ds_object : 0;
992 993 zb.zb_object = db->db.db_object;
993 994 zb.zb_level = db->db_level;
994 995 zb.zb_blkid = db->db_blkid;
995 996 (void) arc_release_bp(db->db_buf, db,
996 997 db->db_blkptr, os->os_spa, &zb);
997 998 }
998 999
999 1000 dbuf_dirty_record_t *
1000 1001 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1001 1002 {
1002 1003 dnode_t *dn;
1003 1004 objset_t *os;
1004 1005 dbuf_dirty_record_t **drp, *dr;
1005 1006 int drop_struct_lock = FALSE;
1006 1007 boolean_t do_free_accounting = B_FALSE;
1007 1008 int txgoff = tx->tx_txg & TXG_MASK;
1008 1009
1009 1010 ASSERT(tx->tx_txg != 0);
1010 1011 ASSERT(!refcount_is_zero(&db->db_holds));
1011 1012 DMU_TX_DIRTY_BUF(tx, db);
1012 1013
1013 1014 DB_DNODE_ENTER(db);
1014 1015 dn = DB_DNODE(db);
1015 1016 /*
1016 1017 * Shouldn't dirty a regular buffer in syncing context. Private
1017 1018 * objects may be dirtied in syncing context, but only if they
1018 1019 * were already pre-dirtied in open context.
1019 1020 */
1020 1021 ASSERT(!dmu_tx_is_syncing(tx) ||
1021 1022 BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
1022 1023 DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
1023 1024 dn->dn_objset->os_dsl_dataset == NULL);
1024 1025 /*
1025 1026 * We make this assert for private objects as well, but after we
1026 1027 * check if we're already dirty. They are allowed to re-dirty
1027 1028 * in syncing context.
1028 1029 */
1029 1030 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
1030 1031 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
1031 1032 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
1032 1033
1033 1034 mutex_enter(&db->db_mtx);
1034 1035 /*
1035 1036 * XXX make this true for indirects too? The problem is that
1036 1037 * transactions created with dmu_tx_create_assigned() from
1037 1038 * syncing context don't bother holding ahead.
1038 1039 */
1039 1040 ASSERT(db->db_level != 0 ||
1040 1041 db->db_state == DB_CACHED || db->db_state == DB_FILL ||
1041 1042 db->db_state == DB_NOFILL);
1042 1043
1043 1044 mutex_enter(&dn->dn_mtx);
1044 1045 /*
1045 1046 * Don't set dirtyctx to SYNC if we're just modifying this as we
1046 1047 * initialize the objset.
1047 1048 */
1048 1049 if (dn->dn_dirtyctx == DN_UNDIRTIED &&
1049 1050 !BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
1050 1051 dn->dn_dirtyctx =
1051 1052 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN);
1052 1053 ASSERT(dn->dn_dirtyctx_firstset == NULL);
1053 1054 dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
1054 1055 }
1055 1056 mutex_exit(&dn->dn_mtx);
1056 1057
1057 1058 if (db->db_blkid == DMU_SPILL_BLKID)
1058 1059 dn->dn_have_spill = B_TRUE;
1059 1060
1060 1061 /*
1061 1062 * If this buffer is already dirty, we're done.
1062 1063 */
1063 1064 drp = &db->db_last_dirty;
1064 1065 ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
1065 1066 db->db.db_object == DMU_META_DNODE_OBJECT);
1066 1067 while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
1067 1068 drp = &dr->dr_next;
1068 1069 if (dr && dr->dr_txg == tx->tx_txg) {
1069 1070 DB_DNODE_EXIT(db);
1070 1071
1071 1072 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
1072 1073 /*
1073 1074 * If this buffer has already been written out,
1074 1075 * we now need to reset its state.
1075 1076 */
1076 1077 dbuf_unoverride(dr);
1077 1078 if (db->db.db_object != DMU_META_DNODE_OBJECT &&
1078 1079 db->db_state != DB_NOFILL)
1079 1080 arc_buf_thaw(db->db_buf);
1080 1081 }
1081 1082 mutex_exit(&db->db_mtx);
1082 1083 return (dr);
1083 1084 }
1084 1085
1085 1086 /*
1086 1087 * Only valid if not already dirty.
1087 1088 */
1088 1089 ASSERT(dn->dn_object == 0 ||
1089 1090 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
1090 1091 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
1091 1092
1092 1093 ASSERT3U(dn->dn_nlevels, >, db->db_level);
1093 1094 ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
1094 1095 dn->dn_phys->dn_nlevels > db->db_level ||
1095 1096 dn->dn_next_nlevels[txgoff] > db->db_level ||
1096 1097 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
1097 1098 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
1098 1099
1099 1100 /*
1100 1101 * We should only be dirtying in syncing context if it's the
1101 1102 * mos or we're initializing the os or it's a special object.
1102 1103 * However, we are allowed to dirty in syncing context provided
1103 1104 * we already dirtied it in open context. Hence we must make
1104 1105 * this assertion only if we're not already dirty.
1105 1106 */
1106 1107 os = dn->dn_objset;
1107 1108 ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
1108 1109 os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
1109 1110 ASSERT(db->db.db_size != 0);
1110 1111
1111 1112 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
1112 1113
1113 1114 if (db->db_blkid != DMU_BONUS_BLKID) {
1114 1115 /*
1115 1116 * Update the accounting.
1116 1117 * Note: we delay "free accounting" until after we drop
1117 1118 * the db_mtx. This keeps us from grabbing other locks
1118 1119 * (and possibly deadlocking) in bp_get_dsize() while
1119 1120 * also holding the db_mtx.
1120 1121 */
1121 1122 dnode_willuse_space(dn, db->db.db_size, tx);
1122 1123 do_free_accounting = dbuf_block_freeable(db);
1123 1124 }
1124 1125
1125 1126 /*
1126 1127 * If this buffer is dirty in an old transaction group we need
1127 1128 * to make a copy of it so that the changes we make in this
1128 1129 * transaction group won't leak out when we sync the older txg.
1129 1130 */
1130 1131 dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
1131 1132 if (db->db_level == 0) {
1132 1133 void *data_old = db->db_buf;
1133 1134
1134 1135 if (db->db_state != DB_NOFILL) {
1135 1136 if (db->db_blkid == DMU_BONUS_BLKID) {
1136 1137 dbuf_fix_old_data(db, tx->tx_txg);
1137 1138 data_old = db->db.db_data;
1138 1139 } else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
1139 1140 /*
1140 1141 * Release the data buffer from the cache so
1141 1142 * that we can modify it without impacting
1142 1143 * possible other users of this cached data
1143 1144 * block. Note that indirect blocks and
1144 1145 * private objects are not released until the
1145 1146 * syncing state (since they are only modified
1146 1147 * then).
1147 1148 */
1148 1149 arc_release(db->db_buf, db);
1149 1150 dbuf_fix_old_data(db, tx->tx_txg);
1150 1151 data_old = db->db_buf;
1151 1152 }
1152 1153 ASSERT(data_old != NULL);
1153 1154 }
1154 1155 dr->dt.dl.dr_data = data_old;
1155 1156 } else {
1156 1157 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
1157 1158 list_create(&dr->dt.di.dr_children,
1158 1159 sizeof (dbuf_dirty_record_t),
1159 1160 offsetof(dbuf_dirty_record_t, dr_dirty_node));
1160 1161 }
1161 1162 dr->dr_dbuf = db;
1162 1163 dr->dr_txg = tx->tx_txg;
1163 1164 dr->dr_next = *drp;
1164 1165 *drp = dr;
1165 1166
1166 1167 /*
1167 1168 * We could have been freed_in_flight between the dbuf_noread
1168 1169 * and dbuf_dirty. We win, as though the dbuf_noread() had
1169 1170 * happened after the free.
1170 1171 */
1171 1172 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1172 1173 db->db_blkid != DMU_SPILL_BLKID) {
1173 1174 mutex_enter(&dn->dn_mtx);
1174 1175 dnode_clear_range(dn, db->db_blkid, 1, tx);
1175 1176 mutex_exit(&dn->dn_mtx);
1176 1177 db->db_freed_in_flight = FALSE;
1177 1178 }
1178 1179
1179 1180 /*
1180 1181 * This buffer is now part of this txg
1181 1182 */
1182 1183 dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
1183 1184 db->db_dirtycnt += 1;
1184 1185 ASSERT3U(db->db_dirtycnt, <=, 3);
1185 1186
1186 1187 mutex_exit(&db->db_mtx);
1187 1188
1188 1189 if (db->db_blkid == DMU_BONUS_BLKID ||
1189 1190 db->db_blkid == DMU_SPILL_BLKID) {
1190 1191 mutex_enter(&dn->dn_mtx);
1191 1192 ASSERT(!list_link_active(&dr->dr_dirty_node));
1192 1193 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
1193 1194 mutex_exit(&dn->dn_mtx);
1194 1195 dnode_setdirty(dn, tx);
1195 1196 DB_DNODE_EXIT(db);
1196 1197 return (dr);
1197 1198 } else if (do_free_accounting) {
1198 1199 blkptr_t *bp = db->db_blkptr;
1199 1200 int64_t willfree = (bp && !BP_IS_HOLE(bp)) ?
1200 1201 bp_get_dsize(os->os_spa, bp) : db->db.db_size;
1201 1202 /*
1202 1203 * This is only a guess -- if the dbuf is dirty
1203 1204 * in a previous txg, we don't know how much
1204 1205 * space it will use on disk yet. We should
1205 1206 * really have the struct_rwlock to access
1206 1207 * db_blkptr, but since this is just a guess,
1207 1208 * it's OK if we get an odd answer.
1208 1209 */
1209 1210 ddt_prefetch(os->os_spa, bp);
1210 1211 dnode_willuse_space(dn, -willfree, tx);
1211 1212 }
1212 1213
1213 1214 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
1214 1215 rw_enter(&dn->dn_struct_rwlock, RW_READER);
1215 1216 drop_struct_lock = TRUE;
1216 1217 }
1217 1218
1218 1219 if (db->db_level == 0) {
1219 1220 dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
1220 1221 ASSERT(dn->dn_maxblkid >= db->db_blkid);
1221 1222 }
1222 1223
1223 1224 if (db->db_level+1 < dn->dn_nlevels) {
1224 1225 dmu_buf_impl_t *parent = db->db_parent;
1225 1226 dbuf_dirty_record_t *di;
1226 1227 int parent_held = FALSE;
1227 1228
1228 1229 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
1229 1230 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1230 1231
1231 1232 parent = dbuf_hold_level(dn, db->db_level+1,
1232 1233 db->db_blkid >> epbs, FTAG);
1233 1234 ASSERT(parent != NULL);
1234 1235 parent_held = TRUE;
1235 1236 }
1236 1237 if (drop_struct_lock)
1237 1238 rw_exit(&dn->dn_struct_rwlock);
1238 1239 ASSERT3U(db->db_level+1, ==, parent->db_level);
1239 1240 di = dbuf_dirty(parent, tx);
1240 1241 if (parent_held)
1241 1242 dbuf_rele(parent, FTAG);
1242 1243
1243 1244 mutex_enter(&db->db_mtx);
1244 1245 /* possible race with dbuf_undirty() */
1245 1246 if (db->db_last_dirty == dr ||
1246 1247 dn->dn_object == DMU_META_DNODE_OBJECT) {
1247 1248 mutex_enter(&di->dt.di.dr_mtx);
1248 1249 ASSERT3U(di->dr_txg, ==, tx->tx_txg);
1249 1250 ASSERT(!list_link_active(&dr->dr_dirty_node));
1250 1251 list_insert_tail(&di->dt.di.dr_children, dr);
1251 1252 mutex_exit(&di->dt.di.dr_mtx);
1252 1253 dr->dr_parent = di;
1253 1254 }
1254 1255 mutex_exit(&db->db_mtx);
1255 1256 } else {
1256 1257 ASSERT(db->db_level+1 == dn->dn_nlevels);
1257 1258 ASSERT(db->db_blkid < dn->dn_nblkptr);
1258 1259 ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
1259 1260 mutex_enter(&dn->dn_mtx);
1260 1261 ASSERT(!list_link_active(&dr->dr_dirty_node));
1261 1262 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
1262 1263 mutex_exit(&dn->dn_mtx);
1263 1264 if (drop_struct_lock)
1264 1265 rw_exit(&dn->dn_struct_rwlock);
1265 1266 }
1266 1267
1267 1268 dnode_setdirty(dn, tx);
1268 1269 DB_DNODE_EXIT(db);
1269 1270 return (dr);
1270 1271 }
1271 1272
1272 1273 static int
1273 1274 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1274 1275 {
1275 1276 dnode_t *dn;
1276 1277 uint64_t txg = tx->tx_txg;
1277 1278 dbuf_dirty_record_t *dr, **drp;
1278 1279
1279 1280 ASSERT(txg != 0);
1280 1281 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1281 1282
1282 1283 mutex_enter(&db->db_mtx);
1283 1284 /*
1284 1285 * If this buffer is not dirty, we're done.
1285 1286 */
1286 1287 for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
1287 1288 if (dr->dr_txg <= txg)
1288 1289 break;
1289 1290 if (dr == NULL || dr->dr_txg < txg) {
1290 1291 mutex_exit(&db->db_mtx);
1291 1292 return (0);
1292 1293 }
1293 1294 ASSERT(dr->dr_txg == txg);
1294 1295 ASSERT(dr->dr_dbuf == db);
1295 1296
1296 1297 DB_DNODE_ENTER(db);
1297 1298 dn = DB_DNODE(db);
1298 1299
1299 1300 /*
1300 1301 * If this buffer is currently held, we cannot undirty
1301 1302 * it, since one of the current holders may be in the
1302 1303 * middle of an update. Note that users of dbuf_undirty()
1303 1304 * should not place a hold on the dbuf before the call.
1304 1305 * Also note: we can get here with a spill block, so
1305 1306 * test for that similar to how dbuf_dirty does.
1306 1307 */
1307 1308 if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
1308 1309 mutex_exit(&db->db_mtx);
1309 1310 /* Make sure we don't toss this buffer at sync phase */
1310 1311 if (db->db_blkid != DMU_SPILL_BLKID) {
1311 1312 mutex_enter(&dn->dn_mtx);
1312 1313 dnode_clear_range(dn, db->db_blkid, 1, tx);
1313 1314 mutex_exit(&dn->dn_mtx);
1314 1315 }
1315 1316 DB_DNODE_EXIT(db);
1316 1317 return (0);
1317 1318 }
1318 1319
1319 1320 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
1320 1321
1321 1322 ASSERT(db->db.db_size != 0);
1322 1323
1323 1324 /* XXX would be nice to fix up dn_towrite_space[] */
1324 1325
1325 1326 *drp = dr->dr_next;
1326 1327
1327 1328 /*
1328 1329 * Note that there are three places in dbuf_dirty()
1329 1330 * where this dirty record may be put on a list.
1330 1331 * Make sure to do a list_remove corresponding to
1331 1332 * every one of those list_insert calls.
1332 1333 */
1333 1334 if (dr->dr_parent) {
1334 1335 mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
1335 1336 list_remove(&dr->dr_parent->dt.di.dr_children, dr);
1336 1337 mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
1337 1338 } else if (db->db_blkid == DMU_SPILL_BLKID ||
1338 1339 db->db_level+1 == dn->dn_nlevels) {
1339 1340 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
1340 1341 mutex_enter(&dn->dn_mtx);
1341 1342 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
1342 1343 mutex_exit(&dn->dn_mtx);
1343 1344 }
1344 1345 DB_DNODE_EXIT(db);
1345 1346
1346 1347 if (db->db_level == 0) {
1347 1348 if (db->db_state != DB_NOFILL) {
1348 1349 dbuf_unoverride(dr);
1349 1350
1350 1351 ASSERT(db->db_buf != NULL);
1351 1352 ASSERT(dr->dt.dl.dr_data != NULL);
1352 1353 if (dr->dt.dl.dr_data != db->db_buf)
1353 1354 VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
1354 1355 db) == 1);
1355 1356 }
1356 1357 } else {
1357 1358 ASSERT(db->db_buf != NULL);
1358 1359 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
1359 1360 mutex_destroy(&dr->dt.di.dr_mtx);
1360 1361 list_destroy(&dr->dt.di.dr_children);
1361 1362 }
1362 1363 kmem_free(dr, sizeof (dbuf_dirty_record_t));
1363 1364
1364 1365 ASSERT(db->db_dirtycnt > 0);
1365 1366 db->db_dirtycnt -= 1;
1366 1367
1367 1368 if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
1368 1369 arc_buf_t *buf = db->db_buf;
1369 1370
1370 1371 ASSERT(db->db_state == DB_NOFILL || arc_released(buf));
1371 1372 dbuf_set_data(db, NULL);
1372 1373 VERIFY(arc_buf_remove_ref(buf, db) == 1);
1373 1374 dbuf_evict(db);
1374 1375 return (1);
1375 1376 }
1376 1377
1377 1378 mutex_exit(&db->db_mtx);
1378 1379 return (0);
1379 1380 }
1380 1381
1381 1382 #pragma weak dmu_buf_will_dirty = dbuf_will_dirty
1382 1383 void
1383 1384 dbuf_will_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1384 1385 {
1385 1386 int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;
1386 1387
1387 1388 ASSERT(tx->tx_txg != 0);
1388 1389 ASSERT(!refcount_is_zero(&db->db_holds));
1389 1390
1390 1391 DB_DNODE_ENTER(db);
1391 1392 if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
1392 1393 rf |= DB_RF_HAVESTRUCT;
1393 1394 DB_DNODE_EXIT(db);
1394 1395 (void) dbuf_read(db, NULL, rf);
1395 1396 (void) dbuf_dirty(db, tx);
1396 1397 }
1397 1398
1398 1399 void
1399 1400 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
1400 1401 {
1401 1402 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1402 1403
1403 1404 db->db_state = DB_NOFILL;
1404 1405
1405 1406 dmu_buf_will_fill(db_fake, tx);
1406 1407 }
1407 1408
1408 1409 void
1409 1410 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
1410 1411 {
1411 1412 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1412 1413
1413 1414 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1414 1415 ASSERT(tx->tx_txg != 0);
1415 1416 ASSERT(db->db_level == 0);
1416 1417 ASSERT(!refcount_is_zero(&db->db_holds));
1417 1418
1418 1419 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
1419 1420 dmu_tx_private_ok(tx));
1420 1421
1421 1422 dbuf_noread(db);
1422 1423 (void) dbuf_dirty(db, tx);
1423 1424 }
1424 1425
1425 1426 #pragma weak dmu_buf_fill_done = dbuf_fill_done
1426 1427 /* ARGSUSED */
1427 1428 void
1428 1429 dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
1429 1430 {
1430 1431 mutex_enter(&db->db_mtx);
1431 1432 DBUF_VERIFY(db);
1432 1433
1433 1434 if (db->db_state == DB_FILL) {
1434 1435 if (db->db_level == 0 && db->db_freed_in_flight) {
1435 1436 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1436 1437 /* we were freed while filling */
1437 1438 /* XXX dbuf_undirty? */
1438 1439 bzero(db->db.db_data, db->db.db_size);
1439 1440 db->db_freed_in_flight = FALSE;
1440 1441 }
1441 1442 db->db_state = DB_CACHED;
1442 1443 cv_broadcast(&db->db_changed);
1443 1444 }
1444 1445 mutex_exit(&db->db_mtx);
1445 1446 }
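
dmu_buf_will_fill()/dbuf_fill_done() form the write-side protocol for overwriting a block without reading it first: the caller announces the fill (which dirties the dbuf via dbuf_noread() and dbuf_dirty()), copies in the new contents, then marks the fill done so the DB_FILL -> DB_CACHED transition wakes any waiters on db_changed. A minimal sketch of a caller, loosely modeled on how the DMU overwrites whole blocks; the wrapper name is hypothetical and error handling is elided:

    static void
    example_overwrite_block(dmu_buf_t *db, const void *data, dmu_tx_t *tx)
    {
            dmu_buf_will_fill(db, tx);              /* dirty, skip the read */
            bcopy(data, db->db_data, db->db_size);  /* fill in new contents */
            dmu_buf_fill_done(db, tx);              /* DB_FILL -> DB_CACHED */
    }
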
1446 1447
1447 1448 /*
1448 1449 * Directly assign a provided arc buf to a given dbuf if it's not referenced
1449 1450 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
1450 1451 */
1451 1452 void
1452 1453 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
1453 1454 {
1454 1455 ASSERT(!refcount_is_zero(&db->db_holds));
1455 1456 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1456 1457 ASSERT(db->db_level == 0);
1457 1458 ASSERT(DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA);
1458 1459 ASSERT(buf != NULL);
1459 1460 ASSERT(arc_buf_size(buf) == db->db.db_size);
1460 1461 ASSERT(tx->tx_txg != 0);
1461 1462
1462 1463 arc_return_buf(buf, db);
1463 1464 ASSERT(arc_released(buf));
1464 1465
1465 1466 mutex_enter(&db->db_mtx);
1466 1467
1467 1468 while (db->db_state == DB_READ || db->db_state == DB_FILL)
1468 1469 cv_wait(&db->db_changed, &db->db_mtx);
1469 1470
1470 1471 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
1471 1472
1472 1473 if (db->db_state == DB_CACHED &&
1473 1474 refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
1474 1475 mutex_exit(&db->db_mtx);
1475 1476 (void) dbuf_dirty(db, tx);
1476 1477 bcopy(buf->b_data, db->db.db_data, db->db.db_size);
1477 1478 VERIFY(arc_buf_remove_ref(buf, db) == 1);
1478 1479 xuio_stat_wbuf_copied();
1479 1480 return;
1480 1481 }
1481 1482
1482 1483 xuio_stat_wbuf_nocopy();
1483 1484 if (db->db_state == DB_CACHED) {
1484 1485 dbuf_dirty_record_t *dr = db->db_last_dirty;
1485 1486
1486 1487 ASSERT(db->db_buf != NULL);
1487 1488 if (dr != NULL && dr->dr_txg == tx->tx_txg) {
1488 1489 ASSERT(dr->dt.dl.dr_data == db->db_buf);
1489 1490 if (!arc_released(db->db_buf)) {
1490 1491 ASSERT(dr->dt.dl.dr_override_state ==
1491 1492 DR_OVERRIDDEN);
1492 1493 arc_release(db->db_buf, db);
1493 1494 }
1494 1495 dr->dt.dl.dr_data = buf;
1495 1496 VERIFY(arc_buf_remove_ref(db->db_buf, db) == 1);
1496 1497 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
1497 1498 arc_release(db->db_buf, db);
1498 1499 VERIFY(arc_buf_remove_ref(db->db_buf, db) == 1);
1499 1500 }
1500 1501 db->db_buf = NULL;
1501 1502 }
1502 1503 ASSERT(db->db_buf == NULL);
1503 1504 dbuf_set_data(db, buf);
1504 1505 db->db_state = DB_FILL;
1505 1506 mutex_exit(&db->db_mtx);
1506 1507 (void) dbuf_dirty(db, tx);
1507 1508 dbuf_fill_done(db, tx);
1508 1509 }
1509 1510
1510 1511 /*
1511 1512 * "Clear" the contents of this dbuf. This will mark the dbuf
1512 1513 * EVICTING and clear *most* of its references. Unfortunately,
1513 1514 * when we are not holding the dn_dbufs_mtx, we can't clear the
1514 1515 * entry in the dn_dbufs list. We have to wait until dbuf_destroy()
1515 1516 * in this case. For callers from the DMU we will usually see:
1516 1517 * dbuf_clear()->arc_buf_evict()->dbuf_do_evict()->dbuf_destroy()
1517 1518 * For the arc callback, we will usually see:
1518 1519 * dbuf_do_evict()->dbuf_clear();dbuf_destroy()
1519 1520 * Sometimes, though, we will get a mix of these two:
1520 1521 * DMU: dbuf_clear()->arc_buf_evict()
1521 1522 * ARC: dbuf_do_evict()->dbuf_destroy()
1522 1523 */
1523 1524 void
1524 1525 dbuf_clear(dmu_buf_impl_t *db)
1525 1526 {
1526 1527 dnode_t *dn;
1527 1528 dmu_buf_impl_t *parent = db->db_parent;
1528 1529 dmu_buf_impl_t *dndb;
1529 1530 int dbuf_gone = FALSE;
1530 1531
1531 1532 ASSERT(MUTEX_HELD(&db->db_mtx));
1532 1533 ASSERT(refcount_is_zero(&db->db_holds));
1533 1534
1534 1535 dbuf_evict_user(db);
1535 1536
1536 1537 if (db->db_state == DB_CACHED) {
1537 1538 ASSERT(db->db.db_data != NULL);
1538 1539 if (db->db_blkid == DMU_BONUS_BLKID) {
1539 1540 zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN);
1540 1541 arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
1541 1542 }
1542 1543 db->db.db_data = NULL;
1543 1544 db->db_state = DB_UNCACHED;
1544 1545 }
1545 1546
1546 1547 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
1547 1548 ASSERT(db->db_data_pending == NULL);
1548 1549
1549 1550 db->db_state = DB_EVICTING;
1550 1551 db->db_blkptr = NULL;
1551 1552
1552 1553 DB_DNODE_ENTER(db);
1553 1554 dn = DB_DNODE(db);
1554 1555 dndb = dn->dn_dbuf;
1555 1556 if (db->db_blkid != DMU_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) {
1556 1557 list_remove(&dn->dn_dbufs, db);
1557 1558 (void) atomic_dec_32_nv(&dn->dn_dbufs_count);
1558 1559 membar_producer();
1559 1560 DB_DNODE_EXIT(db);
1560 1561 /*
1561 1562 * Decrementing the dbuf count means that the hold corresponding
1562 1563 * to the removed dbuf is no longer discounted in dnode_move(),
1563 1564 * so the dnode cannot be moved until after we release the hold.
1564 1565 * The membar_producer() ensures visibility of the decremented
1565 1566 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
1566 1567 * release any lock.
1567 1568 */
1568 1569 dnode_rele(dn, db);
1569 1570 db->db_dnode_handle = NULL;
1570 1571 } else {
1571 1572 DB_DNODE_EXIT(db);
1572 1573 }
1573 1574
1574 1575 if (db->db_buf)
1575 1576 dbuf_gone = arc_buf_evict(db->db_buf);
1576 1577
1577 1578 if (!dbuf_gone)
1578 1579 mutex_exit(&db->db_mtx);
1579 1580
1580 1581 /*
1581 1582 * If this dbuf is referenced from an indirect dbuf,
1582 1583 * decrement the ref count on the indirect dbuf.
1583 1584 */
1584 1585 if (parent && parent != dndb)
1585 1586 dbuf_rele(parent, db);
1586 1587 }
1587 1588
1588 1589 static int
1589 1590 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
1590 1591 dmu_buf_impl_t **parentp, blkptr_t **bpp)
1591 1592 {
1592 1593 int nlevels, epbs;
1593 1594
1594 1595 *parentp = NULL;
1595 1596 *bpp = NULL;
1596 1597
1597 1598 ASSERT(blkid != DMU_BONUS_BLKID);
1598 1599
1599 1600 if (blkid == DMU_SPILL_BLKID) {
1600 1601 mutex_enter(&dn->dn_mtx);
1601 1602 if (dn->dn_have_spill &&
1602 1603 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
1603 1604 *bpp = &dn->dn_phys->dn_spill;
1604 1605 else
1605 1606 *bpp = NULL;
1606 1607 dbuf_add_ref(dn->dn_dbuf, NULL);
1607 1608 *parentp = dn->dn_dbuf;
1608 1609 mutex_exit(&dn->dn_mtx);
1609 1610 return (0);
1610 1611 }
1611 1612
1612 1613 if (dn->dn_phys->dn_nlevels == 0)
1613 1614 nlevels = 1;
1614 1615 else
1615 1616 nlevels = dn->dn_phys->dn_nlevels;
1616 1617
1617 1618 epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1618 1619
1619 1620 ASSERT3U(level * epbs, <, 64);
1620 1621 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1621 1622 if (level >= nlevels ||
1622 1623 (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
1623 1624 /* the buffer has no parent yet */
1624 1625 return (ENOENT);
1625 1626 } else if (level < nlevels-1) {
1626 1627 /* this block is referenced from an indirect block */
1627 1628 int err = dbuf_hold_impl(dn, level+1,
1628 1629 blkid >> epbs, fail_sparse, NULL, parentp);
1629 1630 if (err)
1630 1631 return (err);
1631 1632 err = dbuf_read(*parentp, NULL,
1632 1633 (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
1633 1634 if (err) {
1634 1635 dbuf_rele(*parentp, NULL);
1635 1636 *parentp = NULL;
1636 1637 return (err);
1637 1638 }
1638 1639 *bpp = ((blkptr_t *)(*parentp)->db.db_data) +
1639 1640 (blkid & ((1ULL << epbs) - 1));
1640 1641 return (0);
1641 1642 } else {
1642 1643 /* the block is referenced from the dnode */
1643 1644 ASSERT3U(level, ==, nlevels-1);
1644 1645 ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
1645 1646 blkid < dn->dn_phys->dn_nblkptr);
1646 1647 if (dn->dn_dbuf) {
1647 1648 dbuf_add_ref(dn->dn_dbuf, NULL);
1648 1649 *parentp = dn->dn_dbuf;
1649 1650 }
1650 1651 *bpp = &dn->dn_phys->dn_blkptr[blkid];
1651 1652 return (0);
1652 1653 }
1653 1654 }
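
/*
 * Editorial sketch (worked example, not part of this change): the
 * parent-lookup arithmetic above.  Assume an indirect block size of 16K,
 * i.e. dn_indblkshift == 14.  A blkptr_t is 128 bytes (SPA_BLKPTRSHIFT is
 * 7), so epbs == 14 - 7 == 7 and each indirect block holds 1 << 7 == 128
 * block pointers.  For a level-0 request with blkid == 1000:
 *
 *	parent (level-1) blkid	= 1000 >> 7			= 7
 *	index within parent	= 1000 & ((1ULL << 7) - 1)	= 104
 *
 * so *bpp ends up pointing at slot 104 of the level-1 dbuf that covers
 * level-0 blocks 896 through 1023.
 */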
1654 1655
1655 1656 static dmu_buf_impl_t *
1656 1657 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
1657 1658 dmu_buf_impl_t *parent, blkptr_t *blkptr)
1658 1659 {
1659 1660 objset_t *os = dn->dn_objset;
1660 1661 dmu_buf_impl_t *db, *odb;
1661 1662
1662 1663 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1663 1664 ASSERT(dn->dn_type != DMU_OT_NONE);
1664 1665
1665 1666 db = kmem_cache_alloc(dbuf_cache, KM_SLEEP);
1666 1667
1667 1668 db->db_objset = os;
1668 1669 db->db.db_object = dn->dn_object;
1669 1670 db->db_level = level;
1670 1671 db->db_blkid = blkid;
1671 1672 db->db_last_dirty = NULL;
1672 1673 db->db_dirtycnt = 0;
1673 1674 db->db_dnode_handle = dn->dn_handle;
1674 1675 db->db_parent = parent;
1675 1676 db->db_blkptr = blkptr;
1676 1677
1677 1678 db->db_user_ptr = NULL;
1678 1679 db->db_user_data_ptr_ptr = NULL;
1679 1680 db->db_evict_func = NULL;
1680 1681 db->db_immediate_evict = 0;
1681 1682 db->db_freed_in_flight = 0;
1682 1683
1683 1684 if (blkid == DMU_BONUS_BLKID) {
1684 1685 ASSERT3P(parent, ==, dn->dn_dbuf);
1685 1686 db->db.db_size = DN_MAX_BONUSLEN -
1686 1687 (dn->dn_nblkptr-1) * sizeof (blkptr_t);
1687 1688 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
1688 1689 db->db.db_offset = DMU_BONUS_BLKID;
1689 1690 db->db_state = DB_UNCACHED;
1690 1691 /* the bonus dbuf is not placed in the hash table */
1691 1692 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1692 1693 return (db);
1693 1694 } else if (blkid == DMU_SPILL_BLKID) {
1694 1695 db->db.db_size = (blkptr != NULL) ?
1695 1696 BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
1696 1697 db->db.db_offset = 0;
1697 1698 } else {
1698 1699 int blocksize =
1699 1700 db->db_level ? 1<<dn->dn_indblkshift : dn->dn_datablksz;
1700 1701 db->db.db_size = blocksize;
1701 1702 db->db.db_offset = db->db_blkid * blocksize;
1702 1703 }
1703 1704
1704 1705 /*
1705 1706 * Hold the dn_dbufs_mtx while we get the new dbuf
1706 1707 * in the hash table *and* added to the dbufs list.
1707 1708 * This prevents a possible deadlock with someone
1708 1709 * trying to look up this dbuf before it's added to the
1709 1710 * dn_dbufs list.
1710 1711 */
1711 1712 mutex_enter(&dn->dn_dbufs_mtx);
1712 1713 db->db_state = DB_EVICTING;
1713 1714 if ((odb = dbuf_hash_insert(db)) != NULL) {
1714 1715 /* someone else inserted it first */
1715 1716 kmem_cache_free(dbuf_cache, db);
1716 1717 mutex_exit(&dn->dn_dbufs_mtx);
1717 1718 return (odb);
1718 1719 }
1719 1720 list_insert_head(&dn->dn_dbufs, db);
1720 1721 db->db_state = DB_UNCACHED;
1721 1722 mutex_exit(&dn->dn_dbufs_mtx);
1722 1723 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1723 1724
1724 1725 if (parent && parent != dn->dn_dbuf)
1725 1726 dbuf_add_ref(parent, db);
1726 1727
1727 1728 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
1728 1729 refcount_count(&dn->dn_holds) > 0);
1729 1730 (void) refcount_add(&dn->dn_holds, db);
1730 1731 (void) atomic_inc_32_nv(&dn->dn_dbufs_count);
1731 1732
1732 1733 dprintf_dbuf(db, "db=%p\n", db);
1733 1734
1734 1735 return (db);
1735 1736 }
1736 1737
1737 1738 static int
1738 1739 dbuf_do_evict(void *private)
1739 1740 {
1740 1741 arc_buf_t *buf = private;
1741 1742 dmu_buf_impl_t *db = buf->b_private;
1742 1743
1743 1744 if (!MUTEX_HELD(&db->db_mtx))
1744 1745 mutex_enter(&db->db_mtx);
1745 1746
1746 1747 ASSERT(refcount_is_zero(&db->db_holds));
1747 1748
1748 1749 if (db->db_state != DB_EVICTING) {
1749 1750 ASSERT(db->db_state == DB_CACHED);
1750 1751 DBUF_VERIFY(db);
1751 1752 db->db_buf = NULL;
1752 1753 dbuf_evict(db);
1753 1754 } else {
1754 1755 mutex_exit(&db->db_mtx);
1755 1756 dbuf_destroy(db);
1756 1757 }
1757 1758 return (0);
1758 1759 }
1759 1760
1760 1761 static void
1761 1762 dbuf_destroy(dmu_buf_impl_t *db)
1762 1763 {
1763 1764 ASSERT(refcount_is_zero(&db->db_holds));
1764 1765
1765 1766 if (db->db_blkid != DMU_BONUS_BLKID) {
1766 1767 /*
1767 1768 * If this dbuf is still on the dn_dbufs list,
1768 1769 * remove it from that list.
1769 1770 */
1770 1771 if (db->db_dnode_handle != NULL) {
1771 1772 dnode_t *dn;
1772 1773
1773 1774 DB_DNODE_ENTER(db);
1774 1775 dn = DB_DNODE(db);
1775 1776 mutex_enter(&dn->dn_dbufs_mtx);
1776 1777 list_remove(&dn->dn_dbufs, db);
1777 1778 (void) atomic_dec_32_nv(&dn->dn_dbufs_count);
1778 1779 mutex_exit(&dn->dn_dbufs_mtx);
1779 1780 DB_DNODE_EXIT(db);
1780 1781 /*
1781 1782 * Decrementing the dbuf count means that the hold
1782 1783 * corresponding to the removed dbuf is no longer
1783 1784 * discounted in dnode_move(), so the dnode cannot be
1784 1785 * moved until after we release the hold.
1785 1786 */
1786 1787 dnode_rele(dn, db);
1787 1788 db->db_dnode_handle = NULL;
1788 1789 }
1789 1790 dbuf_hash_remove(db);
1790 1791 }
1791 1792 db->db_parent = NULL;
1792 1793 db->db_buf = NULL;
1793 1794
1794 1795 ASSERT(!list_link_active(&db->db_link));
1795 1796 ASSERT(db->db.db_data == NULL);
1796 1797 ASSERT(db->db_hash_next == NULL);
1797 1798 ASSERT(db->db_blkptr == NULL);
1798 1799 ASSERT(db->db_data_pending == NULL);
1799 1800
1800 1801 kmem_cache_free(dbuf_cache, db);
1801 1802 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1802 1803 }
1803 1804
1804 1805 void
1805 1806 dbuf_prefetch(dnode_t *dn, uint64_t blkid)
1806 1807 {
1807 1808 dmu_buf_impl_t *db = NULL;
1808 1809 blkptr_t *bp = NULL;
1809 1810
1810 1811 ASSERT(blkid != DMU_BONUS_BLKID);
1811 1812 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1812 1813
1813 1814 if (dnode_block_freed(dn, blkid))
1814 1815 return;
1815 1816
1816 1817 /* dbuf_find() returns with db_mtx held */
1817 1818 if (db = dbuf_find(dn, 0, blkid)) {
1818 1819 /*
1819 1820 * This dbuf is already in the cache. We assume that
1820 1821 * it is already CACHED, or else about to be either
1821 1822 * read or filled.
1822 1823 */
1823 1824 mutex_exit(&db->db_mtx);
1824 1825 return;
1825 1826 }
1826 1827
1827 1828 if (dbuf_findbp(dn, 0, blkid, TRUE, &db, &bp) == 0) {
1828 1829 if (bp && !BP_IS_HOLE(bp)) {
1829 1830 int priority = dn->dn_type == DMU_OT_DDT_ZAP ?
1830 1831 ZIO_PRIORITY_DDT_PREFETCH : ZIO_PRIORITY_ASYNC_READ;
1831 1832 arc_buf_t *pbuf;
1832 1833 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
1833 1834 uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
1834 1835 zbookmark_t zb;
1835 1836
1836 1837 SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
1837 1838 dn->dn_object, 0, blkid);
1838 1839
1839 1840 if (db)
1840 1841 pbuf = db->db_buf;
1841 1842 else
1842 1843 pbuf = dn->dn_objset->os_phys_buf;
1843 1844
1844 1845 (void) dsl_read(NULL, dn->dn_objset->os_spa,
1845 1846 bp, pbuf, NULL, NULL, priority,
1846 1847 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
1847 1848 &aflags, &zb);
1848 1849 }
1849 1850 if (db)
1850 1851 dbuf_rele(db, NULL);
1851 1852 }
1852 1853 }
1853 1854
1854 1855 /*
1855 1856 * Returns with db_holds incremented, and db_mtx not held.
1856 1857 * Note: dn_struct_rwlock must be held.
1857 1858 */
1858 1859 int
1859 1860 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
1860 1861 void *tag, dmu_buf_impl_t **dbp)
1861 1862 {
1862 1863 dmu_buf_impl_t *db, *parent = NULL;
1863 1864
1864 1865 ASSERT(blkid != DMU_BONUS_BLKID);
1865 1866 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1866 1867 ASSERT3U(dn->dn_nlevels, >, level);
1867 1868
1868 1869 *dbp = NULL;
1869 1870 top:
1870 1871 /* dbuf_find() returns with db_mtx held */
1871 1872 db = dbuf_find(dn, level, blkid);
1872 1873
1873 1874 if (db == NULL) {
1874 1875 blkptr_t *bp = NULL;
1875 1876 int err;
1876 1877
1877 1878 ASSERT3P(parent, ==, NULL);
1878 1879 err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
1879 1880 if (fail_sparse) {
1880 1881 if (err == 0 && bp && BP_IS_HOLE(bp))
1881 1882 err = ENOENT;
1882 1883 if (err) {
1883 1884 if (parent)
1884 1885 dbuf_rele(parent, NULL);
1885 1886 return (err);
1886 1887 }
1887 1888 }
1888 1889 if (err && err != ENOENT)
1889 1890 return (err);
1890 1891 db = dbuf_create(dn, level, blkid, parent, bp);
1891 1892 }
1892 1893
1893 1894 if (db->db_buf && refcount_is_zero(&db->db_holds)) {
1894 1895 arc_buf_add_ref(db->db_buf, db);
1895 1896 if (db->db_buf->b_data == NULL) {
1896 1897 dbuf_clear(db);
1897 1898 if (parent) {
1898 1899 dbuf_rele(parent, NULL);
1899 1900 parent = NULL;
1900 1901 }
1901 1902 goto top;
1902 1903 }
1903 1904 ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
1904 1905 }
1905 1906
1906 1907 ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
1907 1908
1908 1909 /*
1909 1910 * If this buffer is currently syncing out, and we are
1910 1911 * still referencing it from db_data, we need to make a copy
1911 1912 * of it in case we decide we want to dirty it again in this txg.
1912 1913 */
1913 1914 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1914 1915 dn->dn_object != DMU_META_DNODE_OBJECT &&
1915 1916 db->db_state == DB_CACHED && db->db_data_pending) {
1916 1917 dbuf_dirty_record_t *dr = db->db_data_pending;
1917 1918
1918 1919 if (dr->dt.dl.dr_data == db->db_buf) {
1919 1920 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1920 1921
1921 1922 dbuf_set_data(db,
1922 1923 arc_buf_alloc(dn->dn_objset->os_spa,
1923 1924 db->db.db_size, db, type));
1924 1925 bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data,
1925 1926 db->db.db_size);
1926 1927 }
1927 1928 }
1928 1929
1929 1930 (void) refcount_add(&db->db_holds, tag);
1930 1931 dbuf_update_data(db);
1931 1932 DBUF_VERIFY(db);
1932 1933 mutex_exit(&db->db_mtx);
1933 1934
1934 1935 /* NOTE: we can't rele the parent until after we drop the db_mtx */
1935 1936 if (parent)
1936 1937 dbuf_rele(parent, NULL);
1937 1938
1938 1939 ASSERT3P(DB_DNODE(db), ==, dn);
1939 1940 ASSERT3U(db->db_blkid, ==, blkid);
1940 1941 ASSERT3U(db->db_level, ==, level);
1941 1942 *dbp = db;
1942 1943
1943 1944 return (0);
1944 1945 }
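
/*
 * Editorial sketch (hypothetical caller, not part of this change):
 * dbuf_hold_impl() requires dn_struct_rwlock and returns with a hold on
 * the dbuf and db_mtx dropped, so a minimal caller looks like the helper
 * below.  "example_read_block" is an illustrative name only.
 */
static int
example_read_block(dnode_t *dn, uint64_t blkid)
{
	dmu_buf_impl_t *db;
	int err;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	err = dbuf_hold_impl(dn, 0, blkid, FALSE, FTAG, &db);
	rw_exit(&dn->dn_struct_rwlock);
	if (err != 0)
		return (err);

	err = dbuf_read(db, NULL, DB_RF_CANFAIL);
	dbuf_rele(db, FTAG);		/* drop the hold taken above */
	return (err);
}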
1945 1946
1946 1947 dmu_buf_impl_t *
1947 1948 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
1948 1949 {
1949 1950 dmu_buf_impl_t *db;
1950 1951 int err = dbuf_hold_impl(dn, 0, blkid, FALSE, tag, &db);
1951 1952 return (err ? NULL : db);
1952 1953 }
1953 1954
1954 1955 dmu_buf_impl_t *
1955 1956 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
1956 1957 {
1957 1958 dmu_buf_impl_t *db;
1958 1959 int err = dbuf_hold_impl(dn, level, blkid, FALSE, tag, &db);
1959 1960 return (err ? NULL : db);
1960 1961 }
1961 1962
1962 1963 void
1963 1964 dbuf_create_bonus(dnode_t *dn)
1964 1965 {
1965 1966 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
1966 1967
1967 1968 ASSERT(dn->dn_bonus == NULL);
1968 1969 dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
1969 1970 }
1970 1971
1971 1972 int
1972 1973 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
1973 1974 {
1974 1975 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1975 1976 dnode_t *dn;
1976 1977
1977 1978 if (db->db_blkid != DMU_SPILL_BLKID)
1978 1979 return (ENOTSUP);
1979 1980 if (blksz == 0)
1980 1981 blksz = SPA_MINBLOCKSIZE;
1981 1982 if (blksz > SPA_MAXBLOCKSIZE)
1982 1983 blksz = SPA_MAXBLOCKSIZE;
1983 1984 else
1984 1985 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
1985 1986
1986 1987 DB_DNODE_ENTER(db);
1987 1988 dn = DB_DNODE(db);
1988 1989 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1989 1990 dbuf_new_size(db, blksz, tx);
1990 1991 rw_exit(&dn->dn_struct_rwlock);
1991 1992 DB_DNODE_EXIT(db);
1992 1993
1993 1994 return (0);
1994 1995 }
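
/*
 * Editorial note (worked examples, not part of this change): the size
 * normalization performed above, with SPA_MINBLOCKSIZE == 512 and
 * SPA_MAXBLOCKSIZE == 128K in this source base:
 *
 *	blksz == 0	->  512		(defaulted to SPA_MINBLOCKSIZE)
 *	blksz == 3000	->  3072	(P2ROUNDUP to a 512-byte multiple)
 *	blksz == 200000	->  131072	(clamped to SPA_MAXBLOCKSIZE)
 */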
1995 1996
1996 1997 void
1997 1998 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
1998 1999 {
1999 2000 dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
2000 2001 }
2001 2002
2002 2003 #pragma weak dmu_buf_add_ref = dbuf_add_ref
2003 2004 void
2004 2005 dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
2005 2006 {
2006 2007 int64_t holds = refcount_add(&db->db_holds, tag);
2007 2008 ASSERT(holds > 1);
2008 2009 }
2009 2010
2010 2011 /*
2011 2012 * If you call dbuf_rele() you had better not be referencing the dnode handle
2012 2013 * unless you have some other direct or indirect hold on the dnode. (An indirect
2013 2014 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
2014 2015 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
2015 2016 * dnode's parent dbuf evicting its dnode handles.
2016 2017 */
2017 2018 #pragma weak dmu_buf_rele = dbuf_rele
2018 2019 void
2019 2020 dbuf_rele(dmu_buf_impl_t *db, void *tag)
2020 2021 {
2021 2022 mutex_enter(&db->db_mtx);
2022 2023 dbuf_rele_and_unlock(db, tag);
2023 2024 }
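
/*
 * Editorial sketch (assumed usage, not part of this change): per the
 * comment above, a caller that still needs the dnode after dropping what
 * may be the last hold on one of its dbufs should take a direct dnode
 * hold first.  "example_rele_keep_dnode" is an illustrative name only.
 */
static void
example_rele_keep_dnode(dmu_buf_impl_t *db, void *tag)
{
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	VERIFY(dnode_add_ref(dn, FTAG));	/* direct hold on the dnode */
	DB_DNODE_EXIT(db);

	dbuf_rele(db, tag);
	/* dn is still safe to use here, even if db was just evicted */
	dnode_rele(dn, FTAG);
}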
2024 2025
2025 2026 /*
2026 2027 * dbuf_rele() for an already-locked dbuf. This is necessary to allow
2027 2028 * db_dirtycnt and db_holds to be updated atomically.
2028 2029 */
2029 2030 void
2030 2031 dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
2031 2032 {
2032 2033 int64_t holds;
2033 2034
2034 2035 ASSERT(MUTEX_HELD(&db->db_mtx));
2035 2036 DBUF_VERIFY(db);
2036 2037
2037 2038 /*
2038 2039 * Remove the reference to the dbuf before removing its hold on the
2039 2040 * dnode so we can guarantee in dnode_move() that a referenced bonus
2040 2041 * buffer has a corresponding dnode hold.
2041 2042 */
2042 2043 holds = refcount_remove(&db->db_holds, tag);
2043 2044 ASSERT(holds >= 0);
2044 2045
2045 2046 /*
2046 2047 * We can't freeze indirects if there is a possibility that they
2047 2048 * may be modified in the current syncing context.
2048 2049 */
2049 2050 if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0))
2050 2051 arc_buf_freeze(db->db_buf);
2051 2052
2052 2053 if (holds == db->db_dirtycnt &&
2053 2054 db->db_level == 0 && db->db_immediate_evict)
2054 2055 dbuf_evict_user(db);
2055 2056
2056 2057 if (holds == 0) {
2057 2058 if (db->db_blkid == DMU_BONUS_BLKID) {
2058 2059 mutex_exit(&db->db_mtx);
2059 2060
2060 2061 /*
2061 2062 * If the dnode moves here, we cannot cross this barrier
2062 2063 * until the move completes.
2063 2064 */
2064 2065 DB_DNODE_ENTER(db);
2065 2066 (void) atomic_dec_32_nv(&DB_DNODE(db)->dn_dbufs_count);
2066 2067 DB_DNODE_EXIT(db);
2067 2068 /*
2068 2069 * The bonus buffer's dnode hold is no longer discounted
2069 2070 * in dnode_move(). The dnode cannot move until after
2070 2071 * the dnode_rele().
2071 2072 */
2072 2073 dnode_rele(DB_DNODE(db), db);
2073 2074 } else if (db->db_buf == NULL) {
2074 2075 /*
2075 2076 * This is a special case: we never associated this
2076 2077 * dbuf with any data allocated from the ARC.
2077 2078 */
2078 2079 ASSERT(db->db_state == DB_UNCACHED ||
2079 2080 db->db_state == DB_NOFILL);
2080 2081 dbuf_evict(db);
2081 2082 } else if (arc_released(db->db_buf)) {
2082 2083 arc_buf_t *buf = db->db_buf;
2083 2084 /*
2084 2085 * This dbuf has anonymous data associated with it.
2085 2086 */
2086 2087 dbuf_set_data(db, NULL);
2087 2088 VERIFY(arc_buf_remove_ref(buf, db) == 1);
2088 2089 dbuf_evict(db);
2089 2090 } else {
2090 2091 VERIFY(arc_buf_remove_ref(db->db_buf, db) == 0);
2091 2092 if (!DBUF_IS_CACHEABLE(db))
2092 2093 dbuf_clear(db);
2093 2094 else
2094 2095 mutex_exit(&db->db_mtx);
2095 2096 }
2096 2097 } else {
2097 2098 mutex_exit(&db->db_mtx);
2098 2099 }
2099 2100 }
2100 2101
2101 2102 #pragma weak dmu_buf_refcount = dbuf_refcount
2102 2103 uint64_t
2103 2104 dbuf_refcount(dmu_buf_impl_t *db)
2104 2105 {
2105 2106 return (refcount_count(&db->db_holds));
2106 2107 }
2107 2108
2108 2109 void *
2109 2110 dmu_buf_set_user(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
2110 2111 dmu_buf_evict_func_t *evict_func)
2111 2112 {
2112 2113 return (dmu_buf_update_user(db_fake, NULL, user_ptr,
2113 2114 user_data_ptr_ptr, evict_func));
2114 2115 }
2115 2116
2116 2117 void *
2117 2118 dmu_buf_set_user_ie(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
2118 2119 dmu_buf_evict_func_t *evict_func)
2119 2120 {
2120 2121 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2121 2122
2122 2123 db->db_immediate_evict = TRUE;
2123 2124 return (dmu_buf_update_user(db_fake, NULL, user_ptr,
2124 2125 user_data_ptr_ptr, evict_func));
2125 2126 }
2126 2127
2127 2128 void *
2128 2129 dmu_buf_update_user(dmu_buf_t *db_fake, void *old_user_ptr, void *user_ptr,
2129 2130 void *user_data_ptr_ptr, dmu_buf_evict_func_t *evict_func)
2130 2131 {
2131 2132 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2132 2133 ASSERT(db->db_level == 0);
2133 2134
2134 2135 ASSERT((user_ptr == NULL) == (evict_func == NULL));
2135 2136
2136 2137 mutex_enter(&db->db_mtx);
2137 2138
2138 2139 if (db->db_user_ptr == old_user_ptr) {
2139 2140 db->db_user_ptr = user_ptr;
2140 2141 db->db_user_data_ptr_ptr = user_data_ptr_ptr;
2141 2142 db->db_evict_func = evict_func;
2142 2143
2143 2144 dbuf_update_data(db);
2144 2145 } else {
2145 2146 old_user_ptr = db->db_user_ptr;
2146 2147 }
2147 2148
2148 2149 mutex_exit(&db->db_mtx);
2149 2150 return (old_user_ptr);
2150 2151 }
2151 2152
2152 2153 void *
2153 2154 dmu_buf_get_user(dmu_buf_t *db_fake)
2154 2155 {
2155 2156 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2156 2157 ASSERT(!refcount_is_zero(&db->db_holds));
2157 2158
2158 2159 return (db->db_user_ptr);
2159 2160 }
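
/*
 * Editorial sketch (hypothetical consumer, not part of this change):
 * attaching in-core state to a held dbuf with an eviction callback via
 * dmu_buf_set_user().  "my_state_t", "my_evict" and "my_attach" are
 * illustrative names only.
 */
typedef struct my_state {
	void *ms_phys;		/* kept pointing at db->db_data by the dbuf */
} my_state_t;

/* ARGSUSED */
static void
my_evict(dmu_buf_t *db, void *user_ptr)
{
	kmem_free(user_ptr, sizeof (my_state_t));
}

static my_state_t *
my_attach(dmu_buf_t *db)
{
	my_state_t *ms = kmem_zalloc(sizeof (my_state_t), KM_SLEEP);
	my_state_t *winner;

	winner = dmu_buf_set_user(db, ms, &ms->ms_phys, my_evict);
	if (winner != NULL) {
		/* another thread attached its state first; use that one */
		kmem_free(ms, sizeof (my_state_t));
		ms = winner;
	}
	return (ms);
}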
2160 2161
2161 2162 boolean_t
2162 2163 dmu_buf_freeable(dmu_buf_t *dbuf)
2163 2164 {
2164 2165 boolean_t res = B_FALSE;
2165 2166 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2166 2167
2167 2168 if (db->db_blkptr)
2168 2169 res = dsl_dataset_block_freeable(db->db_objset->os_dsl_dataset,
2169 2170 db->db_blkptr, db->db_blkptr->blk_birth);
2170 2171
2171 2172 return (res);
2172 2173 }
2173 2174
2174 2175 static void
2175 2176 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
2176 2177 {
2177 2178 /* ASSERT(dmu_tx_is_syncing(tx)) */
2178 2179 ASSERT(MUTEX_HELD(&db->db_mtx));
2179 2180
2180 2181 if (db->db_blkptr != NULL)
2181 2182 return;
2182 2183
2183 2184 if (db->db_blkid == DMU_SPILL_BLKID) {
2184 2185 db->db_blkptr = &dn->dn_phys->dn_spill;
2185 2186 BP_ZERO(db->db_blkptr);
2186 2187 return;
2187 2188 }
2188 2189 if (db->db_level == dn->dn_phys->dn_nlevels-1) {
2189 2190 /*
2190 2191 * This buffer was allocated at a time when there were
2191 2192 * no blkptrs available from the dnode, or it was
2192 2193 * inappropriate to hook it in (i.e., nlevels mismatch).
2193 2194 */
2194 2195 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
2195 2196 ASSERT(db->db_parent == NULL);
2196 2197 db->db_parent = dn->dn_dbuf;
2197 2198 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
2198 2199 DBUF_VERIFY(db);
2199 2200 } else {
2200 2201 dmu_buf_impl_t *parent = db->db_parent;
2201 2202 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2202 2203
2203 2204 ASSERT(dn->dn_phys->dn_nlevels > 1);
2204 2205 if (parent == NULL) {
2205 2206 mutex_exit(&db->db_mtx);
2206 2207 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2207 2208 (void) dbuf_hold_impl(dn, db->db_level+1,
2208 2209 db->db_blkid >> epbs, FALSE, db, &parent);
2209 2210 rw_exit(&dn->dn_struct_rwlock);
2210 2211 mutex_enter(&db->db_mtx);
2211 2212 db->db_parent = parent;
2212 2213 }
2213 2214 db->db_blkptr = (blkptr_t *)parent->db.db_data +
2214 2215 (db->db_blkid & ((1ULL << epbs) - 1));
2215 2216 DBUF_VERIFY(db);
2216 2217 }
2217 2218 }
2218 2219
2219 2220 static void
2220 2221 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2221 2222 {
2222 2223 dmu_buf_impl_t *db = dr->dr_dbuf;
2223 2224 dnode_t *dn;
2224 2225 zio_t *zio;
2225 2226
2226 2227 ASSERT(dmu_tx_is_syncing(tx));
2227 2228
2228 2229 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2229 2230
2230 2231 mutex_enter(&db->db_mtx);
2231 2232
2232 2233 ASSERT(db->db_level > 0);
2233 2234 DBUF_VERIFY(db);
2234 2235
2235 2236 if (db->db_buf == NULL) {
2236 2237 mutex_exit(&db->db_mtx);
2237 2238 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
2238 2239 mutex_enter(&db->db_mtx);
2239 2240 }
2240 2241 ASSERT3U(db->db_state, ==, DB_CACHED);
2241 2242 ASSERT(db->db_buf != NULL);
2242 2243
2243 2244 DB_DNODE_ENTER(db);
2244 2245 dn = DB_DNODE(db);
2245 2246 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2246 2247 dbuf_check_blkptr(dn, db);
2247 2248 DB_DNODE_EXIT(db);
2248 2249
2249 2250 db->db_data_pending = dr;
2250 2251
2251 2252 mutex_exit(&db->db_mtx);
2252 2253 dbuf_write(dr, db->db_buf, tx);
2253 2254
2254 2255 zio = dr->dr_zio;
2255 2256 mutex_enter(&dr->dt.di.dr_mtx);
2256 2257 dbuf_sync_list(&dr->dt.di.dr_children, tx);
2257 2258 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
2258 2259 mutex_exit(&dr->dt.di.dr_mtx);
2259 2260 zio_nowait(zio);
2260 2261 }
2261 2262
2262 2263 static void
2263 2264 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2264 2265 {
2265 2266 arc_buf_t **datap = &dr->dt.dl.dr_data;
2266 2267 dmu_buf_impl_t *db = dr->dr_dbuf;
2267 2268 dnode_t *dn;
2268 2269 objset_t *os;
2269 2270 uint64_t txg = tx->tx_txg;
2270 2271
2271 2272 ASSERT(dmu_tx_is_syncing(tx));
2272 2273
2273 2274 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2274 2275
2275 2276 mutex_enter(&db->db_mtx);
2276 2277 /*
2277 2278 * To be synced, we must be dirtied. But we
2278 2279 * might have been freed after being dirtied.
2279 2280 */
2280 2281 if (db->db_state == DB_UNCACHED) {
2281 2282 /* This buffer has been freed since it was dirtied */
2282 2283 ASSERT(db->db.db_data == NULL);
2283 2284 } else if (db->db_state == DB_FILL) {
2284 2285 /* This buffer was freed and is now being re-filled */
2285 2286 ASSERT(db->db.db_data != dr->dt.dl.dr_data);
2286 2287 } else {
2287 2288 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
2288 2289 }
2289 2290 DBUF_VERIFY(db);
2290 2291
2291 2292 DB_DNODE_ENTER(db);
2292 2293 dn = DB_DNODE(db);
2293 2294
2294 2295 if (db->db_blkid == DMU_SPILL_BLKID) {
2295 2296 mutex_enter(&dn->dn_mtx);
2296 2297 dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
2297 2298 mutex_exit(&dn->dn_mtx);
2298 2299 }
2299 2300
2300 2301 /*
2301 2302 * If this is a bonus buffer, simply copy the bonus data into the
2302 2303 * dnode. It will be written out when the dnode is synced (and it
2303 2304 * will be synced, since it must have been dirty for dbuf_sync to
2304 2305 * be called).
2305 2306 */
2306 2307 if (db->db_blkid == DMU_BONUS_BLKID) {
2307 2308 dbuf_dirty_record_t **drp;
2308 2309
2309 2310 ASSERT(*datap != NULL);
2310 2311 ASSERT3U(db->db_level, ==, 0);
2311 2312 ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
2312 2313 bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
2313 2314 DB_DNODE_EXIT(db);
2314 2315
2315 2316 if (*datap != db->db.db_data) {
2316 2317 zio_buf_free(*datap, DN_MAX_BONUSLEN);
2317 2318 arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
2318 2319 }
2319 2320 db->db_data_pending = NULL;
2320 2321 drp = &db->db_last_dirty;
2321 2322 while (*drp != dr)
2322 2323 drp = &(*drp)->dr_next;
2323 2324 ASSERT(dr->dr_next == NULL);
2324 2325 ASSERT(dr->dr_dbuf == db);
2325 2326 *drp = dr->dr_next;
2326 2327 kmem_free(dr, sizeof (dbuf_dirty_record_t));
2327 2328 ASSERT(db->db_dirtycnt > 0);
2328 2329 db->db_dirtycnt -= 1;
2329 2330 dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
2330 2331 return;
2331 2332 }
2332 2333
2333 2334 os = dn->dn_objset;
2334 2335
2335 2336 /*
2336 2337 * This function may have dropped the db_mtx lock allowing a dmu_sync
2337 2338 * operation to sneak in. As a result, we need to ensure that we
2338 2339 * don't check the dr_override_state until we have returned from
2339 2340 * dbuf_check_blkptr.
2340 2341 */
2341 2342 dbuf_check_blkptr(dn, db);
2342 2343
2343 2344 /*
2344 2345 * If this buffer is in the middle of an immediate write,
2345 2346 * wait for the synchronous IO to complete.
2346 2347 */
2347 2348 while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
2348 2349 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
2349 2350 cv_wait(&db->db_changed, &db->db_mtx);
2350 2351 ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
2351 2352 }
2352 2353
2353 2354 if (db->db_state != DB_NOFILL &&
2354 2355 dn->dn_object != DMU_META_DNODE_OBJECT &&
2355 2356 refcount_count(&db->db_holds) > 1 &&
2356 2357 dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
2357 2358 *datap == db->db_buf) {
2358 2359 /*
2359 2360 * If this buffer is currently "in use" (i.e., there
2360 2361 * are active holds and db_data still references it),
2361 2362 * then make a copy before we start the write so that
2362 2363 * any modifications from the open txg will not leak
2363 2364 * into this write.
2364 2365 *
2365 2366 * NOTE: this copy does not need to be made for
2366 2367 * objects only modified in the syncing context (e.g.
2367 2368 * DMU_OT_DNODE blocks).
2368 2369 */
2369 2370 int blksz = arc_buf_size(*datap);
2370 2371 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2371 2372 *datap = arc_buf_alloc(os->os_spa, blksz, db, type);
2372 2373 bcopy(db->db.db_data, (*datap)->b_data, blksz);
2373 2374 }
2374 2375 db->db_data_pending = dr;
2375 2376
2376 2377 mutex_exit(&db->db_mtx);
2377 2378
2378 2379 dbuf_write(dr, *datap, tx);
2379 2380
2380 2381 ASSERT(!list_link_active(&dr->dr_dirty_node));
2381 2382 if (dn->dn_object == DMU_META_DNODE_OBJECT) {
2382 2383 list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
2383 2384 DB_DNODE_EXIT(db);
2384 2385 } else {
2385 2386 /*
2386 2387 * Although zio_nowait() does not "wait for an IO", it does
2387 2388 * initiate the IO. If this is an empty write it seems plausible
2388 2389 * that the IO could actually be completed before the nowait
2389 2390 * returns. We need to DB_DNODE_EXIT() first in case
2390 2391 * zio_nowait() invalidates the dbuf.
2391 2392 */
2392 2393 DB_DNODE_EXIT(db);
2393 2394 zio_nowait(dr->dr_zio);
2394 2395 }
2395 2396 }
2396 2397
2397 2398 void
2398 2399 dbuf_sync_list(list_t *list, dmu_tx_t *tx)
2399 2400 {
2400 2401 dbuf_dirty_record_t *dr;
2401 2402
2402 2403 while (dr = list_head(list)) {
2403 2404 if (dr->dr_zio != NULL) {
2404 2405 /*
2405 2406 * If we find an already initialized zio then we
2406 2407 * are processing the meta-dnode, and we have finished.
2407 2408 * The dbufs for all dnodes are put back on the list
2408 2409 * during processing, so that we can zio_wait()
2409 2410 * these IOs after initiating all child IOs.
2410 2411 */
2411 2412 ASSERT3U(dr->dr_dbuf->db.db_object, ==,
2412 2413 DMU_META_DNODE_OBJECT);
2413 2414 break;
2414 2415 }
2415 2416 list_remove(list, dr);
2416 2417 if (dr->dr_dbuf->db_level > 0)
2417 2418 dbuf_sync_indirect(dr, tx);
2418 2419 else
2419 2420 dbuf_sync_leaf(dr, tx);
2420 2421 }
2421 2422 }
2422 2423
2423 2424 /* ARGSUSED */
2424 2425 static void
2425 2426 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
2426 2427 {
2427 2428 dmu_buf_impl_t *db = vdb;
2428 2429 dnode_t *dn;
2429 2430 blkptr_t *bp = zio->io_bp;
2430 2431 blkptr_t *bp_orig = &zio->io_bp_orig;
2431 2432 spa_t *spa = zio->io_spa;
2432 2433 int64_t delta;
2433 2434 uint64_t fill = 0;
2434 2435 int i;
2435 2436
2436 2437 ASSERT(db->db_blkptr == bp);
2437 2438
2438 2439 DB_DNODE_ENTER(db);
2439 2440 dn = DB_DNODE(db);
2440 2441 delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
2441 2442 dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
2442 2443 zio->io_prev_space_delta = delta;
2443 2444
2444 2445 if (BP_IS_HOLE(bp)) {
2445 2446 ASSERT(bp->blk_fill == 0);
2446 2447 DB_DNODE_EXIT(db);
2447 2448 return;
2448 2449 }
2449 2450
2450 2451 ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
2451 2452 BP_GET_TYPE(bp) == dn->dn_type) ||
2452 2453 (db->db_blkid == DMU_SPILL_BLKID &&
2453 2454 BP_GET_TYPE(bp) == dn->dn_bonustype));
2454 2455 ASSERT(BP_GET_LEVEL(bp) == db->db_level);
2455 2456
2456 2457 mutex_enter(&db->db_mtx);
2457 2458
2458 2459 #ifdef ZFS_DEBUG
2459 2460 if (db->db_blkid == DMU_SPILL_BLKID) {
2460 2461 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
2461 2462 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
2462 2463 db->db_blkptr == &dn->dn_phys->dn_spill);
2463 2464 }
2464 2465 #endif
2465 2466
2466 2467 if (db->db_level == 0) {
2467 2468 mutex_enter(&dn->dn_mtx);
2468 2469 if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
2469 2470 db->db_blkid != DMU_SPILL_BLKID)
2470 2471 dn->dn_phys->dn_maxblkid = db->db_blkid;
2471 2472 mutex_exit(&dn->dn_mtx);
2472 2473
2473 2474 if (dn->dn_type == DMU_OT_DNODE) {
2474 2475 dnode_phys_t *dnp = db->db.db_data;
2475 2476 for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
2476 2477 i--, dnp++) {
2477 2478 if (dnp->dn_type != DMU_OT_NONE)
2478 2479 fill++;
2479 2480 }
2480 2481 } else {
2481 2482 fill = 1;
2482 2483 }
2483 2484 } else {
2484 2485 blkptr_t *ibp = db->db.db_data;
2485 2486 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2486 2487 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
2487 2488 if (BP_IS_HOLE(ibp))
2488 2489 continue;
2489 2490 fill += ibp->blk_fill;
2490 2491 }
2491 2492 }
2492 2493 DB_DNODE_EXIT(db);
2493 2494
2494 2495 bp->blk_fill = fill;
2495 2496
2496 2497 mutex_exit(&db->db_mtx);
2497 2498 }
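
/*
 * Editorial note (worked example, not part of this change): the fill count
 * computed above summarizes how much of the subtree below this block is
 * allocated.  A level-0 data block contributes fill == 1; a level-0 block
 * of a DMU_OT_DNODE object contributes one per dnode_phys_t slot whose
 * dn_type is not DMU_OT_NONE; an indirect block contributes the sum of
 * blk_fill over its non-hole children, e.g. an indirect block whose only
 * non-hole children have blk_fill values 1, 1 and 40 gets blk_fill == 42.
 */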
2498 2499
2499 2500 /* ARGSUSED */
2500 2501 static void
2501 2502 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
2502 2503 {
2503 2504 dmu_buf_impl_t *db = vdb;
2504 2505 blkptr_t *bp = zio->io_bp;
2505 2506 blkptr_t *bp_orig = &zio->io_bp_orig;
2506 2507 uint64_t txg = zio->io_txg;
2507 2508 dbuf_dirty_record_t **drp, *dr;
2508 2509
2509 2510 ASSERT3U(zio->io_error, ==, 0);
2510 2511 ASSERT(db->db_blkptr == bp);
2511 2512
2512 2513 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
2513 2514 ASSERT(BP_EQUAL(bp, bp_orig));
2514 2515 } else {
2515 2516 objset_t *os;
2516 2517 dsl_dataset_t *ds;
2517 2518 dmu_tx_t *tx;
2518 2519
2519 2520 DB_GET_OBJSET(&os, db);
2520 2521 ds = os->os_dsl_dataset;
2521 2522 tx = os->os_synctx;
2522 2523
2523 2524 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
2524 2525 dsl_dataset_block_born(ds, bp, tx);
2525 2526 }
2526 2527
2527 2528 mutex_enter(&db->db_mtx);
2528 2529
2529 2530 DBUF_VERIFY(db);
2530 2531
2531 2532 drp = &db->db_last_dirty;
2532 2533 while ((dr = *drp) != db->db_data_pending)
2533 2534 drp = &dr->dr_next;
2534 2535 ASSERT(!list_link_active(&dr->dr_dirty_node));
2535 2536 ASSERT(dr->dr_txg == txg);
2536 2537 ASSERT(dr->dr_dbuf == db);
2537 2538 ASSERT(dr->dr_next == NULL);
2538 2539 *drp = dr->dr_next;
2539 2540
2540 2541 #ifdef ZFS_DEBUG
2541 2542 if (db->db_blkid == DMU_SPILL_BLKID) {
2542 2543 dnode_t *dn;
2543 2544
2544 2545 DB_DNODE_ENTER(db);
2545 2546 dn = DB_DNODE(db);
2546 2547 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
2547 2548 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
2548 2549 db->db_blkptr == &dn->dn_phys->dn_spill);
2549 2550 DB_DNODE_EXIT(db);
2550 2551 }
2551 2552 #endif
2552 2553
2553 2554 if (db->db_level == 0) {
2554 2555 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2555 2556 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
2556 2557 if (db->db_state != DB_NOFILL) {
2557 2558 if (dr->dt.dl.dr_data != db->db_buf)
2558 2559 VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
2559 2560 db) == 1);
2560 2561 else if (!arc_released(db->db_buf))
2561 2562 arc_set_callback(db->db_buf, dbuf_do_evict, db);
2562 2563 }
2563 2564 } else {
2564 2565 dnode_t *dn;
2565 2566
2566 2567 DB_DNODE_ENTER(db);
2567 2568 dn = DB_DNODE(db);
2568 2569 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
2569 2570 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2570 2571 if (!BP_IS_HOLE(db->db_blkptr)) {
2571 2572 int epbs =
2572 2573 dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2573 2574 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
2574 2575 db->db.db_size);
2575 2576 ASSERT3U(dn->dn_phys->dn_maxblkid
2576 2577 >> (db->db_level * epbs), >=, db->db_blkid);
2577 2578 arc_set_callback(db->db_buf, dbuf_do_evict, db);
2578 2579 }
2579 2580 DB_DNODE_EXIT(db);
2580 2581 mutex_destroy(&dr->dt.di.dr_mtx);
2581 2582 list_destroy(&dr->dt.di.dr_children);
2582 2583 }
2583 2584 kmem_free(dr, sizeof (dbuf_dirty_record_t));
2584 2585
2585 2586 cv_broadcast(&db->db_changed);
2586 2587 ASSERT(db->db_dirtycnt > 0);
2587 2588 db->db_dirtycnt -= 1;
2588 2589 db->db_data_pending = NULL;
2589 2590 dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
2590 2591 }
2591 2592
2592 2593 static void
2593 2594 dbuf_write_nofill_ready(zio_t *zio)
2594 2595 {
2595 2596 dbuf_write_ready(zio, NULL, zio->io_private);
2596 2597 }
2597 2598
2598 2599 static void
2599 2600 dbuf_write_nofill_done(zio_t *zio)
2600 2601 {
2601 2602 dbuf_write_done(zio, NULL, zio->io_private);
2602 2603 }
2603 2604
2604 2605 static void
2605 2606 dbuf_write_override_ready(zio_t *zio)
2606 2607 {
2607 2608 dbuf_dirty_record_t *dr = zio->io_private;
2608 2609 dmu_buf_impl_t *db = dr->dr_dbuf;
2609 2610
2610 2611 dbuf_write_ready(zio, NULL, db);
2611 2612 }
2612 2613
2613 2614 static void
2614 2615 dbuf_write_override_done(zio_t *zio)
2615 2616 {
2616 2617 dbuf_dirty_record_t *dr = zio->io_private;
2617 2618 dmu_buf_impl_t *db = dr->dr_dbuf;
2618 2619 blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
2619 2620
2620 2621 mutex_enter(&db->db_mtx);
2621 2622 if (!BP_EQUAL(zio->io_bp, obp)) {
2622 2623 if (!BP_IS_HOLE(obp))
2623 2624 dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
2624 2625 arc_release(dr->dt.dl.dr_data, db);
2625 2626 }
2626 2627 mutex_exit(&db->db_mtx);
2627 2628
2628 2629 dbuf_write_done(zio, NULL, db);
2629 2630 }
2630 2631
2631 2632 static void
2632 2633 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
2633 2634 {
2634 2635 dmu_buf_impl_t *db = dr->dr_dbuf;
2635 2636 dnode_t *dn;
2636 2637 objset_t *os;
2637 2638 dmu_buf_impl_t *parent = db->db_parent;
2638 2639 uint64_t txg = tx->tx_txg;
2639 2640 zbookmark_t zb;
2640 2641 zio_prop_t zp;
2641 2642 zio_t *zio;
2642 2643 int wp_flag = 0;
2643 2644
2644 2645 DB_DNODE_ENTER(db);
2645 2646 dn = DB_DNODE(db);
2646 2647 os = dn->dn_objset;
2647 2648
2648 2649 if (db->db_state != DB_NOFILL) {
2649 2650 if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
2650 2651 /*
2651 2652 * Private object buffers are released here rather
2652 2653 * than in dbuf_dirty() since they are only modified
2653 2654 * in the syncing context and we don't want the
2654 2655 * overhead of making multiple copies of the data.
2655 2656 */
2656 2657 if (BP_IS_HOLE(db->db_blkptr)) {
2657 2658 arc_buf_thaw(data);
2658 2659 } else {
2659 2660 dbuf_release_bp(db);
2660 2661 }
2661 2662 }
2662 2663 }
2663 2664
2664 2665 if (parent != dn->dn_dbuf) {
2665 2666 ASSERT(parent && parent->db_data_pending);
2666 2667 ASSERT(db->db_level == parent->db_level-1);
2667 2668 ASSERT(arc_released(parent->db_buf));
2668 2669 zio = parent->db_data_pending->dr_zio;
2669 2670 } else {
2670 2671 ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
2671 2672 db->db_blkid != DMU_SPILL_BLKID) ||
2672 2673 (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
2673 2674 if (db->db_blkid != DMU_SPILL_BLKID)
2674 2675 ASSERT3P(db->db_blkptr, ==,
2675 2676 &dn->dn_phys->dn_blkptr[db->db_blkid]);
2676 2677 zio = dn->dn_zio;
2677 2678 }
2678 2679
2679 2680 ASSERT(db->db_level == 0 || data == db->db_buf);
2680 2681 ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
2681 2682 ASSERT(zio);
2682 2683
2683 2684 SET_BOOKMARK(&zb, os->os_dsl_dataset ?
2684 2685 os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
2685 2686 db->db.db_object, db->db_level, db->db_blkid);
2686 2687
2687 2688 if (db->db_blkid == DMU_SPILL_BLKID)
2688 2689 wp_flag = WP_SPILL;
2689 2690 wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
2690 2691
2691 2692 dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
2692 2693 DB_DNODE_EXIT(db);
2693 2694
2694 2695 if (db->db_level == 0 && dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
2695 2696 ASSERT(db->db_state != DB_NOFILL);
2696 2697 dr->dr_zio = zio_write(zio, os->os_spa, txg,
2697 2698 db->db_blkptr, data->b_data, arc_buf_size(data), &zp,
2698 2699 dbuf_write_override_ready, dbuf_write_override_done, dr,
2699 2700 ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
2700 2701 mutex_enter(&db->db_mtx);
2701 2702 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
2702 2703 zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
2703 2704 dr->dt.dl.dr_copies);
2704 2705 mutex_exit(&db->db_mtx);
2705 2706 } else if (db->db_state == DB_NOFILL) {
2706 2707 ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF);
2707 2708 dr->dr_zio = zio_write(zio, os->os_spa, txg,
2708 2709 db->db_blkptr, NULL, db->db.db_size, &zp,
2709 2710 dbuf_write_nofill_ready, dbuf_write_nofill_done, db,
2710 2711 ZIO_PRIORITY_ASYNC_WRITE,
2711 2712 ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
2712 2713 } else {
2713 2714 ASSERT(arc_released(data));
2714 2715 dr->dr_zio = arc_write(zio, os->os_spa, txg,
2715 2716 db->db_blkptr, data, DBUF_IS_L2CACHEABLE(db), &zp,
2716 2717 dbuf_write_ready, dbuf_write_done, db,
2717 2718 ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
2718 2719 }
2719 2720 }