6841 Undirty freed spill blocks
Reviewed by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed by: Dan McDonald <danmcd@omniti.com>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
--- old/usr/src/uts/common/fs/zfs/dbuf.c
+++ new/usr/src/uts/common/fs/zfs/dbuf.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 24 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
25 25 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
26 26 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
27 27 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
28 28 * Copyright (c) 2014 Integros [integros.com]
29 29 */
30 30
31 31 #include <sys/zfs_context.h>
32 32 #include <sys/dmu.h>
33 33 #include <sys/dmu_send.h>
34 34 #include <sys/dmu_impl.h>
35 35 #include <sys/dbuf.h>
36 36 #include <sys/dmu_objset.h>
37 37 #include <sys/dsl_dataset.h>
38 38 #include <sys/dsl_dir.h>
39 39 #include <sys/dmu_tx.h>
40 40 #include <sys/spa.h>
41 41 #include <sys/zio.h>
42 42 #include <sys/dmu_zfetch.h>
43 43 #include <sys/sa.h>
44 44 #include <sys/sa_impl.h>
45 45 #include <sys/zfeature.h>
46 46 #include <sys/blkptr.h>
47 47 #include <sys/range_tree.h>
48 48
49 49 /*
50 50 * Number of times that zfs_free_range() took the slow path while doing
51 51 * a zfs receive. A nonzero value indicates a potential performance problem.
52 52 */
53 53 uint64_t zfs_free_range_recv_miss;
54 54
55 55 static void dbuf_destroy(dmu_buf_impl_t *db);
56 56 static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
57 57 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
58 58
59 59 #ifndef __lint
60 60 extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu,
61 61 dmu_buf_evict_func_t *evict_func, dmu_buf_t **clear_on_evict_dbufp);
62 62 #endif /* ! __lint */
63 63
64 64 /*
65 65 * Global data structures and functions for the dbuf cache.
66 66 */
67 67 static kmem_cache_t *dbuf_cache;
68 68 static taskq_t *dbu_evict_taskq;
69 69
70 70 /* ARGSUSED */
71 71 static int
72 72 dbuf_cons(void *vdb, void *unused, int kmflag)
73 73 {
74 74 dmu_buf_impl_t *db = vdb;
75 75 bzero(db, sizeof (dmu_buf_impl_t));
76 76
77 77 mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
78 78 cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
79 79 refcount_create(&db->db_holds);
80 80
81 81 return (0);
82 82 }
83 83
84 84 /* ARGSUSED */
85 85 static void
86 86 dbuf_dest(void *vdb, void *unused)
87 87 {
88 88 dmu_buf_impl_t *db = vdb;
89 89 mutex_destroy(&db->db_mtx);
90 90 cv_destroy(&db->db_changed);
91 91 refcount_destroy(&db->db_holds);
92 92 }
93 93
94 94 /*
95 95 * dbuf hash table routines
96 96 */
97 97 static dbuf_hash_table_t dbuf_hash_table;
98 98
99 99 static uint64_t dbuf_hash_count;
100 100
101 101 static uint64_t
102 102 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
103 103 {
104 104 uintptr_t osv = (uintptr_t)os;
105 105 uint64_t crc = -1ULL;
106 106
107 107 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
108 108 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
109 109 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
110 110 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
111 111 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
112 112 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
113 113 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];
114 114
115 115 crc ^= (osv>>14) ^ (obj>>16) ^ (blkid>>16);
116 116
117 117 return (crc);
118 118 }
119 119
120 120 #define DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid)
121 121
122 122 #define DBUF_EQUAL(dbuf, os, obj, level, blkid) \
123 123 ((dbuf)->db.db_object == (obj) && \
124 124 (dbuf)->db_objset == (os) && \
125 125 (dbuf)->db_level == (level) && \
126 126 (dbuf)->db_blkid == (blkid))
127 127
128 128 dmu_buf_impl_t *
129 129 dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
130 130 {
131 131 dbuf_hash_table_t *h = &dbuf_hash_table;
132 132 uint64_t hv = DBUF_HASH(os, obj, level, blkid);
133 133 uint64_t idx = hv & h->hash_table_mask;
134 134 dmu_buf_impl_t *db;
135 135
136 136 mutex_enter(DBUF_HASH_MUTEX(h, idx));
137 137 for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
138 138 if (DBUF_EQUAL(db, os, obj, level, blkid)) {
139 139 mutex_enter(&db->db_mtx);
140 140 if (db->db_state != DB_EVICTING) {
141 141 mutex_exit(DBUF_HASH_MUTEX(h, idx));
142 142 return (db);
143 143 }
144 144 mutex_exit(&db->db_mtx);
145 145 }
146 146 }
147 147 mutex_exit(DBUF_HASH_MUTEX(h, idx));
148 148 return (NULL);
149 149 }
150 150
151 151 static dmu_buf_impl_t *
152 152 dbuf_find_bonus(objset_t *os, uint64_t object)
153 153 {
154 154 dnode_t *dn;
155 155 dmu_buf_impl_t *db = NULL;
156 156
157 157 if (dnode_hold(os, object, FTAG, &dn) == 0) {
158 158 rw_enter(&dn->dn_struct_rwlock, RW_READER);
159 159 if (dn->dn_bonus != NULL) {
160 160 db = dn->dn_bonus;
161 161 mutex_enter(&db->db_mtx);
162 162 }
163 163 rw_exit(&dn->dn_struct_rwlock);
164 164 dnode_rele(dn, FTAG);
165 165 }
166 166 return (db);
167 167 }
168 168
169 169 /*
170 170 * Insert an entry into the hash table. If there is already an element
171 171 * equal to elem in the hash table, then the already existing element
172 172 * will be returned and the new element will not be inserted.
173 173 * Otherwise returns NULL.
174 174 */
175 175 static dmu_buf_impl_t *
176 176 dbuf_hash_insert(dmu_buf_impl_t *db)
177 177 {
178 178 dbuf_hash_table_t *h = &dbuf_hash_table;
179 179 objset_t *os = db->db_objset;
180 180 uint64_t obj = db->db.db_object;
181 181 int level = db->db_level;
182 182 uint64_t blkid = db->db_blkid;
183 183 uint64_t hv = DBUF_HASH(os, obj, level, blkid);
184 184 uint64_t idx = hv & h->hash_table_mask;
185 185 dmu_buf_impl_t *dbf;
186 186
187 187 mutex_enter(DBUF_HASH_MUTEX(h, idx));
188 188 for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
189 189 if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
190 190 mutex_enter(&dbf->db_mtx);
191 191 if (dbf->db_state != DB_EVICTING) {
192 192 mutex_exit(DBUF_HASH_MUTEX(h, idx));
193 193 return (dbf);
194 194 }
195 195 mutex_exit(&dbf->db_mtx);
196 196 }
197 197 }
198 198
199 199 mutex_enter(&db->db_mtx);
200 200 db->db_hash_next = h->hash_table[idx];
201 201 h->hash_table[idx] = db;
202 202 mutex_exit(DBUF_HASH_MUTEX(h, idx));
203 203 atomic_inc_64(&dbuf_hash_count);
204 204
205 205 return (NULL);
206 206 }
207 207
208 208 /*
209 209 * Remove an entry from the hash table. It must be in the EVICTING state.
210 210 */
211 211 static void
212 212 dbuf_hash_remove(dmu_buf_impl_t *db)
213 213 {
214 214 dbuf_hash_table_t *h = &dbuf_hash_table;
215 215 uint64_t hv = DBUF_HASH(db->db_objset, db->db.db_object,
216 216 db->db_level, db->db_blkid);
217 217 uint64_t idx = hv & h->hash_table_mask;
218 218 dmu_buf_impl_t *dbf, **dbp;
219 219
220 220 /*
221 221 * We mustn't hold db_mtx to maintain lock ordering:
222 222 * DBUF_HASH_MUTEX > db_mtx.
223 223 */
224 224 ASSERT(refcount_is_zero(&db->db_holds));
225 225 ASSERT(db->db_state == DB_EVICTING);
226 226 ASSERT(!MUTEX_HELD(&db->db_mtx));
227 227
228 228 mutex_enter(DBUF_HASH_MUTEX(h, idx));
229 229 dbp = &h->hash_table[idx];
230 230 while ((dbf = *dbp) != db) {
231 231 dbp = &dbf->db_hash_next;
232 232 ASSERT(dbf != NULL);
233 233 }
234 234 *dbp = db->db_hash_next;
235 235 db->db_hash_next = NULL;
236 236 mutex_exit(DBUF_HASH_MUTEX(h, idx));
237 237 atomic_dec_64(&dbuf_hash_count);
238 238 }
239 239
240 240 static arc_evict_func_t dbuf_do_evict;
241 241
242 242 typedef enum {
243 243 DBVU_EVICTING,
244 244 DBVU_NOT_EVICTING
245 245 } dbvu_verify_type_t;
246 246
247 247 static void
248 248 dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
249 249 {
250 250 #ifdef ZFS_DEBUG
251 251 int64_t holds;
252 252
253 253 if (db->db_user == NULL)
254 254 return;
255 255
256 256 /* Only data blocks support the attachment of user data. */
257 257 ASSERT(db->db_level == 0);
258 258
259 259 /* Clients must resolve a dbuf before attaching user data. */
260 260 ASSERT(db->db.db_data != NULL);
261 261 ASSERT3U(db->db_state, ==, DB_CACHED);
262 262
263 263 holds = refcount_count(&db->db_holds);
264 264 if (verify_type == DBVU_EVICTING) {
265 265 /*
266 266 * Immediate eviction occurs when holds == dirtycnt.
267 267 * For normal eviction buffers, holds is zero on
268 268 * eviction, except when dbuf_fix_old_data() calls
269 269 * dbuf_clear_data(). However, the hold count can grow
270 270 * during eviction even though db_mtx is held (see
271 271 * dmu_bonus_hold() for an example), so we can only
272 272 * test the generic invariant that holds >= dirtycnt.
273 273 */
274 274 ASSERT3U(holds, >=, db->db_dirtycnt);
275 275 } else {
276 276 if (db->db_user_immediate_evict == TRUE)
277 277 ASSERT3U(holds, >=, db->db_dirtycnt);
278 278 else
279 279 ASSERT3U(holds, >, 0);
280 280 }
281 281 #endif
282 282 }
283 283
284 284 static void
285 285 dbuf_evict_user(dmu_buf_impl_t *db)
286 286 {
287 287 dmu_buf_user_t *dbu = db->db_user;
288 288
289 289 ASSERT(MUTEX_HELD(&db->db_mtx));
290 290
291 291 if (dbu == NULL)
292 292 return;
293 293
294 294 dbuf_verify_user(db, DBVU_EVICTING);
295 295 db->db_user = NULL;
296 296
297 297 #ifdef ZFS_DEBUG
298 298 if (dbu->dbu_clear_on_evict_dbufp != NULL)
299 299 *dbu->dbu_clear_on_evict_dbufp = NULL;
300 300 #endif
301 301
302 302 /*
303 303 * Invoke the callback from a taskq to avoid lock order reversals
304 304 * and limit stack depth.
305 305 */
306 306 taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func, dbu, 0,
307 307 &dbu->dbu_tqent);
308 308 }
309 309
310 310 boolean_t
311 311 dbuf_is_metadata(dmu_buf_impl_t *db)
312 312 {
313 313 if (db->db_level > 0) {
314 314 return (B_TRUE);
315 315 } else {
316 316 boolean_t is_metadata;
317 317
318 318 DB_DNODE_ENTER(db);
319 319 is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
320 320 DB_DNODE_EXIT(db);
321 321
322 322 return (is_metadata);
323 323 }
324 324 }
325 325
326 326 void
327 327 dbuf_evict(dmu_buf_impl_t *db)
328 328 {
329 329 ASSERT(MUTEX_HELD(&db->db_mtx));
330 330 ASSERT(db->db_buf == NULL);
331 331 ASSERT(db->db_data_pending == NULL);
332 332
333 333 dbuf_clear(db);
334 334 dbuf_destroy(db);
335 335 }
336 336
337 337 void
338 338 dbuf_init(void)
339 339 {
340 340 uint64_t hsize = 1ULL << 16;
341 341 dbuf_hash_table_t *h = &dbuf_hash_table;
342 342 int i;
343 343
344 344 /*
345 345 * The hash table is big enough to fill all of physical memory
346 346 * with an average 4K block size. The table will take up
347 347 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
348 348 */
349 349 while (hsize * 4096 < physmem * PAGESIZE)
350 350 hsize <<= 1;
351 351
352 352 retry:
353 353 h->hash_table_mask = hsize - 1;
354 354 h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
355 355 if (h->hash_table == NULL) {
356 356 /* XXX - we should really return an error instead of assert */
357 357 ASSERT(hsize > (1ULL << 10));
358 358 hsize >>= 1;
359 359 goto retry;
360 360 }
361 361
362 362 dbuf_cache = kmem_cache_create("dmu_buf_impl_t",
363 363 sizeof (dmu_buf_impl_t),
364 364 0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
365 365
366 366 for (i = 0; i < DBUF_MUTEXES; i++)
367 367 mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
368 368
369 369 /*
370 370 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
371 371 * configuration is not required.
372 372 */
373 373 dbu_evict_taskq = taskq_create("dbu_evict", 1, minclsyspri, 0, 0, 0);
374 374 }
375 375
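To make the sizing loop at the top of dbuf_init() concrete (a sketch, assuming 8-byte pointers and 16 GB of physical memory): hsize starts at 2^16 and doubles until hsize * 4096 >= physmem * PAGESIZE, so it settles at 2^22 (4M) buckets, and the bucket array consumes 2^22 * 8 bytes = 32 MB, matching the comment's estimate of roughly 2 MB per GB of memory.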
376 376 void
377 377 dbuf_fini(void)
378 378 {
379 379 dbuf_hash_table_t *h = &dbuf_hash_table;
380 380 int i;
381 381
382 382 for (i = 0; i < DBUF_MUTEXES; i++)
383 383 mutex_destroy(&h->hash_mutexes[i]);
384 384 kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
385 385 kmem_cache_destroy(dbuf_cache);
386 386 taskq_destroy(dbu_evict_taskq);
387 387 }
388 388
389 389 /*
390 390 * Other stuff.
391 391 */
392 392
393 393 #ifdef ZFS_DEBUG
394 394 static void
395 395 dbuf_verify(dmu_buf_impl_t *db)
396 396 {
397 397 dnode_t *dn;
398 398 dbuf_dirty_record_t *dr;
399 399
400 400 ASSERT(MUTEX_HELD(&db->db_mtx));
401 401
402 402 if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
403 403 return;
404 404
405 405 ASSERT(db->db_objset != NULL);
406 406 DB_DNODE_ENTER(db);
407 407 dn = DB_DNODE(db);
408 408 if (dn == NULL) {
409 409 ASSERT(db->db_parent == NULL);
410 410 ASSERT(db->db_blkptr == NULL);
411 411 } else {
412 412 ASSERT3U(db->db.db_object, ==, dn->dn_object);
413 413 ASSERT3P(db->db_objset, ==, dn->dn_objset);
414 414 ASSERT3U(db->db_level, <, dn->dn_nlevels);
415 415 ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
416 416 db->db_blkid == DMU_SPILL_BLKID ||
417 417 !avl_is_empty(&dn->dn_dbufs));
418 418 }
419 419 if (db->db_blkid == DMU_BONUS_BLKID) {
420 420 ASSERT(dn != NULL);
421 421 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
422 422 ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
423 423 } else if (db->db_blkid == DMU_SPILL_BLKID) {
424 424 ASSERT(dn != NULL);
425 425 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
426 426 ASSERT0(db->db.db_offset);
427 427 } else {
428 428 ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
429 429 }
430 430
431 431 for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
432 432 ASSERT(dr->dr_dbuf == db);
433 433
434 434 for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
435 435 ASSERT(dr->dr_dbuf == db);
436 436
437 437 /*
438 438 * We can't assert that db_size matches dn_datablksz because it
439 439 * can be momentarily different when another thread is doing
440 440 * dnode_set_blksz().
441 441 */
442 442 if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
443 443 dr = db->db_data_pending;
444 444 /*
445 445 * It should only be modified in syncing context, so
446 446 * make sure we only have one copy of the data.
447 447 */
448 448 ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
449 449 }
450 450
451 451 /* verify db->db_blkptr */
452 452 if (db->db_blkptr) {
453 453 if (db->db_parent == dn->dn_dbuf) {
454 454 /* db is pointed to by the dnode */
455 455 /* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
456 456 if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
457 457 ASSERT(db->db_parent == NULL);
458 458 else
459 459 ASSERT(db->db_parent != NULL);
460 460 if (db->db_blkid != DMU_SPILL_BLKID)
461 461 ASSERT3P(db->db_blkptr, ==,
462 462 &dn->dn_phys->dn_blkptr[db->db_blkid]);
463 463 } else {
464 464 /* db is pointed to by an indirect block */
465 465 int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT;
466 466 ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
467 467 ASSERT3U(db->db_parent->db.db_object, ==,
468 468 db->db.db_object);
469 469 /*
470 470 * dnode_grow_indblksz() can make this fail if we don't
471 471 * have the struct_rwlock. XXX indblksz no longer
472 472 * grows. safe to do this now?
473 473 */
474 474 if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
475 475 ASSERT3P(db->db_blkptr, ==,
476 476 ((blkptr_t *)db->db_parent->db.db_data +
477 477 db->db_blkid % epb));
478 478 }
479 479 }
480 480 }
481 481 if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
482 482 (db->db_buf == NULL || db->db_buf->b_data) &&
483 483 db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
484 484 db->db_state != DB_FILL && !dn->dn_free_txg) {
485 485 /*
486 486 * If the blkptr isn't set but it has nonzero data,
487 487 * it had better be dirty, otherwise we'll lose that
488 488 * data when we evict this buffer.
489 489 */
490 490 if (db->db_dirtycnt == 0) {
491 491 uint64_t *buf = db->db.db_data;
492 492 int i;
493 493
494 494 for (i = 0; i < db->db.db_size >> 3; i++) {
495 495 ASSERT(buf[i] == 0);
496 496 }
497 497 }
498 498 }
499 499 DB_DNODE_EXIT(db);
500 500 }
501 501 #endif
502 502
503 503 static void
504 504 dbuf_clear_data(dmu_buf_impl_t *db)
505 505 {
506 506 ASSERT(MUTEX_HELD(&db->db_mtx));
507 507 dbuf_evict_user(db);
508 508 db->db_buf = NULL;
509 509 db->db.db_data = NULL;
510 510 if (db->db_state != DB_NOFILL)
511 511 db->db_state = DB_UNCACHED;
512 512 }
513 513
514 514 static void
515 515 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
516 516 {
517 517 ASSERT(MUTEX_HELD(&db->db_mtx));
518 518 ASSERT(buf != NULL);
519 519
520 520 db->db_buf = buf;
521 521 ASSERT(buf->b_data != NULL);
522 522 db->db.db_data = buf->b_data;
523 523 if (!arc_released(buf))
524 524 arc_set_callback(buf, dbuf_do_evict, db);
525 525 }
526 526
527 527 /*
528 528 * Loan out an arc_buf for read. Return the loaned arc_buf.
529 529 */
530 530 arc_buf_t *
531 531 dbuf_loan_arcbuf(dmu_buf_impl_t *db)
532 532 {
533 533 arc_buf_t *abuf;
534 534
535 535 mutex_enter(&db->db_mtx);
536 536 if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
537 537 int blksz = db->db.db_size;
538 538 spa_t *spa = db->db_objset->os_spa;
539 539
540 540 mutex_exit(&db->db_mtx);
541 541 abuf = arc_loan_buf(spa, blksz);
542 542 bcopy(db->db.db_data, abuf->b_data, blksz);
543 543 } else {
544 544 abuf = db->db_buf;
545 545 arc_loan_inuse_buf(abuf, db);
546 546 dbuf_clear_data(db);
547 547 mutex_exit(&db->db_mtx);
548 548 }
549 549 return (abuf);
550 550 }
551 551
552 552 /*
553 553 * Calculate which level n block references the data at the level 0 offset
554 554 * provided.
555 555 */
556 556 uint64_t
557 557 dbuf_whichblock(dnode_t *dn, int64_t level, uint64_t offset)
558 558 {
559 559 if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
560 560 /*
561 561 * The level n blkid is equal to the level 0 blkid divided by
562 562 * the number of level 0s in a level n block.
563 563 *
564 564 * The level 0 blkid is offset >> datablkshift =
565 565 * offset / 2^datablkshift.
566 566 *
567 567 * The number of level 0s in a level n is the number of block
568 568 * pointers in an indirect block, raised to the power of level.
569 569 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
570 570 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
571 571 *
572 572 * Thus, the level n blkid is: offset /
573 573 * ((2^datablkshift)*(2^(level*(indblkshift - SPA_BLKPTRSHIFT)))
574 574 * = offset / 2^(datablkshift + level *
575 575 * (indblkshift - SPA_BLKPTRSHIFT))
576 576 * = offset >> (datablkshift + level *
577 577 * (indblkshift - SPA_BLKPTRSHIFT))
578 578 */
579 579 return (offset >> (dn->dn_datablkshift + level *
580 580 (dn->dn_indblkshift - SPA_BLKPTRSHIFT)));
581 581 } else {
582 582 ASSERT3U(offset, <, dn->dn_datablksz);
583 583 return (0);
584 584 }
585 585 }
586 586
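A worked example of the shift arithmetic above (a sketch with assumed block sizes, not part of the source): take dn_datablkshift = 17 (128K data blocks), dn_indblkshift = 14 (16K indirect blocks), and SPA_BLKPTRSHIFT = 7 (128-byte block pointers), so each indirect block holds 2^(14 - 7) = 128 pointers. For byte offset 300000000:

	level 0:  300000000 >> 17            = 2288
	level 1:  300000000 >> (17 + 1*7)    = 17    (i.e. 2288 / 128)
	level 2:  300000000 >> (17 + 2*7)    = 0

So the level-1 block with blkid 17 references the level-0 block with blkid 2288.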
587 587 static void
588 588 dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
589 589 {
590 590 dmu_buf_impl_t *db = vdb;
591 591
592 592 mutex_enter(&db->db_mtx);
593 593 ASSERT3U(db->db_state, ==, DB_READ);
594 594 /*
595 595 * All reads are synchronous, so we must have a hold on the dbuf
596 596 */
597 597 ASSERT(refcount_count(&db->db_holds) > 0);
598 598 ASSERT(db->db_buf == NULL);
599 599 ASSERT(db->db.db_data == NULL);
600 600 if (db->db_level == 0 && db->db_freed_in_flight) {
601 601 /* we were freed in flight; disregard any error */
602 602 arc_release(buf, db);
603 603 bzero(buf->b_data, db->db.db_size);
604 604 arc_buf_freeze(buf);
605 605 db->db_freed_in_flight = FALSE;
606 606 dbuf_set_data(db, buf);
607 607 db->db_state = DB_CACHED;
608 608 } else if (zio == NULL || zio->io_error == 0) {
609 609 dbuf_set_data(db, buf);
610 610 db->db_state = DB_CACHED;
611 611 } else {
612 612 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
613 613 ASSERT3P(db->db_buf, ==, NULL);
614 614 VERIFY(arc_buf_remove_ref(buf, db));
615 615 db->db_state = DB_UNCACHED;
616 616 }
617 617 cv_broadcast(&db->db_changed);
618 618 dbuf_rele_and_unlock(db, NULL);
619 619 }
620 620
621 621 static void
622 622 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
623 623 {
624 624 dnode_t *dn;
625 625 zbookmark_phys_t zb;
626 626 arc_flags_t aflags = ARC_FLAG_NOWAIT;
627 627
628 628 DB_DNODE_ENTER(db);
629 629 dn = DB_DNODE(db);
630 630 ASSERT(!refcount_is_zero(&db->db_holds));
631 631 /* We need the struct_rwlock to prevent db_blkptr from changing. */
632 632 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
633 633 ASSERT(MUTEX_HELD(&db->db_mtx));
634 634 ASSERT(db->db_state == DB_UNCACHED);
635 635 ASSERT(db->db_buf == NULL);
636 636
637 637 if (db->db_blkid == DMU_BONUS_BLKID) {
638 638 int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
639 639
640 640 ASSERT3U(bonuslen, <=, db->db.db_size);
641 641 db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
642 642 arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
643 643 if (bonuslen < DN_MAX_BONUSLEN)
644 644 bzero(db->db.db_data, DN_MAX_BONUSLEN);
645 645 if (bonuslen)
646 646 bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
647 647 DB_DNODE_EXIT(db);
648 648 db->db_state = DB_CACHED;
649 649 mutex_exit(&db->db_mtx);
650 650 return;
651 651 }
652 652
653 653 /*
654 654 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
655 655 * processes the delete record and clears the bp while we are waiting
656 656 * for the dn_mtx (resulting in a "no" from block_freed).
657 657 */
658 658 if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
659 659 (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
660 660 BP_IS_HOLE(db->db_blkptr)))) {
661 661 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
662 662
663 663 DB_DNODE_EXIT(db);
664 664 dbuf_set_data(db, arc_buf_alloc(db->db_objset->os_spa,
665 665 db->db.db_size, db, type));
666 666 bzero(db->db.db_data, db->db.db_size);
667 667 db->db_state = DB_CACHED;
668 668 mutex_exit(&db->db_mtx);
669 669 return;
670 670 }
671 671
672 672 DB_DNODE_EXIT(db);
673 673
674 674 db->db_state = DB_READ;
675 675 mutex_exit(&db->db_mtx);
676 676
677 677 if (DBUF_IS_L2CACHEABLE(db))
678 678 aflags |= ARC_FLAG_L2CACHE;
679 679 if (DBUF_IS_L2COMPRESSIBLE(db))
680 680 aflags |= ARC_FLAG_L2COMPRESS;
681 681
682 682 SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
683 683 db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
684 684 db->db.db_object, db->db_level, db->db_blkid);
685 685
686 686 dbuf_add_ref(db, NULL);
687 687
688 688 (void) arc_read(zio, db->db_objset->os_spa, db->db_blkptr,
689 689 dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
690 690 (flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
691 691 &aflags, &zb);
692 692 }
693 693
694 694 int
695 695 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
696 696 {
697 697 int err = 0;
698 698 boolean_t havepzio = (zio != NULL);
699 699 boolean_t prefetch;
700 700 dnode_t *dn;
701 701
702 702 /*
703 703 * We don't have to hold the mutex to check db_state because it
704 704 * can't be freed while we have a hold on the buffer.
705 705 */
706 706 ASSERT(!refcount_is_zero(&db->db_holds));
707 707
708 708 if (db->db_state == DB_NOFILL)
709 709 return (SET_ERROR(EIO));
710 710
711 711 DB_DNODE_ENTER(db);
712 712 dn = DB_DNODE(db);
713 713 if ((flags & DB_RF_HAVESTRUCT) == 0)
714 714 rw_enter(&dn->dn_struct_rwlock, RW_READER);
715 715
716 716 prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
717 717 (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
718 718 DBUF_IS_CACHEABLE(db);
719 719
720 720 mutex_enter(&db->db_mtx);
721 721 if (db->db_state == DB_CACHED) {
722 722 mutex_exit(&db->db_mtx);
723 723 if (prefetch)
724 724 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1);
725 725 if ((flags & DB_RF_HAVESTRUCT) == 0)
726 726 rw_exit(&dn->dn_struct_rwlock);
727 727 DB_DNODE_EXIT(db);
728 728 } else if (db->db_state == DB_UNCACHED) {
729 729 spa_t *spa = dn->dn_objset->os_spa;
730 730
731 731 if (zio == NULL)
732 732 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
733 733 dbuf_read_impl(db, zio, flags);
734 734
735 735 /* dbuf_read_impl has dropped db_mtx for us */
736 736
737 737 if (prefetch)
738 738 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1);
739 739
740 740 if ((flags & DB_RF_HAVESTRUCT) == 0)
741 741 rw_exit(&dn->dn_struct_rwlock);
742 742 DB_DNODE_EXIT(db);
743 743
744 744 if (!havepzio)
745 745 err = zio_wait(zio);
746 746 } else {
747 747 /*
748 748 * Another reader came in while the dbuf was in flight
749 749 * between UNCACHED and CACHED. Either a writer will finish
750 750 * writing the buffer (sending the dbuf to CACHED) or the
751 751 * first reader's request will reach the read_done callback
752 752 * and send the dbuf to CACHED. Otherwise, a failure
753 753 * occurred and the dbuf went to UNCACHED.
754 754 */
755 755 mutex_exit(&db->db_mtx);
756 756 if (prefetch)
757 757 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1);
758 758 if ((flags & DB_RF_HAVESTRUCT) == 0)
759 759 rw_exit(&dn->dn_struct_rwlock);
760 760 DB_DNODE_EXIT(db);
761 761
762 762 /* Skip the wait per the caller's request. */
763 763 mutex_enter(&db->db_mtx);
764 764 if ((flags & DB_RF_NEVERWAIT) == 0) {
765 765 while (db->db_state == DB_READ ||
766 766 db->db_state == DB_FILL) {
767 767 ASSERT(db->db_state == DB_READ ||
768 768 (flags & DB_RF_HAVESTRUCT) == 0);
769 769 DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
770 770 db, zio_t *, zio);
771 771 cv_wait(&db->db_changed, &db->db_mtx);
772 772 }
773 773 if (db->db_state == DB_UNCACHED)
774 774 err = SET_ERROR(EIO);
775 775 }
776 776 mutex_exit(&db->db_mtx);
777 777 }
778 778
779 779 ASSERT(err || havepzio || db->db_state == DB_CACHED);
780 780 return (err);
781 781 }
782 782
783 783 static void
784 784 dbuf_noread(dmu_buf_impl_t *db)
785 785 {
786 786 ASSERT(!refcount_is_zero(&db->db_holds));
787 787 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
788 788 mutex_enter(&db->db_mtx);
789 789 while (db->db_state == DB_READ || db->db_state == DB_FILL)
790 790 cv_wait(&db->db_changed, &db->db_mtx);
791 791 if (db->db_state == DB_UNCACHED) {
792 792 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
793 793 spa_t *spa = db->db_objset->os_spa;
794 794
795 795 ASSERT(db->db_buf == NULL);
796 796 ASSERT(db->db.db_data == NULL);
797 797 dbuf_set_data(db, arc_buf_alloc(spa, db->db.db_size, db, type));
798 798 db->db_state = DB_FILL;
799 799 } else if (db->db_state == DB_NOFILL) {
800 800 dbuf_clear_data(db);
801 801 } else {
802 802 ASSERT3U(db->db_state, ==, DB_CACHED);
803 803 }
804 804 mutex_exit(&db->db_mtx);
805 805 }
806 806
807 807 /*
808 808 * This is our just-in-time copy function. It makes a copy of
809 809 * buffers that have been modified in a previous transaction
810 810 * group, before we modify them in the current active group.
811 811 *
812 812 * This function is used in two places: when we are dirtying a
813 813 * buffer for the first time in a txg, and when we are freeing
814 814 * a range in a dnode that includes this buffer.
815 815 *
816 816 * Note that when we are called from dbuf_free_range() we do
817 817 * not put a hold on the buffer, we just traverse the active
818 818 * dbuf list for the dnode.
819 819 */
820 820 static void
821 821 dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
822 822 {
823 823 dbuf_dirty_record_t *dr = db->db_last_dirty;
824 824
825 825 ASSERT(MUTEX_HELD(&db->db_mtx));
826 826 ASSERT(db->db.db_data != NULL);
827 827 ASSERT(db->db_level == 0);
828 828 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
829 829
830 830 if (dr == NULL ||
831 831 (dr->dt.dl.dr_data !=
832 832 ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
833 833 return;
834 834
835 835 /*
836 836 * If the last dirty record for this dbuf has not yet synced
837 837 * and it's referencing the dbuf data, either:
838 838 * reset the reference to point to a new copy,
839 839 * or (if there are no active holders)
840 840 * just null out the current db_data pointer.
841 841 */
842 842 ASSERT(dr->dr_txg >= txg - 2);
843 843 if (db->db_blkid == DMU_BONUS_BLKID) {
844 844 /* Note that the data bufs here are zio_bufs */
845 845 dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
846 846 arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
847 847 bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
848 848 } else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
849 849 int size = db->db.db_size;
850 850 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
851 851 spa_t *spa = db->db_objset->os_spa;
852 852
853 853 dr->dt.dl.dr_data = arc_buf_alloc(spa, size, db, type);
854 854 bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
855 855 } else {
856 856 dbuf_clear_data(db);
857 857 }
858 858 }
859 859
860 860 void
861 861 dbuf_unoverride(dbuf_dirty_record_t *dr)
862 862 {
863 863 dmu_buf_impl_t *db = dr->dr_dbuf;
864 864 blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
865 865 uint64_t txg = dr->dr_txg;
866 866
867 867 ASSERT(MUTEX_HELD(&db->db_mtx));
868 868 ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
869 869 ASSERT(db->db_level == 0);
870 870
871 871 if (db->db_blkid == DMU_BONUS_BLKID ||
872 872 dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
873 873 return;
874 874
875 875 ASSERT(db->db_data_pending != dr);
876 876
877 877 /* free this block */
878 878 if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
879 879 zio_free(db->db_objset->os_spa, txg, bp);
880 880
881 881 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
882 882 dr->dt.dl.dr_nopwrite = B_FALSE;
883 883
884 884 /*
885 885 * Release the already-written buffer, so we leave it in
886 886 * a consistent dirty state. Note that all callers are
887 887 * modifying the buffer, so they will immediately do
888 888 * another (redundant) arc_release(). Therefore, leave
889 889 * the buf thawed to save the effort of freezing &
890 890 * immediately re-thawing it.
891 891 */
892 892 arc_release(dr->dt.dl.dr_data, db);
893 893 }
894 894
895 895 /*
896 896 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
897 897 * data blocks in the free range, so that any future readers will find
898 898 * empty blocks.
899 899 *
900 900 * This is a no-op if the dataset is in the middle of an incremental
901 901 * receive; see comment below for details.
902 902 */
903 903 void
904 904 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
905 905 dmu_tx_t *tx)
906 906 {
907 907 dmu_buf_impl_t db_search;
908 908 dmu_buf_impl_t *db, *db_next;
909 909 uint64_t txg = tx->tx_txg;
910 910 avl_index_t where;
911 + boolean_t freespill =
912 + (start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID);
911 913
912 - if (end_blkid > dn->dn_maxblkid && (end_blkid != DMU_SPILL_BLKID))
914 + if (end_blkid > dn->dn_maxblkid && !freespill)
913 915 end_blkid = dn->dn_maxblkid;
914 916 dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid);
915 917
916 918 db_search.db_level = 0;
917 919 db_search.db_blkid = start_blkid;
918 920 db_search.db_state = DB_SEARCH;
919 921
920 922 mutex_enter(&dn->dn_dbufs_mtx);
921 - if (start_blkid >= dn->dn_unlisted_l0_blkid) {
923 + if (start_blkid >= dn->dn_unlisted_l0_blkid && !freespill) {
922 924 /* There can't be any dbufs in this range; no need to search. */
923 925 #ifdef DEBUG
924 926 db = avl_find(&dn->dn_dbufs, &db_search, &where);
925 927 ASSERT3P(db, ==, NULL);
926 928 db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
927 929 ASSERT(db == NULL || db->db_level > 0);
928 930 #endif
929 931 mutex_exit(&dn->dn_dbufs_mtx);
930 932 return;
931 933 } else if (dmu_objset_is_receiving(dn->dn_objset)) {
932 934 /*
933 935 * If we are receiving, we expect there to be no dbufs in
934 936 * the range to be freed, because receive modifies each
935 937 * block at most once, and in offset order. If this is
936 938 * not the case, it can lead to performance problems,
937 939 * so note that we unexpectedly took the slow path.
938 940 */
939 941 atomic_inc_64(&zfs_free_range_recv_miss);
940 942 }
941 943
942 944 db = avl_find(&dn->dn_dbufs, &db_search, &where);
943 945 ASSERT3P(db, ==, NULL);
944 946 db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
945 947
946 948 for (; db != NULL; db = db_next) {
947 949 db_next = AVL_NEXT(&dn->dn_dbufs, db);
948 950 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
949 951
950 952 if (db->db_level != 0 || db->db_blkid > end_blkid) {
951 953 break;
952 954 }
953 955 ASSERT3U(db->db_blkid, >=, start_blkid);
954 956
955 957 /* found a level 0 buffer in the range */
956 958 mutex_enter(&db->db_mtx);
957 959 if (dbuf_undirty(db, tx)) {
958 960 /* mutex has been dropped and dbuf destroyed */
959 961 continue;
960 962 }
961 963
962 964 if (db->db_state == DB_UNCACHED ||
963 965 db->db_state == DB_NOFILL ||
964 966 db->db_state == DB_EVICTING) {
965 967 ASSERT(db->db.db_data == NULL);
966 968 mutex_exit(&db->db_mtx);
967 969 continue;
968 970 }
969 971 if (db->db_state == DB_READ || db->db_state == DB_FILL) {
970 972 /* will be handled in dbuf_read_done or dbuf_rele */
971 973 db->db_freed_in_flight = TRUE;
972 974 mutex_exit(&db->db_mtx);
973 975 continue;
974 976 }
975 977 if (refcount_count(&db->db_holds) == 0) {
976 978 ASSERT(db->db_buf);
977 979 dbuf_clear(db);
978 980 continue;
979 981 }
980 982 /* The dbuf is referenced */
981 983
982 984 if (db->db_last_dirty != NULL) {
983 985 dbuf_dirty_record_t *dr = db->db_last_dirty;
984 986
985 987 if (dr->dr_txg == txg) {
986 988 /*
987 989 * This buffer is "in-use", re-adjust the file
988 990 * size to reflect that this buffer may
989 991 * contain new data when we sync.
990 992 */
991 993 if (db->db_blkid != DMU_SPILL_BLKID &&
992 994 db->db_blkid > dn->dn_maxblkid)
993 995 dn->dn_maxblkid = db->db_blkid;
994 996 dbuf_unoverride(dr);
995 997 } else {
996 998 /*
997 999 * This dbuf is not dirty in the open context.
998 1000 * Either uncache it (if it's not referenced in
999 1001 * the open context) or reset its contents to
1000 1002 * empty.
1001 1003 */
1002 1004 dbuf_fix_old_data(db, txg);
1003 1005 }
1004 1006 }
1005 1007 /* clear the contents if it's cached */
1006 1008 if (db->db_state == DB_CACHED) {
1007 1009 ASSERT(db->db.db_data != NULL);
1008 1010 arc_release(db->db_buf, db);
1009 1011 bzero(db->db.db_data, db->db.db_size);
1010 1012 arc_buf_freeze(db->db_buf);
1011 1013 }
1012 1014
1013 1015 mutex_exit(&db->db_mtx);
1014 1016 }
1015 1017 mutex_exit(&dn->dn_dbufs_mtx);
1016 1018 }
1017 1019
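The freespill flag introduced above is the core of this change. DMU_SPILL_BLKID is a sentinel well outside the normal level-0 blkid range (it is (-2ULL) in the illumos headers, next to DMU_BONUS_BLKID), so the flag generalizes the old end_blkid != DMU_SPILL_BLKID guard and, more importantly, keeps the start_blkid >= dn_unlisted_l0_blkid shortcut from returning early when the spill block is being freed; without it, a freed spill dbuf could remain dirty and still be written out later. A caller removing a spill block would presumably invoke the function roughly as follows (a sketch; the actual call site, e.g. in the dnode spill-removal path, is not part of this diff):

	/* free and undirty only the spill block of this dnode */
	dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);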
1018 1020 static int
1019 1021 dbuf_block_freeable(dmu_buf_impl_t *db)
1020 1022 {
1021 1023 dsl_dataset_t *ds = db->db_objset->os_dsl_dataset;
1022 1024 uint64_t birth_txg = 0;
1023 1025
1024 1026 /*
1025 1027 * We don't need any locking to protect db_blkptr:
1026 1028 * If it's syncing, then db_last_dirty will be set
1027 1029 * so we'll ignore db_blkptr.
1028 1030 *
1029 1031 * This logic ensures that only block births for
1030 1032 * filled blocks are considered.
1031 1033 */
1032 1034 ASSERT(MUTEX_HELD(&db->db_mtx));
1033 1035 if (db->db_last_dirty && (db->db_blkptr == NULL ||
1034 1036 !BP_IS_HOLE(db->db_blkptr))) {
1035 1037 birth_txg = db->db_last_dirty->dr_txg;
1036 1038 } else if (db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
1037 1039 birth_txg = db->db_blkptr->blk_birth;
1038 1040 }
1039 1041
1040 1042 /*
1041 1043 * If this block doesn't exist or is in a snapshot, it can't be freed.
1042 1044 * Don't pass the bp to dsl_dataset_block_freeable() since we
1043 1045 * are holding the db_mtx lock and might deadlock if we are
1044 1046 * prefetching a dedup-ed block.
1045 1047 */
1046 1048 if (birth_txg != 0)
1047 1049 return (ds == NULL ||
1048 1050 dsl_dataset_block_freeable(ds, NULL, birth_txg));
1049 1051 else
1050 1052 return (B_FALSE);
1051 1053 }
1052 1054
1053 1055 void
1054 1056 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
1055 1057 {
1056 1058 arc_buf_t *buf, *obuf;
1057 1059 int osize = db->db.db_size;
1058 1060 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1059 1061 dnode_t *dn;
1060 1062
1061 1063 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1062 1064
1063 1065 DB_DNODE_ENTER(db);
1064 1066 dn = DB_DNODE(db);
1065 1067
1066 1068 /* XXX does *this* func really need the lock? */
1067 1069 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
1068 1070
1069 1071 /*
1070 1072 * This call to dmu_buf_will_dirty() with the dn_struct_rwlock held
1071 1073 * is OK, because there can be no other references to the db
1072 1074 * when we are changing its size, so no concurrent DB_FILL can
1073 1075 * be happening.
1074 1076 */
1075 1077 /*
1076 1078 * XXX we should be doing a dbuf_read, checking the return
1077 1079 * value and returning that up to our callers
1078 1080 */
1079 1081 dmu_buf_will_dirty(&db->db, tx);
1080 1082
1081 1083 /* create the data buffer for the new block */
1082 1084 buf = arc_buf_alloc(dn->dn_objset->os_spa, size, db, type);
1083 1085
1084 1086 /* copy old block data to the new block */
1085 1087 obuf = db->db_buf;
1086 1088 bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
1087 1089 /* zero the remainder */
1088 1090 if (size > osize)
1089 1091 bzero((uint8_t *)buf->b_data + osize, size - osize);
1090 1092
1091 1093 mutex_enter(&db->db_mtx);
1092 1094 dbuf_set_data(db, buf);
1093 1095 VERIFY(arc_buf_remove_ref(obuf, db));
1094 1096 db->db.db_size = size;
1095 1097
1096 1098 if (db->db_level == 0) {
1097 1099 ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
1098 1100 db->db_last_dirty->dt.dl.dr_data = buf;
1099 1101 }
1100 1102 mutex_exit(&db->db_mtx);
1101 1103
1102 1104 dnode_willuse_space(dn, size-osize, tx);
1103 1105 DB_DNODE_EXIT(db);
1104 1106 }
1105 1107
1106 1108 void
1107 1109 dbuf_release_bp(dmu_buf_impl_t *db)
1108 1110 {
1109 1111 objset_t *os = db->db_objset;
1110 1112
1111 1113 ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
1112 1114 ASSERT(arc_released(os->os_phys_buf) ||
1113 1115 list_link_active(&os->os_dsl_dataset->ds_synced_link));
1114 1116 ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
1115 1117
1116 1118 (void) arc_release(db->db_buf, db);
1117 1119 }
1118 1120
1119 1121 /*
1120 1122 * We already have a dirty record for this TXG, and we are being
1121 1123 * dirtied again.
1122 1124 */
1123 1125 static void
1124 1126 dbuf_redirty(dbuf_dirty_record_t *dr)
1125 1127 {
1126 1128 dmu_buf_impl_t *db = dr->dr_dbuf;
1127 1129
1128 1130 ASSERT(MUTEX_HELD(&db->db_mtx));
1129 1131
1130 1132 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
1131 1133 /*
1132 1134 * If this buffer has already been written out,
1133 1135 * we now need to reset its state.
1134 1136 */
1135 1137 dbuf_unoverride(dr);
1136 1138 if (db->db.db_object != DMU_META_DNODE_OBJECT &&
1137 1139 db->db_state != DB_NOFILL) {
1138 1140 /* Already released on initial dirty, so just thaw. */
1139 1141 ASSERT(arc_released(db->db_buf));
1140 1142 arc_buf_thaw(db->db_buf);
1141 1143 }
1142 1144 }
1143 1145 }
1144 1146
1145 1147 dbuf_dirty_record_t *
1146 1148 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1147 1149 {
1148 1150 dnode_t *dn;
1149 1151 objset_t *os;
1150 1152 dbuf_dirty_record_t **drp, *dr;
1151 1153 int drop_struct_lock = FALSE;
1152 1154 boolean_t do_free_accounting = B_FALSE;
1153 1155 int txgoff = tx->tx_txg & TXG_MASK;
1154 1156
1155 1157 ASSERT(tx->tx_txg != 0);
1156 1158 ASSERT(!refcount_is_zero(&db->db_holds));
1157 1159 DMU_TX_DIRTY_BUF(tx, db);
1158 1160
1159 1161 DB_DNODE_ENTER(db);
1160 1162 dn = DB_DNODE(db);
1161 1163 /*
1162 1164 * Shouldn't dirty a regular buffer in syncing context. Private
1163 1165 * objects may be dirtied in syncing context, but only if they
1164 1166 * were already pre-dirtied in open context.
1165 1167 */
1166 1168 ASSERT(!dmu_tx_is_syncing(tx) ||
1167 1169 BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
1168 1170 DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
1169 1171 dn->dn_objset->os_dsl_dataset == NULL);
1170 1172 /*
1171 1173 * We make this assert for private objects as well, but after we
1172 1174 * check if we're already dirty. They are allowed to re-dirty
1173 1175 * in syncing context.
1174 1176 */
1175 1177 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
1176 1178 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
1177 1179 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
1178 1180
1179 1181 mutex_enter(&db->db_mtx);
1180 1182 /*
1181 1183 * XXX make this true for indirects too? The problem is that
1182 1184 * transactions created with dmu_tx_create_assigned() from
1183 1185 * syncing context don't bother holding ahead.
1184 1186 */
1185 1187 ASSERT(db->db_level != 0 ||
1186 1188 db->db_state == DB_CACHED || db->db_state == DB_FILL ||
1187 1189 db->db_state == DB_NOFILL);
1188 1190
1189 1191 mutex_enter(&dn->dn_mtx);
1190 1192 /*
1191 1193 * Don't set dirtyctx to SYNC if we're just modifying this as we
1192 1194 * initialize the objset.
1193 1195 */
1194 1196 if (dn->dn_dirtyctx == DN_UNDIRTIED &&
1195 1197 !BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
1196 1198 dn->dn_dirtyctx =
1197 1199 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN);
1198 1200 ASSERT(dn->dn_dirtyctx_firstset == NULL);
1199 1201 dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
1200 1202 }
1201 1203 mutex_exit(&dn->dn_mtx);
1202 1204
1203 1205 if (db->db_blkid == DMU_SPILL_BLKID)
1204 1206 dn->dn_have_spill = B_TRUE;
1205 1207
1206 1208 /*
1207 1209 * If this buffer is already dirty, we're done.
1208 1210 */
1209 1211 drp = &db->db_last_dirty;
1210 1212 ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
1211 1213 db->db.db_object == DMU_META_DNODE_OBJECT);
1212 1214 while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
1213 1215 drp = &dr->dr_next;
1214 1216 if (dr && dr->dr_txg == tx->tx_txg) {
1215 1217 DB_DNODE_EXIT(db);
1216 1218
1217 1219 dbuf_redirty(dr);
1218 1220 mutex_exit(&db->db_mtx);
1219 1221 return (dr);
1220 1222 }
1221 1223
1222 1224 /*
1223 1225 * Only valid if not already dirty.
1224 1226 */
1225 1227 ASSERT(dn->dn_object == 0 ||
1226 1228 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
1227 1229 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
1228 1230
1229 1231 ASSERT3U(dn->dn_nlevels, >, db->db_level);
1230 1232 ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
1231 1233 dn->dn_phys->dn_nlevels > db->db_level ||
1232 1234 dn->dn_next_nlevels[txgoff] > db->db_level ||
1233 1235 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
1234 1236 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
1235 1237
1236 1238 /*
1237 1239 * We should only be dirtying in syncing context if it's the
1238 1240 * mos or we're initializing the os or it's a special object.
1239 1241 * However, we are allowed to dirty in syncing context provided
1240 1242 * we already dirtied it in open context. Hence we must make
1241 1243 * this assertion only if we're not already dirty.
1242 1244 */
1243 1245 os = dn->dn_objset;
1244 1246 ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
1245 1247 os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
1246 1248 ASSERT(db->db.db_size != 0);
1247 1249
1248 1250 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
1249 1251
1250 1252 if (db->db_blkid != DMU_BONUS_BLKID) {
1251 1253 /*
1252 1254 * Update the accounting.
1253 1255 * Note: we delay "free accounting" until after we drop
1254 1256 * the db_mtx. This keeps us from grabbing other locks
1255 1257 * (and possibly deadlocking) in bp_get_dsize() while
1256 1258 * also holding the db_mtx.
1257 1259 */
1258 1260 dnode_willuse_space(dn, db->db.db_size, tx);
1259 1261 do_free_accounting = dbuf_block_freeable(db);
1260 1262 }
1261 1263
1262 1264 /*
1263 1265 * If this buffer is dirty in an old transaction group we need
1264 1266 * to make a copy of it so that the changes we make in this
1265 1267 * transaction group won't leak out when we sync the older txg.
1266 1268 */
1267 1269 dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
1268 1270 if (db->db_level == 0) {
1269 1271 void *data_old = db->db_buf;
1270 1272
1271 1273 if (db->db_state != DB_NOFILL) {
1272 1274 if (db->db_blkid == DMU_BONUS_BLKID) {
1273 1275 dbuf_fix_old_data(db, tx->tx_txg);
1274 1276 data_old = db->db.db_data;
1275 1277 } else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
1276 1278 /*
1277 1279 * Release the data buffer from the cache so
1278 1280 * that we can modify it without impacting
1279 1281 * possible other users of this cached data
1280 1282 * block. Note that indirect blocks and
1281 1283 * private objects are not released until the
1282 1284 * syncing state (since they are only modified
1283 1285 * then).
1284 1286 */
1285 1287 arc_release(db->db_buf, db);
1286 1288 dbuf_fix_old_data(db, tx->tx_txg);
1287 1289 data_old = db->db_buf;
1288 1290 }
1289 1291 ASSERT(data_old != NULL);
1290 1292 }
1291 1293 dr->dt.dl.dr_data = data_old;
1292 1294 } else {
1293 1295 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
1294 1296 list_create(&dr->dt.di.dr_children,
1295 1297 sizeof (dbuf_dirty_record_t),
1296 1298 offsetof(dbuf_dirty_record_t, dr_dirty_node));
1297 1299 }
1298 1300 if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL)
1299 1301 dr->dr_accounted = db->db.db_size;
1300 1302 dr->dr_dbuf = db;
1301 1303 dr->dr_txg = tx->tx_txg;
1302 1304 dr->dr_next = *drp;
1303 1305 *drp = dr;
1304 1306
1305 1307 /*
1306 1308 * We could have been freed_in_flight between the dbuf_noread
1307 1309 * and dbuf_dirty. We win, as though the dbuf_noread() had
1308 1310 * happened after the free.
1309 1311 */
1310 1312 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1311 1313 db->db_blkid != DMU_SPILL_BLKID) {
1312 1314 mutex_enter(&dn->dn_mtx);
1313 1315 if (dn->dn_free_ranges[txgoff] != NULL) {
1314 1316 range_tree_clear(dn->dn_free_ranges[txgoff],
1315 1317 db->db_blkid, 1);
1316 1318 }
1317 1319 mutex_exit(&dn->dn_mtx);
1318 1320 db->db_freed_in_flight = FALSE;
1319 1321 }
1320 1322
1321 1323 /*
1322 1324 * This buffer is now part of this txg
1323 1325 */
1324 1326 dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
1325 1327 db->db_dirtycnt += 1;
1326 1328 ASSERT3U(db->db_dirtycnt, <=, 3);
1327 1329
1328 1330 mutex_exit(&db->db_mtx);
1329 1331
1330 1332 if (db->db_blkid == DMU_BONUS_BLKID ||
1331 1333 db->db_blkid == DMU_SPILL_BLKID) {
1332 1334 mutex_enter(&dn->dn_mtx);
1333 1335 ASSERT(!list_link_active(&dr->dr_dirty_node));
1334 1336 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
1335 1337 mutex_exit(&dn->dn_mtx);
1336 1338 dnode_setdirty(dn, tx);
1337 1339 DB_DNODE_EXIT(db);
1338 1340 return (dr);
1339 1341 } else if (do_free_accounting) {
1340 1342 blkptr_t *bp = db->db_blkptr;
1341 1343 int64_t willfree = (bp && !BP_IS_HOLE(bp)) ?
1342 1344 bp_get_dsize(os->os_spa, bp) : db->db.db_size;
1343 1345 /*
1344 1346 * This is only a guess -- if the dbuf is dirty
1345 1347 * in a previous txg, we don't know how much
1346 1348 * space it will use on disk yet. We should
1347 1349 * really have the struct_rwlock to access
1348 1350 * db_blkptr, but since this is just a guess,
1349 1351 * it's OK if we get an odd answer.
1350 1352 */
1351 1353 ddt_prefetch(os->os_spa, bp);
1352 1354 dnode_willuse_space(dn, -willfree, tx);
1353 1355 }
1354 1356
1355 1357 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
1356 1358 rw_enter(&dn->dn_struct_rwlock, RW_READER);
1357 1359 drop_struct_lock = TRUE;
1358 1360 }
1359 1361
1360 1362 if (db->db_level == 0) {
1361 1363 dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
1362 1364 ASSERT(dn->dn_maxblkid >= db->db_blkid);
1363 1365 }
1364 1366
1365 1367 if (db->db_level+1 < dn->dn_nlevels) {
1366 1368 dmu_buf_impl_t *parent = db->db_parent;
1367 1369 dbuf_dirty_record_t *di;
1368 1370 int parent_held = FALSE;
1369 1371
1370 1372 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
1371 1373 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1372 1374
1373 1375 parent = dbuf_hold_level(dn, db->db_level+1,
1374 1376 db->db_blkid >> epbs, FTAG);
1375 1377 ASSERT(parent != NULL);
1376 1378 parent_held = TRUE;
1377 1379 }
1378 1380 if (drop_struct_lock)
1379 1381 rw_exit(&dn->dn_struct_rwlock);
1380 1382 ASSERT3U(db->db_level+1, ==, parent->db_level);
1381 1383 di = dbuf_dirty(parent, tx);
1382 1384 if (parent_held)
1383 1385 dbuf_rele(parent, FTAG);
1384 1386
1385 1387 mutex_enter(&db->db_mtx);
1386 1388 /*
1387 1389 * Since we've dropped the mutex, it's possible that
1388 1390 * dbuf_undirty() might have changed this out from under us.
1389 1391 */
1390 1392 if (db->db_last_dirty == dr ||
1391 1393 dn->dn_object == DMU_META_DNODE_OBJECT) {
1392 1394 mutex_enter(&di->dt.di.dr_mtx);
1393 1395 ASSERT3U(di->dr_txg, ==, tx->tx_txg);
1394 1396 ASSERT(!list_link_active(&dr->dr_dirty_node));
1395 1397 list_insert_tail(&di->dt.di.dr_children, dr);
1396 1398 mutex_exit(&di->dt.di.dr_mtx);
1397 1399 dr->dr_parent = di;
1398 1400 }
1399 1401 mutex_exit(&db->db_mtx);
1400 1402 } else {
1401 1403 ASSERT(db->db_level+1 == dn->dn_nlevels);
1402 1404 ASSERT(db->db_blkid < dn->dn_nblkptr);
1403 1405 ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
1404 1406 mutex_enter(&dn->dn_mtx);
1405 1407 ASSERT(!list_link_active(&dr->dr_dirty_node));
1406 1408 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
1407 1409 mutex_exit(&dn->dn_mtx);
1408 1410 if (drop_struct_lock)
1409 1411 rw_exit(&dn->dn_struct_rwlock);
1410 1412 }
1411 1413
1412 1414 dnode_setdirty(dn, tx);
1413 1415 DB_DNODE_EXIT(db);
1414 1416 return (dr);
1415 1417 }
1416 1418
1417 1419 /*
1418 1420 * Undirty a buffer in the transaction group referenced by the given
1419 1421 * transaction. Return whether this evicted the dbuf.
1420 1422 */
1421 1423 static boolean_t
1422 1424 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1423 1425 {
1424 1426 dnode_t *dn;
1425 1427 uint64_t txg = tx->tx_txg;
1426 1428 dbuf_dirty_record_t *dr, **drp;
1427 1429
1428 1430 ASSERT(txg != 0);
1429 1431
1430 1432 /*
1431 1433 * Due to our use of dn_nlevels below, this can only be called
1432 1434 * in open context, unless we are operating on the MOS.
1433 1435 * From syncing context, dn_nlevels may be different from the
1434 1436 * dn_nlevels used when dbuf was dirtied.
1435 1437 */
1436 1438 ASSERT(db->db_objset ==
1437 1439 dmu_objset_pool(db->db_objset)->dp_meta_objset ||
1438 1440 txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
1439 1441 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1440 1442 ASSERT0(db->db_level);
1441 1443 ASSERT(MUTEX_HELD(&db->db_mtx));
1442 1444
1443 1445 /*
1444 1446 * If this buffer is not dirty, we're done.
1445 1447 */
1446 1448 for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
1447 1449 if (dr->dr_txg <= txg)
1448 1450 break;
1449 1451 if (dr == NULL || dr->dr_txg < txg)
1450 1452 return (B_FALSE);
1451 1453 ASSERT(dr->dr_txg == txg);
1452 1454 ASSERT(dr->dr_dbuf == db);
1453 1455
1454 1456 DB_DNODE_ENTER(db);
1455 1457 dn = DB_DNODE(db);
1456 1458
1457 1459 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
1458 1460
1459 1461 ASSERT(db->db.db_size != 0);
1460 1462
1461 1463 dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
1462 1464 dr->dr_accounted, txg);
1463 1465
1464 1466 *drp = dr->dr_next;
1465 1467
1466 1468 /*
1467 1469 * Note that there are three places in dbuf_dirty()
1468 1470 * where this dirty record may be put on a list.
1469 1471 * Make sure to do a list_remove corresponding to
1470 1472 * every one of those list_insert calls.
1471 1473 */
1472 1474 if (dr->dr_parent) {
1473 1475 mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
1474 1476 list_remove(&dr->dr_parent->dt.di.dr_children, dr);
1475 1477 mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
1476 1478 } else if (db->db_blkid == DMU_SPILL_BLKID ||
1477 1479 db->db_level + 1 == dn->dn_nlevels) {
1478 1480 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
1479 1481 mutex_enter(&dn->dn_mtx);
1480 1482 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
1481 1483 mutex_exit(&dn->dn_mtx);
1482 1484 }
1483 1485 DB_DNODE_EXIT(db);
1484 1486
1485 1487 if (db->db_state != DB_NOFILL) {
1486 1488 dbuf_unoverride(dr);
1487 1489
1488 1490 ASSERT(db->db_buf != NULL);
1489 1491 ASSERT(dr->dt.dl.dr_data != NULL);
1490 1492 if (dr->dt.dl.dr_data != db->db_buf)
1491 1493 VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data, db));
1492 1494 }
1493 1495
1494 1496 kmem_free(dr, sizeof (dbuf_dirty_record_t));
1495 1497
1496 1498 ASSERT(db->db_dirtycnt > 0);
1497 1499 db->db_dirtycnt -= 1;
1498 1500
1499 1501 if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
1500 1502 arc_buf_t *buf = db->db_buf;
1501 1503
1502 1504 ASSERT(db->db_state == DB_NOFILL || arc_released(buf));
1503 1505 dbuf_clear_data(db);
1504 1506 VERIFY(arc_buf_remove_ref(buf, db));
1505 1507 dbuf_evict(db);
1506 1508 return (B_TRUE);
1507 1509 }
1508 1510
1509 1511 return (B_FALSE);
1510 1512 }
1511 1513
1512 1514 void
1513 1515 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
1514 1516 {
1515 1517 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1516 1518 int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;
1517 1519
1518 1520 ASSERT(tx->tx_txg != 0);
1519 1521 ASSERT(!refcount_is_zero(&db->db_holds));
1520 1522
1521 1523 /*
1522 1524 * Quick check for dirtiness. For already dirty blocks, this
1523 1525 * reduces runtime of this function by >90%, and overall performance
1524 1526 * by 50% for some workloads (e.g. file deletion with indirect blocks
1525 1527 * cached).
1526 1528 */
1527 1529 mutex_enter(&db->db_mtx);
1528 1530 dbuf_dirty_record_t *dr;
1529 1531 for (dr = db->db_last_dirty;
1530 1532 dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) {
1531 1533 /*
1532 1534 * It's possible that it is already dirty but not cached,
1533 1535 * because there are some calls to dbuf_dirty() that don't
1534 1536 * go through dmu_buf_will_dirty().
1535 1537 */
1536 1538 if (dr->dr_txg == tx->tx_txg && db->db_state == DB_CACHED) {
1537 1539 /* This dbuf is already dirty and cached. */
1538 1540 dbuf_redirty(dr);
1539 1541 mutex_exit(&db->db_mtx);
1540 1542 return;
1541 1543 }
1542 1544 }
1543 1545 mutex_exit(&db->db_mtx);
1544 1546
1545 1547 DB_DNODE_ENTER(db);
1546 1548 if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
1547 1549 rf |= DB_RF_HAVESTRUCT;
1548 1550 DB_DNODE_EXIT(db);
1549 1551 (void) dbuf_read(db, NULL, rf);
1550 1552 (void) dbuf_dirty(db, tx);
1551 1553 }
1552 1554
1553 1555 void
1554 1556 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
1555 1557 {
1556 1558 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1557 1559
1558 1560 db->db_state = DB_NOFILL;
1559 1561
1560 1562 dmu_buf_will_fill(db_fake, tx);
1561 1563 }
1562 1564
1563 1565 void
1564 1566 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
1565 1567 {
1566 1568 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1567 1569
1568 1570 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1569 1571 ASSERT(tx->tx_txg != 0);
1570 1572 ASSERT(db->db_level == 0);
1571 1573 ASSERT(!refcount_is_zero(&db->db_holds));
1572 1574
1573 1575 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
1574 1576 dmu_tx_private_ok(tx));
1575 1577
1576 1578 dbuf_noread(db);
1577 1579 (void) dbuf_dirty(db, tx);
1578 1580 }
1579 1581
1580 1582 #pragma weak dmu_buf_fill_done = dbuf_fill_done
1581 1583 /* ARGSUSED */
1582 1584 void
1583 1585 dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
1584 1586 {
1585 1587 mutex_enter(&db->db_mtx);
1586 1588 DBUF_VERIFY(db);
1587 1589
1588 1590 if (db->db_state == DB_FILL) {
1589 1591 if (db->db_level == 0 && db->db_freed_in_flight) {
1590 1592 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1591 1593 /* we were freed while filling */
1592 1594 /* XXX dbuf_undirty? */
1593 1595 bzero(db->db.db_data, db->db.db_size);
1594 1596 db->db_freed_in_flight = FALSE;
1595 1597 }
1596 1598 db->db_state = DB_CACHED;
1597 1599 cv_broadcast(&db->db_changed);
1598 1600 }
1599 1601 mutex_exit(&db->db_mtx);
1600 1602 }
1601 1603
1602 1604 void
1603 1605 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
1604 1606 bp_embedded_type_t etype, enum zio_compress comp,
1605 1607 int uncompressed_size, int compressed_size, int byteorder,
1606 1608 dmu_tx_t *tx)
1607 1609 {
1608 1610 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
1609 1611 struct dirty_leaf *dl;
1610 1612 dmu_object_type_t type;
1611 1613
1612 1614 if (etype == BP_EMBEDDED_TYPE_DATA) {
1613 1615 ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
1614 1616 SPA_FEATURE_EMBEDDED_DATA));
1615 1617 }
1616 1618
1617 1619 DB_DNODE_ENTER(db);
1618 1620 type = DB_DNODE(db)->dn_type;
1619 1621 DB_DNODE_EXIT(db);
1620 1622
1621 1623 ASSERT0(db->db_level);
1622 1624 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1623 1625
1624 1626 dmu_buf_will_not_fill(dbuf, tx);
1625 1627
1626 1628 ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
1627 1629 dl = &db->db_last_dirty->dt.dl;
1628 1630 encode_embedded_bp_compressed(&dl->dr_overridden_by,
1629 1631 data, comp, uncompressed_size, compressed_size);
1630 1632 BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
1631 1633 BP_SET_TYPE(&dl->dr_overridden_by, type);
1632 1634 BP_SET_LEVEL(&dl->dr_overridden_by, 0);
1633 1635 BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);
1634 1636
1635 1637 dl->dr_override_state = DR_OVERRIDDEN;
1636 1638 dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg;
1637 1639 }
1638 1640
1639 1641 /*
1640 1642 * Directly assign a provided arc buf to a given dbuf if it's not referenced
1641 1643 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
1642 1644 */
1643 1645 void
1644 1646 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
1645 1647 {
1646 1648 ASSERT(!refcount_is_zero(&db->db_holds));
1647 1649 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1648 1650 ASSERT(db->db_level == 0);
1649 1651 ASSERT(DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA);
1650 1652 ASSERT(buf != NULL);
1651 1653 ASSERT(arc_buf_size(buf) == db->db.db_size);
1652 1654 ASSERT(tx->tx_txg != 0);
1653 1655
1654 1656 arc_return_buf(buf, db);
1655 1657 ASSERT(arc_released(buf));
1656 1658
1657 1659 mutex_enter(&db->db_mtx);
1658 1660
1659 1661 while (db->db_state == DB_READ || db->db_state == DB_FILL)
1660 1662 cv_wait(&db->db_changed, &db->db_mtx);
1661 1663
1662 1664 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
1663 1665
1664 1666 if (db->db_state == DB_CACHED &&
1665 1667 refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
1666 1668 mutex_exit(&db->db_mtx);
1667 1669 (void) dbuf_dirty(db, tx);
1668 1670 bcopy(buf->b_data, db->db.db_data, db->db.db_size);
1669 1671 VERIFY(arc_buf_remove_ref(buf, db));
1670 1672 xuio_stat_wbuf_copied();
1671 1673 return;
1672 1674 }
1673 1675
1674 1676 xuio_stat_wbuf_nocopy();
1675 1677 if (db->db_state == DB_CACHED) {
1676 1678 dbuf_dirty_record_t *dr = db->db_last_dirty;
1677 1679
1678 1680 ASSERT(db->db_buf != NULL);
1679 1681 if (dr != NULL && dr->dr_txg == tx->tx_txg) {
1680 1682 ASSERT(dr->dt.dl.dr_data == db->db_buf);
1681 1683 if (!arc_released(db->db_buf)) {
1682 1684 ASSERT(dr->dt.dl.dr_override_state ==
1683 1685 DR_OVERRIDDEN);
1684 1686 arc_release(db->db_buf, db);
1685 1687 }
1686 1688 dr->dt.dl.dr_data = buf;
1687 1689 VERIFY(arc_buf_remove_ref(db->db_buf, db));
1688 1690 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
1689 1691 arc_release(db->db_buf, db);
1690 1692 VERIFY(arc_buf_remove_ref(db->db_buf, db));
1691 1693 }
1692 1694 db->db_buf = NULL;
1693 1695 }
1694 1696 ASSERT(db->db_buf == NULL);
1695 1697 dbuf_set_data(db, buf);
1696 1698 db->db_state = DB_FILL;
1697 1699 mutex_exit(&db->db_mtx);
1698 1700 (void) dbuf_dirty(db, tx);
1699 1701 dmu_buf_fill_done(&db->db, tx);
1700 1702 }
1701 1703
1702 1704 /*
1703 1705 * "Clear" the contents of this dbuf. This will mark the dbuf
1704 1706 * EVICTING and clear *most* of its references. Unfortunately,
1705 1707 * when we are not holding the dn_dbufs_mtx, we can't clear the
1706 1708 * entry in the dn_dbufs list. We have to wait until dbuf_destroy()
1707 1709 * in this case. For callers from the DMU we will usually see:
1708 1710 * dbuf_clear()->arc_clear_callback()->dbuf_do_evict()->dbuf_destroy()
1709 1711 * For the arc callback, we will usually see:
1710 1712 * dbuf_do_evict()->dbuf_clear();dbuf_destroy()
1711 1713 * Sometimes, though, we will get a mix of these two:
1712 1714 * DMU: dbuf_clear()->arc_clear_callback()
1713 1715 * ARC: dbuf_do_evict()->dbuf_destroy()
1714 1716 *
1715 1717 * This routine will dissociate the dbuf from the arc, by calling
1716 1718 * arc_clear_callback(), but will not evict the data from the ARC.
1717 1719 */
1718 1720 void
1719 1721 dbuf_clear(dmu_buf_impl_t *db)
1720 1722 {
1721 1723 dnode_t *dn;
1722 1724 dmu_buf_impl_t *parent = db->db_parent;
1723 1725 dmu_buf_impl_t *dndb;
1724 1726 boolean_t dbuf_gone = B_FALSE;
1725 1727
1726 1728 ASSERT(MUTEX_HELD(&db->db_mtx));
1727 1729 ASSERT(refcount_is_zero(&db->db_holds));
1728 1730
1729 1731 dbuf_evict_user(db);
1730 1732
1731 1733 if (db->db_state == DB_CACHED) {
1732 1734 ASSERT(db->db.db_data != NULL);
1733 1735 if (db->db_blkid == DMU_BONUS_BLKID) {
1734 1736 zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN);
1735 1737 arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
1736 1738 }
1737 1739 db->db.db_data = NULL;
1738 1740 db->db_state = DB_UNCACHED;
1739 1741 }
1740 1742
1741 1743 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
1742 1744 ASSERT(db->db_data_pending == NULL);
1743 1745
1744 1746 db->db_state = DB_EVICTING;
1745 1747 db->db_blkptr = NULL;
1746 1748
1747 1749 DB_DNODE_ENTER(db);
1748 1750 dn = DB_DNODE(db);
1749 1751 dndb = dn->dn_dbuf;
1750 1752 if (db->db_blkid != DMU_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) {
1751 1753 avl_remove(&dn->dn_dbufs, db);
1752 1754 atomic_dec_32(&dn->dn_dbufs_count);
1753 1755 membar_producer();
1754 1756 DB_DNODE_EXIT(db);
1755 1757 /*
1756 1758 * Decrementing the dbuf count means that the hold corresponding
1757 1759 * to the removed dbuf is no longer discounted in dnode_move(),
1758 1760 * so the dnode cannot be moved until after we release the hold.
1759 1761 * The membar_producer() ensures visibility of the decremented
1760 1762 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
1761 1763 * release any lock.
1762 1764 */
1763 1765 dnode_rele(dn, db);
1764 1766 db->db_dnode_handle = NULL;
1765 1767 } else {
1766 1768 DB_DNODE_EXIT(db);
1767 1769 }
1768 1770
1769 1771 if (db->db_buf)
1770 1772 dbuf_gone = arc_clear_callback(db->db_buf);
1771 1773
1772 1774 if (!dbuf_gone)
1773 1775 mutex_exit(&db->db_mtx);
1774 1776
1775 1777 /*
1776 1778 * If this dbuf is referenced from an indirect dbuf,
1777 1779 * decrement the ref count on the indirect dbuf.
1778 1780 */
1779 1781 if (parent && parent != dndb)
1780 1782 dbuf_rele(parent, db);
1781 1783 }
1782 1784
1783 1785 /*
1784 1786 * Note: While bpp will always be updated if the function returns success,
1785 1787 * parentp will not be updated if the dnode does not have dn_dbuf filled in;
1786 1788 * this happens when the dnode is the meta-dnode, or a userused or groupused
1787 1789 * object.
1788 1790 */
1789 1791 static int
1790 1792 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
1791 1793 dmu_buf_impl_t **parentp, blkptr_t **bpp)
1792 1794 {
1793 1795 int nlevels, epbs;
1794 1796
1795 1797 *parentp = NULL;
1796 1798 *bpp = NULL;
1797 1799
1798 1800 ASSERT(blkid != DMU_BONUS_BLKID);
1799 1801
1800 1802 if (blkid == DMU_SPILL_BLKID) {
1801 1803 mutex_enter(&dn->dn_mtx);
1802 1804 if (dn->dn_have_spill &&
1803 1805 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
1804 1806 *bpp = &dn->dn_phys->dn_spill;
1805 1807 else
1806 1808 *bpp = NULL;
1807 1809 dbuf_add_ref(dn->dn_dbuf, NULL);
1808 1810 *parentp = dn->dn_dbuf;
1809 1811 mutex_exit(&dn->dn_mtx);
1810 1812 return (0);
1811 1813 }
1812 1814
1813 1815 if (dn->dn_phys->dn_nlevels == 0)
1814 1816 nlevels = 1;
1815 1817 else
1816 1818 nlevels = dn->dn_phys->dn_nlevels;
1817 1819
1818 1820 epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1819 1821
1820 1822 ASSERT3U(level * epbs, <, 64);
1821 1823 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1822 1824 if (level >= nlevels ||
1823 1825 (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
1824 1826 /* the buffer has no parent yet */
1825 1827 return (SET_ERROR(ENOENT));
1826 1828 } else if (level < nlevels-1) {
1827 1829 /* this block is referenced from an indirect block */
1828 1830 int err = dbuf_hold_impl(dn, level+1,
1829 1831 blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
1830 1832 if (err)
1831 1833 return (err);
1832 1834 err = dbuf_read(*parentp, NULL,
1833 1835 (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
1834 1836 if (err) {
1835 1837 dbuf_rele(*parentp, NULL);
1836 1838 *parentp = NULL;
1837 1839 return (err);
1838 1840 }
1839 1841 *bpp = ((blkptr_t *)(*parentp)->db.db_data) +
1840 1842 (blkid & ((1ULL << epbs) - 1));
1841 1843 return (0);
1842 1844 } else {
1843 1845 /* the block is referenced from the dnode */
1844 1846 ASSERT3U(level, ==, nlevels-1);
1845 1847 ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
1846 1848 blkid < dn->dn_phys->dn_nblkptr);
1847 1849 if (dn->dn_dbuf) {
1848 1850 dbuf_add_ref(dn->dn_dbuf, NULL);
1849 1851 *parentp = dn->dn_dbuf;
1850 1852 }
1851 1853 *bpp = &dn->dn_phys->dn_blkptr[blkid];
1852 1854 return (0);
1853 1855 }
1854 1856 }
1855 1857
1856 1858 static dmu_buf_impl_t *
1857 1859 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
1858 1860 dmu_buf_impl_t *parent, blkptr_t *blkptr)
1859 1861 {
1860 1862 objset_t *os = dn->dn_objset;
1861 1863 dmu_buf_impl_t *db, *odb;
1862 1864
1863 1865 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1864 1866 ASSERT(dn->dn_type != DMU_OT_NONE);
1865 1867
1866 1868 db = kmem_cache_alloc(dbuf_cache, KM_SLEEP);
1867 1869
1868 1870 db->db_objset = os;
1869 1871 db->db.db_object = dn->dn_object;
1870 1872 db->db_level = level;
1871 1873 db->db_blkid = blkid;
1872 1874 db->db_last_dirty = NULL;
1873 1875 db->db_dirtycnt = 0;
1874 1876 db->db_dnode_handle = dn->dn_handle;
1875 1877 db->db_parent = parent;
1876 1878 db->db_blkptr = blkptr;
1877 1879
1878 1880 db->db_user = NULL;
1879 1881 db->db_user_immediate_evict = FALSE;
1880 1882 db->db_freed_in_flight = FALSE;
1881 1883 db->db_pending_evict = FALSE;
1882 1884
1883 1885 if (blkid == DMU_BONUS_BLKID) {
1884 1886 ASSERT3P(parent, ==, dn->dn_dbuf);
1885 1887 db->db.db_size = DN_MAX_BONUSLEN -
1886 1888 (dn->dn_nblkptr-1) * sizeof (blkptr_t);
1887 1889 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
1888 1890 db->db.db_offset = DMU_BONUS_BLKID;
1889 1891 db->db_state = DB_UNCACHED;
1890 1892 /* the bonus dbuf is not placed in the hash table */
1891 1893 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1892 1894 return (db);
1893 1895 } else if (blkid == DMU_SPILL_BLKID) {
1894 1896 db->db.db_size = (blkptr != NULL) ?
1895 1897 BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
1896 1898 db->db.db_offset = 0;
1897 1899 } else {
1898 1900 int blocksize =
1899 1901 db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
1900 1902 db->db.db_size = blocksize;
1901 1903 db->db.db_offset = db->db_blkid * blocksize;
1902 1904 }
1903 1905
1904 1906 /*
1905 1907 * Hold the dn_dbufs_mtx while we insert the new dbuf
1906 1908 * into the hash table *and* add it to the dbufs list.
1907 1909 * This prevents a possible deadlock with someone
1908 1910 * trying to look up this dbuf before it's added to the
1909 1911 * dn_dbufs list.
1910 1912 */
1911 1913 mutex_enter(&dn->dn_dbufs_mtx);
1912 1914 db->db_state = DB_EVICTING;
1913 1915 if ((odb = dbuf_hash_insert(db)) != NULL) {
1914 1916 /* someone else inserted it first */
1915 1917 kmem_cache_free(dbuf_cache, db);
1916 1918 mutex_exit(&dn->dn_dbufs_mtx);
1917 1919 return (odb);
1918 1920 }
1919 1921 avl_add(&dn->dn_dbufs, db);
1920 1922 if (db->db_level == 0 && db->db_blkid >=
1921 1923 dn->dn_unlisted_l0_blkid)
1922 1924 dn->dn_unlisted_l0_blkid = db->db_blkid + 1;
1923 1925 db->db_state = DB_UNCACHED;
1924 1926 mutex_exit(&dn->dn_dbufs_mtx);
1925 1927 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1926 1928
1927 1929 if (parent && parent != dn->dn_dbuf)
1928 1930 dbuf_add_ref(parent, db);
1929 1931
1930 1932 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
1931 1933 refcount_count(&dn->dn_holds) > 0);
1932 1934 (void) refcount_add(&dn->dn_holds, db);
1933 1935 atomic_inc_32(&dn->dn_dbufs_count);
1934 1936
1935 1937 dprintf_dbuf(db, "db=%p\n", db);
1936 1938
1937 1939 return (db);
1938 1940 }
1939 1941
1940 1942 static int
1941 1943 dbuf_do_evict(void *private)
1942 1944 {
1943 1945 dmu_buf_impl_t *db = private;
1944 1946
1945 1947 if (!MUTEX_HELD(&db->db_mtx))
1946 1948 mutex_enter(&db->db_mtx);
1947 1949
1948 1950 ASSERT(refcount_is_zero(&db->db_holds));
1949 1951
1950 1952 if (db->db_state != DB_EVICTING) {
1951 1953 ASSERT(db->db_state == DB_CACHED);
1952 1954 DBUF_VERIFY(db);
1953 1955 db->db_buf = NULL;
1954 1956 dbuf_evict(db);
1955 1957 } else {
1956 1958 mutex_exit(&db->db_mtx);
1957 1959 dbuf_destroy(db);
1958 1960 }
1959 1961 return (0);
1960 1962 }
1961 1963
1962 1964 static void
1963 1965 dbuf_destroy(dmu_buf_impl_t *db)
1964 1966 {
1965 1967 ASSERT(refcount_is_zero(&db->db_holds));
1966 1968
1967 1969 if (db->db_blkid != DMU_BONUS_BLKID) {
1968 1970 /*
1969 1971 * If this dbuf is still on the dn_dbufs list,
1970 1972 * remove it from that list.
1971 1973 */
1972 1974 if (db->db_dnode_handle != NULL) {
1973 1975 dnode_t *dn;
1974 1976
1975 1977 DB_DNODE_ENTER(db);
1976 1978 dn = DB_DNODE(db);
1977 1979 mutex_enter(&dn->dn_dbufs_mtx);
1978 1980 avl_remove(&dn->dn_dbufs, db);
1979 1981 atomic_dec_32(&dn->dn_dbufs_count);
1980 1982 mutex_exit(&dn->dn_dbufs_mtx);
1981 1983 DB_DNODE_EXIT(db);
1982 1984 /*
1983 1985 * Decrementing the dbuf count means that the hold
1984 1986 * corresponding to the removed dbuf is no longer
1985 1987 * discounted in dnode_move(), so the dnode cannot be
1986 1988 * moved until after we release the hold.
1987 1989 */
1988 1990 dnode_rele(dn, db);
1989 1991 db->db_dnode_handle = NULL;
1990 1992 }
1991 1993 dbuf_hash_remove(db);
1992 1994 }
1993 1995 db->db_parent = NULL;
1994 1996 db->db_buf = NULL;
1995 1997
1996 1998 ASSERT(db->db.db_data == NULL);
1997 1999 ASSERT(db->db_hash_next == NULL);
1998 2000 ASSERT(db->db_blkptr == NULL);
1999 2001 ASSERT(db->db_data_pending == NULL);
2000 2002
2001 2003 kmem_cache_free(dbuf_cache, db);
2002 2004 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
2003 2005 }
2004 2006
2005 2007 typedef struct dbuf_prefetch_arg {
2006 2008 spa_t *dpa_spa; /* The spa to issue the prefetch in. */
2007 2009 zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
2008 2010 int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
2009 2011 int dpa_curlevel; /* The current level that we're reading */
2010 2012 zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
2011 2013 zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
2012 2014 arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
2013 2015 } dbuf_prefetch_arg_t;
2014 2016
2015 2017 /*
2016 2018 * Actually issue the prefetch read for the block given.
2017 2019 */
2018 2020 static void
2019 2021 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
2020 2022 {
2021 2023 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
2022 2024 return;
2023 2025
2024 2026 arc_flags_t aflags =
2025 2027 dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;
2026 2028
2027 2029 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
2028 2030 ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
2029 2031 ASSERT(dpa->dpa_zio != NULL);
2030 2032 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, NULL, NULL,
2031 2033 dpa->dpa_prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
2032 2034 &aflags, &dpa->dpa_zb);
2033 2035 }
2034 2036
2035 2037 /*
2036 2038 * Called when an indirect block above our prefetch target is read in. This
2037 2039 * will either read in the next indirect block down the tree or issue the actual
2038 2040 * prefetch if the next block down is our target.
2039 2041 */
2040 2042 static void
2041 2043 dbuf_prefetch_indirect_done(zio_t *zio, arc_buf_t *abuf, void *private)
2042 2044 {
2043 2045 dbuf_prefetch_arg_t *dpa = private;
2044 2046
2045 2047 ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
2046 2048 ASSERT3S(dpa->dpa_curlevel, >, 0);
2047 2049 if (zio != NULL) {
2048 2050 ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
2049 2051 ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
2050 2052 ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
2051 2053 }
2052 2054
2053 2055 dpa->dpa_curlevel--;
2054 2056
2055 2057 uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
2056 2058 (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
2057 2059 blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
2058 2060 P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
2059 2061 if (BP_IS_HOLE(bp) || (zio != NULL && zio->io_error != 0)) {
2060 2062 kmem_free(dpa, sizeof (*dpa));
2061 2063 } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
2062 2064 ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
2063 2065 dbuf_issue_final_prefetch(dpa, bp);
2064 2066 kmem_free(dpa, sizeof (*dpa));
2065 2067 } else {
2066 2068 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
2067 2069 zbookmark_phys_t zb;
2068 2070
2069 2071 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
2070 2072
2071 2073 SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
2072 2074 dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
2073 2075
2074 2076 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
2075 2077 bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio,
2076 2078 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
2077 2079 &iter_aflags, &zb);
2078 2080 }
2079 2081 (void) arc_buf_remove_ref(abuf, private);
2080 2082 }
2081 2083
2082 2084 /*
2083 2085 * Issue prefetch reads for the given block on the given level. If the indirect
2084 2086 * blocks above that block are not in memory, we will read them in
2085 2087 * asynchronously. As a result, this call never blocks waiting for a read to
2086 2088 * complete.
2087 2089 */
2088 2090 void
2089 2091 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
2090 2092 arc_flags_t aflags)
2091 2093 {
2092 2094 blkptr_t bp;
2093 2095 int epbs, nlevels, curlevel;
2094 2096 uint64_t curblkid;
2095 2097
2096 2098 ASSERT(blkid != DMU_BONUS_BLKID);
2097 2099 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
2098 2100
2099 2101 if (blkid > dn->dn_maxblkid)
2100 2102 return;
2101 2103
2102 2104 if (dnode_block_freed(dn, blkid))
2103 2105 return;
2104 2106
2105 2107 /*
2106 2108 * This dnode hasn't been written to disk yet, so there's nothing to
2107 2109 * prefetch.
2108 2110 */
2109 2111 nlevels = dn->dn_phys->dn_nlevels;
2110 2112 if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
2111 2113 return;
2112 2114
2113 2115 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2114 2116 if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
2115 2117 return;
2116 2118
2117 2119 dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
2118 2120 level, blkid);
2119 2121 if (db != NULL) {
2120 2122 mutex_exit(&db->db_mtx);
2121 2123 /*
2122 2124 * This dbuf already exists. It is either CACHED, or
2123 2125 * (we assume) about to be read or filled.
2124 2126 */
2125 2127 return;
2126 2128 }
2127 2129
2128 2130 /*
2129 2131 * Find the closest ancestor (indirect block) of the target block
2130 2132 * that is present in the cache. In this indirect block, we will
2131 2133 * find the bp that is at curlevel, curblkid.
2132 2134 */
2133 2135 curlevel = level;
2134 2136 curblkid = blkid;
2135 2137 while (curlevel < nlevels - 1) {
2136 2138 int parent_level = curlevel + 1;
2137 2139 uint64_t parent_blkid = curblkid >> epbs;
2138 2140 dmu_buf_impl_t *db;
2139 2141
2140 2142 if (dbuf_hold_impl(dn, parent_level, parent_blkid,
2141 2143 FALSE, TRUE, FTAG, &db) == 0) {
2142 2144 blkptr_t *bpp = db->db_buf->b_data;
2143 2145 bp = bpp[P2PHASE(curblkid, 1 << epbs)];
2144 2146 dbuf_rele(db, FTAG);
2145 2147 break;
2146 2148 }
2147 2149
2148 2150 curlevel = parent_level;
2149 2151 curblkid = parent_blkid;
2150 2152 }
2151 2153
2152 2154 if (curlevel == nlevels - 1) {
2153 2155 /* No cached indirect blocks found. */
2154 2156 ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
2155 2157 bp = dn->dn_phys->dn_blkptr[curblkid];
2156 2158 }
2157 2159 if (BP_IS_HOLE(&bp))
2158 2160 return;
2159 2161
2160 2162 ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
2161 2163
2162 2164 zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
2163 2165 ZIO_FLAG_CANFAIL);
2164 2166
2165 2167 dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
2166 2168 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
2167 2169 SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
2168 2170 dn->dn_object, level, blkid);
2169 2171 dpa->dpa_curlevel = curlevel;
2170 2172 dpa->dpa_prio = prio;
2171 2173 dpa->dpa_aflags = aflags;
2172 2174 dpa->dpa_spa = dn->dn_objset->os_spa;
2173 2175 dpa->dpa_epbs = epbs;
2174 2176 dpa->dpa_zio = pio;
2175 2177
2176 2178 /*
2177 2179 * If we have the indirect just above us, no need to do the asynchronous
2178 2180 * prefetch chain; we'll just run the last step ourselves. If we're at
2179 2181 * a higher level, though, we want to issue the prefetches for all the
2180 2182 * indirect blocks asynchronously, so we can go on with whatever we were
2181 2183 * doing.
2182 2184 */
2183 2185 if (curlevel == level) {
2184 2186 ASSERT3U(curblkid, ==, blkid);
2185 2187 dbuf_issue_final_prefetch(dpa, &bp);
2186 2188 kmem_free(dpa, sizeof (*dpa));
2187 2189 } else {
2188 2190 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
2189 2191 zbookmark_phys_t zb;
2190 2192
2191 2193 SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
2192 2194 dn->dn_object, curlevel, curblkid);
2193 2195 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
2194 2196 &bp, dbuf_prefetch_indirect_done, dpa, prio,
2195 2197 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
2196 2198 &iter_aflags, &zb);
2197 2199 }
2198 2200 /*
2199 2201 * We use pio here instead of dpa_zio since it's possible that
2200 2202 * dpa may have already been freed.
2201 2203 */
2202 2204 zio_nowait(pio);
2203 2205 }
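/*
 * Illustrative usage sketch (editorial note, not part of this change):
 * a DMU-internal caller that already holds the dnode might issue a
 * level-0 prefetch like this; dn and blkid are hypothetical, and the
 * zero aflags value simply requests no extra ARC flags.
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	dbuf_prefetch(dn, 0, blkid, ZIO_PRIORITY_SYNC_READ, 0);
 *	rw_exit(&dn->dn_struct_rwlock);
 *
 * The call returns without blocking; any uncached indirect blocks on
 * the way down are read asynchronously and completed by
 * dbuf_prefetch_indirect_done().
 */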
2204 2206
2205 2207 /*
2206 2208 * Returns with db_holds incremented, and db_mtx not held.
2207 2209 * Note: dn_struct_rwlock must be held.
2208 2210 */
2209 2211 int
2210 2212 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
2211 2213 boolean_t fail_sparse, boolean_t fail_uncached,
2212 2214 void *tag, dmu_buf_impl_t **dbp)
2213 2215 {
2214 2216 dmu_buf_impl_t *db, *parent = NULL;
2215 2217
2216 2218 ASSERT(blkid != DMU_BONUS_BLKID);
2217 2219 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
2218 2220 ASSERT3U(dn->dn_nlevels, >, level);
2219 2221
2220 2222 *dbp = NULL;
2221 2223 top:
2222 2224 /* dbuf_find() returns with db_mtx held */
2223 2225 db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid);
2224 2226
2225 2227 if (db == NULL) {
2226 2228 blkptr_t *bp = NULL;
2227 2229 int err;
2228 2230
2229 2231 if (fail_uncached)
2230 2232 return (SET_ERROR(ENOENT));
2231 2233
2232 2234 ASSERT3P(parent, ==, NULL);
2233 2235 err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
2234 2236 if (fail_sparse) {
2235 2237 if (err == 0 && bp && BP_IS_HOLE(bp))
2236 2238 err = SET_ERROR(ENOENT);
2237 2239 if (err) {
2238 2240 if (parent)
2239 2241 dbuf_rele(parent, NULL);
2240 2242 return (err);
2241 2243 }
2242 2244 }
2243 2245 if (err && err != ENOENT)
2244 2246 return (err);
2245 2247 db = dbuf_create(dn, level, blkid, parent, bp);
2246 2248 }
2247 2249
2248 2250 if (fail_uncached && db->db_state != DB_CACHED) {
2249 2251 mutex_exit(&db->db_mtx);
2250 2252 return (SET_ERROR(ENOENT));
2251 2253 }
2252 2254
2253 2255 if (db->db_buf && refcount_is_zero(&db->db_holds)) {
2254 2256 arc_buf_add_ref(db->db_buf, db);
2255 2257 if (db->db_buf->b_data == NULL) {
2256 2258 dbuf_clear(db);
2257 2259 if (parent) {
2258 2260 dbuf_rele(parent, NULL);
2259 2261 parent = NULL;
2260 2262 }
2261 2263 goto top;
2262 2264 }
2263 2265 ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
2264 2266 }
2265 2267
2266 2268 ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
2267 2269
2268 2270 /*
2269 2271 * If this buffer is currently syncing out, and we are
2270 2272 * still referencing it from db_data, we need to make a copy
2271 2273 * of it in case we decide we want to dirty it again in this txg.
2272 2274 */
2273 2275 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
2274 2276 dn->dn_object != DMU_META_DNODE_OBJECT &&
2275 2277 db->db_state == DB_CACHED && db->db_data_pending) {
2276 2278 dbuf_dirty_record_t *dr = db->db_data_pending;
2277 2279
2278 2280 if (dr->dt.dl.dr_data == db->db_buf) {
2279 2281 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2280 2282
2281 2283 dbuf_set_data(db,
2282 2284 arc_buf_alloc(dn->dn_objset->os_spa,
2283 2285 db->db.db_size, db, type));
2284 2286 bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data,
2285 2287 db->db.db_size);
2286 2288 }
2287 2289 }
2288 2290
2289 2291 (void) refcount_add(&db->db_holds, tag);
2290 2292 DBUF_VERIFY(db);
2291 2293 mutex_exit(&db->db_mtx);
2292 2294
2293 2295 /* NOTE: we can't rele the parent until after we drop the db_mtx */
2294 2296 if (parent)
2295 2297 dbuf_rele(parent, NULL);
2296 2298
2297 2299 ASSERT3P(DB_DNODE(db), ==, dn);
2298 2300 ASSERT3U(db->db_blkid, ==, blkid);
2299 2301 ASSERT3U(db->db_level, ==, level);
2300 2302 *dbp = db;
2301 2303
2302 2304 return (0);
2303 2305 }
2304 2306
2305 2307 dmu_buf_impl_t *
2306 2308 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
2307 2309 {
2308 2310 return (dbuf_hold_level(dn, 0, blkid, tag));
2309 2311 }
2310 2312
2311 2313 dmu_buf_impl_t *
2312 2314 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
2313 2315 {
2314 2316 dmu_buf_impl_t *db;
2315 2317 int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
2316 2318 return (err ? NULL : db);
2317 2319 }
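/*
 * Illustrative usage sketch (editorial note, not part of this change):
 * holds taken via dbuf_hold()/dbuf_hold_level() are balanced by
 * dbuf_rele() with the same tag; dn, blkid and epbs are hypothetical,
 * and dn_struct_rwlock is assumed held as required above.
 *
 *	dmu_buf_impl_t *db = dbuf_hold_level(dn, 1, blkid >> epbs, FTAG);
 *	if (db != NULL) {
 *		... inspect db->db.db_data ...
 *		dbuf_rele(db, FTAG);
 *	}
 */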
2318 2320
2319 2321 void
2320 2322 dbuf_create_bonus(dnode_t *dn)
2321 2323 {
2322 2324 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
2323 2325
2324 2326 ASSERT(dn->dn_bonus == NULL);
2325 2327 dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
2326 2328 }
2327 2329
2328 2330 int
2329 2331 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
2330 2332 {
2331 2333 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2332 2334 dnode_t *dn;
2333 2335
2334 2336 if (db->db_blkid != DMU_SPILL_BLKID)
2335 2337 return (SET_ERROR(ENOTSUP));
2336 2338 if (blksz == 0)
2337 2339 blksz = SPA_MINBLOCKSIZE;
2338 2340 ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
2339 2341 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
2340 2342
2341 2343 DB_DNODE_ENTER(db);
2342 2344 dn = DB_DNODE(db);
2343 2345 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
2344 2346 dbuf_new_size(db, blksz, tx);
2345 2347 rw_exit(&dn->dn_struct_rwlock);
2346 2348 DB_DNODE_EXIT(db);
2347 2349
2348 2350 return (0);
2349 2351 }
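/*
 * Editorial note, worked example: P2ROUNDUP() above rounds the
 * requested spill block size up to a multiple of SPA_MINBLOCKSIZE
 * (512 bytes), so a request of 1000 bytes becomes 1024, while a
 * request of 0 is first bumped to SPA_MINBLOCKSIZE and stays 512.
 */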
2350 2352
2351 2353 void
2352 2354 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
2353 2355 {
2354 2356 dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
2355 2357 }
2356 2358
2357 2359 #pragma weak dmu_buf_add_ref = dbuf_add_ref
2358 2360 void
2359 2361 dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
2360 2362 {
2361 2363 int64_t holds = refcount_add(&db->db_holds, tag);
2362 2364 ASSERT(holds > 1);
2363 2365 }
2364 2366
2365 2367 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
2366 2368 boolean_t
2367 2369 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
2368 2370 void *tag)
2369 2371 {
2370 2372 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2371 2373 dmu_buf_impl_t *found_db;
2372 2374 boolean_t result = B_FALSE;
2373 2375
2374 2376 if (db->db_blkid == DMU_BONUS_BLKID)
2375 2377 found_db = dbuf_find_bonus(os, obj);
2376 2378 else
2377 2379 found_db = dbuf_find(os, obj, 0, blkid);
2378 2380
2379 2381 if (found_db != NULL) {
2380 2382 if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
2381 2383 (void) refcount_add(&db->db_holds, tag);
2382 2384 result = B_TRUE;
2383 2385 }
2384 2386 mutex_exit(&db->db_mtx);
2385 2387 }
2386 2388 return (result);
2387 2389 }
2388 2390
2389 2391 /*
2390 2392 * If you call dbuf_rele() you had better not be referencing the dnode handle
2391 2393 * unless you have some other direct or indirect hold on the dnode. (An indirect
2392 2394 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
2393 2395 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
2394 2396 * dnode's parent dbuf evicting its dnode handles.
2395 2397 */
2396 2398 void
2397 2399 dbuf_rele(dmu_buf_impl_t *db, void *tag)
2398 2400 {
2399 2401 mutex_enter(&db->db_mtx);
2400 2402 dbuf_rele_and_unlock(db, tag);
2401 2403 }
2402 2404
2403 2405 void
2404 2406 dmu_buf_rele(dmu_buf_t *db, void *tag)
2405 2407 {
2406 2408 dbuf_rele((dmu_buf_impl_t *)db, tag);
2407 2409 }
2408 2410
2409 2411 /*
2410 2412 * dbuf_rele() for an already-locked dbuf. This is necessary to allow
2411 2413 * db_dirtycnt and db_holds to be updated atomically.
2412 2414 */
2413 2415 void
2414 2416 dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
2415 2417 {
2416 2418 int64_t holds;
2417 2419
2418 2420 ASSERT(MUTEX_HELD(&db->db_mtx));
2419 2421 DBUF_VERIFY(db);
2420 2422
2421 2423 /*
2422 2424 * Remove the reference to the dbuf before removing its hold on the
2423 2425 * dnode so we can guarantee in dnode_move() that a referenced bonus
2424 2426 * buffer has a corresponding dnode hold.
2425 2427 */
2426 2428 holds = refcount_remove(&db->db_holds, tag);
2427 2429 ASSERT(holds >= 0);
2428 2430
2429 2431 /*
2430 2432 * We can't freeze indirects if there is a possibility that they
2431 2433 * may be modified in the current syncing context.
2432 2434 */
2433 2435 if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0))
2434 2436 arc_buf_freeze(db->db_buf);
2435 2437
2436 2438 if (holds == db->db_dirtycnt &&
2437 2439 db->db_level == 0 && db->db_user_immediate_evict)
2438 2440 dbuf_evict_user(db);
2439 2441
2440 2442 if (holds == 0) {
2441 2443 if (db->db_blkid == DMU_BONUS_BLKID) {
2442 2444 dnode_t *dn;
2443 2445 boolean_t evict_dbuf = db->db_pending_evict;
2444 2446
2445 2447 /*
2446 2448 * If the dnode moves here, we cannot cross this
2447 2449 * barrier until the move completes.
2448 2450 */
2449 2451 DB_DNODE_ENTER(db);
2450 2452
2451 2453 dn = DB_DNODE(db);
2452 2454 atomic_dec_32(&dn->dn_dbufs_count);
2453 2455
2454 2456 /*
2455 2457 * Decrementing the dbuf count means that the bonus
2456 2458 * buffer's dnode hold is no longer discounted in
2457 2459 * dnode_move(). The dnode cannot move until after
2458 2460 * the dnode_rele() below.
2459 2461 */
2460 2462 DB_DNODE_EXIT(db);
2461 2463
2462 2464 /*
2463 2465 * Do not reference db after its lock is dropped.
2464 2466 * Another thread may evict it.
2465 2467 */
2466 2468 mutex_exit(&db->db_mtx);
2467 2469
2468 2470 if (evict_dbuf)
2469 2471 dnode_evict_bonus(dn);
2470 2472
2471 2473 dnode_rele(dn, db);
2472 2474 } else if (db->db_buf == NULL) {
2473 2475 /*
2474 2476 * This is a special case: we never associated this
2475 2477 * dbuf with any data allocated from the ARC.
2476 2478 */
2477 2479 ASSERT(db->db_state == DB_UNCACHED ||
2478 2480 db->db_state == DB_NOFILL);
2479 2481 dbuf_evict(db);
2480 2482 } else if (arc_released(db->db_buf)) {
2481 2483 arc_buf_t *buf = db->db_buf;
2482 2484 /*
2483 2485 * This dbuf has anonymous data associated with it.
2484 2486 */
2485 2487 dbuf_clear_data(db);
2486 2488 VERIFY(arc_buf_remove_ref(buf, db));
2487 2489 dbuf_evict(db);
2488 2490 } else {
2489 2491 VERIFY(!arc_buf_remove_ref(db->db_buf, db));
2490 2492
2491 2493 /*
2492 2494 * A dbuf will be eligible for eviction if either the
2493 2495 * 'primarycache' property is set or a duplicate
2494 2496 * copy of this buffer is already cached in the arc.
2495 2497 *
2496 2498 * In the case of the 'primarycache' a buffer
2497 2499 * is considered for eviction if it matches the
2498 2500 * criteria set in the property.
2499 2501 *
2500 2502 * To decide if our buffer is considered a
2501 2503 * duplicate, we must call into the arc to determine
2502 2504 * if multiple buffers are referencing the same
2503 2505 * block on-disk. If so, then we simply evict
2504 2506 * ourselves.
2505 2507 */
2506 2508 if (!DBUF_IS_CACHEABLE(db)) {
2507 2509 if (db->db_blkptr != NULL &&
2508 2510 !BP_IS_HOLE(db->db_blkptr) &&
2509 2511 !BP_IS_EMBEDDED(db->db_blkptr)) {
2510 2512 spa_t *spa =
2511 2513 dmu_objset_spa(db->db_objset);
2512 2514 blkptr_t bp = *db->db_blkptr;
2513 2515 dbuf_clear(db);
2514 2516 arc_freed(spa, &bp);
2515 2517 } else {
2516 2518 dbuf_clear(db);
2517 2519 }
2518 2520 } else if (db->db_pending_evict ||
2519 2521 arc_buf_eviction_needed(db->db_buf)) {
2520 2522 dbuf_clear(db);
2521 2523 } else {
2522 2524 mutex_exit(&db->db_mtx);
2523 2525 }
2524 2526 }
2525 2527 } else {
2526 2528 mutex_exit(&db->db_mtx);
2527 2529 }
2528 2530 }
2529 2531
2530 2532 #pragma weak dmu_buf_refcount = dbuf_refcount
2531 2533 uint64_t
2532 2534 dbuf_refcount(dmu_buf_impl_t *db)
2533 2535 {
2534 2536 return (refcount_count(&db->db_holds));
2535 2537 }
2536 2538
2537 2539 void *
2538 2540 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
2539 2541 dmu_buf_user_t *new_user)
2540 2542 {
2541 2543 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2542 2544
2543 2545 mutex_enter(&db->db_mtx);
2544 2546 dbuf_verify_user(db, DBVU_NOT_EVICTING);
2545 2547 if (db->db_user == old_user)
2546 2548 db->db_user = new_user;
2547 2549 else
2548 2550 old_user = db->db_user;
2549 2551 dbuf_verify_user(db, DBVU_NOT_EVICTING);
2550 2552 mutex_exit(&db->db_mtx);
2551 2553
2552 2554 return (old_user);
2553 2555 }
2554 2556
2555 2557 void *
2556 2558 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
2557 2559 {
2558 2560 return (dmu_buf_replace_user(db_fake, NULL, user));
2559 2561 }
2560 2562
2561 2563 void *
2562 2564 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
2563 2565 {
2564 2566 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2565 2567
2566 2568 db->db_user_immediate_evict = TRUE;
2567 2569 return (dmu_buf_set_user(db_fake, user));
2568 2570 }
2569 2571
2570 2572 void *
2571 2573 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
2572 2574 {
2573 2575 return (dmu_buf_replace_user(db_fake, user, NULL));
2574 2576 }
2575 2577
2576 2578 void *
2577 2579 dmu_buf_get_user(dmu_buf_t *db_fake)
2578 2580 {
2579 2581 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2580 2582
2581 2583 dbuf_verify_user(db, DBVU_NOT_EVICTING);
2582 2584 return (db->db_user);
2583 2585 }
2584 2586
2585 2587 void
2586 2588 dmu_buf_user_evict_wait()
2587 2589 {
2588 2590 taskq_wait(dbu_evict_taskq);
2589 2591 }
2590 2592
2591 2593 boolean_t
2592 2594 dmu_buf_freeable(dmu_buf_t *dbuf)
2593 2595 {
2594 2596 boolean_t res = B_FALSE;
2595 2597 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2596 2598
2597 2599 if (db->db_blkptr)
2598 2600 res = dsl_dataset_block_freeable(db->db_objset->os_dsl_dataset,
2599 2601 db->db_blkptr, db->db_blkptr->blk_birth);
2600 2602
2601 2603 return (res);
2602 2604 }
2603 2605
2604 2606 blkptr_t *
2605 2607 dmu_buf_get_blkptr(dmu_buf_t *db)
2606 2608 {
2607 2609 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
2608 2610 return (dbi->db_blkptr);
2609 2611 }
2610 2612
2611 2613 static void
2612 2614 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
2613 2615 {
2614 2616 /* ASSERT(dmu_tx_is_syncing(tx)) */
2615 2617 ASSERT(MUTEX_HELD(&db->db_mtx));
2616 2618
2617 2619 if (db->db_blkptr != NULL)
2618 2620 return;
2619 2621
2620 2622 if (db->db_blkid == DMU_SPILL_BLKID) {
2621 2623 db->db_blkptr = &dn->dn_phys->dn_spill;
2622 2624 BP_ZERO(db->db_blkptr);
2623 2625 return;
2624 2626 }
2625 2627 if (db->db_level == dn->dn_phys->dn_nlevels-1) {
2626 2628 /*
2627 2629 * This buffer was allocated at a time when there were
2628 2630 * no available blkptrs from the dnode, or it was
2629 2631 * inappropriate to hook it in (i.e., nlevels mismatch).
2630 2632 */
2631 2633 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
2632 2634 ASSERT(db->db_parent == NULL);
2633 2635 db->db_parent = dn->dn_dbuf;
2634 2636 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
2635 2637 DBUF_VERIFY(db);
2636 2638 } else {
2637 2639 dmu_buf_impl_t *parent = db->db_parent;
2638 2640 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2639 2641
2640 2642 ASSERT(dn->dn_phys->dn_nlevels > 1);
2641 2643 if (parent == NULL) {
2642 2644 mutex_exit(&db->db_mtx);
2643 2645 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2644 2646 parent = dbuf_hold_level(dn, db->db_level + 1,
2645 2647 db->db_blkid >> epbs, db);
2646 2648 rw_exit(&dn->dn_struct_rwlock);
2647 2649 mutex_enter(&db->db_mtx);
2648 2650 db->db_parent = parent;
2649 2651 }
2650 2652 db->db_blkptr = (blkptr_t *)parent->db.db_data +
2651 2653 (db->db_blkid & ((1ULL << epbs) - 1));
2652 2654 DBUF_VERIFY(db);
2653 2655 }
2654 2656 }
2655 2657
2656 2658 static void
2657 2659 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2658 2660 {
2659 2661 dmu_buf_impl_t *db = dr->dr_dbuf;
2660 2662 dnode_t *dn;
2661 2663 zio_t *zio;
2662 2664
2663 2665 ASSERT(dmu_tx_is_syncing(tx));
2664 2666
2665 2667 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2666 2668
2667 2669 mutex_enter(&db->db_mtx);
2668 2670
2669 2671 ASSERT(db->db_level > 0);
2670 2672 DBUF_VERIFY(db);
2671 2673
2672 2674 /* Read the block if it hasn't been read yet. */
2673 2675 if (db->db_buf == NULL) {
2674 2676 mutex_exit(&db->db_mtx);
2675 2677 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
2676 2678 mutex_enter(&db->db_mtx);
2677 2679 }
2678 2680 ASSERT3U(db->db_state, ==, DB_CACHED);
2679 2681 ASSERT(db->db_buf != NULL);
2680 2682
2681 2683 DB_DNODE_ENTER(db);
2682 2684 dn = DB_DNODE(db);
2683 2685 /* Indirect block size must match what the dnode thinks it is. */
2684 2686 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2685 2687 dbuf_check_blkptr(dn, db);
2686 2688 DB_DNODE_EXIT(db);
2687 2689
2688 2690 /* Provide the pending dirty record to child dbufs */
2689 2691 db->db_data_pending = dr;
2690 2692
2691 2693 mutex_exit(&db->db_mtx);
2692 2694 dbuf_write(dr, db->db_buf, tx);
2693 2695
2694 2696 zio = dr->dr_zio;
2695 2697 mutex_enter(&dr->dt.di.dr_mtx);
2696 2698 dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
2697 2699 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
2698 2700 mutex_exit(&dr->dt.di.dr_mtx);
2699 2701 zio_nowait(zio);
2700 2702 }
2701 2703
2702 2704 static void
2703 2705 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2704 2706 {
2705 2707 arc_buf_t **datap = &dr->dt.dl.dr_data;
2706 2708 dmu_buf_impl_t *db = dr->dr_dbuf;
2707 2709 dnode_t *dn;
2708 2710 objset_t *os;
2709 2711 uint64_t txg = tx->tx_txg;
2710 2712
2711 2713 ASSERT(dmu_tx_is_syncing(tx));
2712 2714
2713 2715 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2714 2716
2715 2717 mutex_enter(&db->db_mtx);
2716 2718 /*
2717 2719 * To be synced, we must be dirtied. But we
2718 2720 * might have been freed after the dirty.
2719 2721 */
2720 2722 if (db->db_state == DB_UNCACHED) {
2721 2723 /* This buffer has been freed since it was dirtied */
2722 2724 ASSERT(db->db.db_data == NULL);
2723 2725 } else if (db->db_state == DB_FILL) {
2724 2726 /* This buffer was freed and is now being re-filled */
2725 2727 ASSERT(db->db.db_data != dr->dt.dl.dr_data);
2726 2728 } else {
2727 2729 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
2728 2730 }
2729 2731 DBUF_VERIFY(db);
2730 2732
2731 2733 DB_DNODE_ENTER(db);
2732 2734 dn = DB_DNODE(db);
2733 2735
2734 2736 if (db->db_blkid == DMU_SPILL_BLKID) {
2735 2737 mutex_enter(&dn->dn_mtx);
2736 2738 dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
2737 2739 mutex_exit(&dn->dn_mtx);
2738 2740 }
2739 2741
2740 2742 /*
2741 2743 * If this is a bonus buffer, simply copy the bonus data into the
2742 2744 * dnode. It will be written out when the dnode is synced (and it
2743 2745 * will be synced, since it must have been dirty for dbuf_sync to
2744 2746 * be called).
2745 2747 */
2746 2748 if (db->db_blkid == DMU_BONUS_BLKID) {
2747 2749 dbuf_dirty_record_t **drp;
2748 2750
2749 2751 ASSERT(*datap != NULL);
2750 2752 ASSERT0(db->db_level);
2751 2753 ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
2752 2754 bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
2753 2755 DB_DNODE_EXIT(db);
2754 2756
2755 2757 if (*datap != db->db.db_data) {
2756 2758 zio_buf_free(*datap, DN_MAX_BONUSLEN);
2757 2759 arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
2758 2760 }
2759 2761 db->db_data_pending = NULL;
2760 2762 drp = &db->db_last_dirty;
2761 2763 while (*drp != dr)
2762 2764 drp = &(*drp)->dr_next;
2763 2765 ASSERT(dr->dr_next == NULL);
2764 2766 ASSERT(dr->dr_dbuf == db);
2765 2767 *drp = dr->dr_next;
2766 2768 kmem_free(dr, sizeof (dbuf_dirty_record_t));
2767 2769 ASSERT(db->db_dirtycnt > 0);
2768 2770 db->db_dirtycnt -= 1;
2769 2771 dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
2770 2772 return;
2771 2773 }
2772 2774
2773 2775 os = dn->dn_objset;
2774 2776
2775 2777 /*
2776 2778 * This function may have dropped the db_mtx lock allowing a dmu_sync
2777 2779 * operation to sneak in. As a result, we need to ensure that we
2778 2780 * don't check the dr_override_state until we have returned from
2779 2781 * dbuf_check_blkptr.
2780 2782 */
2781 2783 dbuf_check_blkptr(dn, db);
2782 2784
2783 2785 /*
2784 2786 * If this buffer is in the middle of an immediate write,
2785 2787 * wait for the synchronous IO to complete.
2786 2788 */
2787 2789 while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
2788 2790 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
2789 2791 cv_wait(&db->db_changed, &db->db_mtx);
2790 2792 ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
2791 2793 }
2792 2794
2793 2795 if (db->db_state != DB_NOFILL &&
2794 2796 dn->dn_object != DMU_META_DNODE_OBJECT &&
2795 2797 refcount_count(&db->db_holds) > 1 &&
2796 2798 dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
2797 2799 *datap == db->db_buf) {
2798 2800 /*
2799 2801 * If this buffer is currently "in use" (i.e., there
2800 2802 * are active holds and db_data still references it),
2801 2803 * then make a copy before we start the write so that
2802 2804 * any modifications from the open txg will not leak
2803 2805 * into this write.
2804 2806 *
2805 2807 * NOTE: this copy does not need to be made for
2806 2808 * objects only modified in the syncing context (e.g.
2807 2809 * DNODE blocks).
2808 2810 */
2809 2811 int blksz = arc_buf_size(*datap);
2810 2812 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2811 2813 *datap = arc_buf_alloc(os->os_spa, blksz, db, type);
2812 2814 bcopy(db->db.db_data, (*datap)->b_data, blksz);
2813 2815 }
2814 2816 db->db_data_pending = dr;
2815 2817
2816 2818 mutex_exit(&db->db_mtx);
2817 2819
2818 2820 dbuf_write(dr, *datap, tx);
2819 2821
2820 2822 ASSERT(!list_link_active(&dr->dr_dirty_node));
2821 2823 if (dn->dn_object == DMU_META_DNODE_OBJECT) {
2822 2824 list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
2823 2825 DB_DNODE_EXIT(db);
2824 2826 } else {
2825 2827 /*
2826 2828 * Although zio_nowait() does not "wait for an IO", it does
2827 2829 * initiate the IO. If this is an empty write it seems plausible
2828 2830 * that the IO could actually be completed before the nowait
2829 2831 * returns. We need to DB_DNODE_EXIT() first in case
2830 2832 * zio_nowait() invalidates the dbuf.
2831 2833 */
2832 2834 DB_DNODE_EXIT(db);
2833 2835 zio_nowait(dr->dr_zio);
2834 2836 }
2835 2837 }
2836 2838
2837 2839 void
2838 2840 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
2839 2841 {
2840 2842 dbuf_dirty_record_t *dr;
2841 2843
2842 2844 while (dr = list_head(list)) {
2843 2845 if (dr->dr_zio != NULL) {
2844 2846 /*
2845 2847 * If we find an already initialized zio then we
2846 2848 * are processing the meta-dnode, and we have finished.
2847 2849 * The dbufs for all dnodes are put back on the list
2848 2850 * during processing, so that we can zio_wait()
2849 2851 * these IOs after initiating all child IOs.
2850 2852 */
2851 2853 ASSERT3U(dr->dr_dbuf->db.db_object, ==,
2852 2854 DMU_META_DNODE_OBJECT);
2853 2855 break;
2854 2856 }
2855 2857 if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
2856 2858 dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
2857 2859 VERIFY3U(dr->dr_dbuf->db_level, ==, level);
2858 2860 }
2859 2861 list_remove(list, dr);
2860 2862 if (dr->dr_dbuf->db_level > 0)
2861 2863 dbuf_sync_indirect(dr, tx);
2862 2864 else
2863 2865 dbuf_sync_leaf(dr, tx);
2864 2866 }
2865 2867 }
2866 2868
2867 2869 /* ARGSUSED */
2868 2870 static void
2869 2871 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
2870 2872 {
2871 2873 dmu_buf_impl_t *db = vdb;
2872 2874 dnode_t *dn;
2873 2875 blkptr_t *bp = zio->io_bp;
2874 2876 blkptr_t *bp_orig = &zio->io_bp_orig;
2875 2877 spa_t *spa = zio->io_spa;
2876 2878 int64_t delta;
2877 2879 uint64_t fill = 0;
2878 2880 int i;
2879 2881
2880 2882 ASSERT3P(db->db_blkptr, ==, bp);
2881 2883
2882 2884 DB_DNODE_ENTER(db);
2883 2885 dn = DB_DNODE(db);
2884 2886 delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
2885 2887 dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
2886 2888 zio->io_prev_space_delta = delta;
2887 2889
2888 2890 if (bp->blk_birth != 0) {
2889 2891 ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
2890 2892 BP_GET_TYPE(bp) == dn->dn_type) ||
2891 2893 (db->db_blkid == DMU_SPILL_BLKID &&
2892 2894 BP_GET_TYPE(bp) == dn->dn_bonustype) ||
2893 2895 BP_IS_EMBEDDED(bp));
2894 2896 ASSERT(BP_GET_LEVEL(bp) == db->db_level);
2895 2897 }
2896 2898
2897 2899 mutex_enter(&db->db_mtx);
2898 2900
2899 2901 #ifdef ZFS_DEBUG
2900 2902 if (db->db_blkid == DMU_SPILL_BLKID) {
2901 2903 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
2902 2904 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
2903 2905 db->db_blkptr == &dn->dn_phys->dn_spill);
2904 2906 }
2905 2907 #endif
2906 2908
2907 2909 if (db->db_level == 0) {
2908 2910 mutex_enter(&dn->dn_mtx);
2909 2911 if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
2910 2912 db->db_blkid != DMU_SPILL_BLKID)
2911 2913 dn->dn_phys->dn_maxblkid = db->db_blkid;
2912 2914 mutex_exit(&dn->dn_mtx);
2913 2915
2914 2916 if (dn->dn_type == DMU_OT_DNODE) {
2915 2917 dnode_phys_t *dnp = db->db.db_data;
2916 2918 for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
2917 2919 i--, dnp++) {
2918 2920 if (dnp->dn_type != DMU_OT_NONE)
2919 2921 fill++;
2920 2922 }
2921 2923 } else {
2922 2924 if (BP_IS_HOLE(bp)) {
2923 2925 fill = 0;
2924 2926 } else {
2925 2927 fill = 1;
2926 2928 }
2927 2929 }
2928 2930 } else {
2929 2931 blkptr_t *ibp = db->db.db_data;
2930 2932 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2931 2933 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
2932 2934 if (BP_IS_HOLE(ibp))
2933 2935 continue;
2934 2936 fill += BP_GET_FILL(ibp);
2935 2937 }
2936 2938 }
2937 2939 DB_DNODE_EXIT(db);
2938 2940
2939 2941 if (!BP_IS_EMBEDDED(bp))
2940 2942 bp->blk_fill = fill;
2941 2943
2942 2944 mutex_exit(&db->db_mtx);
2943 2945 }
2944 2946
2945 2947 /*
2946 2948 * The SPA will call this callback several times for each zio - once
2947 2949 * for every physical child i/o (zio->io_phys_children times). This
2948 2950 * allows the DMU to monitor the progress of each logical i/o. For example,
2949 2951 * there may be 2 copies of an indirect block, or many fragments of a RAID-Z
2950 2952 * block. There may be a long delay before all copies/fragments are completed,
2951 2953 * so this callback allows us to retire dirty space gradually, as the physical
2952 2954 * i/os complete.
2953 2955 */
2954 2956 /* ARGSUSED */
2955 2957 static void
2956 2958 dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
2957 2959 {
2958 2960 dmu_buf_impl_t *db = arg;
2959 2961 objset_t *os = db->db_objset;
2960 2962 dsl_pool_t *dp = dmu_objset_pool(os);
2961 2963 dbuf_dirty_record_t *dr;
2962 2964 int delta = 0;
2963 2965
2964 2966 dr = db->db_data_pending;
2965 2967 ASSERT3U(dr->dr_txg, ==, zio->io_txg);
2966 2968
2967 2969 /*
2968 2970 * The callback will be called io_phys_children times. Retire one
2969 2971 * portion of our dirty space each time we are called. Any rounding
2970 2972 * error will be cleaned up by dsl_pool_sync()'s call to
2971 2973 * dsl_pool_undirty_space().
2972 2974 */
2973 2975 delta = dr->dr_accounted / zio->io_phys_children;
2974 2976 dsl_pool_undirty_space(dp, delta, zio->io_txg);
2975 2977 }
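/*
 * Editorial note, worked example: if a dirty record accounted for
 * 393216 bytes (dr_accounted) and the logical write fans out into
 * three physical child i/os (io_phys_children == 3), each invocation
 * of this callback undirties 393216 / 3 == 131072 bytes; when the
 * division is not exact, the leftover bytes are reconciled later by
 * dsl_pool_sync() via dsl_pool_undirty_space().
 */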
2976 2978
2977 2979 /* ARGSUSED */
2978 2980 static void
2979 2981 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
2980 2982 {
2981 2983 dmu_buf_impl_t *db = vdb;
2982 2984 blkptr_t *bp_orig = &zio->io_bp_orig;
2983 2985 blkptr_t *bp = db->db_blkptr;
2984 2986 objset_t *os = db->db_objset;
2985 2987 dmu_tx_t *tx = os->os_synctx;
2986 2988 dbuf_dirty_record_t **drp, *dr;
2987 2989
2988 2990 ASSERT0(zio->io_error);
2989 2991 ASSERT(db->db_blkptr == bp);
2990 2992
2991 2993 /*
2992 2994 * For nopwrites and rewrites we ensure that the bp matches our
2993 2995 * original and bypass all the accounting.
2994 2996 */
2995 2997 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
2996 2998 ASSERT(BP_EQUAL(bp, bp_orig));
2997 2999 } else {
2998 3000 dsl_dataset_t *ds = os->os_dsl_dataset;
2999 3001 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
3000 3002 dsl_dataset_block_born(ds, bp, tx);
3001 3003 }
3002 3004
3003 3005 mutex_enter(&db->db_mtx);
3004 3006
3005 3007 DBUF_VERIFY(db);
3006 3008
3007 3009 drp = &db->db_last_dirty;
3008 3010 while ((dr = *drp) != db->db_data_pending)
3009 3011 drp = &dr->dr_next;
3010 3012 ASSERT(!list_link_active(&dr->dr_dirty_node));
3011 3013 ASSERT(dr->dr_dbuf == db);
3012 3014 ASSERT(dr->dr_next == NULL);
3013 3015 *drp = dr->dr_next;
3014 3016
3015 3017 #ifdef ZFS_DEBUG
3016 3018 if (db->db_blkid == DMU_SPILL_BLKID) {
3017 3019 dnode_t *dn;
3018 3020
3019 3021 DB_DNODE_ENTER(db);
3020 3022 dn = DB_DNODE(db);
3021 3023 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
3022 3024 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
3023 3025 db->db_blkptr == &dn->dn_phys->dn_spill);
3024 3026 DB_DNODE_EXIT(db);
3025 3027 }
3026 3028 #endif
3027 3029
3028 3030 if (db->db_level == 0) {
3029 3031 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
3030 3032 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
3031 3033 if (db->db_state != DB_NOFILL) {
3032 3034 if (dr->dt.dl.dr_data != db->db_buf)
3033 3035 VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
3034 3036 db));
3035 3037 else if (!arc_released(db->db_buf))
3036 3038 arc_set_callback(db->db_buf, dbuf_do_evict, db);
3037 3039 }
3038 3040 } else {
3039 3041 dnode_t *dn;
3040 3042
3041 3043 DB_DNODE_ENTER(db);
3042 3044 dn = DB_DNODE(db);
3043 3045 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
3044 3046 ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
3045 3047 if (!BP_IS_HOLE(db->db_blkptr)) {
3046 3048 int epbs =
3047 3049 dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
3048 3050 ASSERT3U(db->db_blkid, <=,
3049 3051 dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
3050 3052 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
3051 3053 db->db.db_size);
3052 3054 if (!arc_released(db->db_buf))
3053 3055 arc_set_callback(db->db_buf, dbuf_do_evict, db);
3054 3056 }
3055 3057 DB_DNODE_EXIT(db);
3056 3058 mutex_destroy(&dr->dt.di.dr_mtx);
3057 3059 list_destroy(&dr->dt.di.dr_children);
3058 3060 }
3059 3061 kmem_free(dr, sizeof (dbuf_dirty_record_t));
3060 3062
3061 3063 cv_broadcast(&db->db_changed);
3062 3064 ASSERT(db->db_dirtycnt > 0);
3063 3065 db->db_dirtycnt -= 1;
3064 3066 db->db_data_pending = NULL;
3065 3067 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg);
3066 3068 }
3067 3069
3068 3070 static void
3069 3071 dbuf_write_nofill_ready(zio_t *zio)
3070 3072 {
3071 3073 dbuf_write_ready(zio, NULL, zio->io_private);
3072 3074 }
3073 3075
3074 3076 static void
3075 3077 dbuf_write_nofill_done(zio_t *zio)
3076 3078 {
3077 3079 dbuf_write_done(zio, NULL, zio->io_private);
3078 3080 }
3079 3081
3080 3082 static void
3081 3083 dbuf_write_override_ready(zio_t *zio)
3082 3084 {
3083 3085 dbuf_dirty_record_t *dr = zio->io_private;
3084 3086 dmu_buf_impl_t *db = dr->dr_dbuf;
3085 3087
3086 3088 dbuf_write_ready(zio, NULL, db);
3087 3089 }
3088 3090
3089 3091 static void
3090 3092 dbuf_write_override_done(zio_t *zio)
3091 3093 {
3092 3094 dbuf_dirty_record_t *dr = zio->io_private;
3093 3095 dmu_buf_impl_t *db = dr->dr_dbuf;
3094 3096 blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
3095 3097
3096 3098 mutex_enter(&db->db_mtx);
3097 3099 if (!BP_EQUAL(zio->io_bp, obp)) {
3098 3100 if (!BP_IS_HOLE(obp))
3099 3101 dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
3100 3102 arc_release(dr->dt.dl.dr_data, db);
3101 3103 }
3102 3104 mutex_exit(&db->db_mtx);
3103 3105
3104 3106 dbuf_write_done(zio, NULL, db);
3105 3107 }
3106 3108
3107 3109 /* Issue I/O to commit a dirty buffer to disk. */
3108 3110 static void
3109 3111 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
3110 3112 {
3111 3113 dmu_buf_impl_t *db = dr->dr_dbuf;
3112 3114 dnode_t *dn;
3113 3115 objset_t *os;
3114 3116 dmu_buf_impl_t *parent = db->db_parent;
3115 3117 uint64_t txg = tx->tx_txg;
3116 3118 zbookmark_phys_t zb;
3117 3119 zio_prop_t zp;
3118 3120 zio_t *zio;
3119 3121 int wp_flag = 0;
3120 3122
3121 3123 DB_DNODE_ENTER(db);
3122 3124 dn = DB_DNODE(db);
3123 3125 os = dn->dn_objset;
3124 3126
3125 3127 if (db->db_state != DB_NOFILL) {
3126 3128 if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
3127 3129 /*
3128 3130 * Private object buffers are released here rather
3129 3131 * than in dbuf_dirty() since they are only modified
3130 3132 * in the syncing context and we don't want the
3131 3133 * overhead of making multiple copies of the data.
3132 3134 */
3133 3135 if (BP_IS_HOLE(db->db_blkptr)) {
3134 3136 arc_buf_thaw(data);
3135 3137 } else {
3136 3138 dbuf_release_bp(db);
3137 3139 }
3138 3140 }
3139 3141 }
3140 3142
3141 3143 if (parent != dn->dn_dbuf) {
3142 3144 /* Our parent is an indirect block. */
3143 3145 /* We have a dirty parent that has been scheduled for write. */
3144 3146 ASSERT(parent && parent->db_data_pending);
3145 3147 /* Our parent's buffer is one level closer to the dnode. */
3146 3148 ASSERT(db->db_level == parent->db_level-1);
3147 3149 /*
3148 3150 * We're about to modify our parent's db_data by modifying
3149 3151 * our block pointer, so the parent must be released.
3150 3152 */
3151 3153 ASSERT(arc_released(parent->db_buf));
3152 3154 zio = parent->db_data_pending->dr_zio;
3153 3155 } else {
3154 3156 /* Our parent is the dnode itself. */
3155 3157 ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
3156 3158 db->db_blkid != DMU_SPILL_BLKID) ||
3157 3159 (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
3158 3160 if (db->db_blkid != DMU_SPILL_BLKID)
3159 3161 ASSERT3P(db->db_blkptr, ==,
3160 3162 &dn->dn_phys->dn_blkptr[db->db_blkid]);
3161 3163 zio = dn->dn_zio;
3162 3164 }
3163 3165
3164 3166 ASSERT(db->db_level == 0 || data == db->db_buf);
3165 3167 ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
3166 3168 ASSERT(zio);
3167 3169
3168 3170 SET_BOOKMARK(&zb, os->os_dsl_dataset ?
3169 3171 os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
3170 3172 db->db.db_object, db->db_level, db->db_blkid);
3171 3173
3172 3174 if (db->db_blkid == DMU_SPILL_BLKID)
3173 3175 wp_flag = WP_SPILL;
3174 3176 wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
3175 3177
3176 3178 dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
3177 3179 DB_DNODE_EXIT(db);
3178 3180
3179 3181 if (db->db_level == 0 &&
3180 3182 dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
3181 3183 /*
3182 3184 * The BP for this block has been provided by open context
3183 3185 * (by dmu_sync() or dmu_buf_write_embedded()).
3184 3186 */
3185 3187 void *contents = (data != NULL) ? data->b_data : NULL;
3186 3188
3187 3189 dr->dr_zio = zio_write(zio, os->os_spa, txg,
3188 3190 db->db_blkptr, contents, db->db.db_size, &zp,
3189 3191 dbuf_write_override_ready, NULL, dbuf_write_override_done,
3190 3192 dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
3191 3193 mutex_enter(&db->db_mtx);
3192 3194 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
3193 3195 zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
3194 3196 dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
3195 3197 mutex_exit(&db->db_mtx);
3196 3198 } else if (db->db_state == DB_NOFILL) {
3197 3199 ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
3198 3200 zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
3199 3201 dr->dr_zio = zio_write(zio, os->os_spa, txg,
3200 3202 db->db_blkptr, NULL, db->db.db_size, &zp,
3201 3203 dbuf_write_nofill_ready, NULL, dbuf_write_nofill_done, db,
3202 3204 ZIO_PRIORITY_ASYNC_WRITE,
3203 3205 ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
3204 3206 } else {
3205 3207 ASSERT(arc_released(data));
3206 3208 dr->dr_zio = arc_write(zio, os->os_spa, txg,
3207 3209 db->db_blkptr, data, DBUF_IS_L2CACHEABLE(db),
3208 3210 DBUF_IS_L2COMPRESSIBLE(db), &zp, dbuf_write_ready,
3209 3211 dbuf_write_physdone, dbuf_write_done, db,
3210 3212 ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
3211 3213 }
3212 3214 }
2281 lines elided