/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/dsl_pool.h>
#include <sys/zio.h>
#include <sys/space_map.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>

/*
 * Note on space map block size:
 *
 * The data for a given space map can be kept on blocks of any size.
 * Larger blocks entail fewer I/O operations, but they also cause the
 * DMU to keep more data in-core and to waste more I/O bandwidth when
 * only a few blocks have changed since the last transaction group.
 */

/*
 * Enabled whenever we want to stress test the use of double-word
 * space map entries.
 */
boolean_t zfs_force_some_double_word_sm_entries = B_FALSE;

/*
 * Override the default indirect block size of 128K, instead using 16K
 * (2^14 bytes) for spacemaps. This dramatically reduces write inflation
 * since appending to a spacemap typically has to write one data block
 * (4KB) and one or two indirect blocks (16K-32K, rather than 128K).
 */
int space_map_ibs = 14;
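
/*
 * For a rough illustration of the savings (the exact numbers depend on
 * how many levels of indirection the object has): appending one 4K data
 * block through two levels of indirection dirties about
 * 4K + 2 * 16K = 36K per txg with 16K indirect blocks, versus
 * 4K + 2 * 128K = 260K with the default.
 */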
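/*
 * Entry-type predicates. The top two bits of an entry word distinguish
 * the kinds (see the entry layout diagrams in space_map.h): a debug
 * entry carries SM_DEBUG_PREFIX, the first word of a two-word entry
 * carries SM2_PREFIX, and a single-word entry has a zero high bit, so
 * its decoded prefix matches neither.
 */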
boolean_t
sm_entry_is_debug(uint64_t e)
{
	return (SM_PREFIX_DECODE(e) == SM_DEBUG_PREFIX);
}

boolean_t
sm_entry_is_single_word(uint64_t e)
{
	uint8_t prefix = SM_PREFIX_DECODE(e);
	return (prefix != SM_DEBUG_PREFIX && prefix != SM2_PREFIX);
}

boolean_t
sm_entry_is_double_word(uint64_t e)
{
	return (SM_PREFIX_DECODE(e) == SM2_PREFIX);
}

/*
 * Iterate through the space map, invoking the callback on each (non-debug)
 * space map entry. Stop after reading 'end' bytes of the space map.
 */
int
space_map_iterate(space_map_t *sm, uint64_t end, sm_cb_t callback, void *arg)
{
	uint64_t blksz = sm->sm_blksz;

	ASSERT3U(blksz, !=, 0);
	ASSERT3U(end, <=, space_map_length(sm));
	ASSERT0(P2PHASE(end, sizeof (uint64_t)));

	dmu_prefetch(sm->sm_os, space_map_object(sm), 0, 0, end,
	    ZIO_PRIORITY_SYNC_READ);

	int error = 0;
	for (uint64_t block_base = 0; block_base < end && error == 0;
	    block_base += blksz) {
		dmu_buf_t *db;
		error = dmu_buf_hold(sm->sm_os, space_map_object(sm),
		    block_base, FTAG, &db, DMU_READ_PREFETCH);
		if (error != 0)
			return (error);

		uint64_t *block_start = db->db_data;
		uint64_t block_length = MIN(end - block_base, blksz);
		uint64_t *block_end = block_start +
		    (block_length / sizeof (uint64_t));

		VERIFY0(P2PHASE(block_length, sizeof (uint64_t)));
		VERIFY3U(block_length, !=, 0);
		ASSERT3U(blksz, ==, db->db_size);

		for (uint64_t *block_cursor = block_start;
		    block_cursor < block_end && error == 0; block_cursor++) {
			uint64_t e = *block_cursor;

			if (sm_entry_is_debug(e))	/* Skip debug entries */
				continue;

			uint64_t raw_offset, raw_run, vdev_id;
			maptype_t type;
			if (sm_entry_is_single_word(e)) {
				type = SM_TYPE_DECODE(e);
				vdev_id = SM_NO_VDEVID;
				raw_offset = SM_OFFSET_DECODE(e);
				raw_run = SM_RUN_DECODE(e);
			} else {
				/* it is a two-word entry */
				ASSERT(sm_entry_is_double_word(e));
				raw_run = SM2_RUN_DECODE(e);
				vdev_id = SM2_VDEV_DECODE(e);

				/* move on to the second word */
				block_cursor++;
				e = *block_cursor;
				VERIFY3P(block_cursor, <=, block_end);

				type = SM2_TYPE_DECODE(e);
				raw_offset = SM2_OFFSET_DECODE(e);
			}

			uint64_t entry_offset = (raw_offset << sm->sm_shift) +
			    sm->sm_start;
			uint64_t entry_run = raw_run << sm->sm_shift;

			VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
			ASSERT3U(entry_offset, >=, sm->sm_start);
			ASSERT3U(entry_offset, <, sm->sm_start + sm->sm_size);
			ASSERT3U(entry_run, <=, sm->sm_size);
			ASSERT3U(entry_offset + entry_run, <=,
			    sm->sm_start + sm->sm_size);

			space_map_entry_t sme = {
			    .sme_type = type,
			    .sme_vdev = vdev_id,
			    .sme_offset = entry_offset,
			    .sme_run = entry_run
			};
			error = callback(&sme, arg);
		}
		dmu_buf_rele(db, FTAG);
	}
	return (error);
}
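
/*
 * Example callback (hypothetical, for illustration only): total the
 * bytes covered by all ALLOC entries that space_map_iterate() visits.
 *
 *	static int
 *	count_alloc_cb(space_map_entry_t *sme, void *arg)
 *	{
 *		uint64_t *total = arg;
 *		if (sme->sme_type == SM_ALLOC)
 *			*total += sme->sme_run;
 *		return (0);
 *	}
 *
 *	uint64_t total = 0;
 *	VERIFY0(space_map_iterate(sm, space_map_length(sm),
 *	    count_alloc_cb, &total));
 */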

/*
 * Reads the entries from the last block of the space map into
 * buf in reverse order. Populates nwords with the number of words
 * in the last block.
 *
 * Refer to block comment within space_map_incremental_destroy()
 * to understand why this function is needed.
 */
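/*
 * For example (illustration only): if the last block holds the words
 * [S0, D1a, D1b, S2], where the S words are single-word entries and
 * D1a/D1b are the two words of one double-word entry, then buf ends up
 * as [S2, D1a, D1b, S0]; entry order is reversed, but the two words of
 * the double-word entry keep their on-disk order.
 */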
static int
space_map_reversed_last_block_entries(space_map_t *sm, uint64_t *buf,
    uint64_t bufsz, uint64_t *nwords)
{
	int error = 0;
	dmu_buf_t *db;

	/*
	 * Find the offset of the last word in the space map and use
	 * that to read the last block of the space map with
	 * dmu_buf_hold().
	 */
	uint64_t last_word_offset =
	    sm->sm_phys->smp_length - sizeof (uint64_t);
	error = dmu_buf_hold(sm->sm_os, space_map_object(sm), last_word_offset,
	    FTAG, &db, DMU_READ_NO_PREFETCH);
	if (error != 0)
		return (error);

	ASSERT3U(sm->sm_object, ==, db->db_object);
	ASSERT3U(sm->sm_blksz, ==, db->db_size);
	ASSERT3U(bufsz, >=, db->db_size);
	ASSERT(nwords != NULL);

	uint64_t *words = db->db_data;
	*nwords =
	    (sm->sm_phys->smp_length - db->db_offset) / sizeof (uint64_t);

	ASSERT3U(*nwords, <=, bufsz / sizeof (uint64_t));

	uint64_t n = *nwords;
	uint64_t j = n - 1;
	for (uint64_t i = 0; i < n; i++) {
		uint64_t entry = words[i];
		if (sm_entry_is_double_word(entry)) {
			/*
			 * Since we are populating the buffer backwards
			 * we have to be extra careful and add the two
			 * words of the double-word entry in the right
			 * order.
			 */
			ASSERT3U(j, >, 0);
			buf[j - 1] = entry;

			i++;
			ASSERT3U(i, <, n);
			entry = words[i];
			buf[j] = entry;
			j -= 2;
		} else {
			ASSERT(sm_entry_is_debug(entry) ||
			    sm_entry_is_single_word(entry));
			buf[j] = entry;
			j--;
		}
	}

	/*
	 * Assert that we wrote backwards all the
	 * way to the beginning of the buffer.
	 */
	ASSERT3S(j, ==, -1);

	dmu_buf_rele(db, FTAG);
	return (error);
}

/*
 * Note: This function performs destructive actions - specifically
 * it deletes entries from the end of the space map. Thus, callers
 * should ensure that they are holding the appropriate locks for
 * the space map that they provide.
 */
int
space_map_incremental_destroy(space_map_t *sm, sm_cb_t callback, void *arg,
    dmu_tx_t *tx)
{
	uint64_t bufsz = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
	uint64_t *buf = zio_buf_alloc(bufsz);

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * Ideally we would want to iterate from the beginning of the
	 * space map to the end in incremental steps. The issue with this
	 * approach is that we don't have any on-disk field that tells us
	 * where to resume between steps. We could try zeroing out
	 * entries that we've destroyed, but this doesn't work either as
	 * an entry that is 0 is a valid one (ALLOC for range [0x0:0x200]).
	 *
	 * As a result, we destroy the space map's entries incrementally,
	 * starting from the end, applying the callback to each entry
	 * before deleting it.
	 *
	 * The problem with this approach is that we cannot literally
	 * iterate through the words in the space map backwards as we
	 * can't distinguish two-word space map entries from their second
	 * word. Thus we do the following:
	 *
	 * 1] We get all the entries from the last block of the space map
	 *    and put them into a buffer in reverse order. This way the
	 *    last entry comes first in the buffer, the second to last is
	 *    second, etc.
	 * 2] We iterate through the entries in the buffer and we apply
	 *    the callback to each one. As we move from entry to entry we
	 *    decrease the size of the space map, effectively deleting
	 *    each entry.
	 * 3] If there are no more entries in the space map or the callback
	 *    returns a value other than 0, we stop iterating over the
	 *    space map. If there are entries remaining and the callback
	 *    returned 0, we go back to step [1].
	 */
	int error = 0;
	while (space_map_length(sm) > 0 && error == 0) {
		uint64_t nwords = 0;
		error = space_map_reversed_last_block_entries(sm, buf, bufsz,
		    &nwords);
		if (error != 0)
			break;

		ASSERT3U(nwords, <=, bufsz / sizeof (uint64_t));

		for (uint64_t i = 0; i < nwords; i++) {
			uint64_t e = buf[i];

			if (sm_entry_is_debug(e)) {
				sm->sm_phys->smp_length -= sizeof (uint64_t);
				continue;
			}

			int words = 1;
			uint64_t raw_offset, raw_run, vdev_id;
			maptype_t type;
			if (sm_entry_is_single_word(e)) {
				type = SM_TYPE_DECODE(e);
				vdev_id = SM_NO_VDEVID;
				raw_offset = SM_OFFSET_DECODE(e);
				raw_run = SM_RUN_DECODE(e);
			} else {
				ASSERT(sm_entry_is_double_word(e));
				words = 2;

				raw_run = SM2_RUN_DECODE(e);
				vdev_id = SM2_VDEV_DECODE(e);

				/* move to the second word */
				i++;
				ASSERT3U(i, <, nwords);
				e = buf[i];

				type = SM2_TYPE_DECODE(e);
				raw_offset = SM2_OFFSET_DECODE(e);
			}

			uint64_t entry_offset =
			    (raw_offset << sm->sm_shift) + sm->sm_start;
			uint64_t entry_run = raw_run << sm->sm_shift;

			VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
			VERIFY3U(entry_offset, >=, sm->sm_start);
			VERIFY3U(entry_offset, <, sm->sm_start + sm->sm_size);
			VERIFY3U(entry_run, <=, sm->sm_size);
			VERIFY3U(entry_offset + entry_run, <=,
			    sm->sm_start + sm->sm_size);

			space_map_entry_t sme = {
			    .sme_type = type,
			    .sme_vdev = vdev_id,
			    .sme_offset = entry_offset,
			    .sme_run = entry_run
			};
			error = callback(&sme, arg);
			if (error != 0)
				break;

			if (type == SM_ALLOC)
				sm->sm_phys->smp_alloc -= entry_run;
			else
				sm->sm_phys->smp_alloc += entry_run;
			sm->sm_phys->smp_length -= words * sizeof (uint64_t);
		}
	}

	if (space_map_length(sm) == 0) {
		ASSERT0(error);
		ASSERT0(space_map_allocated(sm));
	}

	zio_buf_free(buf, bufsz);
	return (error);
}

typedef struct space_map_load_arg {
	space_map_t	*smla_sm;
	range_tree_t	*smla_rt;
	maptype_t	smla_type;
} space_map_load_arg_t;

static int
space_map_load_callback(space_map_entry_t *sme, void *arg)
{
	space_map_load_arg_t *smla = arg;
	if (sme->sme_type == smla->smla_type) {
		VERIFY3U(range_tree_space(smla->smla_rt) + sme->sme_run, <=,
		    smla->smla_sm->sm_size);
		range_tree_add(smla->smla_rt, sme->sme_offset, sme->sme_run);
	} else {
		range_tree_remove(smla->smla_rt, sme->sme_offset, sme->sme_run);
	}

	return (0);
}

/*
 * Load the space map into the range tree, like space_map_load(), but
 * only read the first 'length' bytes of the space map.
 */
int
space_map_load_length(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    uint64_t length)
{
	space_map_load_arg_t smla;

	VERIFY0(range_tree_space(rt));

	if (maptype == SM_FREE)
		range_tree_add(rt, sm->sm_start, sm->sm_size);

	smla.smla_rt = rt;
	smla.smla_sm = sm;
	smla.smla_type = maptype;
	int err = space_map_iterate(sm, length,
	    space_map_load_callback, &smla);

	if (err != 0)
		range_tree_vacate(rt, NULL, NULL);

	return (err);
}

/*
 * Load the on-disk space map into the specified range tree. Segments of
 * maptype are added to the range tree; segments of other types are removed.
 */
int
space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
{
	return (space_map_load_length(sm, rt, maptype, space_map_length(sm)));
}
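
/*
 * Illustrative usage sketch (assumes the range_tree_create(ops, arg)
 * interface; error handling elided): populate an empty range tree with
 * the free segments described by a space map.
 *
 *	range_tree_t *rt = range_tree_create(NULL, NULL);
 *	VERIFY0(space_map_load(sm, rt, SM_FREE));
 */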

void
space_map_histogram_clear(space_map_t *sm)
{
	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
}

boolean_t
space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
{
	/*
	 * Verify that the in-core range tree does not have any
	 * ranges smaller than our block size (2^sm_shift).
	 */
	for (int i = 0; i < sm->sm_shift; i++) {
		if (rt->rt_histogram[i] != 0)
			return (B_FALSE);
	}
	return (B_TRUE);
}

void
space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
{
	int idx = 0;

	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(space_map_object(sm), !=, 0);

	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	ASSERT(space_map_histogram_verify(sm, rt));
	/*
	 * Transfer the content of the range tree histogram to the space
	 * map histogram. The space map histogram contains 32 buckets ranging
	 * from 2^sm_shift to 2^(32+sm_shift-1). The range tree, however,
	 * can represent ranges from 2^0 to 2^63. Since the space map only
	 * cares about allocatable blocks (minimum of 2^sm_shift), we can
	 * safely ignore all ranges in the range tree smaller than 2^sm_shift.
	 */
	for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {

		/*
		 * Since the largest histogram bucket in the space map is
		 * 2^(32+sm_shift-1), we need to normalize the values in
		 * the range tree for any bucket larger than that size. For
		 * example, given an sm_shift of 9, ranges larger than 2^40
		 * would get normalized as if they were 1TB ranges. If the
		 * range tree had a count of 5 in the 2^44 (16TB) bucket,
		 * the calculation below would normalize this to
		 * 5 * 2^4 = 80 entries in the largest (1TB) bucket.
		 */
		ASSERT3U(i, >=, idx + sm->sm_shift);
		sm->sm_phys->smp_histogram[idx] +=
		    rt->rt_histogram[i] << (i - idx - sm->sm_shift);

		/*
		 * Increment the space map's index as long as we haven't
		 * reached the maximum bucket size. Accumulate all ranges
		 * larger than the max bucket size into the last bucket.
		 */
		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
			ASSERT3U(idx + sm->sm_shift, ==, i);
			idx++;
			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
		}
	}
}

static void
space_map_write_intro_debug(space_map_t *sm, maptype_t maptype, dmu_tx_t *tx)
{
	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	uint64_t dentry = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(tx->tx_pool->dp_spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	dmu_write(sm->sm_os, space_map_object(sm), sm->sm_phys->smp_length,
	    sizeof (dentry), &dentry, tx);

	sm->sm_phys->smp_length += sizeof (dentry);
}

/*
 * Writes one or more entries given a segment.
 *
 * Note: The function may release the dbuf from the pointer initially
 * passed to it, and return a different dbuf. Also, the space map's
 * dbuf must be dirty for the changes in sm_phys to take effect.
 */
static void
space_map_write_seg(space_map_t *sm, range_seg_t *rs, maptype_t maptype,
    uint64_t vdev_id, uint8_t words, dmu_buf_t **dbp, void *tag, dmu_tx_t *tx)
{
	ASSERT3U(words, !=, 0);
	ASSERT3U(words, <=, 2);

	/* ensure the vdev_id can be represented by the space map */
	ASSERT3U(vdev_id, <=, SM_NO_VDEVID);

	/*
	 * if this is a single word entry, ensure that no vdev was
	 * specified.
	 */
	IMPLY(words == 1, vdev_id == SM_NO_VDEVID);

	dmu_buf_t *db = *dbp;
	ASSERT3U(db->db_size, ==, sm->sm_blksz);

	uint64_t *block_base = db->db_data;
	uint64_t *block_end = block_base + (sm->sm_blksz / sizeof (uint64_t));
	uint64_t *block_cursor = block_base +
	    (sm->sm_phys->smp_length - db->db_offset) / sizeof (uint64_t);

	ASSERT3P(block_cursor, <=, block_end);

	uint64_t size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
	uint64_t start = (rs->rs_start - sm->sm_start) >> sm->sm_shift;
	uint64_t run_max = (words == 2) ? SM2_RUN_MAX : SM_RUN_MAX;

	ASSERT3U(rs->rs_start, >=, sm->sm_start);
	ASSERT3U(rs->rs_start, <, sm->sm_start + sm->sm_size);
	ASSERT3U(rs->rs_end - rs->rs_start, <=, sm->sm_size);
	ASSERT3U(rs->rs_end, <=, sm->sm_start + sm->sm_size);

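	/*
	 * A segment longer than the maximum run is split across multiple
	 * entries. For example (illustrative), with words == 1 a segment
	 * of 2.5 * SM_RUN_MAX is written as three entries: two with a run
	 * of SM_RUN_MAX and one holding the remainder.
	 */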
	while (size != 0) {
		ASSERT3P(block_cursor, <=, block_end);

		/*
		 * If we are at the end of this block, flush it and start
		 * writing again from the beginning.
		 */
		if (block_cursor == block_end) {
			dmu_buf_rele(db, tag);

			uint64_t next_word_offset = sm->sm_phys->smp_length;
			VERIFY0(dmu_buf_hold(sm->sm_os,
			    space_map_object(sm), next_word_offset,
			    tag, &db, DMU_READ_PREFETCH));
			dmu_buf_will_dirty(db, tx);

			/* update caller's dbuf */
			*dbp = db;

			ASSERT3U(db->db_size, ==, sm->sm_blksz);

			block_base = db->db_data;
			block_cursor = block_base;
			block_end = block_base +
			    (db->db_size / sizeof (uint64_t));
		}

		/*
		 * If we are writing a two-word entry and we only have one
		 * word left on this block, just pad it with an empty debug
		 * entry and write the two-word entry in the next block.
		 */
		uint64_t *next_entry = block_cursor + 1;
		if (next_entry == block_end && words > 1) {
			ASSERT3U(words, ==, 2);
			*block_cursor = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
			    SM_DEBUG_ACTION_ENCODE(0) |
			    SM_DEBUG_SYNCPASS_ENCODE(0) |
			    SM_DEBUG_TXG_ENCODE(0);
			block_cursor++;
			sm->sm_phys->smp_length += sizeof (uint64_t);
			ASSERT3P(block_cursor, ==, block_end);
			continue;
		}

		uint64_t run_len = MIN(size, run_max);
		switch (words) {
		case 1:
			*block_cursor = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);
			block_cursor++;
			break;
		case 2:
			/* write the first word of the entry */
			*block_cursor = SM_PREFIX_ENCODE(SM2_PREFIX) |
			    SM2_RUN_ENCODE(run_len) |
			    SM2_VDEV_ENCODE(vdev_id);
			block_cursor++;

			/* move on to the second word of the entry */
			ASSERT3P(block_cursor, <, block_end);
			*block_cursor = SM2_TYPE_ENCODE(maptype) |
			    SM2_OFFSET_ENCODE(start);
			block_cursor++;
			break;
		default:
			panic("%d-word space map entries are not supported",
			    words);
			break;
		}
		sm->sm_phys->smp_length += words * sizeof (uint64_t);

		start += run_len;
		size -= run_len;
	}
	ASSERT0(size);
}

/*
 * Note: The space map's dbuf must be dirty for the changes in sm_phys to
 * take effect.
 */
static void
space_map_write_impl(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    uint64_t vdev_id, dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dmu_buf_t *db;

	space_map_write_intro_debug(sm, maptype, tx);

#ifdef DEBUG
	/*
	 * We do this right after we write the intro debug entry
	 * because the estimate does not take it into account.
	 */
	uint64_t initial_objsize = sm->sm_phys->smp_length;
	uint64_t estimated_growth =
	    space_map_estimate_optimal_size(sm, rt, SM_NO_VDEVID);
	uint64_t estimated_final_objsize = initial_objsize + estimated_growth;
#endif

	/*
	 * Find the offset right after the last word in the space map
	 * and use that to get a hold of the last block, so we can
	 * start appending to it.
	 */
	uint64_t next_word_offset = sm->sm_phys->smp_length;
	VERIFY0(dmu_buf_hold(sm->sm_os, space_map_object(sm),
	    next_word_offset, FTAG, &db, DMU_READ_PREFETCH));
	ASSERT3U(db->db_size, ==, sm->sm_blksz);

	dmu_buf_will_dirty(db, tx);

	avl_tree_t *t = &rt->rt_root;
	for (range_seg_t *rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		uint64_t offset = (rs->rs_start - sm->sm_start) >> sm->sm_shift;
		uint64_t length = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		uint8_t words = 1;

		/*
		 * We only write two-word entries when both of the following
		 * are true:
		 *
		 * [1] The feature is enabled.
		 * [2] The offset or run is too big for a single-word entry,
		 *     or the vdev_id is set (meaning not equal to
		 *     SM_NO_VDEVID).
		 *
		 * Note that for purposes of testing we've added the case that
		 * we write two-word entries occasionally when the feature is
		 * enabled and zfs_force_some_double_word_sm_entries has been
		 * set.
		 */
		if (spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_V2) &&
		    (offset >= (1ULL << SM_OFFSET_BITS) ||
		    length > SM_RUN_MAX ||
		    vdev_id != SM_NO_VDEVID ||
		    (zfs_force_some_double_word_sm_entries &&
		    spa_get_random(100) == 0)))
			words = 2;

		space_map_write_seg(sm, rs, maptype, vdev_id, words,
		    &db, FTAG, tx);
	}

	dmu_buf_rele(db, FTAG);

#ifdef DEBUG
	/*
	 * We expect our estimation to be based on the worst-case
	 * scenario [see comment in space_map_estimate_optimal_size()].
	 * Therefore we expect the actual objsize to be equal to or
	 * less than whatever we estimated it to be.
	 */
	ASSERT3U(estimated_final_objsize, >=, sm->sm_phys->smp_length);
#endif
}

/*
 * Note: This function manipulates the state of the given space map but
 * does not hold any locks implicitly. Thus the caller is responsible
 * for synchronizing writes to the space map.
 */
void
space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    uint64_t vdev_id, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	VERIFY3U(space_map_object(sm), !=, 0);

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * This field is no longer necessary since the in-core space map
	 * now contains the object number, but it is maintained for
	 * backwards compatibility.
	 */
	sm->sm_phys->smp_object = sm->sm_object;

	if (range_tree_is_empty(rt)) {
		VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
		return;
	}

	if (maptype == SM_ALLOC)
		sm->sm_phys->smp_alloc += range_tree_space(rt);
	else
		sm->sm_phys->smp_alloc -= range_tree_space(rt);

	uint64_t nodes = avl_numnodes(&rt->rt_root);
	uint64_t rt_space = range_tree_space(rt);

	space_map_write_impl(sm, rt, maptype, vdev_id, tx);

	/*
	 * Ensure that the space_map's accounting wasn't changed
	 * while we were in the middle of writing it out.
	 */
	VERIFY3U(nodes, ==, avl_numnodes(&rt->rt_root));
	VERIFY3U(range_tree_space(rt), ==, rt_space);
}

static int
space_map_open_impl(space_map_t *sm)
{
	int error;
	u_longlong_t blocks;

	error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
	if (error)
		return (error);

	dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
	sm->sm_phys = sm->sm_dbuf->db_data;
	return (0);
}

int
space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
    uint64_t start, uint64_t size, uint8_t shift)
{
	space_map_t *sm;
	int error;

	ASSERT(*smp == NULL);
	ASSERT(os != NULL);
	ASSERT(object != 0);

	sm = kmem_zalloc(sizeof (space_map_t), KM_SLEEP);

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_os = os;
	sm->sm_object = object;

	error = space_map_open_impl(sm);
	if (error != 0) {
		space_map_close(sm);
		return (error);
	}
	*smp = sm;

	return (0);
}
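
/*
 * Illustrative usage (hypothetical object number 'smobj'; error
 * handling elided):
 *
 *	space_map_t *sm = NULL;
 *	VERIFY0(space_map_open(&sm, os, smobj, 0, size, shift));
 *	...
 *	space_map_close(sm);
 */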

void
space_map_close(space_map_t *sm)
{
	if (sm == NULL)
		return;

	if (sm->sm_dbuf != NULL)
		dmu_buf_rele(sm->sm_dbuf, sm);
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	kmem_free(sm, sizeof (*sm));
}

void
space_map_truncate(space_map_t *sm, int blocksize, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	dmu_object_info_t doi;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(dmu_tx_get_txg(tx), <=, spa_final_dirty_txg(spa));

	dmu_object_info_from_db(sm->sm_dbuf, &doi);

	/*
	 * If the space map has the wrong bonus size (because
	 * SPA_FEATURE_SPACEMAP_HISTOGRAM has recently been enabled), or
	 * the wrong block size (because space_map_blksz has changed),
	 * free and re-allocate its object with the updated sizes.
	 *
	 * Otherwise, just truncate the current object.
	 */
	if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
	    doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
	    doi.doi_data_block_size != blocksize ||
	    doi.doi_metadata_block_size != 1 << space_map_ibs) {
		zfs_dbgmsg("txg %llu, spa %s, sm %p, reallocating "
		    "object[%llu]: old bonus %u, old blocksz %u",
		    dmu_tx_get_txg(tx), spa_name(spa), sm, sm->sm_object,
		    doi.doi_bonus_size, doi.doi_data_block_size);

		space_map_free(sm, tx);
		dmu_buf_rele(sm->sm_dbuf, sm);

		sm->sm_object = space_map_alloc(sm->sm_os, blocksize, tx);
		VERIFY0(space_map_open_impl(sm));
	} else {
		VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));

		/*
		 * If the spacemap is reallocated, its histogram
		 * will be reset. Do the same in the common case so that
		 * bugs related to the uncommon case do not go unnoticed.
		 */
		bzero(sm->sm_phys->smp_histogram,
		    sizeof (sm->sm_phys->smp_histogram));
	}

	dmu_buf_will_dirty(sm->sm_dbuf, tx);
	sm->sm_phys->smp_length = 0;
	sm->sm_phys->smp_alloc = 0;
}

uint64_t
space_map_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	uint64_t object;
	int bonuslen;

	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		bonuslen = sizeof (space_map_phys_t);
		ASSERT3U(bonuslen, <=, dmu_bonus_max());
	} else {
		bonuslen = SPACE_MAP_SIZE_V0;
	}

	object = dmu_object_alloc_ibs(os, DMU_OT_SPACE_MAP, blocksize,
	    space_map_ibs, DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);

	return (object);
}

void
space_map_free_obj(objset_t *os, uint64_t smobj, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		dmu_object_info_t doi;

		VERIFY0(dmu_object_info(os, smobj, &doi));
		if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
			spa_feature_decr(spa,
			    SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		}
	}

	VERIFY0(dmu_object_free(os, smobj, tx));
}

void
space_map_free(space_map_t *sm, dmu_tx_t *tx)
{
	if (sm == NULL)
		return;

	space_map_free_obj(sm->sm_os, space_map_object(sm), tx);
	sm->sm_object = 0;
}

/*
 * Given a range tree, make a worst-case estimate of how much space the
 * tree's segments would take if they were written to the given space
 * map.
 */
uint64_t
space_map_estimate_optimal_size(space_map_t *sm, range_tree_t *rt,
    uint64_t vdev_id)
{
	spa_t *spa = dmu_objset_spa(sm->sm_os);
	uint64_t shift = sm->sm_shift;
	uint64_t *histogram = rt->rt_histogram;
	uint64_t entries_for_seg = 0;

	/*
	 * In order to get a quick estimate of the optimal size that this
	 * range tree would have on-disk as a space map, we iterate through
	 * its histogram buckets instead of iterating through its nodes.
	 *
	 * Note that this is an upper-bound/worst-case estimate for the
	 * following reasons:
	 *
	 * 1] We assume that we always add a debug padding entry for each
	 *    block we write and that we start at the last word of a block
	 *    attempting to write a two-word entry.
	 * 2] Rounding up errors due to the way segments are distributed
	 *    in the buckets of the range tree's histogram.
	 * 3] The activation of zfs_force_some_double_word_sm_entries
	 *    (tunable) when testing.
	 *
	 * = Math and Rounding Errors =
	 *
	 * The rt_histogram[i] bucket of a range tree represents the number
	 * of segments in that range tree whose size is in
	 * [2^i, (2^(i+1))-1]. Given that, we want to divide the buckets
	 * into groups: buckets that can be represented using a single-word
	 * entry, ones that can be represented with a double-word entry,
	 * and ones that can only be represented with multiple two-word
	 * entries.
	 *
	 * [Note that if the new encoding feature is not enabled there
	 * are only two groups: single-word entry buckets and multiple
	 * single-word entry buckets. The information below assumes
	 * two-word entries are enabled, but it can easily be applied
	 * when the feature is not enabled]
	 *
	 * To find the highest bucket that can be represented with a
	 * single-word entry we look at the maximum run that such an entry
	 * can have, which is 2^(SM_RUN_BITS + sm_shift) [remember that
	 * the run of a space map entry is shifted by sm_shift, thus we
	 * add it to the exponent]. This way, excluding the value of the
	 * maximum run that can be represented by a single-word entry,
	 * all runs that are smaller exist in buckets 0 to
	 * SM_RUN_BITS + shift - 1.
	 *
	 * To find the highest bucket that can be represented with a
	 * double-word entry, we follow the same approach. Finally, any
	 * buckets higher than that are represented with multiple two-word
	 * entries. To be more specific, if the highest bucket whose
	 * segments can be represented with a single two-word entry is X,
	 * then bucket X+1 will need 2 two-word entries for each of its
	 * segments, X+2 will need 4, X+3 will need 8, ...etc.
	 *
	 * With all of the above we make our estimation based on bucket
	 * groups. There is a rounding error though. As we mentioned in
	 * the example with the one-word entry, the maximum run that can
	 * be represented in a one-word entry, 2^(SM_RUN_BITS + shift), is
	 * not part of bucket SM_RUN_BITS + shift - 1. Thus, segments of
	 * that length fall into the next bucket (and bucket group) where
	 * we start counting two-word entries and this is one more reason
	 * why the estimated size may end up being bigger than the actual
	 * size written.
	 */
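	/*
	 * Worked example (illustrative; assumes the SM_RUN_BITS and
	 * SM2_RUN_BITS values from space_map.h, 15 and 36, and a shift
	 * of 9): single-word entries cover buckets up to
	 * 15 + 9 - 1 = 23, two-word entries cover buckets up to
	 * 36 + 9 - 1 = 44, and a segment in bucket 46 needs
	 * 2^(46 - 44) = 4 two-word entries.
	 */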
	uint64_t size = 0;
	uint64_t idx = 0;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) ||
	    (vdev_id == SM_NO_VDEVID && sm->sm_size < SM_OFFSET_MAX)) {

		/*
		 * If we are trying to force some double-word entries, just
		 * assume the worst case of every single-word entry being
		 * written as a double-word entry.
		 */
		uint64_t entry_size =
		    (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) &&
		    zfs_force_some_double_word_sm_entries) ?
		    (2 * sizeof (uint64_t)) : sizeof (uint64_t);

		uint64_t single_entry_max_bucket = SM_RUN_BITS + shift - 1;
		for (; idx <= single_entry_max_bucket; idx++)
			size += histogram[idx] * entry_size;

		if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2)) {
			for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
				ASSERT3U(idx, >=, single_entry_max_bucket);
				entries_for_seg =
				    1ULL << (idx - single_entry_max_bucket);
				size += histogram[idx] *
				    entries_for_seg * entry_size;
			}
			return (size);
		}
	}

	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2));

	uint64_t double_entry_max_bucket = SM2_RUN_BITS + shift - 1;
	for (; idx <= double_entry_max_bucket; idx++)
		size += histogram[idx] * 2 * sizeof (uint64_t);

	for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
		ASSERT3U(idx, >=, double_entry_max_bucket);
		entries_for_seg = 1ULL << (idx - double_entry_max_bucket);
		size += histogram[idx] *
		    entries_for_seg * 2 * sizeof (uint64_t);
	}

	/*
	 * Assume the worst case where we start with the padding at the end
	 * of the current block and we add an extra padding entry at the end
	 * of all subsequent blocks.
	 */
	size += ((size / sm->sm_blksz) + 1) * sizeof (uint64_t);

	return (size);
}

uint64_t
space_map_object(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_object : 0);
}

int64_t
space_map_allocated(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_phys->smp_alloc : 0);
}

uint64_t
space_map_length(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_phys->smp_length : 0);
}