1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
24 */
25
26 #include <sys/zio.h>
27 #include <sys/spa.h>
28 #include <sys/dmu.h>
29 #include <sys/zfs_context.h>
30 #include <sys/zap.h>
31 #include <sys/refcount.h>
32 #include <sys/zap_impl.h>
33 #include <sys/zap_leaf.h>
34 #include <sys/avl.h>
35 #include <sys/arc.h>
36 #include <sys/dmu_objset.h>
37
38 #ifdef _KERNEL
39 #include <sys/sunddi.h>
40 #endif
41
42 extern inline mzap_phys_t *zap_m_phys(zap_t *zap);
43
44 static int mzap_upgrade(zap_t **zapp, dmu_tx_t *tx, zap_flags_t flags);
45
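/*
 * Return the fatzap flags for this object; a microzap has no flags.
 */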
46 uint64_t
47 zap_getflags(zap_t *zap)
48 {
49 if (zap->zap_ismicro)
50 return (0);
51 return (zap_f_phys(zap)->zap_flags);
52 }
53
54 int
55 zap_hashbits(zap_t *zap)
56 {
57 if (zap_getflags(zap) & ZAP_FLAG_HASH64)
58 return (48);
59 else
60 return (28);
61 }
62
63 uint32_t
64 zap_maxcd(zap_t *zap)
65 {
66 if (zap_getflags(zap) & ZAP_FLAG_HASH64)
67 return ((1<<16)-1);
68 else
69 return (-1U);
70 }
71
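/*
 * Compute the 64-bit hash of a key.  For ZAP_FLAG_PRE_HASHED_KEY objects
 * the first word of the key is the hash; otherwise we run a CRC-64,
 * seeded with the per-object salt, over the (normalized) key.  Only the
 * upper zap_hashbits() bits are kept; the low bits are reserved for the
 * collision differentiator (cd).
 */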
72 static uint64_t
73 zap_hash(zap_name_t *zn)
74 {
75 zap_t *zap = zn->zn_zap;
76 uint64_t h = 0;
77
78 if (zap_getflags(zap) & ZAP_FLAG_PRE_HASHED_KEY) {
79 ASSERT(zap_getflags(zap) & ZAP_FLAG_UINT64_KEY);
80 h = *(uint64_t *)zn->zn_key_orig;
81 } else {
82 h = zap->zap_salt;
83 ASSERT(h != 0);
84 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
85
86 if (zap_getflags(zap) & ZAP_FLAG_UINT64_KEY) {
87 int i;
88 const uint64_t *wp = zn->zn_key_norm;
89
90 ASSERT(zn->zn_key_intlen == 8);
91 for (i = 0; i < zn->zn_key_norm_numints; wp++, i++) {
92 int j;
93 uint64_t word = *wp;
94
95 for (j = 0; j < zn->zn_key_intlen; j++) {
96 h = (h >> 8) ^
97 zfs_crc64_table[(h ^ word) & 0xFF];
98 word >>= NBBY;
99 }
100 }
101 } else {
102 int i, len;
103 const uint8_t *cp = zn->zn_key_norm;
104
105 /*
106 * We previously stored the terminating null on
107 * disk, but didn't hash it, so we need to
108 * continue to not hash it. (The
109 * zn_key_*_numints includes the terminating
110 * null for non-binary keys.)
111 */
112 len = zn->zn_key_norm_numints - 1;
113
114 ASSERT(zn->zn_key_intlen == 1);
115 for (i = 0; i < len; cp++, i++) {
116 h = (h >> 8) ^
117 zfs_crc64_table[(h ^ *cp) & 0xFF];
118 }
119 }
120 }
/*
 * Don't use all 64 bits, since we need some in the cookie for
 * the collision differentiator.  We MUST use the high bits,
 * since those are the ones that we first pay attention to when
 * choosing the bucket.
 */
127 h &= ~((1ULL << (64 - zap_hashbits(zap))) - 1);
128
129 return (h);
130 }
131
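/*
 * Normalize a name with u8_textprep_str() according to the zap's
 * normalization flags, writing the result into namenorm (which must be
 * at least ZAP_MAXNAMELEN bytes).  Only valid for string-keyed zaps.
 */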
132 static int
133 zap_normalize(zap_t *zap, const char *name, char *namenorm)
134 {
135 size_t inlen, outlen;
136 int err;
137
138 ASSERT(!(zap_getflags(zap) & ZAP_FLAG_UINT64_KEY));
139
140 inlen = strlen(name) + 1;
141 outlen = ZAP_MAXNAMELEN;
142
143 err = 0;
144 (void) u8_textprep_str((char *)name, &inlen, namenorm, &outlen,
145 zap->zap_normflags | U8_TEXTPREP_IGNORE_NULL |
146 U8_TEXTPREP_IGNORE_INVALID, U8_UNICODE_LATEST, &err);
147
148 return (err);
149 }
150
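/*
 * Return B_TRUE if matchname matches the key described by zn, using the
 * recorded match type: normalized comparison for MT_FIRST, exact
 * comparison for MT_BEST and MT_EXACT.
 */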
151 boolean_t
152 zap_match(zap_name_t *zn, const char *matchname)
153 {
154 ASSERT(!(zap_getflags(zn->zn_zap) & ZAP_FLAG_UINT64_KEY));
155
156 if (zn->zn_matchtype == MT_FIRST) {
157 char norm[ZAP_MAXNAMELEN];
158
159 if (zap_normalize(zn->zn_zap, matchname, norm) != 0)
160 return (B_FALSE);
161
162 return (strcmp(zn->zn_key_norm, norm) == 0);
163 } else {
164 /* MT_BEST or MT_EXACT */
165 return (strcmp(zn->zn_key_orig, matchname) == 0);
166 }
167 }
168
169 void
170 zap_name_free(zap_name_t *zn)
171 {
172 kmem_free(zn, sizeof (zap_name_t));
173 }
174
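/*
 * Allocate a zap_name_t for a string key, normalizing the key and
 * computing its hash.  Returns NULL if the name cannot be normalized,
 * or if a non-exact matchtype is requested on a zap with no
 * normalization flags.
 */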
175 zap_name_t *
176 zap_name_alloc(zap_t *zap, const char *key, matchtype_t mt)
177 {
178 zap_name_t *zn = kmem_alloc(sizeof (zap_name_t), KM_SLEEP);
179
180 zn->zn_zap = zap;
181 zn->zn_key_intlen = sizeof (*key);
182 zn->zn_key_orig = key;
183 zn->zn_key_orig_numints = strlen(zn->zn_key_orig) + 1;
184 zn->zn_matchtype = mt;
185 if (zap->zap_normflags) {
186 if (zap_normalize(zap, key, zn->zn_normbuf) != 0) {
187 zap_name_free(zn);
188 return (NULL);
189 }
190 zn->zn_key_norm = zn->zn_normbuf;
191 zn->zn_key_norm_numints = strlen(zn->zn_key_norm) + 1;
192 } else {
193 if (mt != MT_EXACT) {
194 zap_name_free(zn);
195 return (NULL);
196 }
197 zn->zn_key_norm = zn->zn_key_orig;
198 zn->zn_key_norm_numints = zn->zn_key_orig_numints;
199 }
200
201 zn->zn_hash = zap_hash(zn);
202 return (zn);
203 }
204
205 zap_name_t *
206 zap_name_alloc_uint64(zap_t *zap, const uint64_t *key, int numints)
207 {
208 zap_name_t *zn = kmem_alloc(sizeof (zap_name_t), KM_SLEEP);
209
210 ASSERT(zap->zap_normflags == 0);
211 zn->zn_zap = zap;
212 zn->zn_key_intlen = sizeof (*key);
213 zn->zn_key_orig = zn->zn_key_norm = key;
214 zn->zn_key_orig_numints = zn->zn_key_norm_numints = numints;
215 zn->zn_matchtype = MT_EXACT;
216
217 zn->zn_hash = zap_hash(zn);
218 return (zn);
219 }
220
221 static void
222 mzap_byteswap(mzap_phys_t *buf, size_t size)
223 {
224 int i, max;
225 buf->mz_block_type = BSWAP_64(buf->mz_block_type);
226 buf->mz_salt = BSWAP_64(buf->mz_salt);
227 buf->mz_normflags = BSWAP_64(buf->mz_normflags);
228 max = (size / MZAP_ENT_LEN) - 1;
229 for (i = 0; i < max; i++) {
230 buf->mz_chunk[i].mze_value =
231 BSWAP_64(buf->mz_chunk[i].mze_value);
232 buf->mz_chunk[i].mze_cd =
233 BSWAP_32(buf->mz_chunk[i].mze_cd);
234 }
235 }
236
237 void
238 zap_byteswap(void *buf, size_t size)
239 {
240 uint64_t block_type;
241
242 block_type = *(uint64_t *)buf;
243
244 if (block_type == ZBT_MICRO || block_type == BSWAP_64(ZBT_MICRO)) {
245 /* ASSERT(magic == ZAP_LEAF_MAGIC); */
246 mzap_byteswap(buf, size);
247 } else {
248 fzap_byteswap(buf, size);
249 }
250 }
251
252 static int
253 mze_compare(const void *arg1, const void *arg2)
254 {
255 const mzap_ent_t *mze1 = arg1;
256 const mzap_ent_t *mze2 = arg2;
257
258 if (mze1->mze_hash > mze2->mze_hash)
259 return (+1);
260 if (mze1->mze_hash < mze2->mze_hash)
261 return (-1);
262 if (mze1->mze_cd > mze2->mze_cd)
263 return (+1);
264 if (mze1->mze_cd < mze2->mze_cd)
265 return (-1);
266 return (0);
267 }
268
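/*
 * Insert an in-core mzap_ent_t for the given microzap chunk into the
 * AVL tree that mirrors the on-disk entries.  The caller must hold the
 * zap rwlock as writer.
 */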
269 static void
270 mze_insert(zap_t *zap, int chunkid, uint64_t hash)
271 {
272 mzap_ent_t *mze;
273
274 ASSERT(zap->zap_ismicro);
275 ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
276
277 mze = kmem_alloc(sizeof (mzap_ent_t), KM_SLEEP);
278 mze->mze_chunkid = chunkid;
279 mze->mze_hash = hash;
280 mze->mze_cd = MZE_PHYS(zap, mze)->mze_cd;
281 ASSERT(MZE_PHYS(zap, mze)->mze_name[0] != 0);
282 avl_add(&zap->zap_m.zap_avl, mze);
283 }
284
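/*
 * Find the in-core microzap entry matching zn.  For MT_BEST, fall back
 * to an MT_FIRST (normalized) search if no exact match is found.
 */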
285 static mzap_ent_t *
286 mze_find(zap_name_t *zn)
287 {
288 mzap_ent_t mze_tofind;
289 mzap_ent_t *mze;
290 avl_index_t idx;
291 avl_tree_t *avl = &zn->zn_zap->zap_m.zap_avl;
292
293 ASSERT(zn->zn_zap->zap_ismicro);
294 ASSERT(RW_LOCK_HELD(&zn->zn_zap->zap_rwlock));
295
296 mze_tofind.mze_hash = zn->zn_hash;
297 mze_tofind.mze_cd = 0;
298
299 again:
300 mze = avl_find(avl, &mze_tofind, &idx);
301 if (mze == NULL)
302 mze = avl_nearest(avl, idx, AVL_AFTER);
303 for (; mze && mze->mze_hash == zn->zn_hash; mze = AVL_NEXT(avl, mze)) {
304 ASSERT3U(mze->mze_cd, ==, MZE_PHYS(zn->zn_zap, mze)->mze_cd);
305 if (zap_match(zn, MZE_PHYS(zn->zn_zap, mze)->mze_name))
306 return (mze);
307 }
308 if (zn->zn_matchtype == MT_BEST) {
309 zn->zn_matchtype = MT_FIRST;
310 goto again;
311 }
312 return (NULL);
313 }
314
315 static uint32_t
316 mze_find_unused_cd(zap_t *zap, uint64_t hash)
317 {
318 mzap_ent_t mze_tofind;
319 mzap_ent_t *mze;
320 avl_index_t idx;
321 avl_tree_t *avl = &zap->zap_m.zap_avl;
322 uint32_t cd;
323
324 ASSERT(zap->zap_ismicro);
325 ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
326
327 mze_tofind.mze_hash = hash;
328 mze_tofind.mze_cd = 0;
329
330 cd = 0;
331 for (mze = avl_find(avl, &mze_tofind, &idx);
332 mze && mze->mze_hash == hash; mze = AVL_NEXT(avl, mze)) {
333 if (mze->mze_cd != cd)
334 break;
335 cd++;
336 }
337
338 return (cd);
339 }
340
341 static void
342 mze_remove(zap_t *zap, mzap_ent_t *mze)
343 {
344 ASSERT(zap->zap_ismicro);
345 ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
346
347 avl_remove(&zap->zap_m.zap_avl, mze);
348 kmem_free(mze, sizeof (mzap_ent_t));
349 }
350
351 static void
352 mze_destroy(zap_t *zap)
353 {
354 mzap_ent_t *mze;
355 void *avlcookie = NULL;
356
357 while (mze = avl_destroy_nodes(&zap->zap_m.zap_avl, &avlcookie))
358 kmem_free(mze, sizeof (mzap_ent_t));
359 avl_destroy(&zap->zap_m.zap_avl);
360 }
361
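/*
 * Construct the in-core zap_t for an object from its header dbuf and
 * attach it to the dbuf as user data.  If another thread attached its
 * zap_t first, discard ours and return the winner.  For a microzap,
 * also build the AVL tree of its entries.
 */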
362 static zap_t *
363 mzap_open(objset_t *os, uint64_t obj, dmu_buf_t *db)
364 {
365 zap_t *winner;
366 zap_t *zap;
367 int i;
368
369 ASSERT3U(MZAP_ENT_LEN, ==, sizeof (mzap_ent_phys_t));
370
371 zap = kmem_zalloc(sizeof (zap_t), KM_SLEEP);
372 rw_init(&zap->zap_rwlock, 0, 0, 0);
373 rw_enter(&zap->zap_rwlock, RW_WRITER);
374 zap->zap_objset = os;
375 zap->zap_object = obj;
376 zap->zap_dbuf = db;
377
378 if (*(uint64_t *)db->db_data != ZBT_MICRO) {
379 mutex_init(&zap->zap_f.zap_num_entries_mtx, 0, 0, 0);
380 zap->zap_f.zap_block_shift = highbit64(db->db_size) - 1;
381 } else {
382 zap->zap_ismicro = TRUE;
383 }
384
385 /*
386 * Make sure that zap_ismicro is set before we let others see
387 * it, because zap_lockdir() checks zap_ismicro without the lock
388 * held.
389 */
390 winner = dmu_buf_set_user(db, zap, zap_evict);
391
392 if (winner != NULL) {
393 rw_exit(&zap->zap_rwlock);
394 rw_destroy(&zap->zap_rwlock);
395 if (!zap->zap_ismicro)
396 mutex_destroy(&zap->zap_f.zap_num_entries_mtx);
397 kmem_free(zap, sizeof (zap_t));
398 return (winner);
399 }
400
401 if (zap->zap_ismicro) {
402 zap->zap_salt = zap_m_phys(zap)->mz_salt;
403 zap->zap_normflags = zap_m_phys(zap)->mz_normflags;
404 zap->zap_m.zap_num_chunks = db->db_size / MZAP_ENT_LEN - 1;
405 avl_create(&zap->zap_m.zap_avl, mze_compare,
406 sizeof (mzap_ent_t), offsetof(mzap_ent_t, mze_node));
407
408 for (i = 0; i < zap->zap_m.zap_num_chunks; i++) {
409 mzap_ent_phys_t *mze =
410 &zap_m_phys(zap)->mz_chunk[i];
411 if (mze->mze_name[0]) {
412 zap_name_t *zn;
413
414 zap->zap_m.zap_num_entries++;
415 zn = zap_name_alloc(zap, mze->mze_name,
416 MT_EXACT);
417 mze_insert(zap, i, zn->zn_hash);
418 zap_name_free(zn);
419 }
420 }
421 } else {
422 zap->zap_salt = zap_f_phys(zap)->zap_salt;
423 zap->zap_normflags = zap_f_phys(zap)->zap_normflags;
424
425 ASSERT3U(sizeof (struct zap_leaf_header), ==,
426 2*ZAP_LEAF_CHUNKSIZE);
427
428 /*
429 * The embedded pointer table should not overlap the
430 * other members.
431 */
432 ASSERT3P(&ZAP_EMBEDDED_PTRTBL_ENT(zap, 0), >,
433 &zap_f_phys(zap)->zap_salt);
434
435 /*
436 * The embedded pointer table should end at the end of
437 * the block
438 */
439 ASSERT3U((uintptr_t)&ZAP_EMBEDDED_PTRTBL_ENT(zap,
440 1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap)) -
441 (uintptr_t)zap_f_phys(zap), ==,
442 zap->zap_dbuf->db_size);
443 }
444 rw_exit(&zap->zap_rwlock);
445 return (zap);
446 }
447
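/*
 * Hold the object's header dbuf and take the zap rwlock.  A fatzap
 * reader gets only a read lock even if the caller asked for a write
 * lock on behalf of a microzap.  If we are adding an entry and the
 * microzap is full, grow its block or, once the block would exceed
 * MZAP_MAX_BLKSZ, upgrade it to a fatzap.
 */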
448 int
449 zap_lockdir(objset_t *os, uint64_t obj, dmu_tx_t *tx,
450 krw_t lti, boolean_t fatreader, boolean_t adding, zap_t **zapp)
451 {
452 zap_t *zap;
453 dmu_buf_t *db;
454 krw_t lt;
455 int err;
456
457 *zapp = NULL;
458
459 err = dmu_buf_hold(os, obj, 0, NULL, &db, DMU_READ_NO_PREFETCH);
460 if (err)
461 return (err);
462
463 #ifdef ZFS_DEBUG
464 {
465 dmu_object_info_t doi;
466 dmu_object_info_from_db(db, &doi);
467 ASSERT3U(DMU_OT_BYTESWAP(doi.doi_type), ==, DMU_BSWAP_ZAP);
468 }
469 #endif
470
471 zap = dmu_buf_get_user(db);
472 if (zap == NULL)
473 zap = mzap_open(os, obj, db);
474
475 /*
476 * We're checking zap_ismicro without the lock held, in order to
477 * tell what type of lock we want. Once we have some sort of
478 * lock, see if it really is the right type. In practice this
479 * can only be different if it was upgraded from micro to fat,
480 * and micro wanted WRITER but fat only needs READER.
481 */
482 lt = (!zap->zap_ismicro && fatreader) ? RW_READER : lti;
483 rw_enter(&zap->zap_rwlock, lt);
484 if (lt != ((!zap->zap_ismicro && fatreader) ? RW_READER : lti)) {
485 /* it was upgraded, now we only need reader */
486 ASSERT(lt == RW_WRITER);
487 ASSERT(RW_READER ==
488 (!zap->zap_ismicro && fatreader) ? RW_READER : lti);
489 rw_downgrade(&zap->zap_rwlock);
490 lt = RW_READER;
491 }
492
493 zap->zap_objset = os;
494
495 if (lt == RW_WRITER)
496 dmu_buf_will_dirty(db, tx);
497
498 ASSERT3P(zap->zap_dbuf, ==, db);
499
500 ASSERT(!zap->zap_ismicro ||
501 zap->zap_m.zap_num_entries <= zap->zap_m.zap_num_chunks);
502 if (zap->zap_ismicro && tx && adding &&
503 zap->zap_m.zap_num_entries == zap->zap_m.zap_num_chunks) {
504 uint64_t newsz = db->db_size + SPA_MINBLOCKSIZE;
505 if (newsz > MZAP_MAX_BLKSZ) {
506 dprintf("upgrading obj %llu: num_entries=%u\n",
507 obj, zap->zap_m.zap_num_entries);
508 *zapp = zap;
509 return (mzap_upgrade(zapp, tx, 0));
510 }
511 err = dmu_object_set_blocksize(os, obj, newsz, 0, tx);
512 ASSERT0(err);
513 zap->zap_m.zap_num_chunks =
514 db->db_size / MZAP_ENT_LEN - 1;
515 }
516
517 *zapp = zap;
518 return (0);
519 }
520
521 void
522 zap_unlockdir(zap_t *zap)
523 {
524 rw_exit(&zap->zap_rwlock);
525 dmu_buf_rele(zap->zap_dbuf, NULL);
526 }
527
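/*
 * Convert a microzap into a fatzap: copy the microzap block aside,
 * reinitialize the object as a fatzap, and re-add each entry with its
 * original collision differentiator.  On success *zapp points to the
 * (possibly changed) zap_t.
 */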
528 static int
529 mzap_upgrade(zap_t **zapp, dmu_tx_t *tx, zap_flags_t flags)
530 {
531 mzap_phys_t *mzp;
532 int i, sz, nchunks;
533 int err = 0;
534 zap_t *zap = *zapp;
535
536 ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
537
538 sz = zap->zap_dbuf->db_size;
539 mzp = kmem_alloc(sz, KM_SLEEP);
540 bcopy(zap->zap_dbuf->db_data, mzp, sz);
541 nchunks = zap->zap_m.zap_num_chunks;
542
543 if (!flags) {
544 err = dmu_object_set_blocksize(zap->zap_objset, zap->zap_object,
545 1ULL << fzap_default_block_shift, 0, tx);
546 if (err) {
547 kmem_free(mzp, sz);
548 return (err);
549 }
550 }
551
552 dprintf("upgrading obj=%llu with %u chunks\n",
553 zap->zap_object, nchunks);
554 /* XXX destroy the avl later, so we can use the stored hash value */
555 mze_destroy(zap);
556
557 fzap_upgrade(zap, tx, flags);
558
559 for (i = 0; i < nchunks; i++) {
560 mzap_ent_phys_t *mze = &mzp->mz_chunk[i];
561 zap_name_t *zn;
562 if (mze->mze_name[0] == 0)
563 continue;
564 dprintf("adding %s=%llu\n",
565 mze->mze_name, mze->mze_value);
566 zn = zap_name_alloc(zap, mze->mze_name, MT_EXACT);
567 err = fzap_add_cd(zn, 8, 1, &mze->mze_value, mze->mze_cd, tx);
568 zap = zn->zn_zap; /* fzap_add_cd() may change zap */
569 zap_name_free(zn);
570 if (err)
571 break;
572 }
573 kmem_free(mzp, sz);
574 *zapp = zap;
575 return (err);
576 }
577
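/*
 * Initialize a freshly allocated object as an empty microzap.  If any
 * zap flags are requested, upgrade it to a fatzap immediately, since
 * only the fatzap format can record flags.
 */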
578 void
579 mzap_create_impl(objset_t *os, uint64_t obj, int normflags, zap_flags_t flags,
580 dmu_tx_t *tx)
581 {
582 dmu_buf_t *db;
583 mzap_phys_t *zp;
584
585 VERIFY(0 == dmu_buf_hold(os, obj, 0, FTAG, &db, DMU_READ_NO_PREFETCH));
586
587 #ifdef ZFS_DEBUG
588 {
589 dmu_object_info_t doi;
590 dmu_object_info_from_db(db, &doi);
591 ASSERT3U(DMU_OT_BYTESWAP(doi.doi_type), ==, DMU_BSWAP_ZAP);
592 }
593 #endif
594
595 dmu_buf_will_dirty(db, tx);
596 zp = db->db_data;
597 zp->mz_block_type = ZBT_MICRO;
598 zp->mz_salt = ((uintptr_t)db ^ (uintptr_t)tx ^ (obj << 1)) | 1ULL;
599 zp->mz_normflags = normflags;
600 dmu_buf_rele(db, FTAG);
601
602 if (flags != 0) {
603 zap_t *zap;
604 /* Only fat zap supports flags; upgrade immediately. */
605 VERIFY(0 == zap_lockdir(os, obj, tx, RW_WRITER,
606 B_FALSE, B_FALSE, &zap));
607 VERIFY3U(0, ==, mzap_upgrade(&zap, tx, flags));
608 zap_unlockdir(zap);
609 }
610 }
611
612 int
613 zap_create_claim(objset_t *os, uint64_t obj, dmu_object_type_t ot,
614 dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
615 {
616 return (zap_create_claim_norm(os, obj,
617 0, ot, bonustype, bonuslen, tx));
618 }
619
620 int
621 zap_create_claim_norm(objset_t *os, uint64_t obj, int normflags,
622 dmu_object_type_t ot,
623 dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
624 {
625 int err;
626
627 err = dmu_object_claim(os, obj, ot, 0, bonustype, bonuslen, tx);
628 if (err != 0)
629 return (err);
630 mzap_create_impl(os, obj, normflags, 0, tx);
631 return (0);
632 }
633
634 uint64_t
635 zap_create(objset_t *os, dmu_object_type_t ot,
636 dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
637 {
638 return (zap_create_norm(os, 0, ot, bonustype, bonuslen, tx));
639 }
640
641 uint64_t
642 zap_create_norm(objset_t *os, int normflags, dmu_object_type_t ot,
643 dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
644 {
645 uint64_t obj = dmu_object_alloc(os, ot, 0, bonustype, bonuslen, tx);
646
647 mzap_create_impl(os, obj, normflags, 0, tx);
648 return (obj);
649 }
650
651 uint64_t
652 zap_create_flags(objset_t *os, int normflags, zap_flags_t flags,
653 dmu_object_type_t ot, int leaf_blockshift, int indirect_blockshift,
654 dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
655 {
656 uint64_t obj = dmu_object_alloc(os, ot, 0, bonustype, bonuslen, tx);
657
658 ASSERT(leaf_blockshift >= SPA_MINBLOCKSHIFT &&
659 leaf_blockshift <= SPA_OLD_MAXBLOCKSHIFT &&
660 indirect_blockshift >= SPA_MINBLOCKSHIFT &&
661 indirect_blockshift <= SPA_OLD_MAXBLOCKSHIFT);
662
663 VERIFY(dmu_object_set_blocksize(os, obj,
664 1ULL << leaf_blockshift, indirect_blockshift, tx) == 0);
665
666 mzap_create_impl(os, obj, normflags, flags, tx);
667 return (obj);
668 }
669
670 int
671 zap_destroy(objset_t *os, uint64_t zapobj, dmu_tx_t *tx)
672 {
/*
 * dmu_object_free will free the object number and free the
 * data.  Freeing the data will cause our evict function
 * (zap_evict) to be called, which will destroy our in-core
 * state (zap_leaf_t's and zap_t).
 */
678
679 return (dmu_object_free(os, zapobj, tx));
680 }
681
682 _NOTE(ARGSUSED(0))
683 void
684 zap_evict(dmu_buf_t *db, void *vzap)
685 {
686 zap_t *zap = vzap;
687
688 rw_destroy(&zap->zap_rwlock);
689
690 if (zap->zap_ismicro)
691 mze_destroy(zap);
692 else
693 mutex_destroy(&zap->zap_f.zap_num_entries_mtx);
694
695 kmem_free(zap, sizeof (zap_t));
696 }
697
698 int
699 zap_count(objset_t *os, uint64_t zapobj, uint64_t *count)
700 {
701 zap_t *zap;
702 int err;
703
704 err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, &zap);
705 if (err)
706 return (err);
707 if (!zap->zap_ismicro) {
708 err = fzap_count(zap, count);
709 } else {
710 *count = zap->zap_m.zap_num_entries;
711 }
712 zap_unlockdir(zap);
713 return (err);
714 }
715
716 /*
717 * zn may be NULL; if not specified, it will be computed if needed.
718 * See also the comment above zap_entry_normalization_conflict().
719 */
720 static boolean_t
721 mzap_normalization_conflict(zap_t *zap, zap_name_t *zn, mzap_ent_t *mze)
722 {
723 mzap_ent_t *other;
724 int direction = AVL_BEFORE;
725 boolean_t allocdzn = B_FALSE;
726
727 if (zap->zap_normflags == 0)
728 return (B_FALSE);
729
730 again:
731 for (other = avl_walk(&zap->zap_m.zap_avl, mze, direction);
732 other && other->mze_hash == mze->mze_hash;
733 other = avl_walk(&zap->zap_m.zap_avl, other, direction)) {
734
735 if (zn == NULL) {
736 zn = zap_name_alloc(zap, MZE_PHYS(zap, mze)->mze_name,
737 MT_FIRST);
738 allocdzn = B_TRUE;
739 }
740 if (zap_match(zn, MZE_PHYS(zap, other)->mze_name)) {
741 if (allocdzn)
742 zap_name_free(zn);
743 return (B_TRUE);
744 }
745 }
746
747 if (direction == AVL_BEFORE) {
748 direction = AVL_AFTER;
749 goto again;
750 }
751
752 if (allocdzn)
753 zap_name_free(zn);
754 return (B_FALSE);
755 }
756
757 /*
758 * Routines for manipulating attributes.
759 */
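/*
 * An illustrative sketch of a typical caller sequence (not part of the
 * build; error handling and dmu_tx setup are omitted, and "os",
 * "zapobj", and "tx" are assumed to already exist):
 *
 *	uint64_t val = 1, out;
 *	(void) zap_add(os, zapobj, "example", 8, 1, &val, tx);
 *	(void) zap_lookup(os, zapobj, "example", 8, 1, &out);
 *	(void) zap_remove(os, zapobj, "example", tx);
 */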
760
761 int
762 zap_lookup(objset_t *os, uint64_t zapobj, const char *name,
763 uint64_t integer_size, uint64_t num_integers, void *buf)
764 {
765 return (zap_lookup_norm(os, zapobj, name, integer_size,
766 num_integers, buf, MT_EXACT, NULL, 0, NULL));
767 }
768
769 int
770 zap_lookup_norm(objset_t *os, uint64_t zapobj, const char *name,
771 uint64_t integer_size, uint64_t num_integers, void *buf,
772 matchtype_t mt, char *realname, int rn_len,
773 boolean_t *ncp)
774 {
775 zap_t *zap;
776 int err;
777 mzap_ent_t *mze;
778 zap_name_t *zn;
779
780 err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, &zap);
781 if (err)
782 return (err);
783 zn = zap_name_alloc(zap, name, mt);
784 if (zn == NULL) {
785 zap_unlockdir(zap);
786 return (SET_ERROR(ENOTSUP));
787 }
788
789 if (!zap->zap_ismicro) {
790 err = fzap_lookup(zn, integer_size, num_integers, buf,
791 realname, rn_len, ncp);
792 } else {
793 mze = mze_find(zn);
794 if (mze == NULL) {
795 err = SET_ERROR(ENOENT);
796 } else {
797 if (num_integers < 1) {
798 err = SET_ERROR(EOVERFLOW);
799 } else if (integer_size != 8) {
800 err = SET_ERROR(EINVAL);
801 } else {
802 *(uint64_t *)buf =
803 MZE_PHYS(zap, mze)->mze_value;
804 (void) strlcpy(realname,
805 MZE_PHYS(zap, mze)->mze_name, rn_len);
806 if (ncp) {
807 *ncp = mzap_normalization_conflict(zap,
808 zn, mze);
809 }
810 }
811 }
812 }
813 zap_name_free(zn);
814 zap_unlockdir(zap);
815 return (err);
816 }
817
818 int
819 zap_prefetch_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
820 int key_numints)
821 {
822 zap_t *zap;
823 int err;
824 zap_name_t *zn;
825
826 err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, &zap);
827 if (err)
828 return (err);
829 zn = zap_name_alloc_uint64(zap, key, key_numints);
830 if (zn == NULL) {
831 zap_unlockdir(zap);
832 return (SET_ERROR(ENOTSUP));
833 }
834
835 fzap_prefetch(zn);
836 zap_name_free(zn);
837 zap_unlockdir(zap);
838 return (err);
839 }
840
841 int
842 zap_lookup_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
843 int key_numints, uint64_t integer_size, uint64_t num_integers, void *buf)
844 {
845 zap_t *zap;
846 int err;
847 zap_name_t *zn;
848
849 err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, &zap);
850 if (err)
851 return (err);
852 zn = zap_name_alloc_uint64(zap, key, key_numints);
853 if (zn == NULL) {
854 zap_unlockdir(zap);
855 return (SET_ERROR(ENOTSUP));
856 }
857
858 err = fzap_lookup(zn, integer_size, num_integers, buf,
859 NULL, 0, NULL);
860 zap_name_free(zn);
861 zap_unlockdir(zap);
862 return (err);
863 }
864
865 int
866 zap_contains(objset_t *os, uint64_t zapobj, const char *name)
867 {
868 int err = zap_lookup_norm(os, zapobj, name, 0,
869 0, NULL, MT_EXACT, NULL, 0, NULL);
870 if (err == EOVERFLOW || err == EINVAL)
871 err = 0; /* found, but skipped reading the value */
872 return (err);
873 }
874
875 int
876 zap_length(objset_t *os, uint64_t zapobj, const char *name,
877 uint64_t *integer_size, uint64_t *num_integers)
878 {
879 zap_t *zap;
880 int err;
881 mzap_ent_t *mze;
882 zap_name_t *zn;
883
884 err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, &zap);
885 if (err)
886 return (err);
887 zn = zap_name_alloc(zap, name, MT_EXACT);
888 if (zn == NULL) {
889 zap_unlockdir(zap);
890 return (SET_ERROR(ENOTSUP));
891 }
892 if (!zap->zap_ismicro) {
893 err = fzap_length(zn, integer_size, num_integers);
894 } else {
895 mze = mze_find(zn);
896 if (mze == NULL) {
897 err = SET_ERROR(ENOENT);
898 } else {
899 if (integer_size)
900 *integer_size = 8;
901 if (num_integers)
902 *num_integers = 1;
903 }
904 }
905 zap_name_free(zn);
906 zap_unlockdir(zap);
907 return (err);
908 }
909
910 int
911 zap_length_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
912 int key_numints, uint64_t *integer_size, uint64_t *num_integers)
913 {
914 zap_t *zap;
915 int err;
916 zap_name_t *zn;
917
918 err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, &zap);
919 if (err)
920 return (err);
921 zn = zap_name_alloc_uint64(zap, key, key_numints);
922 if (zn == NULL) {
923 zap_unlockdir(zap);
924 return (SET_ERROR(ENOTSUP));
925 }
926 err = fzap_length(zn, integer_size, num_integers);
927 zap_name_free(zn);
928 zap_unlockdir(zap);
929 return (err);
930 }
931
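/*
 * Add an entry to a microzap that is known to have room and to not
 * already contain the name.  Scans for a free chunk starting at
 * zap_alloc_next, wrapping around to the beginning if necessary.
 */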
932 static void
933 mzap_addent(zap_name_t *zn, uint64_t value)
934 {
935 int i;
936 zap_t *zap = zn->zn_zap;
937 int start = zap->zap_m.zap_alloc_next;
938 uint32_t cd;
939
940 ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
941
942 #ifdef ZFS_DEBUG
943 for (i = 0; i < zap->zap_m.zap_num_chunks; i++) {
944 mzap_ent_phys_t *mze = &zap_m_phys(zap)->mz_chunk[i];
945 ASSERT(strcmp(zn->zn_key_orig, mze->mze_name) != 0);
946 }
947 #endif
948
949 cd = mze_find_unused_cd(zap, zn->zn_hash);
950 /* given the limited size of the microzap, this can't happen */
951 ASSERT(cd < zap_maxcd(zap));
952
953 again:
954 for (i = start; i < zap->zap_m.zap_num_chunks; i++) {
955 mzap_ent_phys_t *mze = &zap_m_phys(zap)->mz_chunk[i];
956 if (mze->mze_name[0] == 0) {
957 mze->mze_value = value;
958 mze->mze_cd = cd;
959 (void) strcpy(mze->mze_name, zn->zn_key_orig);
960 zap->zap_m.zap_num_entries++;
961 zap->zap_m.zap_alloc_next = i+1;
962 if (zap->zap_m.zap_alloc_next ==
963 zap->zap_m.zap_num_chunks)
964 zap->zap_m.zap_alloc_next = 0;
965 mze_insert(zap, i, zn->zn_hash);
966 return;
967 }
968 }
969 if (start != 0) {
970 start = 0;
971 goto again;
972 }
973 ASSERT(!"out of entries!");
974 }
975
976 int
977 zap_add(objset_t *os, uint64_t zapobj, const char *key,
978 int integer_size, uint64_t num_integers,
979 const void *val, dmu_tx_t *tx)
980 {
981 zap_t *zap;
982 int err;
983 mzap_ent_t *mze;
984 const uint64_t *intval = val;
985 zap_name_t *zn;
986
987 err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, &zap);
988 if (err)
989 return (err);
990 zn = zap_name_alloc(zap, key, MT_EXACT);
991 if (zn == NULL) {
992 zap_unlockdir(zap);
993 return (SET_ERROR(ENOTSUP));
994 }
995 if (!zap->zap_ismicro) {
996 err = fzap_add(zn, integer_size, num_integers, val, tx);
997 zap = zn->zn_zap; /* fzap_add() may change zap */
998 } else if (integer_size != 8 || num_integers != 1 ||
999 strlen(key) >= MZAP_NAME_LEN) {
1000 err = mzap_upgrade(&zn->zn_zap, tx, 0);
1001 if (err == 0)
1002 err = fzap_add(zn, integer_size, num_integers, val, tx);
1003 zap = zn->zn_zap; /* fzap_add() may change zap */
1004 } else {
1005 mze = mze_find(zn);
1006 if (mze != NULL) {
1007 err = SET_ERROR(EEXIST);
1008 } else {
1009 mzap_addent(zn, *intval);
1010 }
1011 }
1012 ASSERT(zap == zn->zn_zap);
1013 zap_name_free(zn);
1014 if (zap != NULL) /* may be NULL if fzap_add() failed */
1015 zap_unlockdir(zap);
1016 return (err);
1017 }
1018
1019 int
1020 zap_add_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
1021 int key_numints, int integer_size, uint64_t num_integers,
1022 const void *val, dmu_tx_t *tx)
1023 {
1024 zap_t *zap;
1025 int err;
1026 zap_name_t *zn;
1027
1028 err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, &zap);
1029 if (err)
1030 return (err);
1031 zn = zap_name_alloc_uint64(zap, key, key_numints);
1032 if (zn == NULL) {
1033 zap_unlockdir(zap);
1034 return (SET_ERROR(ENOTSUP));
1035 }
1036 err = fzap_add(zn, integer_size, num_integers, val, tx);
1037 zap = zn->zn_zap; /* fzap_add() may change zap */
1038 zap_name_free(zn);
1039 if (zap != NULL) /* may be NULL if fzap_add() failed */
1040 zap_unlockdir(zap);
1041 return (err);
1042 }
1043
1044 int
1045 zap_update(objset_t *os, uint64_t zapobj, const char *name,
1046 int integer_size, uint64_t num_integers, const void *val, dmu_tx_t *tx)
1047 {
1048 zap_t *zap;
1049 mzap_ent_t *mze;
1050 uint64_t oldval;
1051 const uint64_t *intval = val;
1052 zap_name_t *zn;
1053 int err;
1054
1055 #ifdef ZFS_DEBUG
/*
 * If there is an old value, it shouldn't change across the
 * lockdir (e.g., due to bp rewrite's translation).
 */
1060 if (integer_size == 8 && num_integers == 1)
1061 (void) zap_lookup(os, zapobj, name, 8, 1, &oldval);
1062 #endif
1063
1064 err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, &zap);
1065 if (err)
1066 return (err);
1067 zn = zap_name_alloc(zap, name, MT_EXACT);
1068 if (zn == NULL) {
1069 zap_unlockdir(zap);
1070 return (SET_ERROR(ENOTSUP));
1071 }
1072 if (!zap->zap_ismicro) {
1073 err = fzap_update(zn, integer_size, num_integers, val, tx);
1074 zap = zn->zn_zap; /* fzap_update() may change zap */
1075 } else if (integer_size != 8 || num_integers != 1 ||
1076 strlen(name) >= MZAP_NAME_LEN) {
1077 dprintf("upgrading obj %llu: intsz=%u numint=%llu name=%s\n",
1078 zapobj, integer_size, num_integers, name);
1079 err = mzap_upgrade(&zn->zn_zap, tx, 0);
1080 if (err == 0)
1081 err = fzap_update(zn, integer_size, num_integers,
1082 val, tx);
1083 zap = zn->zn_zap; /* fzap_update() may change zap */
1084 } else {
1085 mze = mze_find(zn);
1086 if (mze != NULL) {
1087 ASSERT3U(MZE_PHYS(zap, mze)->mze_value, ==, oldval);
1088 MZE_PHYS(zap, mze)->mze_value = *intval;
1089 } else {
1090 mzap_addent(zn, *intval);
1091 }
1092 }
1093 ASSERT(zap == zn->zn_zap);
1094 zap_name_free(zn);
1095 if (zap != NULL) /* may be NULL if fzap_upgrade() failed */
1096 zap_unlockdir(zap);
1097 return (err);
1098 }
1099
1100 int
1101 zap_update_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
1102 int key_numints,
1103 int integer_size, uint64_t num_integers, const void *val, dmu_tx_t *tx)
1104 {
1105 zap_t *zap;
1106 zap_name_t *zn;
1107 int err;
1108
1109 err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, &zap);
1110 if (err)
1111 return (err);
1112 zn = zap_name_alloc_uint64(zap, key, key_numints);
1113 if (zn == NULL) {
1114 zap_unlockdir(zap);
1115 return (SET_ERROR(ENOTSUP));
1116 }
1117 err = fzap_update(zn, integer_size, num_integers, val, tx);
1118 zap = zn->zn_zap; /* fzap_update() may change zap */
1119 zap_name_free(zn);
if (zap != NULL) /* may be NULL if fzap_update() failed */
1121 zap_unlockdir(zap);
1122 return (err);
1123 }
1124
1125 int
1126 zap_remove(objset_t *os, uint64_t zapobj, const char *name, dmu_tx_t *tx)
1127 {
1128 return (zap_remove_norm(os, zapobj, name, MT_EXACT, tx));
1129 }
1130
1131 int
1132 zap_remove_norm(objset_t *os, uint64_t zapobj, const char *name,
1133 matchtype_t mt, dmu_tx_t *tx)
1134 {
1135 zap_t *zap;
1136 int err;
1137 mzap_ent_t *mze;
1138 zap_name_t *zn;
1139
1140 err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, FALSE, &zap);
1141 if (err)
1142 return (err);
1143 zn = zap_name_alloc(zap, name, mt);
1144 if (zn == NULL) {
1145 zap_unlockdir(zap);
1146 return (SET_ERROR(ENOTSUP));
1147 }
1148 if (!zap->zap_ismicro) {
1149 err = fzap_remove(zn, tx);
1150 } else {
1151 mze = mze_find(zn);
1152 if (mze == NULL) {
1153 err = SET_ERROR(ENOENT);
1154 } else {
1155 zap->zap_m.zap_num_entries--;
1156 bzero(&zap_m_phys(zap)->mz_chunk[mze->mze_chunkid],
1157 sizeof (mzap_ent_phys_t));
1158 mze_remove(zap, mze);
1159 }
1160 }
1161 zap_name_free(zn);
1162 zap_unlockdir(zap);
1163 return (err);
1164 }
1165
1166 int
1167 zap_remove_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
1168 int key_numints, dmu_tx_t *tx)
1169 {
1170 zap_t *zap;
1171 int err;
1172 zap_name_t *zn;
1173
1174 err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, FALSE, &zap);
1175 if (err)
1176 return (err);
1177 zn = zap_name_alloc_uint64(zap, key, key_numints);
1178 if (zn == NULL) {
1179 zap_unlockdir(zap);
1180 return (SET_ERROR(ENOTSUP));
1181 }
1182 err = fzap_remove(zn, tx);
1183 zap_name_free(zn);
1184 zap_unlockdir(zap);
1185 return (err);
1186 }
1187
1188 /*
1189 * Routines for iterating over the attributes.
1190 */
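/*
 * The usual iteration pattern, shown as an illustrative sketch (assumes
 * "os" and "zapobj" identify an existing zap object):
 *
 *	zap_cursor_t zc;
 *	zap_attribute_t za;
 *
 *	for (zap_cursor_init(&zc, os, zapobj);
 *	    zap_cursor_retrieve(&zc, &za) == 0;
 *	    zap_cursor_advance(&zc)) {
 *		... use za.za_name, za.za_first_integer, etc. ...
 *	}
 *	zap_cursor_fini(&zc);
 */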
1191
1192 void
1193 zap_cursor_init_serialized(zap_cursor_t *zc, objset_t *os, uint64_t zapobj,
1194 uint64_t serialized)
1195 {
1196 zc->zc_objset = os;
1197 zc->zc_zap = NULL;
1198 zc->zc_leaf = NULL;
1199 zc->zc_zapobj = zapobj;
1200 zc->zc_serialized = serialized;
1201 zc->zc_hash = 0;
1202 zc->zc_cd = 0;
1203 }
1204
1205 void
1206 zap_cursor_init(zap_cursor_t *zc, objset_t *os, uint64_t zapobj)
1207 {
1208 zap_cursor_init_serialized(zc, os, zapobj, 0);
1209 }
1210
1211 void
1212 zap_cursor_fini(zap_cursor_t *zc)
1213 {
1214 if (zc->zc_zap) {
1215 rw_enter(&zc->zc_zap->zap_rwlock, RW_READER);
1216 zap_unlockdir(zc->zc_zap);
1217 zc->zc_zap = NULL;
1218 }
1219 if (zc->zc_leaf) {
1220 rw_enter(&zc->zc_leaf->l_rwlock, RW_READER);
1221 zap_put_leaf(zc->zc_leaf);
1222 zc->zc_leaf = NULL;
1223 }
1224 zc->zc_objset = NULL;
1225 }
1226
1227 uint64_t
1228 zap_cursor_serialize(zap_cursor_t *zc)
1229 {
1230 if (zc->zc_hash == -1ULL)
1231 return (-1ULL);
1232 if (zc->zc_zap == NULL)
1233 return (zc->zc_serialized);
1234 ASSERT((zc->zc_hash & zap_maxcd(zc->zc_zap)) == 0);
1235 ASSERT(zc->zc_cd < zap_maxcd(zc->zc_zap));
1236
/*
 * We want to keep the high 32 bits of the cursor zero if we can, so
 * that 32-bit programs can access it.  So we usually use a small
 * (28-bit) hash value, which lets us fit 4 bits of cd into the low
 * 32 bits of the cursor.
 *
 * [ collision differentiator | zap_hashbits()-bit hash value ]
 */
1245 return ((zc->zc_hash >> (64 - zap_hashbits(zc->zc_zap))) |
1246 ((uint64_t)zc->zc_cd << zap_hashbits(zc->zc_zap)));
1247 }
1248
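/*
 * Retrieve the attribute at the current cursor position into *za.
 * Returns ENOENT once the cursor has passed the last entry.  The zap is
 * (re)locked as reader for the duration of the call.
 */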
1249 int
1250 zap_cursor_retrieve(zap_cursor_t *zc, zap_attribute_t *za)
1251 {
1252 int err;
1253 avl_index_t idx;
1254 mzap_ent_t mze_tofind;
1255 mzap_ent_t *mze;
1256
1257 if (zc->zc_hash == -1ULL)
1258 return (SET_ERROR(ENOENT));
1259
1260 if (zc->zc_zap == NULL) {
1261 int hb;
1262 err = zap_lockdir(zc->zc_objset, zc->zc_zapobj, NULL,
1263 RW_READER, TRUE, FALSE, &zc->zc_zap);
1264 if (err)
1265 return (err);
1266
/*
 * To support the zap_cursor_init_serialized, advance, retrieve
 * sequence, we must add to the existing zc_cd, which may already
 * be 1 due to the zap_cursor_advance.
 */
1272 ASSERT(zc->zc_hash == 0);
1273 hb = zap_hashbits(zc->zc_zap);
1274 zc->zc_hash = zc->zc_serialized << (64 - hb);
1275 zc->zc_cd += zc->zc_serialized >> hb;
1276 if (zc->zc_cd >= zap_maxcd(zc->zc_zap)) /* corrupt serialized */
1277 zc->zc_cd = 0;
1278 } else {
1279 rw_enter(&zc->zc_zap->zap_rwlock, RW_READER);
1280 }
1281 if (!zc->zc_zap->zap_ismicro) {
1282 err = fzap_cursor_retrieve(zc->zc_zap, zc, za);
1283 } else {
1284 mze_tofind.mze_hash = zc->zc_hash;
1285 mze_tofind.mze_cd = zc->zc_cd;
1286
1287 mze = avl_find(&zc->zc_zap->zap_m.zap_avl, &mze_tofind, &idx);
1288 if (mze == NULL) {
1289 mze = avl_nearest(&zc->zc_zap->zap_m.zap_avl,
1290 idx, AVL_AFTER);
1291 }
1292 if (mze) {
1293 mzap_ent_phys_t *mzep = MZE_PHYS(zc->zc_zap, mze);
1294 ASSERT3U(mze->mze_cd, ==, mzep->mze_cd);
1295 za->za_normalization_conflict =
1296 mzap_normalization_conflict(zc->zc_zap, NULL, mze);
1297 za->za_integer_length = 8;
1298 za->za_num_integers = 1;
1299 za->za_first_integer = mzep->mze_value;
1300 (void) strcpy(za->za_name, mzep->mze_name);
1301 zc->zc_hash = mze->mze_hash;
1302 zc->zc_cd = mze->mze_cd;
1303 err = 0;
1304 } else {
1305 zc->zc_hash = -1ULL;
1306 err = SET_ERROR(ENOENT);
1307 }
1308 }
1309 rw_exit(&zc->zc_zap->zap_rwlock);
1310 return (err);
1311 }
1312
1313 void
1314 zap_cursor_advance(zap_cursor_t *zc)
1315 {
1316 if (zc->zc_hash == -1ULL)
1317 return;
1318 zc->zc_cd++;
1319 }
1320
1321 int
1322 zap_get_stats(objset_t *os, uint64_t zapobj, zap_stats_t *zs)
1323 {
1324 int err;
1325 zap_t *zap;
1326
1327 err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, &zap);
1328 if (err)
1329 return (err);
1330
1331 bzero(zs, sizeof (zap_stats_t));
1332
1333 if (zap->zap_ismicro) {
1334 zs->zs_blocksize = zap->zap_dbuf->db_size;
1335 zs->zs_num_entries = zap->zap_m.zap_num_entries;
1336 zs->zs_num_blocks = 1;
1337 } else {
1338 fzap_get_stats(zap, zs);
1339 }
1340 zap_unlockdir(zap);
1341 return (0);
1342 }
1343
1344 int
1345 zap_count_write(objset_t *os, uint64_t zapobj, const char *name, int add,
1346 uint64_t *towrite, uint64_t *tooverwrite)
1347 {
1348 zap_t *zap;
1349 int err = 0;
1350
/*
 * Since we don't have a name, we cannot figure out which blocks will
 * be affected by this operation, so account for the worst case:
 * - 3 blocks overwritten: target leaf, ptrtbl block, header block
 * - 4 new blocks written if adding:
 *    - 2 blocks for possibly split leaves,
 *    - 2 grown ptrtbl blocks
 *
 * This also accommodates the case where an add operation to a fairly
 * large microzap results in a promotion to fatzap.
 */
1362 if (name == NULL) {
1363 *towrite += (3 + (add ? 4 : 0)) * SPA_OLD_MAXBLOCKSIZE;
1364 return (err);
1365 }
1366
/*
 * We lock the zap with adding == FALSE, because passing the actual
 * value of add could trigger a mzap_upgrade().  At present we are only
 * evaluating the possibility of this operation, so we do not want to
 * trigger an upgrade.
 */
1373 err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, &zap);
1374 if (err)
1375 return (err);
1376
1377 if (!zap->zap_ismicro) {
1378 zap_name_t *zn = zap_name_alloc(zap, name, MT_EXACT);
1379 if (zn) {
1380 err = fzap_count_write(zn, add, towrite,
1381 tooverwrite);
1382 zap_name_free(zn);
1383 } else {
1384 /*
1385 * We treat this case as similar to (name == NULL)
1386 */
1387 *towrite += (3 + (add ? 4 : 0)) * SPA_OLD_MAXBLOCKSIZE;
1388 }
1389 } else {
/*
 * We are here if (name != NULL) and this is a micro-zap.
 * We account for the header block depending on whether it
 * is freeable.
 *
 * In case of an add operation it is hard to tell whether this
 * add will promote the microzap to a fatzap.  Hence, we
 * consider the worst case and account for the blocks assuming
 * this microzap would be promoted to a fatzap.
 *
 * 1 block overwritten: header block
 * 4 new blocks written: 2 new split leaves, 2 grown
 *			ptrtbl blocks
 */
1405 if (dmu_buf_freeable(zap->zap_dbuf))
1406 *tooverwrite += MZAP_MAX_BLKSZ;
1407 else
1408 *towrite += MZAP_MAX_BLKSZ;
1409
1410 if (add) {
1411 *towrite += 4 * MZAP_MAX_BLKSZ;
1412 }
1413 }
1414
1415 zap_unlockdir(zap);
1416 return (err);
1417 }