1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24 * Portions Copyright 2011 iXsystems, Inc
25 * Copyright (c) 2013, 2017 by Delphix. All rights reserved.
26 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
27 * Copyright (c) 2014 Integros [integros.com]
28 * Copyright 2019 Joyent, Inc.
29 */
30
31 #include <sys/zfs_context.h>
32 #include <sys/types.h>
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/sysmacros.h>
36 #include <sys/dmu.h>
37 #include <sys/dmu_impl.h>
38 #include <sys/dmu_objset.h>
39 #include <sys/dmu_tx.h>
40 #include <sys/dbuf.h>
41 #include <sys/dnode.h>
42 #include <sys/zap.h>
43 #include <sys/sa.h>
44 #include <sys/sunddi.h>
45 #include <sys/sa_impl.h>
47 #include <sys/errno.h>
49
50 #ifdef _KERNEL
51 #include <sys/zfs_znode.h>
52 #endif
53
54 /*
55 * ZFS System attributes:
56 *
57 * A generic mechanism to allow for arbitrary attributes
58 * to be stored in a dnode. The data will be stored in the bonus buffer of
 * the dnode and, if necessary, a special "spill" block will be used to
 * handle overflow situations.  The spill block will be sized to fit the
 * data, from 512 bytes up to 128K.  When a spill block is used the
 * BP (blkptr_t) for the
62 * spill block is stored at the end of the current bonus buffer. Any
63 * attributes that would be in the way of the blkptr_t will be relocated
64 * into the spill block.
65 *
66 * Attribute registration:
67 *
 * A mapping between attribute "string" names and their actual attribute
 * numeric values, lengths, and byteswap functions is stored persistently
 * on a per-dataset basis.  The names are only used
71 * during registration. All attributes are known by their unique attribute
72 * id value. If an attribute can have a variable size then the value
73 * 0 will be used to indicate this.
74 *
75 * Attribute Layout:
76 *
77 * Attribute layouts are a way to compactly store multiple attributes, but
 * without incurring the overhead associated with managing each attribute
 * individually.  Since the same set of attributes will typically be
 * stored in the same order, a single table will be used to represent that
 * layout.  The ZPL, for example, will usually have only about 10 different
82 * layouts (regular files, device files, symlinks,
83 * regular files + scanstamp, files/dir with extended attributes, and then
84 * you have the possibility of all of those minus ACL, because it would
85 * be kicked out into the spill block)
86 *
 * A layout is simply an array of the attributes in the order they are
 * stored, e.g. [0, 1, 4, 5, 2]
89 *
 * Each distinct layout is given a unique layout number and that is what is
91 * stored in the header at the beginning of the SA data buffer.
92 *
93 * A layout only covers a single dbuf (bonus or spill). If a set of
94 * attributes is split up between the bonus buffer and a spill buffer then
95 * two different layouts will be used. This allows us to byteswap the
96 * spill without looking at the bonus buffer and keeps the on disk format of
97 * the bonus and spill buffer the same.
98 *
99 * Adding a single attribute will cause the entire set of attributes to
100 * be rewritten and could result in a new layout number being constructed
101 * as part of the rewrite if no such layout exists for the new set of
 * attributes.  The new attribute will be appended to the end of the already
103 * existing attributes.
104 *
105 * Both the attribute registration and attribute layout information are
 * stored in normal ZAP attributes.  There should be a small number of
107 * known layouts and the set of attributes is assumed to typically be quite
108 * small.
109 *
110 * The registered attributes and layout "table" information is maintained
111 * in core and a special "sa_os_t" is attached to the objset_t.
112 *
113 * A special interface is provided to allow for quickly applying
114 * a large set of attributes at once. sa_replace_all_by_template() is
115 * used to set an array of attributes. This is used by the ZPL when
116 * creating a brand new file. The template that is passed into the function
 * specifies the attributes, the sizes of any variable-length attributes, the
 * location of the data, and a special "data locator" function if the data
 * isn't in a contiguous location.
120 *
121 * Byteswap implications:
122 *
 * Since the SA attributes are not entirely self-describing we can't do
124 * the normal byteswap processing. The special ZAP layout attribute and
125 * attribute registration attributes define the byteswap function and the
126 * size of the attributes, unless it is variable sized.
127 * The normal ZFS byteswapping infrastructure assumes you don't need
 * to read any objects in order to do the necessary byteswapping, whereas
 * SA attributes can only be properly byteswapped if the dataset is opened
130 * and the layout/attribute ZAP attributes are available. Because of this
131 * the SA attributes will be byteswapped when they are first accessed by
132 * the SA code that will read the SA data.
133 */
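
/*
 * Illustrative usage sketch (not part of the implementation):  error
 * handling, locking and transaction setup are omitted, and the names
 * "my_attrs", "MY_ATTR_COUNT" and "MY_SIZE" stand in for a consumer's
 * own registration table and attribute indexes.
 *
 * Resolve (and, if needed, register) the consumer's attributes:
 *
 *	sa_attr_type_t *attr_table;
 *	(void) sa_setup(os, sa_obj, my_attrs, MY_ATTR_COUNT, &attr_table);
 *
 * Get a handle on an object, then read and rewrite one attribute:
 *
 *	sa_handle_t *hdl;
 *	uint64_t size;
 *
 *	(void) sa_handle_get(os, obj, NULL, SA_HDL_SHARED, &hdl);
 *	(void) sa_lookup(hdl, attr_table[MY_SIZE], &size, sizeof (size));
 *	size += 512;
 *	(void) sa_update(hdl, attr_table[MY_SIZE], &size, sizeof (size), tx);
 *	sa_handle_destroy(hdl);
 */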
134
135 typedef void (sa_iterfunc_t)(void *hdr, void *addr, sa_attr_type_t,
136 uint16_t length, int length_idx, boolean_t, void *userp);
137
138 static int sa_build_index(sa_handle_t *hdl, sa_buf_type_t buftype);
139 static void sa_idx_tab_hold(objset_t *os, sa_idx_tab_t *idx_tab);
140 static sa_idx_tab_t *sa_find_idx_tab(objset_t *os, dmu_object_type_t bonustype,
141 sa_hdr_phys_t *hdr);
142 static void sa_idx_tab_rele(objset_t *os, void *arg);
143 static void sa_copy_data(sa_data_locator_t *func, void *start, void *target,
144 int buflen);
145 static int sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
146 sa_data_op_t action, sa_data_locator_t *locator, void *datastart,
147 uint16_t buflen, dmu_tx_t *tx);
148
149 arc_byteswap_func_t *sa_bswap_table[] = {
150 byteswap_uint64_array,
151 byteswap_uint32_array,
152 byteswap_uint16_array,
153 byteswap_uint8_array,
154 zfs_acl_byteswap,
155 };
156
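/*
 * Copy an attribute value of length l from s to t.  If a data locator
 * function f was supplied, the copy is delegated to sa_copy_data(), which
 * walks the locator's segments; otherwise the common 8- and 16-byte
 * fixed-length cases are copied inline and anything else falls back to
 * bcopy().
 */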
157 #define SA_COPY_DATA(f, s, t, l) \
158 { \
159 if (f == NULL) { \
160 if (l == 8) { \
161 *(uint64_t *)t = *(uint64_t *)s; \
162 } else if (l == 16) { \
163 *(uint64_t *)t = *(uint64_t *)s; \
164 *(uint64_t *)((uintptr_t)t + 8) = \
165 *(uint64_t *)((uintptr_t)s + 8); \
166 } else { \
167 bcopy(s, t, l); \
168 } \
169 } else \
170 sa_copy_data(f, s, t, l); \
171 }
172
173 /*
174 * This table is fixed and cannot be changed. Its purpose is to
175 * allow the SA code to work with both old/new ZPL file systems.
176 * It contains the list of legacy attributes. These attributes aren't
177 * stored in the "attribute" registry zap objects, since older ZPL file systems
178 * won't have the registry. Only objsets of type ZFS_TYPE_FILESYSTEM will
179 * use this static table.
180 */
181 sa_attr_reg_t sa_legacy_attrs[] = {
182 {"ZPL_ATIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 0},
183 {"ZPL_MTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 1},
184 {"ZPL_CTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 2},
185 {"ZPL_CRTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 3},
186 {"ZPL_GEN", sizeof (uint64_t), SA_UINT64_ARRAY, 4},
187 {"ZPL_MODE", sizeof (uint64_t), SA_UINT64_ARRAY, 5},
188 {"ZPL_SIZE", sizeof (uint64_t), SA_UINT64_ARRAY, 6},
189 {"ZPL_PARENT", sizeof (uint64_t), SA_UINT64_ARRAY, 7},
190 {"ZPL_LINKS", sizeof (uint64_t), SA_UINT64_ARRAY, 8},
191 {"ZPL_XATTR", sizeof (uint64_t), SA_UINT64_ARRAY, 9},
192 {"ZPL_RDEV", sizeof (uint64_t), SA_UINT64_ARRAY, 10},
193 {"ZPL_FLAGS", sizeof (uint64_t), SA_UINT64_ARRAY, 11},
194 {"ZPL_UID", sizeof (uint64_t), SA_UINT64_ARRAY, 12},
195 {"ZPL_GID", sizeof (uint64_t), SA_UINT64_ARRAY, 13},
196 {"ZPL_PAD", sizeof (uint64_t) * 4, SA_UINT64_ARRAY, 14},
197 {"ZPL_ZNODE_ACL", 88, SA_UINT8_ARRAY, 15},
198 };
199
200 /*
201 * This is only used for objects of type DMU_OT_ZNODE
202 */
203 sa_attr_type_t sa_legacy_zpl_layout[] = {
204 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
205 };
206
207 /*
208 * Special dummy layout used for buffers with no attributes.
209 */
210 sa_attr_type_t sa_dummy_zpl_layout[] = { 0 };
211
212 static int sa_legacy_attr_count = 16;
213 static kmem_cache_t *sa_cache = NULL;
214
215 /*ARGSUSED*/
216 static int
217 sa_cache_constructor(void *buf, void *unused, int kmflag)
218 {
219 sa_handle_t *hdl = buf;
220
221 mutex_init(&hdl->sa_lock, NULL, MUTEX_DEFAULT, NULL);
222 return (0);
223 }
224
225 /*ARGSUSED*/
226 static void
227 sa_cache_destructor(void *buf, void *unused)
228 {
229 sa_handle_t *hdl = buf;
230 mutex_destroy(&hdl->sa_lock);
231 }
232
233 void
234 sa_cache_init(void)
235 {
236 sa_cache = kmem_cache_create("sa_cache",
237 sizeof (sa_handle_t), 0, sa_cache_constructor,
238 sa_cache_destructor, NULL, NULL, NULL, 0);
239 }
240
241 void
242 sa_cache_fini(void)
243 {
244 if (sa_cache)
245 kmem_cache_destroy(sa_cache);
246 }
247
248 static int
249 layout_num_compare(const void *arg1, const void *arg2)
250 {
251 const sa_lot_t *node1 = (const sa_lot_t *)arg1;
252 const sa_lot_t *node2 = (const sa_lot_t *)arg2;
253
254 return (AVL_CMP(node1->lot_num, node2->lot_num));
255 }
256
257 static int
258 layout_hash_compare(const void *arg1, const void *arg2)
259 {
260 const sa_lot_t *node1 = (const sa_lot_t *)arg1;
261 const sa_lot_t *node2 = (const sa_lot_t *)arg2;
262
263 int cmp = AVL_CMP(node1->lot_hash, node2->lot_hash);
264 if (likely(cmp))
265 return (cmp);
266
267 return (AVL_CMP(node1->lot_instance, node2->lot_instance));
268 }
269
270 boolean_t
271 sa_layout_equal(sa_lot_t *tbf, sa_attr_type_t *attrs, int count)
272 {
273 int i;
274
275 if (count != tbf->lot_attr_count)
276 return (1);
277
278 for (i = 0; i != count; i++) {
279 if (attrs[i] != tbf->lot_attrs[i])
280 return (1);
281 }
282 return (0);
283 }
284
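/*
 * Hash a single attribute number through the CRC-64 lookup table;
 * sa_layout_info_hash() XORs these together to fingerprint a layout.
 */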
285 #define SA_ATTR_HASH(attr) (zfs_crc64_table[(-1ULL ^ attr) & 0xFF])
286
287 static uint64_t
288 sa_layout_info_hash(sa_attr_type_t *attrs, int attr_count)
289 {
290 int i;
291 uint64_t crc = -1ULL;
292
293 for (i = 0; i != attr_count; i++)
294 crc ^= SA_ATTR_HASH(attrs[i]);
295
296 return (crc);
297 }
298
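/*
 * Make sure the spill block, if any, is held by this handle and that its
 * index table has been built.  Returns 0 on success, or the error from
 * dmu_spill_hold_existing() (e.g. ENOENT when the object has no spill
 * block).
 */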
299 static int
300 sa_get_spill(sa_handle_t *hdl)
301 {
302 int rc;
303 if (hdl->sa_spill == NULL) {
304 if ((rc = dmu_spill_hold_existing(hdl->sa_bonus, NULL,
305 &hdl->sa_spill)) == 0)
306 VERIFY(0 == sa_build_index(hdl, SA_SPILL));
307 } else {
308 rc = 0;
309 }
310
311 return (rc);
312 }
313
314 /*
315 * Main attribute lookup/update function
 * Returns 0 for success or non-zero for failures.
 *
 * Operates on a bulk array; the first failure will abort further processing.
319 */
320 int
321 sa_attr_op(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count,
322 sa_data_op_t data_op, dmu_tx_t *tx)
323 {
324 sa_os_t *sa = hdl->sa_os->os_sa;
325 int i;
326 int error = 0;
327 sa_buf_type_t buftypes;
328
329 buftypes = 0;
330
331 ASSERT(count > 0);
332 for (i = 0; i != count; i++) {
333 ASSERT(bulk[i].sa_attr <= hdl->sa_os->os_sa->sa_num_attrs);
334
335 bulk[i].sa_addr = NULL;
336 /* First check the bonus buffer */
337
338 if (hdl->sa_bonus_tab && TOC_ATTR_PRESENT(
339 hdl->sa_bonus_tab->sa_idx_tab[bulk[i].sa_attr])) {
340 SA_ATTR_INFO(sa, hdl->sa_bonus_tab,
341 SA_GET_HDR(hdl, SA_BONUS),
342 bulk[i].sa_attr, bulk[i], SA_BONUS, hdl);
343 if (tx && !(buftypes & SA_BONUS)) {
344 dmu_buf_will_dirty(hdl->sa_bonus, tx);
345 buftypes |= SA_BONUS;
346 }
347 }
348 if (bulk[i].sa_addr == NULL &&
349 ((error = sa_get_spill(hdl)) == 0)) {
350 if (TOC_ATTR_PRESENT(
351 hdl->sa_spill_tab->sa_idx_tab[bulk[i].sa_attr])) {
352 SA_ATTR_INFO(sa, hdl->sa_spill_tab,
353 SA_GET_HDR(hdl, SA_SPILL),
354 bulk[i].sa_attr, bulk[i], SA_SPILL, hdl);
355 if (tx && !(buftypes & SA_SPILL) &&
356 bulk[i].sa_size == bulk[i].sa_length) {
357 dmu_buf_will_dirty(hdl->sa_spill, tx);
358 buftypes |= SA_SPILL;
359 }
360 }
361 }
362 if (error && error != ENOENT) {
363 return ((error == ECKSUM) ? EIO : error);
364 }
365
366 switch (data_op) {
367 case SA_LOOKUP:
368 if (bulk[i].sa_addr == NULL)
369 return (SET_ERROR(ENOENT));
370 if (bulk[i].sa_data) {
371 SA_COPY_DATA(bulk[i].sa_data_func,
372 bulk[i].sa_addr, bulk[i].sa_data,
373 bulk[i].sa_size);
374 }
375 continue;
376
377 case SA_UPDATE:
378 /* existing rewrite of attr */
379 if (bulk[i].sa_addr &&
380 bulk[i].sa_size == bulk[i].sa_length) {
381 SA_COPY_DATA(bulk[i].sa_data_func,
382 bulk[i].sa_data, bulk[i].sa_addr,
383 bulk[i].sa_length);
384 continue;
385 } else if (bulk[i].sa_addr) { /* attr size change */
386 error = sa_modify_attrs(hdl, bulk[i].sa_attr,
387 SA_REPLACE, bulk[i].sa_data_func,
388 bulk[i].sa_data, bulk[i].sa_length, tx);
389 } else { /* adding new attribute */
390 error = sa_modify_attrs(hdl, bulk[i].sa_attr,
391 SA_ADD, bulk[i].sa_data_func,
392 bulk[i].sa_data, bulk[i].sa_length, tx);
393 }
394 if (error)
395 return (error);
396 break;
397 }
398 }
399 return (error);
400 }
401
402 static sa_lot_t *
403 sa_add_layout_entry(objset_t *os, sa_attr_type_t *attrs, int attr_count,
404 uint64_t lot_num, uint64_t hash, boolean_t zapadd, dmu_tx_t *tx)
405 {
406 sa_os_t *sa = os->os_sa;
407 sa_lot_t *tb, *findtb;
408 int i, size;
409 avl_index_t loc;
410
411 ASSERT(MUTEX_HELD(&sa->sa_lock));
412 tb = kmem_zalloc(sizeof (sa_lot_t), KM_SLEEP);
413 tb->lot_attr_count = attr_count;
414
415 if ((size = sizeof (sa_attr_type_t) * attr_count) != 0) {
416 tb->lot_attrs = kmem_alloc(size, KM_SLEEP);
417 bcopy(attrs, tb->lot_attrs, size);
418 }
419
420 tb->lot_num = lot_num;
421 tb->lot_hash = hash;
422 tb->lot_instance = 0;
423
424 if (zapadd) {
425 char attr_name[8];
426
427 if (sa->sa_layout_attr_obj == 0) {
428 sa->sa_layout_attr_obj = zap_create_link(os,
429 DMU_OT_SA_ATTR_LAYOUTS,
430 sa->sa_master_obj, SA_LAYOUTS, tx);
431 }
432
433 (void) snprintf(attr_name, sizeof (attr_name),
434 "%d", (int)lot_num);
435 VERIFY(0 == zap_update(os, os->os_sa->sa_layout_attr_obj,
436 attr_name, 2, attr_count, attrs, tx));
437 }
438
439 list_create(&tb->lot_idx_tab, sizeof (sa_idx_tab_t),
440 offsetof(sa_idx_tab_t, sa_next));
441
442 for (i = 0; i != attr_count; i++) {
443 if (sa->sa_attr_table[tb->lot_attrs[i]].sa_length == 0)
444 tb->lot_var_sizes++;
445 }
446
447 avl_add(&sa->sa_layout_num_tree, tb);
448
449 /* verify we don't have a hash collision */
450 if ((findtb = avl_find(&sa->sa_layout_hash_tree, tb, &loc)) != NULL) {
451 for (; findtb && findtb->lot_hash == hash;
452 findtb = AVL_NEXT(&sa->sa_layout_hash_tree, findtb)) {
453 if (findtb->lot_instance != tb->lot_instance)
454 break;
455 tb->lot_instance++;
456 }
457 }
458 avl_add(&sa->sa_layout_hash_tree, tb);
459 return (tb);
460 }
461
462 static void
463 sa_find_layout(objset_t *os, uint64_t hash, sa_attr_type_t *attrs,
464 int count, dmu_tx_t *tx, sa_lot_t **lot)
465 {
466 sa_lot_t *tb, tbsearch;
467 avl_index_t loc;
468 sa_os_t *sa = os->os_sa;
469 boolean_t found = B_FALSE;
470
471 mutex_enter(&sa->sa_lock);
472 tbsearch.lot_hash = hash;
473 tbsearch.lot_instance = 0;
474 tb = avl_find(&sa->sa_layout_hash_tree, &tbsearch, &loc);
475 if (tb) {
476 for (; tb && tb->lot_hash == hash;
477 tb = AVL_NEXT(&sa->sa_layout_hash_tree, tb)) {
478 if (sa_layout_equal(tb, attrs, count) == 0) {
479 found = B_TRUE;
480 break;
481 }
482 }
483 }
484 if (!found) {
485 tb = sa_add_layout_entry(os, attrs, count,
486 avl_numnodes(&sa->sa_layout_num_tree), hash, B_TRUE, tx);
487 }
488 mutex_exit(&sa->sa_lock);
489 *lot = tb;
490 }
491
492 static int
493 sa_resize_spill(sa_handle_t *hdl, uint32_t size, dmu_tx_t *tx)
494 {
495 int error;
496 uint32_t blocksize;
497
498 if (size == 0) {
499 blocksize = SPA_MINBLOCKSIZE;
500 } else if (size > SPA_OLD_MAXBLOCKSIZE) {
501 ASSERT(0);
502 return (SET_ERROR(EFBIG));
503 } else {
504 blocksize = P2ROUNDUP_TYPED(size, SPA_MINBLOCKSIZE, uint32_t);
505 }
506
507 error = dbuf_spill_set_blksz(hdl->sa_spill, blocksize, tx);
508 ASSERT(error == 0);
509 return (error);
510 }
511
512 static void
513 sa_copy_data(sa_data_locator_t *func, void *datastart, void *target, int buflen)
514 {
515 if (func == NULL) {
516 bcopy(datastart, target, buflen);
517 } else {
518 boolean_t start;
519 int bytes;
520 void *dataptr;
521 void *saptr = target;
522 uint32_t length;
523
524 start = B_TRUE;
525 bytes = 0;
526 while (bytes < buflen) {
527 func(&dataptr, &length, buflen, start, datastart);
528 bcopy(dataptr, saptr, length);
529 saptr = (void *)((caddr_t)saptr + length);
530 bytes += length;
531 start = B_FALSE;
532 }
533 }
534 }
535
/*
 * Determine several different sizes:
 * the size of the sa header itself (the return value),
 * the number of bytes to be stored (*total), and,
 * if a spill would occur, the index in the attribute array at which
 * it would happen (*index).
 *
 * The boolean *will_spill will be set when spilling is necessary.  It
 * is only set when the buftype is SA_BONUS.
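 *
 * For example, as computed below: for a DMU_OT_SA buffer the header
 * starts at sizeof (sa_hdr_phys_t) (8 bytes), which already includes room
 * in sa_lengths[] for one variable-length attribute.  Each additional
 * variable-length attribute adds a uint16_t to the header, and the final
 * header size is rounded up to a multiple of 8.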
544 */
545 static int
546 sa_find_sizes(sa_os_t *sa, sa_bulk_attr_t *attr_desc, int attr_count,
547 dmu_buf_t *db, sa_buf_type_t buftype, int full_space, int *index,
548 int *total, boolean_t *will_spill)
549 {
550 int var_size = 0;
551 int i;
552 int hdrsize;
553 int extra_hdrsize;
554
555 if (buftype == SA_BONUS && sa->sa_force_spill) {
556 *total = 0;
557 *index = 0;
558 *will_spill = B_TRUE;
559 return (0);
560 }
561
562 *index = -1;
563 *total = 0;
564 *will_spill = B_FALSE;
565
566 extra_hdrsize = 0;
567 hdrsize = (SA_BONUSTYPE_FROM_DB(db) == DMU_OT_ZNODE) ? 0 :
568 sizeof (sa_hdr_phys_t);
569
570 ASSERT(IS_P2ALIGNED(full_space, 8));
571
572 for (i = 0; i != attr_count; i++) {
573 boolean_t is_var_sz;
574
575 *total = P2ROUNDUP(*total, 8);
576 *total += attr_desc[i].sa_length;
577 if (*will_spill)
578 continue;
579
580 is_var_sz = (SA_REGISTERED_LEN(sa, attr_desc[i].sa_attr) == 0);
581 if (is_var_sz) {
582 var_size++;
583 }
584
585 if (is_var_sz && var_size > 1) {
586 /*
587 * Don't worry that the spill block might overflow.
588 * It will be resized if needed in sa_build_layouts().
589 */
590 if (buftype == SA_SPILL ||
591 P2ROUNDUP(hdrsize + sizeof (uint16_t), 8) +
592 *total < full_space) {
593 /*
594 * Account for header space used by array of
595 * optional sizes of variable-length attributes.
596 * Record the extra header size in case this
597 * increase needs to be reversed due to
598 * spill-over.
599 */
600 hdrsize += sizeof (uint16_t);
601 if (*index != -1)
602 extra_hdrsize += sizeof (uint16_t);
603 } else {
604 ASSERT(buftype == SA_BONUS);
605 if (*index == -1)
606 *index = i;
607 *will_spill = B_TRUE;
608 continue;
609 }
610 }
611
612 /*
		 * Find the index at which a spill *could* occur.  Then
		 * continue to count the remaining attribute space.  The sum
		 * is used later for sizing the bonus and spill buffers.
617 */
618 if (buftype == SA_BONUS && *index == -1 &&
619 *total + P2ROUNDUP(hdrsize, 8) >
620 (full_space - sizeof (blkptr_t))) {
621 *index = i;
622 }
623
624 if (*total + P2ROUNDUP(hdrsize, 8) > full_space &&
625 buftype == SA_BONUS)
626 *will_spill = B_TRUE;
627 }
628
629 if (*will_spill)
630 hdrsize -= extra_hdrsize;
631
632 hdrsize = P2ROUNDUP(hdrsize, 8);
633 return (hdrsize);
634 }
635
636 #define BUF_SPACE_NEEDED(total, header) (total + header)
637
638 /*
 * Find the layout that corresponds to the ordering of the attributes.
 * If none is found, a new layout number is created and added to the
641 * persistent layout tables.
642 */
643 static int
644 sa_build_layouts(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc, int attr_count,
645 dmu_tx_t *tx)
646 {
647 sa_os_t *sa = hdl->sa_os->os_sa;
648 uint64_t hash;
649 sa_buf_type_t buftype;
650 sa_hdr_phys_t *sahdr;
651 void *data_start;
652 int buf_space;
653 sa_attr_type_t *attrs, *attrs_start;
654 int i, lot_count;
655 int dnodesize;
656 int hdrsize;
657 int spillhdrsize = 0;
658 int used;
659 dmu_object_type_t bonustype;
660 sa_lot_t *lot;
661 int len_idx;
662 int spill_used;
663 int bonuslen;
664 boolean_t spilling;
665
666 dmu_buf_will_dirty(hdl->sa_bonus, tx);
667 bonustype = SA_BONUSTYPE_FROM_DB(hdl->sa_bonus);
668
669 dmu_object_dnsize_from_db(hdl->sa_bonus, &dnodesize);
670 bonuslen = DN_BONUS_SIZE(dnodesize);
671
672 /* first determine bonus header size and sum of all attributes */
673 hdrsize = sa_find_sizes(sa, attr_desc, attr_count, hdl->sa_bonus,
674 SA_BONUS, bonuslen, &i, &used, &spilling);
675
676 if (used > SPA_OLD_MAXBLOCKSIZE)
677 return (SET_ERROR(EFBIG));
678
679 VERIFY(0 == dmu_set_bonus(hdl->sa_bonus, spilling ?
680 MIN(bonuslen - sizeof (blkptr_t), used + hdrsize) :
681 used + hdrsize, tx));
682
683 ASSERT((bonustype == DMU_OT_ZNODE && spilling == 0) ||
684 bonustype == DMU_OT_SA);
685
686 /* setup and size spill buffer when needed */
687 if (spilling) {
688 boolean_t dummy;
689
690 if (hdl->sa_spill == NULL) {
691 VERIFY(dmu_spill_hold_by_bonus(hdl->sa_bonus, 0, NULL,
692 &hdl->sa_spill) == 0);
693 }
694 dmu_buf_will_dirty(hdl->sa_spill, tx);
695
696 spillhdrsize = sa_find_sizes(sa, &attr_desc[i],
697 attr_count - i, hdl->sa_spill, SA_SPILL,
698 hdl->sa_spill->db_size, &i, &spill_used, &dummy);
699
700 if (spill_used > SPA_OLD_MAXBLOCKSIZE)
701 return (SET_ERROR(EFBIG));
702
703 buf_space = hdl->sa_spill->db_size - spillhdrsize;
704 if (BUF_SPACE_NEEDED(spill_used, spillhdrsize) >
705 hdl->sa_spill->db_size)
706 VERIFY(0 == sa_resize_spill(hdl,
707 BUF_SPACE_NEEDED(spill_used, spillhdrsize), tx));
708 }
709
710 /* setup starting pointers to lay down data */
711 data_start = (void *)((uintptr_t)hdl->sa_bonus->db_data + hdrsize);
712 sahdr = (sa_hdr_phys_t *)hdl->sa_bonus->db_data;
713 buftype = SA_BONUS;
714
715 if (spilling)
716 buf_space = (sa->sa_force_spill) ?
717 0 : SA_BLKPTR_SPACE - hdrsize;
718 else
719 buf_space = hdl->sa_bonus->db_size - hdrsize;
720
721 attrs_start = attrs = kmem_alloc(sizeof (sa_attr_type_t) * attr_count,
722 KM_SLEEP);
723 lot_count = 0;
724
725 for (i = 0, len_idx = 0, hash = -1ULL; i != attr_count; i++) {
726 uint16_t length;
727
728 ASSERT(IS_P2ALIGNED(data_start, 8));
729 ASSERT(IS_P2ALIGNED(buf_space, 8));
730 attrs[i] = attr_desc[i].sa_attr;
731 length = SA_REGISTERED_LEN(sa, attrs[i]);
732 if (length == 0)
733 length = attr_desc[i].sa_length;
734
735 if (buf_space < length) { /* switch to spill buffer */
736 VERIFY(spilling);
737 VERIFY(bonustype == DMU_OT_SA);
738 if (buftype == SA_BONUS && !sa->sa_force_spill) {
739 sa_find_layout(hdl->sa_os, hash, attrs_start,
740 lot_count, tx, &lot);
741 SA_SET_HDR(sahdr, lot->lot_num, hdrsize);
742 }
743
744 buftype = SA_SPILL;
745 hash = -1ULL;
746 len_idx = 0;
747
748 sahdr = (sa_hdr_phys_t *)hdl->sa_spill->db_data;
749 sahdr->sa_magic = SA_MAGIC;
750 data_start = (void *)((uintptr_t)sahdr +
751 spillhdrsize);
752 attrs_start = &attrs[i];
753 buf_space = hdl->sa_spill->db_size - spillhdrsize;
754 lot_count = 0;
755 }
756 hash ^= SA_ATTR_HASH(attrs[i]);
757 attr_desc[i].sa_addr = data_start;
758 attr_desc[i].sa_size = length;
759 SA_COPY_DATA(attr_desc[i].sa_data_func, attr_desc[i].sa_data,
760 data_start, length);
761 if (sa->sa_attr_table[attrs[i]].sa_length == 0) {
762 sahdr->sa_lengths[len_idx++] = length;
763 }
764 data_start = (void *)P2ROUNDUP(((uintptr_t)data_start +
765 length), 8);
766 buf_space -= P2ROUNDUP(length, 8);
767 lot_count++;
768 }
769
770 sa_find_layout(hdl->sa_os, hash, attrs_start, lot_count, tx, &lot);
771
772 /*
773 * Verify that old znodes always have layout number 0.
774 * Must be DMU_OT_SA for arbitrary layouts
775 */
776 VERIFY((bonustype == DMU_OT_ZNODE && lot->lot_num == 0) ||
777 (bonustype == DMU_OT_SA && lot->lot_num > 1));
778
779 if (bonustype == DMU_OT_SA) {
780 SA_SET_HDR(sahdr, lot->lot_num,
781 buftype == SA_BONUS ? hdrsize : spillhdrsize);
782 }
783
784 kmem_free(attrs, sizeof (sa_attr_type_t) * attr_count);
785 if (hdl->sa_bonus_tab) {
786 sa_idx_tab_rele(hdl->sa_os, hdl->sa_bonus_tab);
787 hdl->sa_bonus_tab = NULL;
788 }
789 if (!sa->sa_force_spill)
790 VERIFY(0 == sa_build_index(hdl, SA_BONUS));
791 if (hdl->sa_spill) {
792 sa_idx_tab_rele(hdl->sa_os, hdl->sa_spill_tab);
793 if (!spilling) {
794 /*
795 * remove spill block that is no longer needed.
796 */
797 dmu_buf_rele(hdl->sa_spill, NULL);
798 hdl->sa_spill = NULL;
799 hdl->sa_spill_tab = NULL;
800 VERIFY(0 == dmu_rm_spill(hdl->sa_os,
801 sa_handle_object(hdl), tx));
802 } else {
803 VERIFY(0 == sa_build_index(hdl, SA_SPILL));
804 }
805 }
806
807 return (0);
808 }
809
810 static void
811 sa_free_attr_table(sa_os_t *sa)
812 {
813 int i;
814
815 if (sa->sa_attr_table == NULL)
816 return;
817
818 for (i = 0; i != sa->sa_num_attrs; i++) {
819 if (sa->sa_attr_table[i].sa_name)
820 kmem_free(sa->sa_attr_table[i].sa_name,
821 strlen(sa->sa_attr_table[i].sa_name) + 1);
822 }
823
824 kmem_free(sa->sa_attr_table,
825 sizeof (sa_attr_table_t) * sa->sa_num_attrs);
826
827 sa->sa_attr_table = NULL;
828 }
829
830 static int
831 sa_attr_table_setup(objset_t *os, sa_attr_reg_t *reg_attrs, int count)
832 {
833 sa_os_t *sa = os->os_sa;
834 uint64_t sa_attr_count = 0;
835 uint64_t sa_reg_count = 0;
836 int error = 0;
837 uint64_t attr_value;
838 sa_attr_table_t *tb;
839 zap_cursor_t zc;
840 zap_attribute_t za;
841 int registered_count = 0;
842 int i;
843 dmu_objset_type_t ostype = dmu_objset_type(os);
844
845 sa->sa_user_table =
846 kmem_zalloc(count * sizeof (sa_attr_type_t), KM_SLEEP);
847 sa->sa_user_table_sz = count * sizeof (sa_attr_type_t);
848
849 if (sa->sa_reg_attr_obj != 0) {
850 error = zap_count(os, sa->sa_reg_attr_obj,
851 &sa_attr_count);
852
853 /*
854 * Make sure we retrieved a count and that it isn't zero
855 */
856 if (error || (error == 0 && sa_attr_count == 0)) {
857 if (error == 0)
858 error = SET_ERROR(EINVAL);
859 goto bail;
860 }
861 sa_reg_count = sa_attr_count;
862 }
863
864 if (ostype == DMU_OST_ZFS && sa_attr_count == 0)
865 sa_attr_count += sa_legacy_attr_count;
866
867 /* Allocate attribute numbers for attributes that aren't registered */
868 for (i = 0; i != count; i++) {
869 boolean_t found = B_FALSE;
870 int j;
871
872 if (ostype == DMU_OST_ZFS) {
873 for (j = 0; j != sa_legacy_attr_count; j++) {
874 if (strcmp(reg_attrs[i].sa_name,
875 sa_legacy_attrs[j].sa_name) == 0) {
876 sa->sa_user_table[i] =
877 sa_legacy_attrs[j].sa_attr;
878 found = B_TRUE;
879 }
880 }
881 }
882 if (found)
883 continue;
884
885 if (sa->sa_reg_attr_obj)
886 error = zap_lookup(os, sa->sa_reg_attr_obj,
887 reg_attrs[i].sa_name, 8, 1, &attr_value);
888 else
889 error = SET_ERROR(ENOENT);
890 switch (error) {
891 case ENOENT:
892 sa->sa_user_table[i] = (sa_attr_type_t)sa_attr_count;
893 sa_attr_count++;
894 break;
895 case 0:
896 sa->sa_user_table[i] = ATTR_NUM(attr_value);
897 break;
898 default:
899 goto bail;
900 }
901 }
902
903 sa->sa_num_attrs = sa_attr_count;
904 tb = sa->sa_attr_table =
905 kmem_zalloc(sizeof (sa_attr_table_t) * sa_attr_count, KM_SLEEP);
906
907 /*
908 * Attribute table is constructed from requested attribute list,
909 * previously foreign registered attributes, and also the legacy
910 * ZPL set of attributes.
911 */
912
913 if (sa->sa_reg_attr_obj) {
914 for (zap_cursor_init(&zc, os, sa->sa_reg_attr_obj);
915 (error = zap_cursor_retrieve(&zc, &za)) == 0;
916 zap_cursor_advance(&zc)) {
917 uint64_t value;
918 value = za.za_first_integer;
919
920 registered_count++;
921 tb[ATTR_NUM(value)].sa_attr = ATTR_NUM(value);
922 tb[ATTR_NUM(value)].sa_length = ATTR_LENGTH(value);
923 tb[ATTR_NUM(value)].sa_byteswap = ATTR_BSWAP(value);
924 tb[ATTR_NUM(value)].sa_registered = B_TRUE;
925
926 if (tb[ATTR_NUM(value)].sa_name) {
927 continue;
928 }
929 tb[ATTR_NUM(value)].sa_name =
930 kmem_zalloc(strlen(za.za_name) +1, KM_SLEEP);
931 (void) strlcpy(tb[ATTR_NUM(value)].sa_name, za.za_name,
932 strlen(za.za_name) +1);
933 }
934 zap_cursor_fini(&zc);
935 /*
936 * Make sure we processed the correct number of registered
937 * attributes
938 */
939 if (registered_count != sa_reg_count) {
940 ASSERT(error != 0);
941 goto bail;
942 }
943
944 }
945
946 if (ostype == DMU_OST_ZFS) {
947 for (i = 0; i != sa_legacy_attr_count; i++) {
948 if (tb[i].sa_name)
949 continue;
950 tb[i].sa_attr = sa_legacy_attrs[i].sa_attr;
951 tb[i].sa_length = sa_legacy_attrs[i].sa_length;
952 tb[i].sa_byteswap = sa_legacy_attrs[i].sa_byteswap;
953 tb[i].sa_registered = B_FALSE;
954 tb[i].sa_name =
955 kmem_zalloc(strlen(sa_legacy_attrs[i].sa_name) +1,
956 KM_SLEEP);
957 (void) strlcpy(tb[i].sa_name,
958 sa_legacy_attrs[i].sa_name,
959 strlen(sa_legacy_attrs[i].sa_name) + 1);
960 }
961 }
962
963 for (i = 0; i != count; i++) {
964 sa_attr_type_t attr_id;
965
966 attr_id = sa->sa_user_table[i];
967 if (tb[attr_id].sa_name)
968 continue;
969
970 tb[attr_id].sa_length = reg_attrs[i].sa_length;
971 tb[attr_id].sa_byteswap = reg_attrs[i].sa_byteswap;
972 tb[attr_id].sa_attr = attr_id;
973 tb[attr_id].sa_name =
974 kmem_zalloc(strlen(reg_attrs[i].sa_name) + 1, KM_SLEEP);
975 (void) strlcpy(tb[attr_id].sa_name, reg_attrs[i].sa_name,
976 strlen(reg_attrs[i].sa_name) + 1);
977 }
978
979 sa->sa_need_attr_registration =
980 (sa_attr_count != registered_count);
981
982 return (0);
983 bail:
984 kmem_free(sa->sa_user_table, count * sizeof (sa_attr_type_t));
985 sa->sa_user_table = NULL;
986 sa_free_attr_table(sa);
987 return ((error != 0) ? error : EINVAL);
988 }
989
990 int
991 sa_setup(objset_t *os, uint64_t sa_obj, sa_attr_reg_t *reg_attrs, int count,
992 sa_attr_type_t **user_table)
993 {
994 zap_cursor_t zc;
995 zap_attribute_t za;
996 sa_os_t *sa;
997 dmu_objset_type_t ostype = dmu_objset_type(os);
998 sa_attr_type_t *tb;
999 int error;
1000
1001 mutex_enter(&os->os_user_ptr_lock);
1002 if (os->os_sa) {
1003 mutex_enter(&os->os_sa->sa_lock);
1004 mutex_exit(&os->os_user_ptr_lock);
1005 tb = os->os_sa->sa_user_table;
1006 mutex_exit(&os->os_sa->sa_lock);
1007 *user_table = tb;
1008 return (0);
1009 }
1010
1011 sa = kmem_zalloc(sizeof (sa_os_t), KM_SLEEP);
1012 mutex_init(&sa->sa_lock, NULL, MUTEX_DEFAULT, NULL);
1013 sa->sa_master_obj = sa_obj;
1014
1015 os->os_sa = sa;
1016 mutex_enter(&sa->sa_lock);
1017 mutex_exit(&os->os_user_ptr_lock);
1018 avl_create(&sa->sa_layout_num_tree, layout_num_compare,
1019 sizeof (sa_lot_t), offsetof(sa_lot_t, lot_num_node));
1020 avl_create(&sa->sa_layout_hash_tree, layout_hash_compare,
1021 sizeof (sa_lot_t), offsetof(sa_lot_t, lot_hash_node));
1022
1023 if (sa_obj) {
1024 error = zap_lookup(os, sa_obj, SA_LAYOUTS,
1025 8, 1, &sa->sa_layout_attr_obj);
1026 if (error != 0 && error != ENOENT)
1027 goto fail;
1028 error = zap_lookup(os, sa_obj, SA_REGISTRY,
1029 8, 1, &sa->sa_reg_attr_obj);
1030 if (error != 0 && error != ENOENT)
1031 goto fail;
1032 }
1033
1034 if ((error = sa_attr_table_setup(os, reg_attrs, count)) != 0)
1035 goto fail;
1036
1037 if (sa->sa_layout_attr_obj != 0) {
1038 uint64_t layout_count;
1039
1040 error = zap_count(os, sa->sa_layout_attr_obj,
1041 &layout_count);
1042
1043 /*
1044 * Layout number count should be > 0
1045 */
1046 if (error || (error == 0 && layout_count == 0)) {
1047 if (error == 0)
1048 error = SET_ERROR(EINVAL);
1049 goto fail;
1050 }
1051
1052 for (zap_cursor_init(&zc, os, sa->sa_layout_attr_obj);
1053 (error = zap_cursor_retrieve(&zc, &za)) == 0;
1054 zap_cursor_advance(&zc)) {
1055 sa_attr_type_t *lot_attrs;
1056 uint64_t lot_num;
1057
1058 lot_attrs = kmem_zalloc(sizeof (sa_attr_type_t) *
1059 za.za_num_integers, KM_SLEEP);
1060
1061 if ((error = (zap_lookup(os, sa->sa_layout_attr_obj,
1062 za.za_name, 2, za.za_num_integers,
1063 lot_attrs))) != 0) {
1064 kmem_free(lot_attrs, sizeof (sa_attr_type_t) *
1065 za.za_num_integers);
1066 break;
1067 }
1068 VERIFY(ddi_strtoull(za.za_name, NULL, 10,
1069 (unsigned long long *)&lot_num) == 0);
1070
1071 (void) sa_add_layout_entry(os, lot_attrs,
1072 za.za_num_integers, lot_num,
1073 sa_layout_info_hash(lot_attrs,
1074 za.za_num_integers), B_FALSE, NULL);
1075 kmem_free(lot_attrs, sizeof (sa_attr_type_t) *
1076 za.za_num_integers);
1077 }
1078 zap_cursor_fini(&zc);
1079
1080 /*
1081 * Make sure layout count matches number of entries added
1082 * to AVL tree
1083 */
1084 if (avl_numnodes(&sa->sa_layout_num_tree) != layout_count) {
1085 ASSERT(error != 0);
1086 goto fail;
1087 }
1088 }
1089
1090 /* Add special layout number for old ZNODES */
1091 if (ostype == DMU_OST_ZFS) {
1092 (void) sa_add_layout_entry(os, sa_legacy_zpl_layout,
1093 sa_legacy_attr_count, 0,
1094 sa_layout_info_hash(sa_legacy_zpl_layout,
1095 sa_legacy_attr_count), B_FALSE, NULL);
1096
1097 (void) sa_add_layout_entry(os, sa_dummy_zpl_layout, 0, 1,
1098 0, B_FALSE, NULL);
1099 }
1100 *user_table = os->os_sa->sa_user_table;
1101 mutex_exit(&sa->sa_lock);
1102 return (0);
1103 fail:
1104 os->os_sa = NULL;
1105 sa_free_attr_table(sa);
1106 if (sa->sa_user_table)
1107 kmem_free(sa->sa_user_table, sa->sa_user_table_sz);
1108 mutex_exit(&sa->sa_lock);
1109 avl_destroy(&sa->sa_layout_hash_tree);
1110 avl_destroy(&sa->sa_layout_num_tree);
1111 mutex_destroy(&sa->sa_lock);
1112 kmem_free(sa, sizeof (sa_os_t));
1113 return ((error == ECKSUM) ? EIO : error);
1114 }
1115
1116 void
1117 sa_tear_down(objset_t *os)
1118 {
1119 sa_os_t *sa = os->os_sa;
1120 sa_lot_t *layout;
1121 void *cookie;
1122
1123 kmem_free(sa->sa_user_table, sa->sa_user_table_sz);
1124
1125 /* Free up attr table */
1126
1127 sa_free_attr_table(sa);
1128
1129 cookie = NULL;
1130 while (layout = avl_destroy_nodes(&sa->sa_layout_hash_tree, &cookie)) {
1131 sa_idx_tab_t *tab;
1132 while (tab = list_head(&layout->lot_idx_tab)) {
1133 ASSERT(zfs_refcount_count(&tab->sa_refcount));
1134 sa_idx_tab_rele(os, tab);
1135 }
1136 }
1137
1138 cookie = NULL;
1139 while (layout = avl_destroy_nodes(&sa->sa_layout_num_tree, &cookie)) {
1140 kmem_free(layout->lot_attrs,
1141 sizeof (sa_attr_type_t) * layout->lot_attr_count);
1142 kmem_free(layout, sizeof (sa_lot_t));
1143 }
1144
1145 avl_destroy(&sa->sa_layout_hash_tree);
1146 avl_destroy(&sa->sa_layout_num_tree);
1147 mutex_destroy(&sa->sa_lock);
1148
1149 kmem_free(sa, sizeof (sa_os_t));
1150 os->os_sa = NULL;
1151 }
1152
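/*
 * sa_attr_iter() callback used when constructing an index table:  record
 * the length of each variable-sized attribute and encode the attribute's
 * offset from the start of the header into the sa_idx_tab_t being built.
 */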
1153 void
1154 sa_build_idx_tab(void *hdr, void *attr_addr, sa_attr_type_t attr,
1155 uint16_t length, int length_idx, boolean_t var_length, void *userp)
1156 {
1157 sa_idx_tab_t *idx_tab = userp;
1158
1159 if (var_length) {
1160 ASSERT(idx_tab->sa_variable_lengths);
1161 idx_tab->sa_variable_lengths[length_idx] = length;
1162 }
1163 TOC_ATTR_ENCODE(idx_tab->sa_idx_tab[attr], length_idx,
1164 (uint32_t)((uintptr_t)attr_addr - (uintptr_t)hdr));
1165 }
1166
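/*
 * Walk every attribute in the layout describing the given header (looking
 * the layout up by number if no table was supplied), computing each
 * attribute's address and length and invoking func on it.
 */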
1167 static void
1168 sa_attr_iter(objset_t *os, sa_hdr_phys_t *hdr, dmu_object_type_t type,
1169 sa_iterfunc_t func, sa_lot_t *tab, void *userp)
1170 {
1171 void *data_start;
1172 sa_lot_t *tb = tab;
1173 sa_lot_t search;
1174 avl_index_t loc;
1175 sa_os_t *sa = os->os_sa;
1176 int i;
1177 uint16_t *length_start = NULL;
1178 uint8_t length_idx = 0;
1179
1180 if (tab == NULL) {
1181 search.lot_num = SA_LAYOUT_NUM(hdr, type);
1182 tb = avl_find(&sa->sa_layout_num_tree, &search, &loc);
1183 ASSERT(tb);
1184 }
1185
1186 if (IS_SA_BONUSTYPE(type)) {
1187 data_start = (void *)P2ROUNDUP(((uintptr_t)hdr +
1188 offsetof(sa_hdr_phys_t, sa_lengths) +
1189 (sizeof (uint16_t) * tb->lot_var_sizes)), 8);
1190 length_start = hdr->sa_lengths;
1191 } else {
1192 data_start = hdr;
1193 }
1194
1195 for (i = 0; i != tb->lot_attr_count; i++) {
1196 int attr_length, reg_length;
1197 uint8_t idx_len;
1198
1199 reg_length = sa->sa_attr_table[tb->lot_attrs[i]].sa_length;
1200 if (reg_length) {
1201 attr_length = reg_length;
1202 idx_len = 0;
1203 } else {
1204 attr_length = length_start[length_idx];
1205 idx_len = length_idx++;
1206 }
1207
1208 func(hdr, data_start, tb->lot_attrs[i], attr_length,
1209 idx_len, reg_length == 0 ? B_TRUE : B_FALSE, userp);
1210
1211 data_start = (void *)P2ROUNDUP(((uintptr_t)data_start +
1212 attr_length), 8);
1213 }
1214 }
1215
1216 /*ARGSUSED*/
1217 void
1218 sa_byteswap_cb(void *hdr, void *attr_addr, sa_attr_type_t attr,
1219 uint16_t length, int length_idx, boolean_t variable_length, void *userp)
1220 {
1221 sa_handle_t *hdl = userp;
1222 sa_os_t *sa = hdl->sa_os->os_sa;
1223
1224 sa_bswap_table[sa->sa_attr_table[attr].sa_byteswap](attr_addr, length);
1225 }
1226
1227 void
1228 sa_byteswap(sa_handle_t *hdl, sa_buf_type_t buftype)
1229 {
1230 sa_hdr_phys_t *sa_hdr_phys = SA_GET_HDR(hdl, buftype);
1231 dmu_buf_impl_t *db;
1232 sa_os_t *sa = hdl->sa_os->os_sa;
1233 int num_lengths = 1;
1234 int i;
1235
1236 ASSERT(MUTEX_HELD(&sa->sa_lock));
1237 if (sa_hdr_phys->sa_magic == SA_MAGIC)
1238 return;
1239
1240 db = SA_GET_DB(hdl, buftype);
1241
1242 if (buftype == SA_SPILL) {
1243 arc_release(db->db_buf, NULL);
1244 arc_buf_thaw(db->db_buf);
1245 }
1246
1247 sa_hdr_phys->sa_magic = BSWAP_32(sa_hdr_phys->sa_magic);
1248 sa_hdr_phys->sa_layout_info = BSWAP_16(sa_hdr_phys->sa_layout_info);
1249
1250 /*
	 * Determine the number of variable lengths in the header.
	 * The standard 8-byte header has one for free, and a
	 * 16-byte header would have 4 + 1.
1254 */
1255 if (SA_HDR_SIZE(sa_hdr_phys) > 8)
1256 num_lengths += (SA_HDR_SIZE(sa_hdr_phys) - 8) >> 1;
1257 for (i = 0; i != num_lengths; i++)
1258 sa_hdr_phys->sa_lengths[i] =
1259 BSWAP_16(sa_hdr_phys->sa_lengths[i]);
1260
1261 sa_attr_iter(hdl->sa_os, sa_hdr_phys, DMU_OT_SA,
1262 sa_byteswap_cb, NULL, hdl);
1263
1264 if (buftype == SA_SPILL)
1265 arc_buf_freeze(((dmu_buf_impl_t *)hdl->sa_spill)->db_buf);
1266 }
1267
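/*
 * Locate (or construct) the in-core index table for the bonus or spill
 * buffer of a handle, byteswapping the buffer first if its header shows it
 * was written with the opposite endianness.
 */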
1268 static int
1269 sa_build_index(sa_handle_t *hdl, sa_buf_type_t buftype)
1270 {
1271 sa_hdr_phys_t *sa_hdr_phys;
1272 dmu_buf_impl_t *db = SA_GET_DB(hdl, buftype);
1273 dmu_object_type_t bonustype = SA_BONUSTYPE_FROM_DB(db);
1274 sa_os_t *sa = hdl->sa_os->os_sa;
1275 sa_idx_tab_t *idx_tab;
1276
1277 sa_hdr_phys = SA_GET_HDR(hdl, buftype);
1278
1279 mutex_enter(&sa->sa_lock);
1280
1281 /* Do we need to byteswap? */
1282
1283 /* only check if not old znode */
1284 if (IS_SA_BONUSTYPE(bonustype) && sa_hdr_phys->sa_magic != SA_MAGIC &&
1285 sa_hdr_phys->sa_magic != 0) {
1286 VERIFY(BSWAP_32(sa_hdr_phys->sa_magic) == SA_MAGIC);
1287 sa_byteswap(hdl, buftype);
1288 }
1289
1290 idx_tab = sa_find_idx_tab(hdl->sa_os, bonustype, sa_hdr_phys);
1291
1292 if (buftype == SA_BONUS)
1293 hdl->sa_bonus_tab = idx_tab;
1294 else
1295 hdl->sa_spill_tab = idx_tab;
1296
1297 mutex_exit(&sa->sa_lock);
1298 return (0);
1299 }
1300
1301 /*ARGSUSED*/
1302 static void
1303 sa_evict_sync(void *dbu)
1304 {
1305 panic("evicting sa dbuf\n");
1306 }
1307
1308 static void
1309 sa_idx_tab_rele(objset_t *os, void *arg)
1310 {
1311 sa_os_t *sa = os->os_sa;
1312 sa_idx_tab_t *idx_tab = arg;
1313
1314 if (idx_tab == NULL)
1315 return;
1316
1317 mutex_enter(&sa->sa_lock);
1318 if (zfs_refcount_remove(&idx_tab->sa_refcount, NULL) == 0) {
1319 list_remove(&idx_tab->sa_layout->lot_idx_tab, idx_tab);
1320 if (idx_tab->sa_variable_lengths)
1321 kmem_free(idx_tab->sa_variable_lengths,
1322 sizeof (uint16_t) *
1323 idx_tab->sa_layout->lot_var_sizes);
1324 zfs_refcount_destroy(&idx_tab->sa_refcount);
1325 kmem_free(idx_tab->sa_idx_tab,
1326 sizeof (uint32_t) * sa->sa_num_attrs);
1327 kmem_free(idx_tab, sizeof (sa_idx_tab_t));
1328 }
1329 mutex_exit(&sa->sa_lock);
1330 }
1331
1332 static void
1333 sa_idx_tab_hold(objset_t *os, sa_idx_tab_t *idx_tab)
1334 {
1335 sa_os_t *sa = os->os_sa;
1336
1337 ASSERT(MUTEX_HELD(&sa->sa_lock));
1338 (void) zfs_refcount_add(&idx_tab->sa_refcount, NULL);
1339 }
1340
1341 void
1342 sa_handle_destroy(sa_handle_t *hdl)
1343 {
1344 dmu_buf_t *db = hdl->sa_bonus;
1345
1346 mutex_enter(&hdl->sa_lock);
1347 (void) dmu_buf_remove_user(db, &hdl->sa_dbu);
1348
1349 if (hdl->sa_bonus_tab)
1350 sa_idx_tab_rele(hdl->sa_os, hdl->sa_bonus_tab);
1351
1352 if (hdl->sa_spill_tab)
1353 sa_idx_tab_rele(hdl->sa_os, hdl->sa_spill_tab);
1354
1355 dmu_buf_rele(hdl->sa_bonus, NULL);
1356
1357 if (hdl->sa_spill)
1358 dmu_buf_rele((dmu_buf_t *)hdl->sa_spill, NULL);
1359 mutex_exit(&hdl->sa_lock);
1360
1361 kmem_cache_free(sa_cache, hdl);
1362 }
1363
1364 int
1365 sa_handle_get_from_db(objset_t *os, dmu_buf_t *db, void *userp,
1366 sa_handle_type_t hdl_type, sa_handle_t **handlepp)
1367 {
1368 int error = 0;
1369 dmu_object_info_t doi;
1370 sa_handle_t *handle = NULL;
1371
1372 #ifdef ZFS_DEBUG
1373 dmu_object_info_from_db(db, &doi);
1374 ASSERT(doi.doi_bonus_type == DMU_OT_SA ||
1375 doi.doi_bonus_type == DMU_OT_ZNODE);
1376 #endif
1377 /* find handle, if it exists */
1378 /* if one doesn't exist then create a new one, and initialize it */
1379
1380 if (hdl_type == SA_HDL_SHARED)
1381 handle = dmu_buf_get_user(db);
1382
1383 if (handle == NULL) {
1384 sa_handle_t *winner = NULL;
1385
1386 handle = kmem_cache_alloc(sa_cache, KM_SLEEP);
1387 handle->sa_dbu.dbu_evict_func_sync = NULL;
1388 handle->sa_dbu.dbu_evict_func_async = NULL;
1389 handle->sa_userp = userp;
1390 handle->sa_bonus = db;
1391 handle->sa_os = os;
1392 handle->sa_spill = NULL;
1393 handle->sa_bonus_tab = NULL;
1394 handle->sa_spill_tab = NULL;
1395
1396 error = sa_build_index(handle, SA_BONUS);
1397
1398 if (hdl_type == SA_HDL_SHARED) {
1399 dmu_buf_init_user(&handle->sa_dbu, sa_evict_sync, NULL,
1400 NULL);
1401 winner = dmu_buf_set_user_ie(db, &handle->sa_dbu);
1402 }
1403
1404 if (winner != NULL) {
1405 kmem_cache_free(sa_cache, handle);
1406 handle = winner;
1407 }
1408 }
1409 *handlepp = handle;
1410
1411 return (error);
1412 }
1413
1414 int
1415 sa_handle_get(objset_t *objset, uint64_t objid, void *userp,
1416 sa_handle_type_t hdl_type, sa_handle_t **handlepp)
1417 {
1418 dmu_buf_t *db;
1419 int error;
1420
1421 if (error = dmu_bonus_hold(objset, objid, NULL, &db))
1422 return (error);
1423
1424 return (sa_handle_get_from_db(objset, db, userp, hdl_type,
1425 handlepp));
1426 }
1427
1428 int
1429 sa_buf_hold(objset_t *objset, uint64_t obj_num, void *tag, dmu_buf_t **db)
1430 {
1431 return (dmu_bonus_hold(objset, obj_num, tag, db));
1432 }
1433
1434 void
1435 sa_buf_rele(dmu_buf_t *db, void *tag)
1436 {
1437 dmu_buf_rele(db, tag);
1438 }
1439
1440 int
1441 sa_lookup_impl(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count)
1442 {
1443 ASSERT(hdl);
1444 ASSERT(MUTEX_HELD(&hdl->sa_lock));
1445 return (sa_attr_op(hdl, bulk, count, SA_LOOKUP, NULL));
1446 }
1447
1448 static int
1449 sa_lookup_locked(sa_handle_t *hdl, sa_attr_type_t attr, void *buf,
1450 uint32_t buflen)
1451 {
1452 int error;
1453 sa_bulk_attr_t bulk;
1454
1455 bulk.sa_attr = attr;
1456 bulk.sa_data = buf;
1457 bulk.sa_length = buflen;
1458 bulk.sa_data_func = NULL;
1459
1460 ASSERT(hdl);
1461 error = sa_lookup_impl(hdl, &bulk, 1);
1462 return (error);
1463 }
1464
1465 int
1466 sa_lookup(sa_handle_t *hdl, sa_attr_type_t attr, void *buf, uint32_t buflen)
1467 {
1468 int error;
1469
1470 mutex_enter(&hdl->sa_lock);
1471 error = sa_lookup_locked(hdl, attr, buf, buflen);
1472 mutex_exit(&hdl->sa_lock);
1473
1474 return (error);
1475 }
1476
1477 #ifdef _KERNEL
1478 int
1479 sa_lookup_uio(sa_handle_t *hdl, sa_attr_type_t attr, uio_t *uio)
1480 {
1481 int error;
1482 sa_bulk_attr_t bulk;
1483
1484 bulk.sa_data = NULL;
1485 bulk.sa_attr = attr;
1486 bulk.sa_data_func = NULL;
1487
1488 ASSERT(hdl);
1489
1490 mutex_enter(&hdl->sa_lock);
1491 if ((error = sa_attr_op(hdl, &bulk, 1, SA_LOOKUP, NULL)) == 0) {
1492 error = uiomove((void *)bulk.sa_addr, MIN(bulk.sa_size,
1493 uio->uio_resid), UIO_READ, uio);
1494 }
1495 mutex_exit(&hdl->sa_lock);
1496 return (error);
1497
1498 }
1499
1500 /*
 * An existing object that was upgraded from an old system has no slot for
 * the project ID attribute in its on-disk layout.  But the quota accounting
 * logic needs to access the related slots by offset directly, so we need to
 * adjust these old objects' layouts to move the project ID to a unified,
 * fixed offset.
1505 */
1506 int
1507 sa_add_projid(sa_handle_t *hdl, dmu_tx_t *tx, uint64_t projid)
1508 {
1509 znode_t *zp = sa_get_userdata(hdl);
1510 dmu_buf_t *db = sa_get_db(hdl);
1511 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1512 int count = 0, err = 0;
1513 sa_bulk_attr_t *bulk, *attrs;
1514 zfs_acl_locator_cb_t locate = { 0 };
1515 uint64_t uid, gid, mode, rdev, xattr = 0, parent, gen, links;
1516 uint64_t crtime[2], mtime[2], ctime[2], atime[2];
1517 zfs_acl_phys_t znode_acl = { 0 };
1518 char scanstamp[AV_SCANSTAMP_SZ];
1519
1520 if (zp->z_acl_cached == NULL) {
1521 zfs_acl_t *aclp;
1522
1523 mutex_enter(&zp->z_acl_lock);
1524 err = zfs_acl_node_read(zp, B_FALSE, &aclp, B_FALSE);
1525 mutex_exit(&zp->z_acl_lock);
1526 if (err != 0 && err != ENOENT)
1527 return (err);
1528 }
1529
1530 bulk = kmem_zalloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
1531 attrs = kmem_zalloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
1532 mutex_enter(&hdl->sa_lock);
1533 mutex_enter(&zp->z_lock);
1534
1535 err = sa_lookup_locked(hdl, SA_ZPL_PROJID(zfsvfs), &projid,
1536 sizeof (uint64_t));
1537 if (unlikely(err == 0))
		/* Someone else added the project ID attribute in a race. */
1539 err = EEXIST;
1540 if (err != ENOENT)
1541 goto out;
1542
1543 /* First do a bulk query of the attributes that aren't cached */
1544 if (zp->z_is_sa) {
1545 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
1546 &mode, 8);
1547 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
1548 &gen, 8);
1549 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
1550 &uid, 8);
1551 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
1552 &gid, 8);
1553 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
1554 &parent, 8);
1555 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
1556 &atime, 16);
1557 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
1558 &mtime, 16);
1559 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
1560 &ctime, 16);
1561 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL,
1562 &crtime, 16);
1563 if (S_ISBLK(zp->z_mode) || S_ISCHR(zp->z_mode))
1564 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL,
1565 &rdev, 8);
1566 } else {
1567 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
1568 &atime, 16);
1569 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
1570 &mtime, 16);
1571 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
1572 &ctime, 16);
1573 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL,
1574 &crtime, 16);
1575 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
1576 &gen, 8);
1577 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
1578 &mode, 8);
1579 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
1580 &parent, 8);
1581 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_XATTR(zfsvfs), NULL,
1582 &xattr, 8);
1583 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL,
1584 &rdev, 8);
1585 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
1586 &uid, 8);
1587 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
1588 &gid, 8);
1589 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
1590 &znode_acl, 88);
1591 }
1592 err = sa_bulk_lookup_locked(hdl, bulk, count);
1593 if (err != 0)
1594 goto out;
1595
1596 err = sa_lookup_locked(hdl, SA_ZPL_XATTR(zfsvfs), &xattr, 8);
1597 if (err != 0 && err != ENOENT)
1598 goto out;
1599
1600 zp->z_projid = projid;
1601 zp->z_pflags |= ZFS_PROJID;
1602 links = zp->z_links;
1603 count = 0;
1604 err = 0;
1605
1606 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
1607 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_SIZE(zfsvfs), NULL,
1608 &zp->z_size, 8);
1609 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_GEN(zfsvfs), NULL, &gen, 8);
1610 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_UID(zfsvfs), NULL, &uid, 8);
1611 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_GID(zfsvfs), NULL, &gid, 8);
1612 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8);
1613 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_FLAGS(zfsvfs), NULL,
1614 &zp->z_pflags, 8);
1615 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
1616 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
1617 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
1618 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_CRTIME(zfsvfs), NULL,
1619 &crtime, 16);
1620 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
1621 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_PROJID(zfsvfs), NULL, &projid, 8);
1622
1623 if (S_ISBLK(zp->z_mode) || S_ISCHR(zp->z_mode))
1624 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_RDEV(zfsvfs), NULL,
1625 &rdev, 8);
1626
1627 if (zp->z_acl_cached != NULL) {
1628 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
1629 &zp->z_acl_cached->z_acl_count, 8);
1630 if (zp->z_acl_cached->z_version < ZFS_ACL_VERSION_FUID)
1631 zfs_acl_xform(zp, zp->z_acl_cached, CRED());
1632 locate.cb_aclp = zp->z_acl_cached;
1633 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_DACL_ACES(zfsvfs),
1634 zfs_acl_data_locator, &locate,
1635 zp->z_acl_cached->z_acl_bytes);
1636 }
1637
1638 if (xattr)
1639 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_XATTR(zfsvfs), NULL,
1640 &xattr, 8);
1641
1642 if (zp->z_pflags & ZFS_BONUS_SCANSTAMP) {
1643 bcopy((caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
1644 scanstamp, AV_SCANSTAMP_SZ);
1645 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_SCANSTAMP(zfsvfs), NULL,
1646 scanstamp, AV_SCANSTAMP_SZ);
1647 zp->z_pflags &= ~ZFS_BONUS_SCANSTAMP;
1648 }
1649
1650 VERIFY(dmu_set_bonustype(db, DMU_OT_SA, tx) == 0);
1651 VERIFY(sa_replace_all_by_template_locked(hdl, attrs, count, tx) == 0);
1652 if (znode_acl.z_acl_extern_obj) {
1653 VERIFY(0 == dmu_object_free(zfsvfs->z_os,
1654 znode_acl.z_acl_extern_obj, tx));
1655 }
1656
1657 zp->z_is_sa = B_TRUE;
1658
1659 out:
1660 mutex_exit(&zp->z_lock);
1661 mutex_exit(&hdl->sa_lock);
1662 kmem_free(attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
1663 kmem_free(bulk, sizeof (sa_bulk_attr_t) * ZPL_END);
1664 return (err);
1665 }
1666 #endif
1667
1668 static sa_idx_tab_t *
1669 sa_find_idx_tab(objset_t *os, dmu_object_type_t bonustype, sa_hdr_phys_t *hdr)
1670 {
1671 sa_idx_tab_t *idx_tab;
1672 sa_os_t *sa = os->os_sa;
1673 sa_lot_t *tb, search;
1674 avl_index_t loc;
1675
1676 /*
	 * Determine the layout number.  If SA node and header == 0 then
1678 * force the index table to the dummy "1" empty layout.
1679 *
1680 * The layout number would only be zero for a newly created file
1681 * that has not added any attributes yet, or with crypto enabled which
1682 * doesn't write any attributes to the bonus buffer.
1683 */
1684
1685 search.lot_num = SA_LAYOUT_NUM(hdr, bonustype);
1686
1687 tb = avl_find(&sa->sa_layout_num_tree, &search, &loc);
1688
1689 /* Verify header size is consistent with layout information */
1690 ASSERT(tb);
1691 ASSERT(IS_SA_BONUSTYPE(bonustype) &&
1692 SA_HDR_SIZE_MATCH_LAYOUT(hdr, tb) || !IS_SA_BONUSTYPE(bonustype) ||
1693 (IS_SA_BONUSTYPE(bonustype) && hdr->sa_layout_info == 0));
1694
1695 /*
	 * See if any of the already existing TOC entries can be reused.
1697 */
1698
1699 for (idx_tab = list_head(&tb->lot_idx_tab); idx_tab;
1700 idx_tab = list_next(&tb->lot_idx_tab, idx_tab)) {
1701 boolean_t valid_idx = B_TRUE;
1702 int i;
1703
1704 if (tb->lot_var_sizes != 0 &&
1705 idx_tab->sa_variable_lengths != NULL) {
1706 for (i = 0; i != tb->lot_var_sizes; i++) {
1707 if (hdr->sa_lengths[i] !=
1708 idx_tab->sa_variable_lengths[i]) {
1709 valid_idx = B_FALSE;
1710 break;
1711 }
1712 }
1713 }
1714 if (valid_idx) {
1715 sa_idx_tab_hold(os, idx_tab);
1716 return (idx_tab);
1717 }
1718 }
1719
1720 /* No such luck, create a new entry */
1721 idx_tab = kmem_zalloc(sizeof (sa_idx_tab_t), KM_SLEEP);
1722 idx_tab->sa_idx_tab =
1723 kmem_zalloc(sizeof (uint32_t) * sa->sa_num_attrs, KM_SLEEP);
1724 idx_tab->sa_layout = tb;
1725 zfs_refcount_create(&idx_tab->sa_refcount);
1726 if (tb->lot_var_sizes)
1727 idx_tab->sa_variable_lengths = kmem_alloc(sizeof (uint16_t) *
1728 tb->lot_var_sizes, KM_SLEEP);
1729
1730 sa_attr_iter(os, hdr, bonustype, sa_build_idx_tab,
1731 tb, idx_tab);
1732 sa_idx_tab_hold(os, idx_tab); /* one hold for consumer */
1733 sa_idx_tab_hold(os, idx_tab); /* one for layout */
1734 list_insert_tail(&tb->lot_idx_tab, idx_tab);
1735 return (idx_tab);
1736 }
1737
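/*
 * A "data locator" lets the caller of the bulk interfaces supply an
 * attribute value that is not stored contiguously: sa_copy_data() calls
 * the locator repeatedly until the full length has been copied into the
 * bonus or spill buffer.  sa_default_locator() below handles the trivial
 * contiguous case.  As an illustration, sa_add_projid() above passes
 * zfs_acl_data_locator together with a zfs_acl_locator_cb_t when
 * bulk-setting the ACEs:
 *
 *	locate.cb_aclp = zp->z_acl_cached;
 *	SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_DACL_ACES(zfsvfs),
 *	    zfs_acl_data_locator, &locate,
 *	    zp->z_acl_cached->z_acl_bytes);
 */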
1738 void
1739 sa_default_locator(void **dataptr, uint32_t *len, uint32_t total_len,
1740 boolean_t start, void *userdata)
1741 {
1742 ASSERT(start);
1743
1744 *dataptr = userdata;
1745 *len = total_len;
1746 }
1747
1748 static void
1749 sa_attr_register_sync(sa_handle_t *hdl, dmu_tx_t *tx)
1750 {
1751 uint64_t attr_value = 0;
1752 sa_os_t *sa = hdl->sa_os->os_sa;
1753 sa_attr_table_t *tb = sa->sa_attr_table;
1754 int i;
1755
1756 mutex_enter(&sa->sa_lock);
1757
1758 if (!sa->sa_need_attr_registration || sa->sa_master_obj == 0) {
1759 mutex_exit(&sa->sa_lock);
1760 return;
1761 }
1762
1763 if (sa->sa_reg_attr_obj == 0) {
1764 sa->sa_reg_attr_obj = zap_create_link(hdl->sa_os,
1765 DMU_OT_SA_ATTR_REGISTRATION,
1766 sa->sa_master_obj, SA_REGISTRY, tx);
1767 }
1768 for (i = 0; i != sa->sa_num_attrs; i++) {
1769 if (sa->sa_attr_table[i].sa_registered)
1770 continue;
1771 ATTR_ENCODE(attr_value, tb[i].sa_attr, tb[i].sa_length,
1772 tb[i].sa_byteswap);
1773 VERIFY(0 == zap_update(hdl->sa_os, sa->sa_reg_attr_obj,
1774 tb[i].sa_name, 8, 1, &attr_value, tx));
1775 tb[i].sa_registered = B_TRUE;
1776 }
1777 sa->sa_need_attr_registration = B_FALSE;
1778 mutex_exit(&sa->sa_lock);
1779 }
1780
1781 /*
1782 * Replace all attributes with attributes specified in template.
 * If the dnode had a spill buffer then those attributes will also
 * be replaced, possibly with just an empty spill block.
 *
 * This interface is intended to only be used for bulk adding of
 * attributes for a new file.  It will also be used by the ZPL
 * when converting an old-format znode to native SA support.
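 *
 * Illustrative sketch (caller-side variables are hypothetical, following
 * the pattern used by sa_add_projid() above):
 *
 *	sa_bulk_attr_t attrs[3];
 *	int count = 0;
 *
 *	SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
 *	SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_SIZE(zfsvfs), NULL, &size, 8);
 *	SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_GEN(zfsvfs), NULL, &gen, 8);
 *	VERIFY(0 == sa_replace_all_by_template(hdl, attrs, count, tx));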
1789 */
1790 int
1791 sa_replace_all_by_template_locked(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc,
1792 int attr_count, dmu_tx_t *tx)
1793 {
1794 sa_os_t *sa = hdl->sa_os->os_sa;
1795
1796 if (sa->sa_need_attr_registration)
1797 sa_attr_register_sync(hdl, tx);
1798 return (sa_build_layouts(hdl, attr_desc, attr_count, tx));
1799 }
1800
1801 int
1802 sa_replace_all_by_template(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc,
1803 int attr_count, dmu_tx_t *tx)
1804 {
1805 int error;
1806
1807 mutex_enter(&hdl->sa_lock);
1808 error = sa_replace_all_by_template_locked(hdl, attr_desc,
1809 attr_count, tx);
1810 mutex_exit(&hdl->sa_lock);
1811 return (error);
1812 }
1813
1814 /*
1815 * Add/remove a single attribute or replace a variable-sized attribute value
1816 * with a value of a different size, and then rewrite the entire set
1817 * of attributes.
1818 * Same-length attribute value replacement (including fixed-length attributes)
1819 * is handled more efficiently by the upper layers.
1820 */
1821 static int
1822 sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
1823 sa_data_op_t action, sa_data_locator_t *locator, void *datastart,
1824 uint16_t buflen, dmu_tx_t *tx)
1825 {
1826 sa_os_t *sa = hdl->sa_os->os_sa;
1827 dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
1828 dnode_t *dn;
1829 sa_bulk_attr_t *attr_desc;
1830 void *old_data[2];
1831 int bonus_attr_count = 0;
1832 int bonus_data_size = 0;
1833 int spill_data_size = 0;
1834 int spill_attr_count = 0;
1835 int error;
1836 uint16_t length, reg_length;
1837 int i, j, k, length_idx;
1838 sa_hdr_phys_t *hdr;
1839 sa_idx_tab_t *idx_tab;
1840 int attr_count;
1841 int count;
1842
1843 ASSERT(MUTEX_HELD(&hdl->sa_lock));
1844
	/* First make a copy of the old data */
1846
1847 DB_DNODE_ENTER(db);
1848 dn = DB_DNODE(db);
1849 if (dn->dn_bonuslen != 0) {
1850 bonus_data_size = hdl->sa_bonus->db_size;
1851 old_data[0] = kmem_alloc(bonus_data_size, KM_SLEEP);
1852 bcopy(hdl->sa_bonus->db_data, old_data[0],
1853 hdl->sa_bonus->db_size);
1854 bonus_attr_count = hdl->sa_bonus_tab->sa_layout->lot_attr_count;
1855 } else {
1856 old_data[0] = NULL;
1857 }
1858 DB_DNODE_EXIT(db);
1859
1860 /* Bring spill buffer online if it isn't currently */
1861
1862 if ((error = sa_get_spill(hdl)) == 0) {
1863 spill_data_size = hdl->sa_spill->db_size;
1864 old_data[1] = kmem_alloc(spill_data_size, KM_SLEEP);
1865 bcopy(hdl->sa_spill->db_data, old_data[1],
1866 hdl->sa_spill->db_size);
1867 spill_attr_count =
1868 hdl->sa_spill_tab->sa_layout->lot_attr_count;
1869 } else if (error && error != ENOENT) {
1870 if (old_data[0])
1871 kmem_free(old_data[0], bonus_data_size);
1872 return (error);
1873 } else {
1874 old_data[1] = NULL;
1875 }
1876
1877 /* build descriptor of all attributes */
1878
1879 attr_count = bonus_attr_count + spill_attr_count;
1880 if (action == SA_ADD)
1881 attr_count++;
1882 else if (action == SA_REMOVE)
1883 attr_count--;
1884
1885 attr_desc = kmem_zalloc(sizeof (sa_bulk_attr_t) * attr_count, KM_SLEEP);
1886
1887 	/*
1888 	 * Loop through the bonus buffer and, if one exists, the spill buffer,
1889 	 * building a new attr_descriptor with which to reset the attributes.
1890 	 */
1891 k = j = 0;
1892 count = bonus_attr_count;
1893 hdr = SA_GET_HDR(hdl, SA_BONUS);
1894 idx_tab = SA_IDX_TAB_GET(hdl, SA_BONUS);
1895 for (; k != 2; k++) {
1896 /*
1897 * Iterate over each attribute in layout. Fetch the
1898 * size of variable-length attributes needing rewrite
1899 * from sa_lengths[].
1900 */
1901 for (i = 0, length_idx = 0; i != count; i++) {
1902 sa_attr_type_t attr;
1903
1904 attr = idx_tab->sa_layout->lot_attrs[i];
1905 reg_length = SA_REGISTERED_LEN(sa, attr);
1906 if (reg_length == 0) {
1907 length = hdr->sa_lengths[length_idx];
1908 length_idx++;
1909 } else {
1910 length = reg_length;
1911 }
1912 if (attr == newattr) {
1913 /*
1914 * There is nothing to do for SA_REMOVE,
1915 * so it is just skipped.
1916 */
1917 if (action == SA_REMOVE)
1918 continue;
1919
1920 /*
1921 * Duplicate attributes are not allowed, so the
1922 * action can not be SA_ADD here.
1923 */
1924 ASSERT3S(action, ==, SA_REPLACE);
1925
1926 /*
1927 * Only a variable-sized attribute can be
1928 * replaced here, and its size must be changing.
1929 */
1930 ASSERT3U(reg_length, ==, 0);
1931 ASSERT3U(length, !=, buflen);
1932 SA_ADD_BULK_ATTR(attr_desc, j, attr,
1933 locator, datastart, buflen);
1934 } else {
1935 SA_ADD_BULK_ATTR(attr_desc, j, attr,
1936 NULL, (void *)
1937 (TOC_OFF(idx_tab->sa_idx_tab[attr]) +
1938 (uintptr_t)old_data[k]), length);
1939 }
1940 }
1941 if (k == 0 && hdl->sa_spill) {
1942 hdr = SA_GET_HDR(hdl, SA_SPILL);
1943 idx_tab = SA_IDX_TAB_GET(hdl, SA_SPILL);
1944 count = spill_attr_count;
1945 } else {
1946 break;
1947 }
1948 }
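	/*
	 * For SA_ADD the new attribute is appended to the end of the
	 * rebuilt descriptor.  A fixed-length (registered-length)
	 * attribute must be supplied with exactly its registered size;
	 * only variable-sized attributes may pass an arbitrary buflen.
	 */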
1949 if (action == SA_ADD) {
1950 reg_length = SA_REGISTERED_LEN(sa, newattr);
1951 IMPLY(reg_length != 0, reg_length == buflen);
1952 SA_ADD_BULK_ATTR(attr_desc, j, newattr, locator,
1953 datastart, buflen);
1954 }
1955 ASSERT3U(j, ==, attr_count);
1956
1957 error = sa_build_layouts(hdl, attr_desc, attr_count, tx);
1958
1959 if (old_data[0])
1960 kmem_free(old_data[0], bonus_data_size);
1961 if (old_data[1])
1962 kmem_free(old_data[1], spill_data_size);
1963 kmem_free(attr_desc, sizeof (sa_bulk_attr_t) * attr_count);
1964
1965 return (error);
1966 }
1967
1968 static int
1969 sa_bulk_update_impl(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count,
1970 dmu_tx_t *tx)
1971 {
1972 int error;
1973 sa_os_t *sa = hdl->sa_os->os_sa;
1974 dmu_object_type_t bonustype;
1975
1976 bonustype = SA_BONUSTYPE_FROM_DB(SA_GET_DB(hdl, SA_BONUS));
1977
1978 ASSERT(hdl);
1979 ASSERT(MUTEX_HELD(&hdl->sa_lock));
1980
1981 /* sync out registration table if necessary */
1982 if (sa->sa_need_attr_registration)
1983 sa_attr_register_sync(hdl, tx);
1984
1985 error = sa_attr_op(hdl, bulk, count, SA_UPDATE, tx);
1986 if (error == 0 && !IS_SA_BONUSTYPE(bonustype) && sa->sa_update_cb)
1987 sa->sa_update_cb(hdl, tx);
1988
1989 return (error);
1990 }
1991
1992 /*
1993  * Update an existing attribute or add a new one.
1994  */
1995 int
1996 sa_update(sa_handle_t *hdl, sa_attr_type_t type,
1997 void *buf, uint32_t buflen, dmu_tx_t *tx)
1998 {
1999 int error;
2000 sa_bulk_attr_t bulk;
2001
2002 bulk.sa_attr = type;
2003 bulk.sa_data_func = NULL;
2004 bulk.sa_length = buflen;
2005 bulk.sa_data = buf;
2006
2007 mutex_enter(&hdl->sa_lock);
2008 error = sa_bulk_update_impl(hdl, &bulk, 1, tx);
2009 mutex_exit(&hdl->sa_lock);
2010 return (error);
2011 }
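
/*
 * For example (hypothetical attribute id and variables), with a transaction
 * already assigned, a fixed-length attribute can be rewritten in place:
 *
 *	uint64_t newsize = 8192;
 *
 *	error = sa_update(hdl, sa_attrs[MY_SIZE], &newsize,
 *	    sizeof (newsize), tx);
 */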
2012
2013 int
2014 sa_update_from_cb(sa_handle_t *hdl, sa_attr_type_t attr,
2015 uint32_t buflen, sa_data_locator_t *locator, void *userdata, dmu_tx_t *tx)
2016 {
2017 int error;
2018 sa_bulk_attr_t bulk;
2019
2020 bulk.sa_attr = attr;
2021 bulk.sa_data = userdata;
2022 bulk.sa_data_func = locator;
2023 bulk.sa_length = buflen;
2024
2025 mutex_enter(&hdl->sa_lock);
2026 error = sa_bulk_update_impl(hdl, &bulk, 1, tx);
2027 mutex_exit(&hdl->sa_lock);
2028 return (error);
2029 }
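
/*
 * A minimal sketch of a sa_data_locator_t callback for use with
 * sa_update_from_cb().  This assumes the locator protocol used elsewhere
 * in ZFS (e.g., for ACL data): the copy routines may invoke the locator
 * more than once when a value is split across the bonus and spill buffers,
 * with "start" set on the first call, and the locator hands back the next
 * chunk through the dataptr and length out-parameters.  The state
 * structure and names are hypothetical.
 *
 *	static void
 *	my_data_locator(void **dataptr, uint32_t *length, uint32_t buflen,
 *	    boolean_t start, void *userdata)
 *	{
 *		my_locator_state_t *ls = userdata;
 *
 *		if (start)
 *			ls->offset = 0;
 *		*dataptr = (caddr_t)ls->data + ls->offset;
 *		*length = MIN(buflen, ls->size - ls->offset);
 *		ls->offset += *length;
 *	}
 */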
2030
2031 /*
2032 * Return size of an attribute
2033 */
2034
2035 int
2036 sa_size(sa_handle_t *hdl, sa_attr_type_t attr, int *size)
2037 {
2038 sa_bulk_attr_t bulk;
2039 int error;
2040
2041 bulk.sa_data = NULL;
2042 bulk.sa_attr = attr;
2043 bulk.sa_data_func = NULL;
2044
2045 ASSERT(hdl);
2046 mutex_enter(&hdl->sa_lock);
2047 if ((error = sa_attr_op(hdl, &bulk, 1, SA_LOOKUP, NULL)) != 0) {
2048 mutex_exit(&hdl->sa_lock);
2049 return (error);
2050 }
2051 *size = bulk.sa_size;
2052
2053 mutex_exit(&hdl->sa_lock);
2054 return (0);
2055 }
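
/*
 * A common pattern (sketch; the attribute id and variables are
 * hypothetical) is to size a variable-length attribute first and then
 * fetch it:
 *
 *	int len;
 *	void *buf;
 *
 *	if ((error = sa_size(hdl, sa_attrs[MY_XATTR], &len)) == 0) {
 *		buf = kmem_alloc(len, KM_SLEEP);
 *		error = sa_lookup(hdl, sa_attrs[MY_XATTR], buf, len);
 *		kmem_free(buf, len);
 *	}
 */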
2056
2057 int
2058 sa_bulk_lookup_locked(sa_handle_t *hdl, sa_bulk_attr_t *attrs, int count)
2059 {
2060 ASSERT(hdl);
2061 ASSERT(MUTEX_HELD(&hdl->sa_lock));
2062 return (sa_lookup_impl(hdl, attrs, count));
2063 }
2064
2065 int
2066 sa_bulk_lookup(sa_handle_t *hdl, sa_bulk_attr_t *attrs, int count)
2067 {
2068 int error;
2069
2070 ASSERT(hdl);
2071 mutex_enter(&hdl->sa_lock);
2072 error = sa_bulk_lookup_locked(hdl, attrs, count);
2073 mutex_exit(&hdl->sa_lock);
2074 return (error);
2075 }
2076
2077 int
2078 sa_bulk_update(sa_handle_t *hdl, sa_bulk_attr_t *attrs, int count, dmu_tx_t *tx)
2079 {
2080 int error;
2081
2082 ASSERT(hdl);
2083 mutex_enter(&hdl->sa_lock);
2084 error = sa_bulk_update_impl(hdl, attrs, count, tx);
2085 mutex_exit(&hdl->sa_lock);
2086 return (error);
2087 }
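
/*
 * Sketch of a multi-attribute update (hypothetical attribute ids and
 * variables); every listed attribute is rewritten under a single
 * acquisition of the handle lock:
 *
 *	sa_bulk_attr_t bulk[2];
 *	int count = 0;
 *
 *	SA_ADD_BULK_ATTR(bulk, count, sa_attrs[MY_ATIME], NULL,
 *	    atime, sizeof (atime));
 *	SA_ADD_BULK_ATTR(bulk, count, sa_attrs[MY_MTIME], NULL,
 *	    mtime, sizeof (mtime));
 *
 *	error = sa_bulk_update(hdl, bulk, count, tx);
 */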
2088
2089 int
2090 sa_remove(sa_handle_t *hdl, sa_attr_type_t attr, dmu_tx_t *tx)
2091 {
2092 int error;
2093
2094 mutex_enter(&hdl->sa_lock);
2095 error = sa_modify_attrs(hdl, attr, SA_REMOVE, NULL,
2096 NULL, 0, tx);
2097 mutex_exit(&hdl->sa_lock);
2098 return (error);
2099 }
2100
2101 void
2102 sa_object_info(sa_handle_t *hdl, dmu_object_info_t *doi)
2103 {
2104 dmu_object_info_from_db((dmu_buf_t *)hdl->sa_bonus, doi);
2105 }
2106
2107 void
2108 sa_object_size(sa_handle_t *hdl, uint32_t *blksize, u_longlong_t *nblocks)
2109 {
2110 dmu_object_size_from_db((dmu_buf_t *)hdl->sa_bonus,
2111 blksize, nblocks);
2112 }
2113
2114 void
2115 sa_set_userp(sa_handle_t *hdl, void *ptr)
2116 {
2117 hdl->sa_userp = ptr;
2118 }
2119
2120 dmu_buf_t *
2121 sa_get_db(sa_handle_t *hdl)
2122 {
2123 return ((dmu_buf_t *)hdl->sa_bonus);
2124 }
2125
2126 void *
2127 sa_get_userdata(sa_handle_t *hdl)
2128 {
2129 return (hdl->sa_userp);
2130 }
2131
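/*
 * Register a callback that sa_bulk_update_impl() will invoke after a
 * successful update of an object whose bonus buffer is not yet in SA
 * format; the ZPL, for example, uses this hook to upgrade old-format
 * znodes.
 */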
2132 void
2133 sa_register_update_callback_locked(objset_t *os, sa_update_cb_t *func)
2134 {
2135 ASSERT(MUTEX_HELD(&os->os_sa->sa_lock));
2136 os->os_sa->sa_update_cb = func;
2137 }
2138
2139 void
2140 sa_register_update_callback(objset_t *os, sa_update_cb_t *func)
2141 {
2143 	mutex_enter(&os->os_sa->sa_lock);
2144 sa_register_update_callback_locked(os, func);
2145 mutex_exit(&os->os_sa->sa_lock);
2146 }
2147
2148 uint64_t
2149 sa_handle_object(sa_handle_t *hdl)
2150 {
2151 return (hdl->sa_bonus->db_object);
2152 }
2153
2154 boolean_t
2155 sa_enabled(objset_t *os)
2156 {
2157 return (os->os_sa == NULL);
2158 }
2159
2160 int
2161 sa_set_sa_object(objset_t *os, uint64_t sa_object)
2162 {
2163 sa_os_t *sa = os->os_sa;
2164
2165 if (sa->sa_master_obj)
2166 return (1);
2167
2168 sa->sa_master_obj = sa_object;
2169
2170 return (0);
2171 }
2172
2173 int
2174 sa_hdrsize(void *arg)
2175 {
2176 sa_hdr_phys_t *hdr = arg;
2177
2178 return (SA_HDR_SIZE(hdr));
2179 }
2180
2181 void
2182 sa_handle_lock(sa_handle_t *hdl)
2183 {
2184 ASSERT(hdl);
2185 mutex_enter(&hdl->sa_lock);
2186 }
2187
2188 void
2189 sa_handle_unlock(sa_handle_t *hdl)
2190 {
2191 ASSERT(hdl);
2192 mutex_exit(&hdl->sa_lock);
2193 }