1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24 * Portions Copyright 2011 iXsystems, Inc
25 * Copyright (c) 2013, 2017 by Delphix. All rights reserved.
26 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
27 * Copyright (c) 2014 Integros [integros.com]
28 * Copyright 2019 Joyent, Inc.
29 */
30
31 #include <sys/zfs_context.h>
32 #include <sys/types.h>
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/sysmacros.h>
36 #include <sys/dmu.h>
37 #include <sys/dmu_impl.h>
38 #include <sys/dmu_objset.h>
39 #include <sys/dmu_tx.h>
40 #include <sys/dbuf.h>
41 #include <sys/dnode.h>
42 #include <sys/zap.h>
43 #include <sys/sa.h>
44 #include <sys/sunddi.h>
45 #include <sys/sa_impl.h>
#include <sys/errno.h>
49
50 #ifdef _KERNEL
51 #include <sys/zfs_znode.h>
52 #endif
53
54 /*
55 * ZFS System attributes:
56 *
57 * A generic mechanism to allow for arbitrary attributes
58 * to be stored in a dnode. The data will be stored in the bonus buffer of
59 * the dnode and if necessary a special "spill" block will be used to handle
60 * overflow situations. The spill block will be sized to fit the data
61 * from 512 - 128K. When a spill block is used the BP (blkptr_t) for the
62 * spill block is stored at the end of the current bonus buffer. Any
63 * attributes that would be in the way of the blkptr_t will be relocated
64 * into the spill block.
65 *
66 * Attribute registration:
67 *
 * A mapping between attribute "string" names and their numeric values,
 * lengths, and byteswap functions is stored persistently on a per-dataset
 * basis. The names are only used during registration; all attributes are
 * otherwise known by their unique attribute id value. If an attribute can
 * have a variable size then a registered length of 0 is used to indicate
 * this.
74 *
75 * Attribute Layout:
76 *
 * Attribute layouts are a way to compactly store multiple attributes
 * without the overhead of managing each attribute individually. Since the
 * same set of attributes is typically stored in the same order, a single
 * table is used to represent that layout. The ZPL, for example, usually
 * has only about 10 different layouts (regular files, device files,
 * symlinks, regular files + scanstamp, files/dirs with extended
 * attributes, plus variants of all of those minus the ACL, because it
 * would be kicked out into the spill block).
86 *
87 * Layouts are simply an array of the attributes and their
88 * ordering i.e. [0, 1, 4, 5, 2]
89 *
 * Each distinct layout is given a unique layout number, and that is what is
 * stored in the header at the beginning of the SA data buffer.
92 *
93 * A layout only covers a single dbuf (bonus or spill). If a set of
94 * attributes is split up between the bonus buffer and a spill buffer then
95 * two different layouts will be used. This allows us to byteswap the
96 * spill without looking at the bonus buffer and keeps the on disk format of
97 * the bonus and spill buffer the same.
98 *
99 * Adding a single attribute will cause the entire set of attributes to
100 * be rewritten and could result in a new layout number being constructed
101 * as part of the rewrite if no such layout exists for the new set of
 * attributes. The new attribute will be appended to the end of the already
103 * existing attributes.
104 *
105 * Both the attribute registration and attribute layout information are
 * stored in normal ZAP attributes. There should be a small number of
 * known layouts, and the set of attributes is assumed to typically be quite
108 * small.
109 *
110 * The registered attributes and layout "table" information is maintained
111 * in core and a special "sa_os_t" is attached to the objset_t.
112 *
113 * A special interface is provided to allow for quickly applying
114 * a large set of attributes at once. sa_replace_all_by_template() is
115 * used to set an array of attributes. This is used by the ZPL when
116 * creating a brand new file. The template that is passed into the function
117 * specifies the attribute, size for variable length attributes, location of
118 * data and special "data locator" function if the data isn't in a contiguous
119 * location.
120 *
121 * Byteswap implications:
122 *
 * Since the SA attributes are not entirely self-describing, we can't do
 * the normal byteswap processing. The special ZAP layout attribute and
 * attribute registration attributes define the byteswap function and the
 * size of each attribute, unless it is variable sized.
 * The normal ZFS byteswapping infrastructure assumes you don't need
 * to read any objects in order to do the necessary byteswapping, whereas
 * SA attributes can only be properly byteswapped once the dataset is opened
 * and the layout/attribute ZAP attributes are available. Because of this,
 * the SA attributes are byteswapped when they are first accessed by
 * the SA code that reads the SA data.
133 */
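
/*
 * Illustrative sketch (not part of this file's compiled code): how a
 * hypothetical consumer might register its attributes and obtain their
 * numeric ids.  The "MYFS_*" names and the myfs_attr_ids variable are
 * assumptions made purely for this example; the real ZPL table lives in
 * zfs_sa.c.  The last field of each sa_attr_reg_t entry is filled in
 * during registration, and a length of 0 marks a variable-sized attribute.
 *
 *	static sa_attr_reg_t myfs_attrs[] = {
 *		{ "MYFS_MODE", sizeof (uint64_t), SA_UINT64_ARRAY, 0 },
 *		{ "MYFS_TIMES", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 0 },
 *		{ "MYFS_BLOB", 0, SA_UINT8_ARRAY, 0 },
 *	};
 *	sa_attr_type_t *myfs_attr_ids;
 *
 *	error = sa_setup(os, sa_obj, myfs_attrs, 3, &myfs_attr_ids);
 *	if (error == 0)
 *		mode_attr_id = myfs_attr_ids[0];
 */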
134
135 typedef void (sa_iterfunc_t)(void *hdr, void *addr, sa_attr_type_t,
136 uint16_t length, int length_idx, boolean_t, void *userp);
137
138 static int sa_build_index(sa_handle_t *hdl, sa_buf_type_t buftype);
139 static void sa_idx_tab_hold(objset_t *os, sa_idx_tab_t *idx_tab);
140 static sa_idx_tab_t *sa_find_idx_tab(objset_t *os, dmu_object_type_t bonustype,
141 sa_hdr_phys_t *hdr);
142 static void sa_idx_tab_rele(objset_t *os, void *arg);
143 static void sa_copy_data(sa_data_locator_t *func, void *start, void *target,
144 int buflen);
145 static int sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
146 sa_data_op_t action, sa_data_locator_t *locator, void *datastart,
147 uint16_t buflen, dmu_tx_t *tx);
148
149 arc_byteswap_func_t *sa_bswap_table[] = {
150 byteswap_uint64_array,
151 byteswap_uint32_array,
152 byteswap_uint16_array,
153 byteswap_uint8_array,
154 zfs_acl_byteswap,
155 };
156
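/*
 * Copy an attribute value of length l from s to t.  When no data locator
 * function f is supplied, the common 8- and 16-byte cases (single uint64_t
 * values and the 2 x uint64_t timestamps) are special-cased to avoid a
 * bcopy() call; other lengths fall through to bcopy(), and callers with a
 * locator go through sa_copy_data().
 */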
157 #define SA_COPY_DATA(f, s, t, l) \
158 { \
159 if (f == NULL) { \
160 if (l == 8) { \
161 *(uint64_t *)t = *(uint64_t *)s; \
162 } else if (l == 16) { \
163 *(uint64_t *)t = *(uint64_t *)s; \
164 *(uint64_t *)((uintptr_t)t + 8) = \
165 *(uint64_t *)((uintptr_t)s + 8); \
166 } else { \
167 bcopy(s, t, l); \
168 } \
169 } else \
170 sa_copy_data(f, s, t, l); \
171 }
172
173 /*
174 * This table is fixed and cannot be changed. Its purpose is to
175 * allow the SA code to work with both old/new ZPL file systems.
176 * It contains the list of legacy attributes. These attributes aren't
177 * stored in the "attribute" registry zap objects, since older ZPL file systems
178 * won't have the registry. Only objsets of type ZFS_TYPE_FILESYSTEM will
179 * use this static table.
180 */
181 sa_attr_reg_t sa_legacy_attrs[] = {
182 {"ZPL_ATIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 0},
183 {"ZPL_MTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 1},
184 {"ZPL_CTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 2},
185 {"ZPL_CRTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 3},
186 {"ZPL_GEN", sizeof (uint64_t), SA_UINT64_ARRAY, 4},
187 {"ZPL_MODE", sizeof (uint64_t), SA_UINT64_ARRAY, 5},
188 {"ZPL_SIZE", sizeof (uint64_t), SA_UINT64_ARRAY, 6},
189 {"ZPL_PARENT", sizeof (uint64_t), SA_UINT64_ARRAY, 7},
190 {"ZPL_LINKS", sizeof (uint64_t), SA_UINT64_ARRAY, 8},
191 {"ZPL_XATTR", sizeof (uint64_t), SA_UINT64_ARRAY, 9},
192 {"ZPL_RDEV", sizeof (uint64_t), SA_UINT64_ARRAY, 10},
193 {"ZPL_FLAGS", sizeof (uint64_t), SA_UINT64_ARRAY, 11},
194 {"ZPL_UID", sizeof (uint64_t), SA_UINT64_ARRAY, 12},
195 {"ZPL_GID", sizeof (uint64_t), SA_UINT64_ARRAY, 13},
196 {"ZPL_PAD", sizeof (uint64_t) * 4, SA_UINT64_ARRAY, 14},
197 {"ZPL_ZNODE_ACL", 88, SA_UINT8_ARRAY, 15},
198 };
199
200 /*
201 * This is only used for objects of type DMU_OT_ZNODE
202 */
203 sa_attr_type_t sa_legacy_zpl_layout[] = {
204 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
205 };
206
207 /*
208 * Special dummy layout used for buffers with no attributes.
209 */
210 sa_attr_type_t sa_dummy_zpl_layout[] = { 0 };
211
212 static int sa_legacy_attr_count = 16;
213 static kmem_cache_t *sa_cache = NULL;
214
215 /*ARGSUSED*/
216 static int
217 sa_cache_constructor(void *buf, void *unused, int kmflag)
218 {
219 sa_handle_t *hdl = buf;
220
221 mutex_init(&hdl->sa_lock, NULL, MUTEX_DEFAULT, NULL);
222 return (0);
223 }
224
225 /*ARGSUSED*/
226 static void
227 sa_cache_destructor(void *buf, void *unused)
228 {
229 sa_handle_t *hdl = buf;
230 mutex_destroy(&hdl->sa_lock);
231 }
232
233 void
234 sa_cache_init(void)
235 {
236 sa_cache = kmem_cache_create("sa_cache",
237 sizeof (sa_handle_t), 0, sa_cache_constructor,
238 sa_cache_destructor, NULL, NULL, NULL, 0);
239 }
240
241 void
242 sa_cache_fini(void)
243 {
244 if (sa_cache)
245 kmem_cache_destroy(sa_cache);
246 }
247
248 static int
249 layout_num_compare(const void *arg1, const void *arg2)
250 {
251 const sa_lot_t *node1 = (const sa_lot_t *)arg1;
252 const sa_lot_t *node2 = (const sa_lot_t *)arg2;
253
254 return (AVL_CMP(node1->lot_num, node2->lot_num));
255 }
256
257 static int
258 layout_hash_compare(const void *arg1, const void *arg2)
259 {
260 const sa_lot_t *node1 = (const sa_lot_t *)arg1;
261 const sa_lot_t *node2 = (const sa_lot_t *)arg2;
262
263 int cmp = AVL_CMP(node1->lot_hash, node2->lot_hash);
264 if (likely(cmp))
265 return (cmp);
266
267 return (AVL_CMP(node1->lot_instance, node2->lot_instance));
268 }
269
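/*
 * Compare a layout to an attribute ordering.  Despite the boolean_t return
 * type this follows comparator conventions: it returns 0 when the layout
 * matches the attribute array and non-zero otherwise (see the "== 0" check
 * in sa_find_layout()).
 */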
270 boolean_t
271 sa_layout_equal(sa_lot_t *tbf, sa_attr_type_t *attrs, int count)
272 {
273 int i;
274
275 if (count != tbf->lot_attr_count)
276 return (1);
277
278 for (i = 0; i != count; i++) {
279 if (attrs[i] != tbf->lot_attrs[i])
280 return (1);
281 }
282 return (0);
283 }
284
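/*
 * The layout hash is an order-independent XOR of one zfs_crc64_table entry
 * per attribute, so two layouts containing the same attributes in a
 * different order hash identically.  sa_add_layout_entry() disambiguates
 * such collisions with lot_instance, and sa_find_layout() falls back to an
 * exact, ordered comparison via sa_layout_equal().
 */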
285 #define SA_ATTR_HASH(attr) (zfs_crc64_table[(-1ULL ^ attr) & 0xFF])
286
287 static uint64_t
288 sa_layout_info_hash(sa_attr_type_t *attrs, int attr_count)
289 {
290 int i;
291 uint64_t crc = -1ULL;
292
293 for (i = 0; i != attr_count; i++)
294 crc ^= SA_ATTR_HASH(attrs[i]);
295
296 return (crc);
297 }
298
299 static int
300 sa_get_spill(sa_handle_t *hdl)
301 {
302 int rc;
303 if (hdl->sa_spill == NULL) {
304 if ((rc = dmu_spill_hold_existing(hdl->sa_bonus, NULL,
305 &hdl->sa_spill)) == 0)
306 VERIFY(0 == sa_build_index(hdl, SA_SPILL));
307 } else {
308 rc = 0;
309 }
310
311 return (rc);
312 }
313
314 /*
 * Main attribute lookup/update function.
 * Returns 0 on success or non-zero on failure.
 *
 * Operates on a bulk array; the first failure aborts further processing.
319 */
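
/*
 * Illustrative sketch (the attr_mode/attr_size ids are assumed to have been
 * obtained from sa_setup()): callers normally fill the bulk array with
 * SA_ADD_BULK_ATTR() and reach this function through wrappers such as
 * sa_bulk_lookup() or sa_bulk_update().
 *
 *	sa_bulk_attr_t bulk[2];
 *	int count = 0;
 *	uint64_t mode, size;
 *
 *	SA_ADD_BULK_ATTR(bulk, count, attr_mode, NULL, &mode, 8);
 *	SA_ADD_BULK_ATTR(bulk, count, attr_size, NULL, &size, 8);
 *	error = sa_bulk_lookup(hdl, bulk, count);
 */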
320 int
321 sa_attr_op(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count,
322 sa_data_op_t data_op, dmu_tx_t *tx)
323 {
324 sa_os_t *sa = hdl->sa_os->os_sa;
325 int i;
326 int error = 0;
327 sa_buf_type_t buftypes;
328
329 buftypes = 0;
330
331 ASSERT(count > 0);
332 for (i = 0; i != count; i++) {
333 ASSERT(bulk[i].sa_attr <= hdl->sa_os->os_sa->sa_num_attrs);
334
335 bulk[i].sa_addr = NULL;
336 /* First check the bonus buffer */
337
338 if (hdl->sa_bonus_tab && TOC_ATTR_PRESENT(
339 hdl->sa_bonus_tab->sa_idx_tab[bulk[i].sa_attr])) {
340 SA_ATTR_INFO(sa, hdl->sa_bonus_tab,
341 SA_GET_HDR(hdl, SA_BONUS),
342 bulk[i].sa_attr, bulk[i], SA_BONUS, hdl);
343 if (tx && !(buftypes & SA_BONUS)) {
344 dmu_buf_will_dirty(hdl->sa_bonus, tx);
345 buftypes |= SA_BONUS;
346 }
347 }
348 if (bulk[i].sa_addr == NULL &&
349 ((error = sa_get_spill(hdl)) == 0)) {
350 if (TOC_ATTR_PRESENT(
351 hdl->sa_spill_tab->sa_idx_tab[bulk[i].sa_attr])) {
352 SA_ATTR_INFO(sa, hdl->sa_spill_tab,
353 SA_GET_HDR(hdl, SA_SPILL),
354 bulk[i].sa_attr, bulk[i], SA_SPILL, hdl);
355 if (tx && !(buftypes & SA_SPILL) &&
356 bulk[i].sa_size == bulk[i].sa_length) {
357 dmu_buf_will_dirty(hdl->sa_spill, tx);
358 buftypes |= SA_SPILL;
359 }
360 }
361 }
362 if (error && error != ENOENT) {
363 return ((error == ECKSUM) ? EIO : error);
364 }
365
366 switch (data_op) {
367 case SA_LOOKUP:
368 if (bulk[i].sa_addr == NULL)
369 return (SET_ERROR(ENOENT));
370 if (bulk[i].sa_data) {
371 SA_COPY_DATA(bulk[i].sa_data_func,
372 bulk[i].sa_addr, bulk[i].sa_data,
373 bulk[i].sa_size);
374 }
375 continue;
376
377 case SA_UPDATE:
378 /* existing rewrite of attr */
379 if (bulk[i].sa_addr &&
380 bulk[i].sa_size == bulk[i].sa_length) {
381 SA_COPY_DATA(bulk[i].sa_data_func,
382 bulk[i].sa_data, bulk[i].sa_addr,
383 bulk[i].sa_length);
384 continue;
385 } else if (bulk[i].sa_addr) { /* attr size change */
386 error = sa_modify_attrs(hdl, bulk[i].sa_attr,
387 SA_REPLACE, bulk[i].sa_data_func,
388 bulk[i].sa_data, bulk[i].sa_length, tx);
389 } else { /* adding new attribute */
390 error = sa_modify_attrs(hdl, bulk[i].sa_attr,
391 SA_ADD, bulk[i].sa_data_func,
392 bulk[i].sa_data, bulk[i].sa_length, tx);
393 }
394 if (error)
395 return (error);
396 break;
397 }
398 }
399 return (error);
400 }
401
402 static sa_lot_t *
403 sa_add_layout_entry(objset_t *os, sa_attr_type_t *attrs, int attr_count,
404 uint64_t lot_num, uint64_t hash, boolean_t zapadd, dmu_tx_t *tx)
405 {
406 sa_os_t *sa = os->os_sa;
407 sa_lot_t *tb, *findtb;
408 int i;
409 avl_index_t loc;
410
411 ASSERT(MUTEX_HELD(&sa->sa_lock));
412 tb = kmem_zalloc(sizeof (sa_lot_t), KM_SLEEP);
413 tb->lot_attr_count = attr_count;
414 tb->lot_attrs = kmem_alloc(sizeof (sa_attr_type_t) * attr_count,
415 KM_SLEEP);
416 bcopy(attrs, tb->lot_attrs, sizeof (sa_attr_type_t) * attr_count);
417 tb->lot_num = lot_num;
418 tb->lot_hash = hash;
419 tb->lot_instance = 0;
420
421 if (zapadd) {
422 char attr_name[8];
423
424 if (sa->sa_layout_attr_obj == 0) {
425 sa->sa_layout_attr_obj = zap_create_link(os,
426 DMU_OT_SA_ATTR_LAYOUTS,
427 sa->sa_master_obj, SA_LAYOUTS, tx);
428 }
429
430 (void) snprintf(attr_name, sizeof (attr_name),
431 "%d", (int)lot_num);
432 VERIFY(0 == zap_update(os, os->os_sa->sa_layout_attr_obj,
433 attr_name, 2, attr_count, attrs, tx));
434 }
435
436 list_create(&tb->lot_idx_tab, sizeof (sa_idx_tab_t),
437 offsetof(sa_idx_tab_t, sa_next));
438
439 for (i = 0; i != attr_count; i++) {
440 if (sa->sa_attr_table[tb->lot_attrs[i]].sa_length == 0)
441 tb->lot_var_sizes++;
442 }
443
444 avl_add(&sa->sa_layout_num_tree, tb);
445
	/* resolve any hash collision by picking a unique lot_instance */
447 if ((findtb = avl_find(&sa->sa_layout_hash_tree, tb, &loc)) != NULL) {
448 for (; findtb && findtb->lot_hash == hash;
449 findtb = AVL_NEXT(&sa->sa_layout_hash_tree, findtb)) {
450 if (findtb->lot_instance != tb->lot_instance)
451 break;
452 tb->lot_instance++;
453 }
454 }
455 avl_add(&sa->sa_layout_hash_tree, tb);
456 return (tb);
457 }
458
459 static void
460 sa_find_layout(objset_t *os, uint64_t hash, sa_attr_type_t *attrs,
461 int count, dmu_tx_t *tx, sa_lot_t **lot)
462 {
463 sa_lot_t *tb, tbsearch;
464 avl_index_t loc;
465 sa_os_t *sa = os->os_sa;
466 boolean_t found = B_FALSE;
467
468 mutex_enter(&sa->sa_lock);
469 tbsearch.lot_hash = hash;
470 tbsearch.lot_instance = 0;
471 tb = avl_find(&sa->sa_layout_hash_tree, &tbsearch, &loc);
472 if (tb) {
473 for (; tb && tb->lot_hash == hash;
474 tb = AVL_NEXT(&sa->sa_layout_hash_tree, tb)) {
475 if (sa_layout_equal(tb, attrs, count) == 0) {
476 found = B_TRUE;
477 break;
478 }
479 }
480 }
481 if (!found) {
482 tb = sa_add_layout_entry(os, attrs, count,
483 avl_numnodes(&sa->sa_layout_num_tree), hash, B_TRUE, tx);
484 }
485 mutex_exit(&sa->sa_lock);
486 *lot = tb;
487 }
488
489 static int
490 sa_resize_spill(sa_handle_t *hdl, uint32_t size, dmu_tx_t *tx)
491 {
492 int error;
493 uint32_t blocksize;
494
495 if (size == 0) {
496 blocksize = SPA_MINBLOCKSIZE;
497 } else if (size > SPA_OLD_MAXBLOCKSIZE) {
498 ASSERT(0);
499 return (SET_ERROR(EFBIG));
500 } else {
501 blocksize = P2ROUNDUP_TYPED(size, SPA_MINBLOCKSIZE, uint32_t);
502 }
503
504 error = dbuf_spill_set_blksz(hdl->sa_spill, blocksize, tx);
505 ASSERT(error == 0);
506 return (error);
507 }
508
509 static void
510 sa_copy_data(sa_data_locator_t *func, void *datastart, void *target, int buflen)
511 {
512 if (func == NULL) {
513 bcopy(datastart, target, buflen);
514 } else {
515 boolean_t start;
516 int bytes;
517 void *dataptr;
518 void *saptr = target;
519 uint32_t length;
520
521 start = B_TRUE;
522 bytes = 0;
523 while (bytes < buflen) {
524 func(&dataptr, &length, buflen, start, datastart);
525 bcopy(dataptr, saptr, length);
526 saptr = (void *)((caddr_t)saptr + length);
527 bytes += length;
528 start = B_FALSE;
529 }
530 }
531 }
532
533 /*
 * Determine several different sizes:
 * first the sa header size and the number of bytes to be stored;
 * if a spill would occur, the index into the attribute array at which
 * the spill starts is returned through *index.
 *
 * The boolean will_spill is set when spilling is necessary. It
 * is only set when the buftype is SA_BONUS.
541 */
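
/*
 * Worked example (DMU_OT_SA bonus buffer): the base sa_hdr_phys_t is 8
 * bytes and already holds one sa_lengths[] slot, so the first
 * variable-sized attribute is free; each additional variable-sized
 * attribute adds sizeof (uint16_t) to hdrsize.  With three variable-sized
 * attributes that is 8 + 2 + 2 = 12, which P2ROUNDUP() pads to a 16-byte
 * header.
 */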
542 static int
543 sa_find_sizes(sa_os_t *sa, sa_bulk_attr_t *attr_desc, int attr_count,
544 dmu_buf_t *db, sa_buf_type_t buftype, int full_space, int *index,
545 int *total, boolean_t *will_spill)
546 {
547 int var_size = 0;
548 int i;
549 int hdrsize;
550 int extra_hdrsize;
551
552 if (buftype == SA_BONUS && sa->sa_force_spill) {
553 *total = 0;
554 *index = 0;
555 *will_spill = B_TRUE;
556 return (0);
557 }
558
559 *index = -1;
560 *total = 0;
561 *will_spill = B_FALSE;
562
563 extra_hdrsize = 0;
564 hdrsize = (SA_BONUSTYPE_FROM_DB(db) == DMU_OT_ZNODE) ? 0 :
565 sizeof (sa_hdr_phys_t);
566
567 ASSERT(IS_P2ALIGNED(full_space, 8));
568
569 for (i = 0; i != attr_count; i++) {
570 boolean_t is_var_sz;
571
572 *total = P2ROUNDUP(*total, 8);
573 *total += attr_desc[i].sa_length;
574 if (*will_spill)
575 continue;
576
577 is_var_sz = (SA_REGISTERED_LEN(sa, attr_desc[i].sa_attr) == 0);
578 if (is_var_sz) {
579 var_size++;
580 }
581
582 if (is_var_sz && var_size > 1) {
583 /*
584 * Don't worry that the spill block might overflow.
585 * It will be resized if needed in sa_build_layouts().
586 */
587 if (buftype == SA_SPILL ||
588 P2ROUNDUP(hdrsize + sizeof (uint16_t), 8) +
589 *total < full_space) {
590 /*
591 * Account for header space used by array of
592 * optional sizes of variable-length attributes.
593 * Record the extra header size in case this
594 * increase needs to be reversed due to
595 * spill-over.
596 */
597 hdrsize += sizeof (uint16_t);
598 if (*index != -1)
599 extra_hdrsize += sizeof (uint16_t);
600 } else {
601 ASSERT(buftype == SA_BONUS);
602 if (*index == -1)
603 *index = i;
604 *will_spill = B_TRUE;
605 continue;
606 }
607 }
608
609 /*
		 * Find the index where a spill *could* occur, then
		 * continue counting the remaining attribute space.
		 * The sum is used later for sizing the bonus and
		 * spill buffers.
614 */
615 if (buftype == SA_BONUS && *index == -1 &&
616 *total + P2ROUNDUP(hdrsize, 8) >
617 (full_space - sizeof (blkptr_t))) {
618 *index = i;
619 }
620
621 if (*total + P2ROUNDUP(hdrsize, 8) > full_space &&
622 buftype == SA_BONUS)
623 *will_spill = B_TRUE;
624 }
625
626 if (*will_spill)
627 hdrsize -= extra_hdrsize;
628
629 hdrsize = P2ROUNDUP(hdrsize, 8);
630 return (hdrsize);
631 }
632
633 #define BUF_SPACE_NEEDED(total, header) (total + header)
634
635 /*
 * Find the layout that corresponds to the ordering of attributes.
 * If none is found, a new layout number is created and added to the
 * persistent layout tables.
639 */
640 static int
641 sa_build_layouts(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc, int attr_count,
642 dmu_tx_t *tx)
643 {
644 sa_os_t *sa = hdl->sa_os->os_sa;
645 uint64_t hash;
646 sa_buf_type_t buftype;
647 sa_hdr_phys_t *sahdr;
648 void *data_start;
649 int buf_space;
650 sa_attr_type_t *attrs, *attrs_start;
651 int i, lot_count;
652 int dnodesize;
653 int hdrsize;
654 int spillhdrsize = 0;
655 int used;
656 dmu_object_type_t bonustype;
657 sa_lot_t *lot;
658 int len_idx;
659 int spill_used;
660 int bonuslen;
661 boolean_t spilling;
662
663 dmu_buf_will_dirty(hdl->sa_bonus, tx);
664 bonustype = SA_BONUSTYPE_FROM_DB(hdl->sa_bonus);
665
666 dmu_object_dnsize_from_db(hdl->sa_bonus, &dnodesize);
667 bonuslen = DN_BONUS_SIZE(dnodesize);
668
669 /* first determine bonus header size and sum of all attributes */
670 hdrsize = sa_find_sizes(sa, attr_desc, attr_count, hdl->sa_bonus,
671 SA_BONUS, bonuslen, &i, &used, &spilling);
672
673 if (used > SPA_OLD_MAXBLOCKSIZE)
674 return (SET_ERROR(EFBIG));
675
676 VERIFY(0 == dmu_set_bonus(hdl->sa_bonus, spilling ?
677 MIN(bonuslen - sizeof (blkptr_t), used + hdrsize) :
678 used + hdrsize, tx));
679
680 ASSERT((bonustype == DMU_OT_ZNODE && spilling == 0) ||
681 bonustype == DMU_OT_SA);
682
683 /* setup and size spill buffer when needed */
684 if (spilling) {
685 boolean_t dummy;
686
687 if (hdl->sa_spill == NULL) {
688 VERIFY(dmu_spill_hold_by_bonus(hdl->sa_bonus, 0, NULL,
689 &hdl->sa_spill) == 0);
690 }
691 dmu_buf_will_dirty(hdl->sa_spill, tx);
692
693 spillhdrsize = sa_find_sizes(sa, &attr_desc[i],
694 attr_count - i, hdl->sa_spill, SA_SPILL,
695 hdl->sa_spill->db_size, &i, &spill_used, &dummy);
696
697 if (spill_used > SPA_OLD_MAXBLOCKSIZE)
698 return (SET_ERROR(EFBIG));
699
700 buf_space = hdl->sa_spill->db_size - spillhdrsize;
701 if (BUF_SPACE_NEEDED(spill_used, spillhdrsize) >
702 hdl->sa_spill->db_size)
703 VERIFY(0 == sa_resize_spill(hdl,
704 BUF_SPACE_NEEDED(spill_used, spillhdrsize), tx));
705 }
706
707 /* setup starting pointers to lay down data */
708 data_start = (void *)((uintptr_t)hdl->sa_bonus->db_data + hdrsize);
709 sahdr = (sa_hdr_phys_t *)hdl->sa_bonus->db_data;
710 buftype = SA_BONUS;
711
712 if (spilling)
713 buf_space = (sa->sa_force_spill) ?
714 0 : SA_BLKPTR_SPACE - hdrsize;
715 else
716 buf_space = hdl->sa_bonus->db_size - hdrsize;
717
718 attrs_start = attrs = kmem_alloc(sizeof (sa_attr_type_t) * attr_count,
719 KM_SLEEP);
720 lot_count = 0;
721
722 for (i = 0, len_idx = 0, hash = -1ULL; i != attr_count; i++) {
723 uint16_t length;
724
725 ASSERT(IS_P2ALIGNED(data_start, 8));
726 ASSERT(IS_P2ALIGNED(buf_space, 8));
727 attrs[i] = attr_desc[i].sa_attr;
728 length = SA_REGISTERED_LEN(sa, attrs[i]);
729 if (length == 0)
730 length = attr_desc[i].sa_length;
731
732 if (buf_space < length) { /* switch to spill buffer */
733 VERIFY(spilling);
734 VERIFY(bonustype == DMU_OT_SA);
735 if (buftype == SA_BONUS && !sa->sa_force_spill) {
736 sa_find_layout(hdl->sa_os, hash, attrs_start,
737 lot_count, tx, &lot);
738 SA_SET_HDR(sahdr, lot->lot_num, hdrsize);
739 }
740
741 buftype = SA_SPILL;
742 hash = -1ULL;
743 len_idx = 0;
744
745 sahdr = (sa_hdr_phys_t *)hdl->sa_spill->db_data;
746 sahdr->sa_magic = SA_MAGIC;
747 data_start = (void *)((uintptr_t)sahdr +
748 spillhdrsize);
749 attrs_start = &attrs[i];
750 buf_space = hdl->sa_spill->db_size - spillhdrsize;
751 lot_count = 0;
752 }
753 hash ^= SA_ATTR_HASH(attrs[i]);
754 attr_desc[i].sa_addr = data_start;
755 attr_desc[i].sa_size = length;
756 SA_COPY_DATA(attr_desc[i].sa_data_func, attr_desc[i].sa_data,
757 data_start, length);
758 if (sa->sa_attr_table[attrs[i]].sa_length == 0) {
759 sahdr->sa_lengths[len_idx++] = length;
760 }
761 data_start = (void *)P2ROUNDUP(((uintptr_t)data_start +
762 length), 8);
763 buf_space -= P2ROUNDUP(length, 8);
764 lot_count++;
765 }
766
767 sa_find_layout(hdl->sa_os, hash, attrs_start, lot_count, tx, &lot);
768
769 /*
770 * Verify that old znodes always have layout number 0.
771 * Must be DMU_OT_SA for arbitrary layouts
772 */
773 VERIFY((bonustype == DMU_OT_ZNODE && lot->lot_num == 0) ||
774 (bonustype == DMU_OT_SA && lot->lot_num > 1));
775
776 if (bonustype == DMU_OT_SA) {
777 SA_SET_HDR(sahdr, lot->lot_num,
778 buftype == SA_BONUS ? hdrsize : spillhdrsize);
779 }
780
781 kmem_free(attrs, sizeof (sa_attr_type_t) * attr_count);
782 if (hdl->sa_bonus_tab) {
783 sa_idx_tab_rele(hdl->sa_os, hdl->sa_bonus_tab);
784 hdl->sa_bonus_tab = NULL;
785 }
786 if (!sa->sa_force_spill)
787 VERIFY(0 == sa_build_index(hdl, SA_BONUS));
788 if (hdl->sa_spill) {
789 sa_idx_tab_rele(hdl->sa_os, hdl->sa_spill_tab);
790 if (!spilling) {
791 /*
792 * remove spill block that is no longer needed.
793 */
794 dmu_buf_rele(hdl->sa_spill, NULL);
795 hdl->sa_spill = NULL;
796 hdl->sa_spill_tab = NULL;
797 VERIFY(0 == dmu_rm_spill(hdl->sa_os,
798 sa_handle_object(hdl), tx));
799 } else {
800 VERIFY(0 == sa_build_index(hdl, SA_SPILL));
801 }
802 }
803
804 return (0);
805 }
806
807 static void
808 sa_free_attr_table(sa_os_t *sa)
809 {
810 int i;
811
812 if (sa->sa_attr_table == NULL)
813 return;
814
815 for (i = 0; i != sa->sa_num_attrs; i++) {
816 if (sa->sa_attr_table[i].sa_name)
817 kmem_free(sa->sa_attr_table[i].sa_name,
818 strlen(sa->sa_attr_table[i].sa_name) + 1);
819 }
820
821 kmem_free(sa->sa_attr_table,
822 sizeof (sa_attr_table_t) * sa->sa_num_attrs);
823
824 sa->sa_attr_table = NULL;
825 }
826
827 static int
828 sa_attr_table_setup(objset_t *os, sa_attr_reg_t *reg_attrs, int count)
829 {
830 sa_os_t *sa = os->os_sa;
831 uint64_t sa_attr_count = 0;
832 uint64_t sa_reg_count = 0;
833 int error = 0;
834 uint64_t attr_value;
835 sa_attr_table_t *tb;
836 zap_cursor_t zc;
837 zap_attribute_t za;
838 int registered_count = 0;
839 int i;
840 dmu_objset_type_t ostype = dmu_objset_type(os);
841
842 sa->sa_user_table =
843 kmem_zalloc(count * sizeof (sa_attr_type_t), KM_SLEEP);
844 sa->sa_user_table_sz = count * sizeof (sa_attr_type_t);
845
846 if (sa->sa_reg_attr_obj != 0) {
847 error = zap_count(os, sa->sa_reg_attr_obj,
848 &sa_attr_count);
849
850 /*
851 * Make sure we retrieved a count and that it isn't zero
852 */
853 if (error || (error == 0 && sa_attr_count == 0)) {
854 if (error == 0)
855 error = SET_ERROR(EINVAL);
856 goto bail;
857 }
858 sa_reg_count = sa_attr_count;
859 }
860
861 if (ostype == DMU_OST_ZFS && sa_attr_count == 0)
862 sa_attr_count += sa_legacy_attr_count;
863
864 /* Allocate attribute numbers for attributes that aren't registered */
865 for (i = 0; i != count; i++) {
866 boolean_t found = B_FALSE;
867 int j;
868
869 if (ostype == DMU_OST_ZFS) {
870 for (j = 0; j != sa_legacy_attr_count; j++) {
871 if (strcmp(reg_attrs[i].sa_name,
872 sa_legacy_attrs[j].sa_name) == 0) {
873 sa->sa_user_table[i] =
874 sa_legacy_attrs[j].sa_attr;
875 found = B_TRUE;
876 }
877 }
878 }
879 if (found)
880 continue;
881
882 if (sa->sa_reg_attr_obj)
883 error = zap_lookup(os, sa->sa_reg_attr_obj,
884 reg_attrs[i].sa_name, 8, 1, &attr_value);
885 else
886 error = SET_ERROR(ENOENT);
887 switch (error) {
888 case ENOENT:
889 sa->sa_user_table[i] = (sa_attr_type_t)sa_attr_count;
890 sa_attr_count++;
891 break;
892 case 0:
893 sa->sa_user_table[i] = ATTR_NUM(attr_value);
894 break;
895 default:
896 goto bail;
897 }
898 }
899
900 sa->sa_num_attrs = sa_attr_count;
901 tb = sa->sa_attr_table =
902 kmem_zalloc(sizeof (sa_attr_table_t) * sa_attr_count, KM_SLEEP);
903
904 /*
	 * The attribute table is constructed from the requested attribute
	 * list, attributes registered previously (possibly by a foreign
	 * consumer), and the legacy ZPL set of attributes.
908 */
909
910 if (sa->sa_reg_attr_obj) {
911 for (zap_cursor_init(&zc, os, sa->sa_reg_attr_obj);
912 (error = zap_cursor_retrieve(&zc, &za)) == 0;
913 zap_cursor_advance(&zc)) {
914 uint64_t value;
915 value = za.za_first_integer;
916
917 registered_count++;
918 tb[ATTR_NUM(value)].sa_attr = ATTR_NUM(value);
919 tb[ATTR_NUM(value)].sa_length = ATTR_LENGTH(value);
920 tb[ATTR_NUM(value)].sa_byteswap = ATTR_BSWAP(value);
921 tb[ATTR_NUM(value)].sa_registered = B_TRUE;
922
923 if (tb[ATTR_NUM(value)].sa_name) {
924 continue;
925 }
926 tb[ATTR_NUM(value)].sa_name =
927 kmem_zalloc(strlen(za.za_name) +1, KM_SLEEP);
928 (void) strlcpy(tb[ATTR_NUM(value)].sa_name, za.za_name,
929 strlen(za.za_name) +1);
930 }
931 zap_cursor_fini(&zc);
932 /*
933 * Make sure we processed the correct number of registered
934 * attributes
935 */
936 if (registered_count != sa_reg_count) {
937 ASSERT(error != 0);
938 goto bail;
939 }
940
941 }
942
943 if (ostype == DMU_OST_ZFS) {
944 for (i = 0; i != sa_legacy_attr_count; i++) {
945 if (tb[i].sa_name)
946 continue;
947 tb[i].sa_attr = sa_legacy_attrs[i].sa_attr;
948 tb[i].sa_length = sa_legacy_attrs[i].sa_length;
949 tb[i].sa_byteswap = sa_legacy_attrs[i].sa_byteswap;
950 tb[i].sa_registered = B_FALSE;
951 tb[i].sa_name =
952 kmem_zalloc(strlen(sa_legacy_attrs[i].sa_name) +1,
953 KM_SLEEP);
954 (void) strlcpy(tb[i].sa_name,
955 sa_legacy_attrs[i].sa_name,
956 strlen(sa_legacy_attrs[i].sa_name) + 1);
957 }
958 }
959
960 for (i = 0; i != count; i++) {
961 sa_attr_type_t attr_id;
962
963 attr_id = sa->sa_user_table[i];
964 if (tb[attr_id].sa_name)
965 continue;
966
967 tb[attr_id].sa_length = reg_attrs[i].sa_length;
968 tb[attr_id].sa_byteswap = reg_attrs[i].sa_byteswap;
969 tb[attr_id].sa_attr = attr_id;
970 tb[attr_id].sa_name =
971 kmem_zalloc(strlen(reg_attrs[i].sa_name) + 1, KM_SLEEP);
972 (void) strlcpy(tb[attr_id].sa_name, reg_attrs[i].sa_name,
973 strlen(reg_attrs[i].sa_name) + 1);
974 }
975
976 sa->sa_need_attr_registration =
977 (sa_attr_count != registered_count);
978
979 return (0);
980 bail:
981 kmem_free(sa->sa_user_table, count * sizeof (sa_attr_type_t));
982 sa->sa_user_table = NULL;
983 sa_free_attr_table(sa);
984 return ((error != 0) ? error : EINVAL);
985 }
986
987 int
988 sa_setup(objset_t *os, uint64_t sa_obj, sa_attr_reg_t *reg_attrs, int count,
989 sa_attr_type_t **user_table)
990 {
991 zap_cursor_t zc;
992 zap_attribute_t za;
993 sa_os_t *sa;
994 dmu_objset_type_t ostype = dmu_objset_type(os);
995 sa_attr_type_t *tb;
996 int error;
997
998 mutex_enter(&os->os_user_ptr_lock);
999 if (os->os_sa) {
1000 mutex_enter(&os->os_sa->sa_lock);
1001 mutex_exit(&os->os_user_ptr_lock);
1002 tb = os->os_sa->sa_user_table;
1003 mutex_exit(&os->os_sa->sa_lock);
1004 *user_table = tb;
1005 return (0);
1006 }
1007
1008 sa = kmem_zalloc(sizeof (sa_os_t), KM_SLEEP);
1009 mutex_init(&sa->sa_lock, NULL, MUTEX_DEFAULT, NULL);
1010 sa->sa_master_obj = sa_obj;
1011
1012 os->os_sa = sa;
1013 mutex_enter(&sa->sa_lock);
1014 mutex_exit(&os->os_user_ptr_lock);
1015 avl_create(&sa->sa_layout_num_tree, layout_num_compare,
1016 sizeof (sa_lot_t), offsetof(sa_lot_t, lot_num_node));
1017 avl_create(&sa->sa_layout_hash_tree, layout_hash_compare,
1018 sizeof (sa_lot_t), offsetof(sa_lot_t, lot_hash_node));
1019
1020 if (sa_obj) {
1021 error = zap_lookup(os, sa_obj, SA_LAYOUTS,
1022 8, 1, &sa->sa_layout_attr_obj);
1023 if (error != 0 && error != ENOENT)
1024 goto fail;
1025 error = zap_lookup(os, sa_obj, SA_REGISTRY,
1026 8, 1, &sa->sa_reg_attr_obj);
1027 if (error != 0 && error != ENOENT)
1028 goto fail;
1029 }
1030
1031 if ((error = sa_attr_table_setup(os, reg_attrs, count)) != 0)
1032 goto fail;
1033
1034 if (sa->sa_layout_attr_obj != 0) {
1035 uint64_t layout_count;
1036
1037 error = zap_count(os, sa->sa_layout_attr_obj,
1038 &layout_count);
1039
1040 /*
1041 * Layout number count should be > 0
1042 */
1043 if (error || (error == 0 && layout_count == 0)) {
1044 if (error == 0)
1045 error = SET_ERROR(EINVAL);
1046 goto fail;
1047 }
1048
1049 for (zap_cursor_init(&zc, os, sa->sa_layout_attr_obj);
1050 (error = zap_cursor_retrieve(&zc, &za)) == 0;
1051 zap_cursor_advance(&zc)) {
1052 sa_attr_type_t *lot_attrs;
1053 uint64_t lot_num;
1054
1055 lot_attrs = kmem_zalloc(sizeof (sa_attr_type_t) *
1056 za.za_num_integers, KM_SLEEP);
1057
1058 if ((error = (zap_lookup(os, sa->sa_layout_attr_obj,
1059 za.za_name, 2, za.za_num_integers,
1060 lot_attrs))) != 0) {
1061 kmem_free(lot_attrs, sizeof (sa_attr_type_t) *
1062 za.za_num_integers);
1063 break;
1064 }
1065 VERIFY(ddi_strtoull(za.za_name, NULL, 10,
1066 (unsigned long long *)&lot_num) == 0);
1067
1068 (void) sa_add_layout_entry(os, lot_attrs,
1069 za.za_num_integers, lot_num,
1070 sa_layout_info_hash(lot_attrs,
1071 za.za_num_integers), B_FALSE, NULL);
1072 kmem_free(lot_attrs, sizeof (sa_attr_type_t) *
1073 za.za_num_integers);
1074 }
1075 zap_cursor_fini(&zc);
1076
1077 /*
1078 * Make sure layout count matches number of entries added
1079 * to AVL tree
1080 */
1081 if (avl_numnodes(&sa->sa_layout_num_tree) != layout_count) {
1082 ASSERT(error != 0);
1083 goto fail;
1084 }
1085 }
1086
1087 /* Add special layout number for old ZNODES */
1088 if (ostype == DMU_OST_ZFS) {
1089 (void) sa_add_layout_entry(os, sa_legacy_zpl_layout,
1090 sa_legacy_attr_count, 0,
1091 sa_layout_info_hash(sa_legacy_zpl_layout,
1092 sa_legacy_attr_count), B_FALSE, NULL);
1093
1094 (void) sa_add_layout_entry(os, sa_dummy_zpl_layout, 0, 1,
1095 0, B_FALSE, NULL);
1096 }
1097 *user_table = os->os_sa->sa_user_table;
1098 mutex_exit(&sa->sa_lock);
1099 return (0);
1100 fail:
1101 os->os_sa = NULL;
1102 sa_free_attr_table(sa);
1103 if (sa->sa_user_table)
1104 kmem_free(sa->sa_user_table, sa->sa_user_table_sz);
1105 mutex_exit(&sa->sa_lock);
1106 avl_destroy(&sa->sa_layout_hash_tree);
1107 avl_destroy(&sa->sa_layout_num_tree);
1108 mutex_destroy(&sa->sa_lock);
1109 kmem_free(sa, sizeof (sa_os_t));
1110 return ((error == ECKSUM) ? EIO : error);
1111 }
1112
1113 void
1114 sa_tear_down(objset_t *os)
1115 {
1116 sa_os_t *sa = os->os_sa;
1117 sa_lot_t *layout;
1118 void *cookie;
1119
1120 kmem_free(sa->sa_user_table, sa->sa_user_table_sz);
1121
1122 /* Free up attr table */
1123
1124 sa_free_attr_table(sa);
1125
1126 cookie = NULL;
1127 while (layout = avl_destroy_nodes(&sa->sa_layout_hash_tree, &cookie)) {
1128 sa_idx_tab_t *tab;
1129 while (tab = list_head(&layout->lot_idx_tab)) {
1130 ASSERT(zfs_refcount_count(&tab->sa_refcount));
1131 sa_idx_tab_rele(os, tab);
1132 }
1133 }
1134
1135 cookie = NULL;
1136 while (layout = avl_destroy_nodes(&sa->sa_layout_num_tree, &cookie)) {
1137 kmem_free(layout->lot_attrs,
1138 sizeof (sa_attr_type_t) * layout->lot_attr_count);
1139 kmem_free(layout, sizeof (sa_lot_t));
1140 }
1141
1142 avl_destroy(&sa->sa_layout_hash_tree);
1143 avl_destroy(&sa->sa_layout_num_tree);
1144 mutex_destroy(&sa->sa_lock);
1145
1146 kmem_free(sa, sizeof (sa_os_t));
1147 os->os_sa = NULL;
1148 }
1149
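/*
 * sa_attr_iter() callback used by sa_build_index(): for each attribute
 * present in the buffer it records the attribute's offset from the start of
 * the header (and, for variable-sized attributes, the index of its length
 * slot) into the idx_tab, so that later lookups are simple table references.
 */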
1150 void
1151 sa_build_idx_tab(void *hdr, void *attr_addr, sa_attr_type_t attr,
1152 uint16_t length, int length_idx, boolean_t var_length, void *userp)
1153 {
1154 sa_idx_tab_t *idx_tab = userp;
1155
1156 if (var_length) {
1157 ASSERT(idx_tab->sa_variable_lengths);
1158 idx_tab->sa_variable_lengths[length_idx] = length;
1159 }
1160 TOC_ATTR_ENCODE(idx_tab->sa_idx_tab[attr], length_idx,
1161 (uint32_t)((uintptr_t)attr_addr - (uintptr_t)hdr));
1162 }
1163
1164 static void
1165 sa_attr_iter(objset_t *os, sa_hdr_phys_t *hdr, dmu_object_type_t type,
1166 sa_iterfunc_t func, sa_lot_t *tab, void *userp)
1167 {
1168 void *data_start;
1169 sa_lot_t *tb = tab;
1170 sa_lot_t search;
1171 avl_index_t loc;
1172 sa_os_t *sa = os->os_sa;
1173 int i;
1174 uint16_t *length_start = NULL;
1175 uint8_t length_idx = 0;
1176
1177 if (tab == NULL) {
1178 search.lot_num = SA_LAYOUT_NUM(hdr, type);
1179 tb = avl_find(&sa->sa_layout_num_tree, &search, &loc);
1180 ASSERT(tb);
1181 }
1182
1183 if (IS_SA_BONUSTYPE(type)) {
1184 data_start = (void *)P2ROUNDUP(((uintptr_t)hdr +
1185 offsetof(sa_hdr_phys_t, sa_lengths) +
1186 (sizeof (uint16_t) * tb->lot_var_sizes)), 8);
1187 length_start = hdr->sa_lengths;
1188 } else {
1189 data_start = hdr;
1190 }
1191
1192 for (i = 0; i != tb->lot_attr_count; i++) {
1193 int attr_length, reg_length;
1194 uint8_t idx_len;
1195
1196 reg_length = sa->sa_attr_table[tb->lot_attrs[i]].sa_length;
1197 if (reg_length) {
1198 attr_length = reg_length;
1199 idx_len = 0;
1200 } else {
1201 attr_length = length_start[length_idx];
1202 idx_len = length_idx++;
1203 }
1204
1205 func(hdr, data_start, tb->lot_attrs[i], attr_length,
1206 idx_len, reg_length == 0 ? B_TRUE : B_FALSE, userp);
1207
1208 data_start = (void *)P2ROUNDUP(((uintptr_t)data_start +
1209 attr_length), 8);
1210 }
1211 }
1212
1213 /*ARGSUSED*/
1214 void
1215 sa_byteswap_cb(void *hdr, void *attr_addr, sa_attr_type_t attr,
1216 uint16_t length, int length_idx, boolean_t variable_length, void *userp)
1217 {
1218 sa_handle_t *hdl = userp;
1219 sa_os_t *sa = hdl->sa_os->os_sa;
1220
1221 sa_bswap_table[sa->sa_attr_table[attr].sa_byteswap](attr_addr, length);
1222 }
1223
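/*
 * Byteswap an entire SA buffer (bonus or spill) in place the first time it
 * is accessed on a foreign-endian system.  Per-attribute swapping is
 * dispatched to sa_byteswap_cb() via sa_attr_iter().
 */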
1224 void
1225 sa_byteswap(sa_handle_t *hdl, sa_buf_type_t buftype)
1226 {
1227 sa_hdr_phys_t *sa_hdr_phys = SA_GET_HDR(hdl, buftype);
1228 dmu_buf_impl_t *db;
1229 sa_os_t *sa = hdl->sa_os->os_sa;
1230 int num_lengths = 1;
1231 int i;
1232
1233 ASSERT(MUTEX_HELD(&sa->sa_lock));
1234 if (sa_hdr_phys->sa_magic == SA_MAGIC)
1235 return;
1236
1237 db = SA_GET_DB(hdl, buftype);
1238
1239 if (buftype == SA_SPILL) {
1240 arc_release(db->db_buf, NULL);
1241 arc_buf_thaw(db->db_buf);
1242 }
1243
1244 sa_hdr_phys->sa_magic = BSWAP_32(sa_hdr_phys->sa_magic);
1245 sa_hdr_phys->sa_layout_info = BSWAP_16(sa_hdr_phys->sa_layout_info);
1246
1247 /*
	 * Determine the number of variable lengths in the header.
	 * The standard 8-byte header has one for free, and a
	 * 16-byte header would have 4 + 1.
1251 */
1252 if (SA_HDR_SIZE(sa_hdr_phys) > 8)
1253 num_lengths += (SA_HDR_SIZE(sa_hdr_phys) - 8) >> 1;
1254 for (i = 0; i != num_lengths; i++)
1255 sa_hdr_phys->sa_lengths[i] =
1256 BSWAP_16(sa_hdr_phys->sa_lengths[i]);
1257
1258 sa_attr_iter(hdl->sa_os, sa_hdr_phys, DMU_OT_SA,
1259 sa_byteswap_cb, NULL, hdl);
1260
1261 if (buftype == SA_SPILL)
1262 arc_buf_freeze(((dmu_buf_impl_t *)hdl->sa_spill)->db_buf);
1263 }
1264
1265 static int
1266 sa_build_index(sa_handle_t *hdl, sa_buf_type_t buftype)
1267 {
1268 sa_hdr_phys_t *sa_hdr_phys;
1269 dmu_buf_impl_t *db = SA_GET_DB(hdl, buftype);
1270 dmu_object_type_t bonustype = SA_BONUSTYPE_FROM_DB(db);
1271 sa_os_t *sa = hdl->sa_os->os_sa;
1272 sa_idx_tab_t *idx_tab;
1273
1274 sa_hdr_phys = SA_GET_HDR(hdl, buftype);
1275
1276 mutex_enter(&sa->sa_lock);
1277
1278 /* Do we need to byteswap? */
1279
1280 /* only check if not old znode */
1281 if (IS_SA_BONUSTYPE(bonustype) && sa_hdr_phys->sa_magic != SA_MAGIC &&
1282 sa_hdr_phys->sa_magic != 0) {
1283 VERIFY(BSWAP_32(sa_hdr_phys->sa_magic) == SA_MAGIC);
1284 sa_byteswap(hdl, buftype);
1285 }
1286
1287 idx_tab = sa_find_idx_tab(hdl->sa_os, bonustype, sa_hdr_phys);
1288
1289 if (buftype == SA_BONUS)
1290 hdl->sa_bonus_tab = idx_tab;
1291 else
1292 hdl->sa_spill_tab = idx_tab;
1293
1294 mutex_exit(&sa->sa_lock);
1295 return (0);
1296 }
1297
1298 /*ARGSUSED*/
1299 static void
1300 sa_evict_sync(void *dbu)
1301 {
1302 panic("evicting sa dbuf\n");
1303 }
1304
1305 static void
1306 sa_idx_tab_rele(objset_t *os, void *arg)
1307 {
1308 sa_os_t *sa = os->os_sa;
1309 sa_idx_tab_t *idx_tab = arg;
1310
1311 if (idx_tab == NULL)
1312 return;
1313
1314 mutex_enter(&sa->sa_lock);
1315 if (zfs_refcount_remove(&idx_tab->sa_refcount, NULL) == 0) {
1316 list_remove(&idx_tab->sa_layout->lot_idx_tab, idx_tab);
1317 if (idx_tab->sa_variable_lengths)
1318 kmem_free(idx_tab->sa_variable_lengths,
1319 sizeof (uint16_t) *
1320 idx_tab->sa_layout->lot_var_sizes);
1321 zfs_refcount_destroy(&idx_tab->sa_refcount);
1322 kmem_free(idx_tab->sa_idx_tab,
1323 sizeof (uint32_t) * sa->sa_num_attrs);
1324 kmem_free(idx_tab, sizeof (sa_idx_tab_t));
1325 }
1326 mutex_exit(&sa->sa_lock);
1327 }
1328
1329 static void
1330 sa_idx_tab_hold(objset_t *os, sa_idx_tab_t *idx_tab)
1331 {
1332 sa_os_t *sa = os->os_sa;
1333
1334 ASSERT(MUTEX_HELD(&sa->sa_lock));
1335 (void) zfs_refcount_add(&idx_tab->sa_refcount, NULL);
1336 }
1337
1338 void
1339 sa_handle_destroy(sa_handle_t *hdl)
1340 {
1341 dmu_buf_t *db = hdl->sa_bonus;
1342
1343 mutex_enter(&hdl->sa_lock);
1344 (void) dmu_buf_remove_user(db, &hdl->sa_dbu);
1345
1346 if (hdl->sa_bonus_tab)
1347 sa_idx_tab_rele(hdl->sa_os, hdl->sa_bonus_tab);
1348
1349 if (hdl->sa_spill_tab)
1350 sa_idx_tab_rele(hdl->sa_os, hdl->sa_spill_tab);
1351
1352 dmu_buf_rele(hdl->sa_bonus, NULL);
1353
1354 if (hdl->sa_spill)
1355 dmu_buf_rele((dmu_buf_t *)hdl->sa_spill, NULL);
1356 mutex_exit(&hdl->sa_lock);
1357
1358 kmem_cache_free(sa_cache, hdl);
1359 }
1360
1361 int
1362 sa_handle_get_from_db(objset_t *os, dmu_buf_t *db, void *userp,
1363 sa_handle_type_t hdl_type, sa_handle_t **handlepp)
1364 {
1365 int error = 0;
1366 dmu_object_info_t doi;
1367 sa_handle_t *handle = NULL;
1368
1369 #ifdef ZFS_DEBUG
1370 dmu_object_info_from_db(db, &doi);
1371 ASSERT(doi.doi_bonus_type == DMU_OT_SA ||
1372 doi.doi_bonus_type == DMU_OT_ZNODE);
1373 #endif
1374 /* find handle, if it exists */
1375 /* if one doesn't exist then create a new one, and initialize it */
1376
1377 if (hdl_type == SA_HDL_SHARED)
1378 handle = dmu_buf_get_user(db);
1379
1380 if (handle == NULL) {
1381 sa_handle_t *winner = NULL;
1382
1383 handle = kmem_cache_alloc(sa_cache, KM_SLEEP);
1384 handle->sa_dbu.dbu_evict_func_sync = NULL;
1385 handle->sa_dbu.dbu_evict_func_async = NULL;
1386 handle->sa_userp = userp;
1387 handle->sa_bonus = db;
1388 handle->sa_os = os;
1389 handle->sa_spill = NULL;
1390 handle->sa_bonus_tab = NULL;
1391 handle->sa_spill_tab = NULL;
1392
1393 error = sa_build_index(handle, SA_BONUS);
1394
1395 if (hdl_type == SA_HDL_SHARED) {
1396 dmu_buf_init_user(&handle->sa_dbu, sa_evict_sync, NULL,
1397 NULL);
1398 winner = dmu_buf_set_user_ie(db, &handle->sa_dbu);
1399 }
1400
1401 if (winner != NULL) {
1402 kmem_cache_free(sa_cache, handle);
1403 handle = winner;
1404 }
1405 }
1406 *handlepp = handle;
1407
1408 return (error);
1409 }
1410
1411 int
1412 sa_handle_get(objset_t *objset, uint64_t objid, void *userp,
1413 sa_handle_type_t hdl_type, sa_handle_t **handlepp)
1414 {
1415 dmu_buf_t *db;
1416 int error;
1417
1418 if (error = dmu_bonus_hold(objset, objid, NULL, &db))
1419 return (error);
1420
1421 return (sa_handle_get_from_db(objset, db, userp, hdl_type,
1422 handlepp));
1423 }
1424
1425 int
1426 sa_buf_hold(objset_t *objset, uint64_t obj_num, void *tag, dmu_buf_t **db)
1427 {
1428 return (dmu_bonus_hold(objset, obj_num, tag, db));
1429 }
1430
1431 void
1432 sa_buf_rele(dmu_buf_t *db, void *tag)
1433 {
1434 dmu_buf_rele(db, tag);
1435 }
1436
1437 int
1438 sa_lookup_impl(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count)
1439 {
1440 ASSERT(hdl);
1441 ASSERT(MUTEX_HELD(&hdl->sa_lock));
1442 return (sa_attr_op(hdl, bulk, count, SA_LOOKUP, NULL));
1443 }
1444
1445 static int
1446 sa_lookup_locked(sa_handle_t *hdl, sa_attr_type_t attr, void *buf,
1447 uint32_t buflen)
1448 {
1449 int error;
1450 sa_bulk_attr_t bulk;
1451
1452 bulk.sa_attr = attr;
1453 bulk.sa_data = buf;
1454 bulk.sa_length = buflen;
1455 bulk.sa_data_func = NULL;
1456
1457 ASSERT(hdl);
1458 error = sa_lookup_impl(hdl, &bulk, 1);
1459 return (error);
1460 }
1461
1462 int
1463 sa_lookup(sa_handle_t *hdl, sa_attr_type_t attr, void *buf, uint32_t buflen)
1464 {
1465 int error;
1466
1467 mutex_enter(&hdl->sa_lock);
1468 error = sa_lookup_locked(hdl, attr, buf, buflen);
1469 mutex_exit(&hdl->sa_lock);
1470
1471 return (error);
1472 }
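
/*
 * Illustrative sketch (the object number and attr_size id are assumptions):
 * the typical lifecycle around sa_lookup() for a private handle.
 *
 *	sa_handle_t *hdl;
 *	uint64_t size;
 *
 *	error = sa_handle_get(os, obj, NULL, SA_HDL_PRIVATE, &hdl);
 *	if (error != 0)
 *		return (error);
 *	error = sa_lookup(hdl, attr_size, &size, sizeof (size));
 *	sa_handle_destroy(hdl);
 */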
1473
1474 #ifdef _KERNEL
1475 int
1476 sa_lookup_uio(sa_handle_t *hdl, sa_attr_type_t attr, uio_t *uio)
1477 {
1478 int error;
1479 sa_bulk_attr_t bulk;
1480
1481 bulk.sa_data = NULL;
1482 bulk.sa_attr = attr;
1483 bulk.sa_data_func = NULL;
1484
1485 ASSERT(hdl);
1486
1487 mutex_enter(&hdl->sa_lock);
1488 if ((error = sa_attr_op(hdl, &bulk, 1, SA_LOOKUP, NULL)) == 0) {
1489 error = uiomove((void *)bulk.sa_addr, MIN(bulk.sa_size,
1490 uio->uio_resid), UIO_READ, uio);
1491 }
1492 mutex_exit(&hdl->sa_lock);
1493 return (error);
1494
1495 }
1496
1497 /*
 * An existing object that was upgraded from an old system has no slot for
 * the project ID attribute in its on-disk layout. But the quota accounting
 * logic needs to access related slots by offset directly, so we need to
 * adjust such old objects' layouts to place the project ID at a unified,
 * fixed offset.
1502 */
1503 int
1504 sa_add_projid(sa_handle_t *hdl, dmu_tx_t *tx, uint64_t projid)
1505 {
1506 znode_t *zp = sa_get_userdata(hdl);
1507 dmu_buf_t *db = sa_get_db(hdl);
1508 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1509 int count = 0, err = 0;
1510 sa_bulk_attr_t *bulk, *attrs;
1511 zfs_acl_locator_cb_t locate = { 0 };
1512 uint64_t uid, gid, mode, rdev, xattr = 0, parent, gen, links;
1513 uint64_t crtime[2], mtime[2], ctime[2], atime[2];
1514 zfs_acl_phys_t znode_acl = { 0 };
1515 char scanstamp[AV_SCANSTAMP_SZ];
1516
1517 if (zp->z_acl_cached == NULL) {
1518 zfs_acl_t *aclp;
1519
1520 mutex_enter(&zp->z_acl_lock);
1521 err = zfs_acl_node_read(zp, B_FALSE, &aclp, B_FALSE);
1522 mutex_exit(&zp->z_acl_lock);
1523 if (err != 0 && err != ENOENT)
1524 return (err);
1525 }
1526
1527 bulk = kmem_zalloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
1528 attrs = kmem_zalloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
1529 mutex_enter(&hdl->sa_lock);
1530 mutex_enter(&zp->z_lock);
1531
1532 err = sa_lookup_locked(hdl, SA_ZPL_PROJID(zfsvfs), &projid,
1533 sizeof (uint64_t));
1534 if (unlikely(err == 0))
		/* Someone else added the project ID attribute in a race. */
1536 err = EEXIST;
1537 if (err != ENOENT)
1538 goto out;
1539
1540 /* First do a bulk query of the attributes that aren't cached */
1541 if (zp->z_is_sa) {
1542 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
1543 &mode, 8);
1544 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
1545 &gen, 8);
1546 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
1547 &uid, 8);
1548 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
1549 &gid, 8);
1550 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
1551 &parent, 8);
1552 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
1553 &atime, 16);
1554 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
1555 &mtime, 16);
1556 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
1557 &ctime, 16);
1558 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL,
1559 &crtime, 16);
1560 if (S_ISBLK(zp->z_mode) || S_ISCHR(zp->z_mode))
1561 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL,
1562 &rdev, 8);
1563 } else {
1564 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
1565 &atime, 16);
1566 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
1567 &mtime, 16);
1568 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
1569 &ctime, 16);
1570 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL,
1571 &crtime, 16);
1572 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
1573 &gen, 8);
1574 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
1575 &mode, 8);
1576 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
1577 &parent, 8);
1578 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_XATTR(zfsvfs), NULL,
1579 &xattr, 8);
1580 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL,
1581 &rdev, 8);
1582 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
1583 &uid, 8);
1584 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
1585 &gid, 8);
1586 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
1587 &znode_acl, 88);
1588 }
1589 err = sa_bulk_lookup_locked(hdl, bulk, count);
1590 if (err != 0)
1591 goto out;
1592
1593 err = sa_lookup_locked(hdl, SA_ZPL_XATTR(zfsvfs), &xattr, 8);
1594 if (err != 0 && err != ENOENT)
1595 goto out;
1596
1597 zp->z_projid = projid;
1598 zp->z_pflags |= ZFS_PROJID;
1599 links = zp->z_links;
1600 count = 0;
1601 err = 0;
1602
1603 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
1604 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_SIZE(zfsvfs), NULL,
1605 &zp->z_size, 8);
1606 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_GEN(zfsvfs), NULL, &gen, 8);
1607 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_UID(zfsvfs), NULL, &uid, 8);
1608 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_GID(zfsvfs), NULL, &gid, 8);
1609 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8);
1610 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_FLAGS(zfsvfs), NULL,
1611 &zp->z_pflags, 8);
1612 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
1613 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
1614 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
1615 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_CRTIME(zfsvfs), NULL,
1616 &crtime, 16);
1617 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
1618 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_PROJID(zfsvfs), NULL, &projid, 8);
1619
1620 if (S_ISBLK(zp->z_mode) || S_ISCHR(zp->z_mode))
1621 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_RDEV(zfsvfs), NULL,
1622 &rdev, 8);
1623
1624 if (zp->z_acl_cached != NULL) {
1625 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
1626 &zp->z_acl_cached->z_acl_count, 8);
1627 if (zp->z_acl_cached->z_version < ZFS_ACL_VERSION_FUID)
1628 zfs_acl_xform(zp, zp->z_acl_cached, CRED());
1629 locate.cb_aclp = zp->z_acl_cached;
1630 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_DACL_ACES(zfsvfs),
1631 zfs_acl_data_locator, &locate,
1632 zp->z_acl_cached->z_acl_bytes);
1633 }
1634
1635 if (xattr)
1636 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_XATTR(zfsvfs), NULL,
1637 &xattr, 8);
1638
1639 if (zp->z_pflags & ZFS_BONUS_SCANSTAMP) {
1640 bcopy((caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
1641 scanstamp, AV_SCANSTAMP_SZ);
1642 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_SCANSTAMP(zfsvfs), NULL,
1643 scanstamp, AV_SCANSTAMP_SZ);
1644 zp->z_pflags &= ~ZFS_BONUS_SCANSTAMP;
1645 }
1646
1647 VERIFY(dmu_set_bonustype(db, DMU_OT_SA, tx) == 0);
1648 VERIFY(sa_replace_all_by_template_locked(hdl, attrs, count, tx) == 0);
1649 if (znode_acl.z_acl_extern_obj) {
1650 VERIFY(0 == dmu_object_free(zfsvfs->z_os,
1651 znode_acl.z_acl_extern_obj, tx));
1652 }
1653
1654 zp->z_is_sa = B_TRUE;
1655
1656 out:
1657 mutex_exit(&zp->z_lock);
1658 mutex_exit(&hdl->sa_lock);
1659 kmem_free(attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
1660 kmem_free(bulk, sizeof (sa_bulk_attr_t) * ZPL_END);
1661 return (err);
1662 }
1663 #endif
1664
1665 static sa_idx_tab_t *
1666 sa_find_idx_tab(objset_t *os, dmu_object_type_t bonustype, sa_hdr_phys_t *hdr)
1667 {
1668 sa_idx_tab_t *idx_tab;
1669 sa_os_t *sa = os->os_sa;
1670 sa_lot_t *tb, search;
1671 avl_index_t loc;
1672
1673 /*
	 * Determine layout number.  If SA node and header == 0 then
1675 * force the index table to the dummy "1" empty layout.
1676 *
1677 * The layout number would only be zero for a newly created file
1678 * that has not added any attributes yet, or with crypto enabled which
1679 * doesn't write any attributes to the bonus buffer.
1680 */
1681
1682 search.lot_num = SA_LAYOUT_NUM(hdr, bonustype);
1683
1684 tb = avl_find(&sa->sa_layout_num_tree, &search, &loc);
1685
1686 /* Verify header size is consistent with layout information */
1687 ASSERT(tb);
1688 ASSERT(IS_SA_BONUSTYPE(bonustype) &&
1689 SA_HDR_SIZE_MATCH_LAYOUT(hdr, tb) || !IS_SA_BONUSTYPE(bonustype) ||
1690 (IS_SA_BONUSTYPE(bonustype) && hdr->sa_layout_info == 0));
1691
1692 /*
	 * See if any of the already existing TOC entries can be reused.
1694 */
1695
1696 for (idx_tab = list_head(&tb->lot_idx_tab); idx_tab;
1697 idx_tab = list_next(&tb->lot_idx_tab, idx_tab)) {
1698 boolean_t valid_idx = B_TRUE;
1699 int i;
1700
1701 if (tb->lot_var_sizes != 0 &&
1702 idx_tab->sa_variable_lengths != NULL) {
1703 for (i = 0; i != tb->lot_var_sizes; i++) {
1704 if (hdr->sa_lengths[i] !=
1705 idx_tab->sa_variable_lengths[i]) {
1706 valid_idx = B_FALSE;
1707 break;
1708 }
1709 }
1710 }
1711 if (valid_idx) {
1712 sa_idx_tab_hold(os, idx_tab);
1713 return (idx_tab);
1714 }
1715 }
1716
1717 /* No such luck, create a new entry */
1718 idx_tab = kmem_zalloc(sizeof (sa_idx_tab_t), KM_SLEEP);
1719 idx_tab->sa_idx_tab =
1720 kmem_zalloc(sizeof (uint32_t) * sa->sa_num_attrs, KM_SLEEP);
1721 idx_tab->sa_layout = tb;
1722 zfs_refcount_create(&idx_tab->sa_refcount);
1723 if (tb->lot_var_sizes)
1724 idx_tab->sa_variable_lengths = kmem_alloc(sizeof (uint16_t) *
1725 tb->lot_var_sizes, KM_SLEEP);
1726
1727 sa_attr_iter(os, hdr, bonustype, sa_build_idx_tab,
1728 tb, idx_tab);
1729 sa_idx_tab_hold(os, idx_tab); /* one hold for consumer */
1730 sa_idx_tab_hold(os, idx_tab); /* one for layout */
1731 list_insert_tail(&tb->lot_idx_tab, idx_tab);
1732 return (idx_tab);
1733 }
1734
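/*
 * Data locator contract (see sa_copy_data() above): a locator is called
 * repeatedly until it has produced total_len bytes; start is B_TRUE only on
 * the first call, *dataptr and *len return the next chunk, and userdata is
 * the opaque pointer passed as the data argument of SA_ADD_BULK_ATTR().
 * sa_default_locator() is the trivial single-chunk case.
 */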
1735 void
1736 sa_default_locator(void **dataptr, uint32_t *len, uint32_t total_len,
1737 boolean_t start, void *userdata)
1738 {
1739 ASSERT(start);
1740
1741 *dataptr = userdata;
1742 *len = total_len;
1743 }
1744
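/*
 * Write out any attribute registrations that so far exist only in core to
 * the persistent SA_REGISTRY ZAP, creating that ZAP on first use.  Called
 * lazily from the update paths once a dmu_tx_t is available.
 */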
1745 static void
1746 sa_attr_register_sync(sa_handle_t *hdl, dmu_tx_t *tx)
1747 {
1748 uint64_t attr_value = 0;
1749 sa_os_t *sa = hdl->sa_os->os_sa;
1750 sa_attr_table_t *tb = sa->sa_attr_table;
1751 int i;
1752
1753 mutex_enter(&sa->sa_lock);
1754
1755 if (!sa->sa_need_attr_registration || sa->sa_master_obj == 0) {
1756 mutex_exit(&sa->sa_lock);
1757 return;
1758 }
1759
1760 if (sa->sa_reg_attr_obj == 0) {
1761 sa->sa_reg_attr_obj = zap_create_link(hdl->sa_os,
1762 DMU_OT_SA_ATTR_REGISTRATION,
1763 sa->sa_master_obj, SA_REGISTRY, tx);
1764 }
1765 for (i = 0; i != sa->sa_num_attrs; i++) {
1766 if (sa->sa_attr_table[i].sa_registered)
1767 continue;
1768 ATTR_ENCODE(attr_value, tb[i].sa_attr, tb[i].sa_length,
1769 tb[i].sa_byteswap);
1770 VERIFY(0 == zap_update(hdl->sa_os, sa->sa_reg_attr_obj,
1771 tb[i].sa_name, 8, 1, &attr_value, tx));
1772 tb[i].sa_registered = B_TRUE;
1773 }
1774 sa->sa_need_attr_registration = B_FALSE;
1775 mutex_exit(&sa->sa_lock);
1776 }
1777
1778 /*
 * Replace all attributes with the attributes specified in the template.
 * If the dnode had a spill buffer then those attributes will also be
 * replaced, possibly with just an empty spill block.
 *
 * This interface is intended to only be used for bulk adding of
 * attributes for a new file. It will also be used by the ZPL
 * when converting an old-format znode to native SA support.
1786 */
1787 int
1788 sa_replace_all_by_template_locked(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc,
1789 int attr_count, dmu_tx_t *tx)
1790 {
1791 sa_os_t *sa = hdl->sa_os->os_sa;
1792
1793 if (sa->sa_need_attr_registration)
1794 sa_attr_register_sync(hdl, tx);
1795 return (sa_build_layouts(hdl, attr_desc, attr_count, tx));
1796 }
1797
1798 int
1799 sa_replace_all_by_template(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc,
1800 int attr_count, dmu_tx_t *tx)
1801 {
1802 int error;
1803
1804 mutex_enter(&hdl->sa_lock);
1805 error = sa_replace_all_by_template_locked(hdl, attr_desc,
1806 attr_count, tx);
1807 mutex_exit(&hdl->sa_lock);
1808 return (error);
1809 }
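
/*
 * Illustrative sketch (the attribute ids, acl_bytes, and the mode/size/
 * locate variables are assumptions): building a template for a brand new
 * object and applying it in one call.  The tx must already cover the bonus
 * (and possible spill) writes.
 *
 *	sa_bulk_attr_t attrs[3];
 *	int count = 0;
 *
 *	SA_ADD_BULK_ATTR(attrs, count, attr_mode, NULL, &mode, 8);
 *	SA_ADD_BULK_ATTR(attrs, count, attr_size, NULL, &size, 8);
 *	SA_ADD_BULK_ATTR(attrs, count, attr_acl, zfs_acl_data_locator,
 *	    &locate, acl_bytes);
 *	error = sa_replace_all_by_template(hdl, attrs, count, tx);
 */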
1810
1811 /*
1812 * Add/remove a single attribute or replace a variable-sized attribute value
1813 * with a value of a different size, and then rewrite the entire set
1814 * of attributes.
1815 * Same-length attribute value replacement (including fixed-length attributes)
1816 * is handled more efficiently by the upper layers.
1817 */
1818 static int
1819 sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
1820 sa_data_op_t action, sa_data_locator_t *locator, void *datastart,
1821 uint16_t buflen, dmu_tx_t *tx)
1822 {
1823 sa_os_t *sa = hdl->sa_os->os_sa;
1824 dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
1825 dnode_t *dn;
1826 sa_bulk_attr_t *attr_desc;
1827 void *old_data[2];
1828 int bonus_attr_count = 0;
1829 int bonus_data_size = 0;
1830 int spill_data_size = 0;
1831 int spill_attr_count = 0;
1832 int error;
1833 uint16_t length, reg_length;
1834 int i, j, k, length_idx;
1835 sa_hdr_phys_t *hdr;
1836 sa_idx_tab_t *idx_tab;
1837 int attr_count;
1838 int count;
1839
1840 ASSERT(MUTEX_HELD(&hdl->sa_lock));
1841
	/* First make a copy of the old data */
1843
1844 DB_DNODE_ENTER(db);
1845 dn = DB_DNODE(db);
1846 if (dn->dn_bonuslen != 0) {
1847 bonus_data_size = hdl->sa_bonus->db_size;
1848 old_data[0] = kmem_alloc(bonus_data_size, KM_SLEEP);
1849 bcopy(hdl->sa_bonus->db_data, old_data[0],
1850 hdl->sa_bonus->db_size);
1851 bonus_attr_count = hdl->sa_bonus_tab->sa_layout->lot_attr_count;
1852 } else {
1853 old_data[0] = NULL;
1854 }
1855 DB_DNODE_EXIT(db);
1856
1857 /* Bring spill buffer online if it isn't currently */
1858
1859 if ((error = sa_get_spill(hdl)) == 0) {
1860 spill_data_size = hdl->sa_spill->db_size;
1861 old_data[1] = kmem_alloc(spill_data_size, KM_SLEEP);
1862 bcopy(hdl->sa_spill->db_data, old_data[1],
1863 hdl->sa_spill->db_size);
1864 spill_attr_count =
1865 hdl->sa_spill_tab->sa_layout->lot_attr_count;
1866 } else if (error && error != ENOENT) {
1867 if (old_data[0])
1868 kmem_free(old_data[0], bonus_data_size);
1869 return (error);
1870 } else {
1871 old_data[1] = NULL;
1872 }
1873
1874 /* build descriptor of all attributes */
1875
1876 attr_count = bonus_attr_count + spill_attr_count;
1877 if (action == SA_ADD)
1878 attr_count++;
1879 else if (action == SA_REMOVE)
1880 attr_count--;
1881
1882 attr_desc = kmem_zalloc(sizeof (sa_bulk_attr_t) * attr_count, KM_SLEEP);
1883
1884 /*
1885 * Loop through the bonus buffer and, if it exists, the spill buffer,
1886 * building up a new attr_desc describing every attribute to be rewritten.
1887 */
1888 k = j = 0;
1889 count = bonus_attr_count;
1890 hdr = SA_GET_HDR(hdl, SA_BONUS);
1891 idx_tab = SA_IDX_TAB_GET(hdl, SA_BONUS);
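/*
 * k selects the source buffer being walked (0 = bonus, 1 = spill);
 * j counts the entries added to attr_desc so far.
 */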
1892 for (; k != 2; k++) {
1893 /*
1894 * Iterate over each attribute in layout. Fetch the
1895 * size of variable-length attributes needing rewrite
1896 * from sa_lengths[].
1897 */
1898 for (i = 0, length_idx = 0; i != count; i++) {
1899 sa_attr_type_t attr;
1900
1901 attr = idx_tab->sa_layout->lot_attrs[i];
1902 reg_length = SA_REGISTERED_LEN(sa, attr);
1903 if (reg_length == 0) {
1904 length = hdr->sa_lengths[length_idx];
1905 length_idx++;
1906 } else {
1907 length = reg_length;
1908 }
1909 if (attr == newattr) {
1910 /*
1911 * There is nothing to do for SA_REMOVE,
1912 * so it is just skipped.
1913 */
1914 if (action == SA_REMOVE)
1915 continue;
1916
1917 /*
1918 * Duplicate attributes are not allowed, so the
1919 * action can not be SA_ADD here.
1920 */
1921 ASSERT3S(action, ==, SA_REPLACE);
1922
1923 /*
1924 * Only a variable-sized attribute can be
1925 * replaced here, and its size must be changing.
1926 */
1927 ASSERT3U(reg_length, ==, 0);
1928 ASSERT3U(length, !=, buflen);
1929 SA_ADD_BULK_ATTR(attr_desc, j, attr,
1930 locator, datastart, buflen);
1931 } else {
1932 SA_ADD_BULK_ATTR(attr_desc, j, attr,
1933 NULL, (void *)
1934 (TOC_OFF(idx_tab->sa_idx_tab[attr]) +
1935 (uintptr_t)old_data[k]), length);
1936 }
1937 }
1938 if (k == 0 && hdl->sa_spill) {
1939 hdr = SA_GET_HDR(hdl, SA_SPILL);
1940 idx_tab = SA_IDX_TAB_GET(hdl, SA_SPILL);
1941 count = spill_attr_count;
1942 } else {
1943 break;
1944 }
1945 }
1946 if (action == SA_ADD) {
1947 reg_length = SA_REGISTERED_LEN(sa, newattr);
1948 IMPLY(reg_length != 0, reg_length == buflen);
1949 SA_ADD_BULK_ATTR(attr_desc, j, newattr, locator,
1950 datastart, buflen);
1951 }
1952 ASSERT3U(j, ==, attr_count);
1953
1954 error = sa_build_layouts(hdl, attr_desc, attr_count, tx);
1955
1956 if (old_data[0])
1957 kmem_free(old_data[0], bonus_data_size);
1958 if (old_data[1])
1959 kmem_free(old_data[1], spill_data_size);
1960 kmem_free(attr_desc, sizeof (sa_bulk_attr_t) * attr_count);
1961
1962 return (error);
1963 }
1964
1965 static int
1966 sa_bulk_update_impl(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count,
1967 dmu_tx_t *tx)
1968 {
1969 int error;
1970 sa_os_t *sa = hdl->sa_os->os_sa;
1971 dmu_object_type_t bonustype;
1972
1973 bonustype = SA_BONUSTYPE_FROM_DB(SA_GET_DB(hdl, SA_BONUS));
1974
1975 ASSERT(hdl);
1976 ASSERT(MUTEX_HELD(&hdl->sa_lock));
1977
1978 /* sync out registration table if necessary */
1979 if (sa->sa_need_attr_registration)
1980 sa_attr_register_sync(hdl, tx);
1981
1982 error = sa_attr_op(hdl, bulk, count, SA_UPDATE, tx);
1983 if (error == 0 && !IS_SA_BONUSTYPE(bonustype) && sa->sa_update_cb)
1984 sa->sa_update_cb(hdl, tx);
1985
1986 return (error);
1987 }
1988
1989 /*
1990 * Update the value of an attribute, adding the attribute if it does
1991 * not already exist.
1991 */
1992 int
1993 sa_update(sa_handle_t *hdl, sa_attr_type_t type,
1994 void *buf, uint32_t buflen, dmu_tx_t *tx)
1995 {
1996 int error;
1997 sa_bulk_attr_t bulk;
1998
1999 bulk.sa_attr = type;
2000 bulk.sa_data_func = NULL;
2001 bulk.sa_length = buflen;
2002 bulk.sa_data = buf;
2003
2004 mutex_enter(&hdl->sa_lock);
2005 error = sa_bulk_update_impl(hdl, &bulk, 1, tx);
2006 mutex_exit(&hdl->sa_lock);
2007 return (error);
2008 }
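/*
 * Illustrative sketch (not compiled): updating one fixed-length
 * attribute inside a transaction.  "os", "hdl" and "my_attr" are
 * assumed to be the caller's objset, SA handle and a registered
 * attribute id.
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	uint64_t val = 1234;
 *	int error;
 *
 *	dmu_tx_hold_sa(tx, hdl, B_FALSE);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	if (error != 0) {
 *		dmu_tx_abort(tx);
 *	} else {
 *		error = sa_update(hdl, my_attr, &val, sizeof (val), tx);
 *		dmu_tx_commit(tx);
 *	}
 */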
2009
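/*
 * Like sa_update(), but the attribute data is produced by a
 * sa_data_locator_t callback (with "userdata" passed through to it)
 * rather than supplied as a flat buffer.
 */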
2010 int
2011 sa_update_from_cb(sa_handle_t *hdl, sa_attr_type_t attr,
2012 uint32_t buflen, sa_data_locator_t *locator, void *userdata, dmu_tx_t *tx)
2013 {
2014 int error;
2015 sa_bulk_attr_t bulk;
2016
2017 bulk.sa_attr = attr;
2018 bulk.sa_data = userdata;
2019 bulk.sa_data_func = locator;
2020 bulk.sa_length = buflen;
2021
2022 mutex_enter(&hdl->sa_lock);
2023 error = sa_bulk_update_impl(hdl, &bulk, 1, tx);
2024 mutex_exit(&hdl->sa_lock);
2025 return (error);
2026 }
2027
2028 /*
2029 * Return size of an attribute
2030 */
2031
2032 int
2033 sa_size(sa_handle_t *hdl, sa_attr_type_t attr, int *size)
2034 {
2035 sa_bulk_attr_t bulk;
2036 int error;
2037
2038 bulk.sa_data = NULL;
2039 bulk.sa_attr = attr;
2040 bulk.sa_data_func = NULL;
2041
2042 ASSERT(hdl);
2043 mutex_enter(&hdl->sa_lock);
2044 if ((error = sa_attr_op(hdl, &bulk, 1, SA_LOOKUP, NULL)) != 0) {
2045 mutex_exit(&hdl->sa_lock);
2046 return (error);
2047 }
2048 *size = bulk.sa_size;
2049
2050 mutex_exit(&hdl->sa_lock);
2051 return (0);
2052 }
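/*
 * Illustrative sketch (not compiled): fetching a variable-length
 * attribute by first asking for its size.  "my_var_attr" is a
 * hypothetical attribute id.
 *
 *	int size, error;
 *	void *buf;
 *
 *	error = sa_size(hdl, my_var_attr, &size);
 *	if (error == 0) {
 *		buf = kmem_alloc(size, KM_SLEEP);
 *		error = sa_lookup(hdl, my_var_attr, buf, size);
 *		...
 *		kmem_free(buf, size);
 *	}
 */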
2053
2054 int
2055 sa_bulk_lookup_locked(sa_handle_t *hdl, sa_bulk_attr_t *attrs, int count)
2056 {
2057 ASSERT(hdl);
2058 ASSERT(MUTEX_HELD(&hdl->sa_lock));
2059 return (sa_lookup_impl(hdl, attrs, count));
2060 }
2061
2062 int
2063 sa_bulk_lookup(sa_handle_t *hdl, sa_bulk_attr_t *attrs, int count)
2064 {
2065 int error;
2066
2067 ASSERT(hdl);
2068 mutex_enter(&hdl->sa_lock);
2069 error = sa_bulk_lookup_locked(hdl, attrs, count);
2070 mutex_exit(&hdl->sa_lock);
2071 return (error);
2072 }
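/*
 * Illustrative sketch (not compiled): looking up several attributes in
 * a single call.  Attribute ids and destination variables are
 * hypothetical.
 *
 *	sa_bulk_attr_t bulk[2];
 *	int count = 0, error;
 *	uint64_t mode, size;
 *
 *	SA_ADD_BULK_ATTR(bulk, count, my_mode_attr, NULL, &mode,
 *	    sizeof (mode));
 *	SA_ADD_BULK_ATTR(bulk, count, my_size_attr, NULL, &size,
 *	    sizeof (size));
 *	error = sa_bulk_lookup(hdl, bulk, count);
 */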
2073
2074 int
2075 sa_bulk_update(sa_handle_t *hdl, sa_bulk_attr_t *attrs, int count, dmu_tx_t *tx)
2076 {
2077 int error;
2078
2079 ASSERT(hdl);
2080 mutex_enter(&hdl->sa_lock);
2081 error = sa_bulk_update_impl(hdl, attrs, count, tx);
2082 mutex_exit(&hdl->sa_lock);
2083 return (error);
2084 }
2085
2086 int
2087 sa_remove(sa_handle_t *hdl, sa_attr_type_t attr, dmu_tx_t *tx)
2088 {
2089 int error;
2090
2091 mutex_enter(&hdl->sa_lock);
2092 error = sa_modify_attrs(hdl, attr, SA_REMOVE, NULL,
2093 NULL, 0, tx);
2094 mutex_exit(&hdl->sa_lock);
2095 return (error);
2096 }
2097
2098 void
2099 sa_object_info(sa_handle_t *hdl, dmu_object_info_t *doi)
2100 {
2101 dmu_object_info_from_db((dmu_buf_t *)hdl->sa_bonus, doi);
2102 }
2103
2104 void
2105 sa_object_size(sa_handle_t *hdl, uint32_t *blksize, u_longlong_t *nblocks)
2106 {
2107 dmu_object_size_from_db((dmu_buf_t *)hdl->sa_bonus,
2108 blksize, nblocks);
2109 }
2110
2111 void
2112 sa_set_userp(sa_handle_t *hdl, void *ptr)
2113 {
2114 hdl->sa_userp = ptr;
2115 }
2116
2117 dmu_buf_t *
2118 sa_get_db(sa_handle_t *hdl)
2119 {
2120 return ((dmu_buf_t *)hdl->sa_bonus);
2121 }
2122
2123 void *
2124 sa_get_userdata(sa_handle_t *hdl)
2125 {
2126 return (hdl->sa_userp);
2127 }
2128
2129 void
2130 sa_register_update_callback_locked(objset_t *os, sa_update_cb_t *func)
2131 {
2132 ASSERT(MUTEX_HELD(&os->os_sa->sa_lock));
2133 os->os_sa->sa_update_cb = func;
2134 }
2135
2136 void
2137 sa_register_update_callback(objset_t *os, sa_update_cb_t *func)
2138 {
2140 mutex_enter(&os->os_sa->sa_lock);
2141 sa_register_update_callback_locked(os, func);
2142 mutex_exit(&os->os_sa->sa_lock);
2143 }
2144
2145 uint64_t
2146 sa_handle_object(sa_handle_t *hdl)
2147 {
2148 return (hdl->sa_bonus->db_object);
2149 }
2150
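/*
 * Note that this returns B_TRUE when the objset has no SA support
 * configured (os_sa == NULL).
 */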
2151 boolean_t
2152 sa_enabled(objset_t *os)
2153 {
2154 return (os->os_sa == NULL);
2155 }
2156
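/*
 * Record the master SA object for this objset.  Fails (returns 1) if a
 * master object has already been set.
 */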
2157 int
2158 sa_set_sa_object(objset_t *os, uint64_t sa_object)
2159 {
2160 sa_os_t *sa = os->os_sa;
2161
2162 if (sa->sa_master_obj)
2163 return (1);
2164
2165 sa->sa_master_obj = sa_object;
2166
2167 return (0);
2168 }
2169
2170 int
2171 sa_hdrsize(void *arg)
2172 {
2173 sa_hdr_phys_t *hdr = arg;
2174
2175 return (SA_HDR_SIZE(hdr));
2176 }
2177
2178 void
2179 sa_handle_lock(sa_handle_t *hdl)
2180 {
2181 ASSERT(hdl);
2182 mutex_enter(&hdl->sa_lock);
2183 }
2184
2185 void
2186 sa_handle_unlock(sa_handle_t *hdl)
2187 {
2188 ASSERT(hdl);
2189 mutex_exit(&hdl->sa_lock);
2190 }
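/*
 * Illustrative sketch (not compiled): holding the handle lock across
 * several related operations so they see a consistent view of the
 * attributes.  "bulk", "count", "tmpl", "tmpl_count" and "tx" are
 * assumed to have been set up by the caller as in the examples above.
 *
 *	sa_handle_lock(hdl);
 *	error = sa_bulk_lookup_locked(hdl, bulk, count);
 *	if (error == 0)
 *		error = sa_replace_all_by_template_locked(hdl, tmpl,
 *		    tmpl_count, tx);
 *	sa_handle_unlock(hdl);
 */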