11927 Log, or optionally panic, on zero-length kmem allocations
Reviewed by: Dan McDonald <danmcd@joyent.com>
Reviewed by: Jason King <jason.brian.king@gmail.com>
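This webrev covers the sa.c portion of 11927: now that zero-length kmem_alloc() calls are logged (or, optionally, panic), sa_add_layout_entry() must not request a zero-byte buffer when a layout is added with no attributes. A minimal sketch of the guarded-allocation pattern the hunk below introduces, using the names from the diff itself:

	int size;

	if ((size = sizeof (sa_attr_type_t) * attr_count) != 0) {
		/* Allocate and copy the attribute list only when it is non-empty. */
		tb->lot_attrs = kmem_alloc(size, KM_SLEEP);
		bcopy(attrs, tb->lot_attrs, size);
	}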
--- old/usr/src/uts/common/fs/zfs/sa.c
+++ new/usr/src/uts/common/fs/zfs/sa.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Portions Copyright 2011 iXsystems, Inc
25 25 * Copyright (c) 2013, 2017 by Delphix. All rights reserved.
26 26 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
27 27 * Copyright (c) 2014 Integros [integros.com]
28 28 * Copyright 2019 Joyent, Inc.
29 29 */
30 30
31 31 #include <sys/zfs_context.h>
32 32 #include <sys/types.h>
33 33 #include <sys/param.h>
34 34 #include <sys/systm.h>
35 35 #include <sys/sysmacros.h>
36 36 #include <sys/dmu.h>
37 37 #include <sys/dmu_impl.h>
38 38 #include <sys/dmu_objset.h>
39 39 #include <sys/dmu_tx.h>
40 40 #include <sys/dbuf.h>
41 41 #include <sys/dnode.h>
42 42 #include <sys/zap.h>
43 43 #include <sys/sa.h>
44 44 #include <sys/sunddi.h>
45 45 #include <sys/sa_impl.h>
46 46 #include <sys/dnode.h>
47 47 #include <sys/errno.h>
48 48 #include <sys/zfs_context.h>
49 49
50 50 #ifdef _KERNEL
51 51 #include <sys/zfs_znode.h>
52 52 #endif
53 53
54 54 /*
55 55 * ZFS System attributes:
56 56 *
57 57 * A generic mechanism to allow for arbitrary attributes
58 58 * to be stored in a dnode. The data will be stored in the bonus buffer of
59 59 * the dnode and if necessary a special "spill" block will be used to handle
60 60 * overflow situations. The spill block will be sized to fit the data
61 61 * from 512 - 128K. When a spill block is used the BP (blkptr_t) for the
62 62 * spill block is stored at the end of the current bonus buffer. Any
63 63 * attributes that would be in the way of the blkptr_t will be relocated
64 64 * into the spill block.
65 65 *
66 66 * Attribute registration:
67 67 *
68 68 * Stored persistently on a per dataset basis
69 69 * a mapping between attribute "string" names and their actual attribute
70 70 * numeric values, length, and byteswap function. The names are only used
71 71 * during registration. All attributes are known by their unique attribute
72 72 * id value. If an attribute can have a variable size then the value
73 73 * 0 will be used to indicate this.
74 74 *
75 75 * Attribute Layout:
76 76 *
77 77 * Attribute layouts are a way to compactly store multiple attributes, but
78 78 * without taking the overhead associated with managing each attribute
79 79 * individually. Since you will typically have the same set of attributes
80 80 * stored in the same order a single table will be used to represent that
81 81 * layout. The ZPL for example will usually have only about 10 different
82 82 * layouts (regular files, device files, symlinks,
83 83 * regular files + scanstamp, files/dir with extended attributes, and then
84 84 * you have the possibility of all of those minus ACL, because it would
85 85 * be kicked out into the spill block)
86 86 *
87 87 * Layouts are simply an array of the attributes and their
88 88 * ordering i.e. [0, 1, 4, 5, 2]
89 89 *
90 90 * Each distinct layout is given a unique layout number and that is what's
91 91 * stored in the header at the beginning of the SA data buffer.
92 92 *
93 93 * A layout only covers a single dbuf (bonus or spill). If a set of
94 94 * attributes is split up between the bonus buffer and a spill buffer then
95 95 * two different layouts will be used. This allows us to byteswap the
96 96 * spill without looking at the bonus buffer and keeps the on disk format of
97 97 * the bonus and spill buffer the same.
98 98 *
99 99 * Adding a single attribute will cause the entire set of attributes to
100 100 * be rewritten and could result in a new layout number being constructed
101 101 * as part of the rewrite if no such layout exists for the new set of
102 102 * attributes. The new attribute will be appended to the end of the already
103 103 * existing attributes.
104 104 *
105 105 * Both the attribute registration and attribute layout information are
106 106 * stored in normal ZAP attributes. There should be a small number of
107 107 * known layouts and the set of attributes is assumed to typically be quite
108 108 * small.
109 109 *
110 110 * The registered attributes and layout "table" information is maintained
111 111 * in core and a special "sa_os_t" is attached to the objset_t.
112 112 *
113 113 * A special interface is provided to allow for quickly applying
114 114 * a large set of attributes at once. sa_replace_all_by_template() is
115 115 * used to set an array of attributes. This is used by the ZPL when
116 116 * creating a brand new file. The template that is passed into the function
117 117 * specifies the attribute, size for variable length attributes, location of
118 118 * data and special "data locator" function if the data isn't in a contiguous
119 119 * location.
120 120 *
121 121 * Byteswap implications:
122 122 *
123 123 * Since the SA attributes are not entirely self describing we can't do
124 124 * the normal byteswap processing. The special ZAP layout attribute and
125 125 * attribute registration attributes define the byteswap function and the
126 126 * size of the attributes, unless it is variable sized.
127 127 * The normal ZFS byteswapping infrastructure assumes you don't need
128 128 * to read any objects in order to do the necessary byteswapping. Whereas
129 129 * SA attributes can only be properly byteswapped if the dataset is opened
130 130 * and the layout/attribute ZAP attributes are available. Because of this
131 131 * the SA attributes will be byteswapped when they are first accessed by
132 132 * the SA code that will read the SA data.
133 133 */
134 134
135 135 typedef void (sa_iterfunc_t)(void *hdr, void *addr, sa_attr_type_t,
136 136 uint16_t length, int length_idx, boolean_t, void *userp);
137 137
138 138 static int sa_build_index(sa_handle_t *hdl, sa_buf_type_t buftype);
139 139 static void sa_idx_tab_hold(objset_t *os, sa_idx_tab_t *idx_tab);
140 140 static sa_idx_tab_t *sa_find_idx_tab(objset_t *os, dmu_object_type_t bonustype,
141 141 sa_hdr_phys_t *hdr);
142 142 static void sa_idx_tab_rele(objset_t *os, void *arg);
143 143 static void sa_copy_data(sa_data_locator_t *func, void *start, void *target,
144 144 int buflen);
145 145 static int sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
146 146 sa_data_op_t action, sa_data_locator_t *locator, void *datastart,
147 147 uint16_t buflen, dmu_tx_t *tx);
148 148
149 149 arc_byteswap_func_t *sa_bswap_table[] = {
150 150 byteswap_uint64_array,
151 151 byteswap_uint32_array,
152 152 byteswap_uint16_array,
153 153 byteswap_uint8_array,
154 154 zfs_acl_byteswap,
155 155 };
156 156
157 157 #define SA_COPY_DATA(f, s, t, l) \
158 158 { \
159 159 if (f == NULL) { \
160 160 if (l == 8) { \
161 161 *(uint64_t *)t = *(uint64_t *)s; \
162 162 } else if (l == 16) { \
163 163 *(uint64_t *)t = *(uint64_t *)s; \
164 164 *(uint64_t *)((uintptr_t)t + 8) = \
165 165 *(uint64_t *)((uintptr_t)s + 8); \
166 166 } else { \
167 167 bcopy(s, t, l); \
168 168 } \
169 169 } else \
170 170 sa_copy_data(f, s, t, l); \
171 171 }
172 172
173 173 /*
174 174 * This table is fixed and cannot be changed. Its purpose is to
175 175 * allow the SA code to work with both old/new ZPL file systems.
176 176 * It contains the list of legacy attributes. These attributes aren't
177 177 * stored in the "attribute" registry zap objects, since older ZPL file systems
178 178 * won't have the registry. Only objsets of type ZFS_TYPE_FILESYSTEM will
179 179 * use this static table.
180 180 */
181 181 sa_attr_reg_t sa_legacy_attrs[] = {
182 182 {"ZPL_ATIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 0},
183 183 {"ZPL_MTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 1},
184 184 {"ZPL_CTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 2},
185 185 {"ZPL_CRTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 3},
186 186 {"ZPL_GEN", sizeof (uint64_t), SA_UINT64_ARRAY, 4},
187 187 {"ZPL_MODE", sizeof (uint64_t), SA_UINT64_ARRAY, 5},
188 188 {"ZPL_SIZE", sizeof (uint64_t), SA_UINT64_ARRAY, 6},
189 189 {"ZPL_PARENT", sizeof (uint64_t), SA_UINT64_ARRAY, 7},
190 190 {"ZPL_LINKS", sizeof (uint64_t), SA_UINT64_ARRAY, 8},
191 191 {"ZPL_XATTR", sizeof (uint64_t), SA_UINT64_ARRAY, 9},
192 192 {"ZPL_RDEV", sizeof (uint64_t), SA_UINT64_ARRAY, 10},
193 193 {"ZPL_FLAGS", sizeof (uint64_t), SA_UINT64_ARRAY, 11},
194 194 {"ZPL_UID", sizeof (uint64_t), SA_UINT64_ARRAY, 12},
195 195 {"ZPL_GID", sizeof (uint64_t), SA_UINT64_ARRAY, 13},
196 196 {"ZPL_PAD", sizeof (uint64_t) * 4, SA_UINT64_ARRAY, 14},
197 197 {"ZPL_ZNODE_ACL", 88, SA_UINT8_ARRAY, 15},
198 198 };
199 199
200 200 /*
201 201 * This is only used for objects of type DMU_OT_ZNODE
202 202 */
203 203 sa_attr_type_t sa_legacy_zpl_layout[] = {
204 204 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
205 205 };
206 206
207 207 /*
208 208 * Special dummy layout used for buffers with no attributes.
209 209 */
210 210 sa_attr_type_t sa_dummy_zpl_layout[] = { 0 };
211 211
212 212 static int sa_legacy_attr_count = 16;
213 213 static kmem_cache_t *sa_cache = NULL;
214 214
215 215 /*ARGSUSED*/
216 216 static int
217 217 sa_cache_constructor(void *buf, void *unused, int kmflag)
218 218 {
219 219 sa_handle_t *hdl = buf;
220 220
221 221 mutex_init(&hdl->sa_lock, NULL, MUTEX_DEFAULT, NULL);
222 222 return (0);
223 223 }
224 224
225 225 /*ARGSUSED*/
226 226 static void
227 227 sa_cache_destructor(void *buf, void *unused)
228 228 {
229 229 sa_handle_t *hdl = buf;
230 230 mutex_destroy(&hdl->sa_lock);
231 231 }
232 232
233 233 void
234 234 sa_cache_init(void)
235 235 {
236 236 sa_cache = kmem_cache_create("sa_cache",
237 237 sizeof (sa_handle_t), 0, sa_cache_constructor,
238 238 sa_cache_destructor, NULL, NULL, NULL, 0);
239 239 }
240 240
241 241 void
242 242 sa_cache_fini(void)
243 243 {
244 244 if (sa_cache)
245 245 kmem_cache_destroy(sa_cache);
246 246 }
247 247
248 248 static int
249 249 layout_num_compare(const void *arg1, const void *arg2)
250 250 {
251 251 const sa_lot_t *node1 = (const sa_lot_t *)arg1;
252 252 const sa_lot_t *node2 = (const sa_lot_t *)arg2;
253 253
254 254 return (AVL_CMP(node1->lot_num, node2->lot_num));
255 255 }
256 256
257 257 static int
258 258 layout_hash_compare(const void *arg1, const void *arg2)
259 259 {
260 260 const sa_lot_t *node1 = (const sa_lot_t *)arg1;
261 261 const sa_lot_t *node2 = (const sa_lot_t *)arg2;
262 262
263 263 int cmp = AVL_CMP(node1->lot_hash, node2->lot_hash);
264 264 if (likely(cmp))
265 265 return (cmp);
266 266
267 267 return (AVL_CMP(node1->lot_instance, node2->lot_instance));
268 268 }
269 269
270 270 boolean_t
271 271 sa_layout_equal(sa_lot_t *tbf, sa_attr_type_t *attrs, int count)
272 272 {
273 273 int i;
274 274
275 275 if (count != tbf->lot_attr_count)
276 276 return (1);
277 277
278 278 for (i = 0; i != count; i++) {
279 279 if (attrs[i] != tbf->lot_attrs[i])
280 280 return (1);
281 281 }
282 282 return (0);
283 283 }
284 284
285 285 #define SA_ATTR_HASH(attr) (zfs_crc64_table[(-1ULL ^ attr) & 0xFF])
286 286
287 287 static uint64_t
288 288 sa_layout_info_hash(sa_attr_type_t *attrs, int attr_count)
289 289 {
290 290 int i;
291 291 uint64_t crc = -1ULL;
292 292
293 293 for (i = 0; i != attr_count; i++)
294 294 crc ^= SA_ATTR_HASH(attrs[i]);
295 295
296 296 return (crc);
297 297 }
298 298
299 299 static int
300 300 sa_get_spill(sa_handle_t *hdl)
301 301 {
302 302 int rc;
303 303 if (hdl->sa_spill == NULL) {
304 304 if ((rc = dmu_spill_hold_existing(hdl->sa_bonus, NULL,
305 305 &hdl->sa_spill)) == 0)
306 306 VERIFY(0 == sa_build_index(hdl, SA_SPILL));
307 307 } else {
308 308 rc = 0;
309 309 }
310 310
311 311 return (rc);
312 312 }
313 313
314 314 /*
315 315 * Main attribute lookup/update function
316 316 * returns 0 for success or non zero for failures
317 317 *
318 318 * Operates on bulk array, first failure will abort further processing
319 319 */
320 320 int
321 321 sa_attr_op(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count,
322 322 sa_data_op_t data_op, dmu_tx_t *tx)
323 323 {
324 324 sa_os_t *sa = hdl->sa_os->os_sa;
325 325 int i;
326 326 int error = 0;
327 327 sa_buf_type_t buftypes;
328 328
329 329 buftypes = 0;
330 330
331 331 ASSERT(count > 0);
332 332 for (i = 0; i != count; i++) {
333 333 ASSERT(bulk[i].sa_attr <= hdl->sa_os->os_sa->sa_num_attrs);
334 334
335 335 bulk[i].sa_addr = NULL;
336 336 /* First check the bonus buffer */
337 337
338 338 if (hdl->sa_bonus_tab && TOC_ATTR_PRESENT(
339 339 hdl->sa_bonus_tab->sa_idx_tab[bulk[i].sa_attr])) {
340 340 SA_ATTR_INFO(sa, hdl->sa_bonus_tab,
341 341 SA_GET_HDR(hdl, SA_BONUS),
342 342 bulk[i].sa_attr, bulk[i], SA_BONUS, hdl);
343 343 if (tx && !(buftypes & SA_BONUS)) {
344 344 dmu_buf_will_dirty(hdl->sa_bonus, tx);
345 345 buftypes |= SA_BONUS;
346 346 }
347 347 }
348 348 if (bulk[i].sa_addr == NULL &&
349 349 ((error = sa_get_spill(hdl)) == 0)) {
350 350 if (TOC_ATTR_PRESENT(
351 351 hdl->sa_spill_tab->sa_idx_tab[bulk[i].sa_attr])) {
352 352 SA_ATTR_INFO(sa, hdl->sa_spill_tab,
353 353 SA_GET_HDR(hdl, SA_SPILL),
354 354 bulk[i].sa_attr, bulk[i], SA_SPILL, hdl);
355 355 if (tx && !(buftypes & SA_SPILL) &&
356 356 bulk[i].sa_size == bulk[i].sa_length) {
357 357 dmu_buf_will_dirty(hdl->sa_spill, tx);
358 358 buftypes |= SA_SPILL;
359 359 }
360 360 }
361 361 }
362 362 if (error && error != ENOENT) {
363 363 return ((error == ECKSUM) ? EIO : error);
364 364 }
365 365
366 366 switch (data_op) {
367 367 case SA_LOOKUP:
368 368 if (bulk[i].sa_addr == NULL)
369 369 return (SET_ERROR(ENOENT));
370 370 if (bulk[i].sa_data) {
371 371 SA_COPY_DATA(bulk[i].sa_data_func,
372 372 bulk[i].sa_addr, bulk[i].sa_data,
373 373 bulk[i].sa_size);
374 374 }
375 375 continue;
376 376
377 377 case SA_UPDATE:
378 378 /* existing rewrite of attr */
379 379 if (bulk[i].sa_addr &&
380 380 bulk[i].sa_size == bulk[i].sa_length) {
381 381 SA_COPY_DATA(bulk[i].sa_data_func,
382 382 bulk[i].sa_data, bulk[i].sa_addr,
383 383 bulk[i].sa_length);
384 384 continue;
385 385 } else if (bulk[i].sa_addr) { /* attr size change */
386 386 error = sa_modify_attrs(hdl, bulk[i].sa_attr,
387 387 SA_REPLACE, bulk[i].sa_data_func,
388 388 bulk[i].sa_data, bulk[i].sa_length, tx);
389 389 } else { /* adding new attribute */
390 390 error = sa_modify_attrs(hdl, bulk[i].sa_attr,
391 391 SA_ADD, bulk[i].sa_data_func,
392 392 bulk[i].sa_data, bulk[i].sa_length, tx);
393 393 }
394 394 if (error)
395 395 return (error);
396 396 break;
397 397 }
398 398 }
399 399 return (error);
400 400 }
401 401
402 402 static sa_lot_t *
403 403 sa_add_layout_entry(objset_t *os, sa_attr_type_t *attrs, int attr_count,
404 404 uint64_t lot_num, uint64_t hash, boolean_t zapadd, dmu_tx_t *tx)
405 405 {
406 406 sa_os_t *sa = os->os_sa;
407 407 sa_lot_t *tb, *findtb;
408 - int i;
408 + int i, size;
409 409 avl_index_t loc;
410 410
411 411 ASSERT(MUTEX_HELD(&sa->sa_lock));
412 412 tb = kmem_zalloc(sizeof (sa_lot_t), KM_SLEEP);
413 413 tb->lot_attr_count = attr_count;
414 - tb->lot_attrs = kmem_alloc(sizeof (sa_attr_type_t) * attr_count,
415 - KM_SLEEP);
416 - bcopy(attrs, tb->lot_attrs, sizeof (sa_attr_type_t) * attr_count);
414 +
415 + if ((size = sizeof (sa_attr_type_t) * attr_count) != 0) {
416 + tb->lot_attrs = kmem_alloc(size, KM_SLEEP);
417 + bcopy(attrs, tb->lot_attrs, size);
418 + }
419 +
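The zero-count case is real: later in this file, sa_setup() registers sa_dummy_zpl_layout, the special layout for buffers with no attributes, with an attr_count of 0, so the previously unguarded kmem_alloc() would have requested zero bytes whenever a DMU_OST_ZFS objset is set up.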
417 420 tb->lot_num = lot_num;
418 421 tb->lot_hash = hash;
419 422 tb->lot_instance = 0;
420 423
421 424 if (zapadd) {
422 425 char attr_name[8];
423 426
424 427 if (sa->sa_layout_attr_obj == 0) {
425 428 sa->sa_layout_attr_obj = zap_create_link(os,
426 429 DMU_OT_SA_ATTR_LAYOUTS,
427 430 sa->sa_master_obj, SA_LAYOUTS, tx);
428 431 }
429 432
430 433 (void) snprintf(attr_name, sizeof (attr_name),
431 434 "%d", (int)lot_num);
432 435 VERIFY(0 == zap_update(os, os->os_sa->sa_layout_attr_obj,
433 436 attr_name, 2, attr_count, attrs, tx));
434 437 }
435 438
436 439 list_create(&tb->lot_idx_tab, sizeof (sa_idx_tab_t),
437 440 offsetof(sa_idx_tab_t, sa_next));
438 441
439 442 for (i = 0; i != attr_count; i++) {
440 443 if (sa->sa_attr_table[tb->lot_attrs[i]].sa_length == 0)
441 444 tb->lot_var_sizes++;
442 445 }
443 446
444 447 avl_add(&sa->sa_layout_num_tree, tb);
445 448
446 449 /* verify we don't have a hash collision */
447 450 if ((findtb = avl_find(&sa->sa_layout_hash_tree, tb, &loc)) != NULL) {
448 451 for (; findtb && findtb->lot_hash == hash;
449 452 findtb = AVL_NEXT(&sa->sa_layout_hash_tree, findtb)) {
450 453 if (findtb->lot_instance != tb->lot_instance)
451 454 break;
452 455 tb->lot_instance++;
453 456 }
454 457 }
455 458 avl_add(&sa->sa_layout_hash_tree, tb);
456 459 return (tb);
457 460 }
458 461
459 462 static void
460 463 sa_find_layout(objset_t *os, uint64_t hash, sa_attr_type_t *attrs,
461 464 int count, dmu_tx_t *tx, sa_lot_t **lot)
462 465 {
463 466 sa_lot_t *tb, tbsearch;
464 467 avl_index_t loc;
465 468 sa_os_t *sa = os->os_sa;
466 469 boolean_t found = B_FALSE;
467 470
468 471 mutex_enter(&sa->sa_lock);
469 472 tbsearch.lot_hash = hash;
470 473 tbsearch.lot_instance = 0;
471 474 tb = avl_find(&sa->sa_layout_hash_tree, &tbsearch, &loc);
472 475 if (tb) {
473 476 for (; tb && tb->lot_hash == hash;
474 477 tb = AVL_NEXT(&sa->sa_layout_hash_tree, tb)) {
475 478 if (sa_layout_equal(tb, attrs, count) == 0) {
476 479 found = B_TRUE;
477 480 break;
478 481 }
479 482 }
480 483 }
481 484 if (!found) {
482 485 tb = sa_add_layout_entry(os, attrs, count,
483 486 avl_numnodes(&sa->sa_layout_num_tree), hash, B_TRUE, tx);
484 487 }
485 488 mutex_exit(&sa->sa_lock);
486 489 *lot = tb;
487 490 }
488 491
489 492 static int
490 493 sa_resize_spill(sa_handle_t *hdl, uint32_t size, dmu_tx_t *tx)
491 494 {
492 495 int error;
493 496 uint32_t blocksize;
494 497
495 498 if (size == 0) {
496 499 blocksize = SPA_MINBLOCKSIZE;
497 500 } else if (size > SPA_OLD_MAXBLOCKSIZE) {
498 501 ASSERT(0);
499 502 return (SET_ERROR(EFBIG));
500 503 } else {
501 504 blocksize = P2ROUNDUP_TYPED(size, SPA_MINBLOCKSIZE, uint32_t);
502 505 }
503 506
504 507 error = dbuf_spill_set_blksz(hdl->sa_spill, blocksize, tx);
505 508 ASSERT(error == 0);
506 509 return (error);
507 510 }
508 511
509 512 static void
510 513 sa_copy_data(sa_data_locator_t *func, void *datastart, void *target, int buflen)
511 514 {
512 515 if (func == NULL) {
513 516 bcopy(datastart, target, buflen);
514 517 } else {
515 518 boolean_t start;
516 519 int bytes;
517 520 void *dataptr;
518 521 void *saptr = target;
519 522 uint32_t length;
520 523
521 524 start = B_TRUE;
522 525 bytes = 0;
523 526 while (bytes < buflen) {
524 527 func(&dataptr, &length, buflen, start, datastart);
525 528 bcopy(dataptr, saptr, length);
526 529 saptr = (void *)((caddr_t)saptr + length);
527 530 bytes += length;
528 531 start = B_FALSE;
529 532 }
530 533 }
531 534 }
532 535
533 536 /*
534 537 * Determine several different sizes
535 538 * first the sa header size
536 539 * the number of bytes to be stored
537 540 * if spill would occur the index in the attribute array is returned
538 541 *
539 542 * the boolean will_spill will be set when spilling is necessary. It
540 543 * is only set when the buftype is SA_BONUS
541 544 */
542 545 static int
543 546 sa_find_sizes(sa_os_t *sa, sa_bulk_attr_t *attr_desc, int attr_count,
544 547 dmu_buf_t *db, sa_buf_type_t buftype, int full_space, int *index,
545 548 int *total, boolean_t *will_spill)
546 549 {
547 550 int var_size = 0;
548 551 int i;
549 552 int hdrsize;
550 553 int extra_hdrsize;
551 554
552 555 if (buftype == SA_BONUS && sa->sa_force_spill) {
553 556 *total = 0;
554 557 *index = 0;
555 558 *will_spill = B_TRUE;
556 559 return (0);
557 560 }
558 561
559 562 *index = -1;
560 563 *total = 0;
561 564 *will_spill = B_FALSE;
562 565
563 566 extra_hdrsize = 0;
564 567 hdrsize = (SA_BONUSTYPE_FROM_DB(db) == DMU_OT_ZNODE) ? 0 :
565 568 sizeof (sa_hdr_phys_t);
566 569
567 570 ASSERT(IS_P2ALIGNED(full_space, 8));
568 571
569 572 for (i = 0; i != attr_count; i++) {
570 573 boolean_t is_var_sz;
571 574
572 575 *total = P2ROUNDUP(*total, 8);
573 576 *total += attr_desc[i].sa_length;
574 577 if (*will_spill)
575 578 continue;
576 579
577 580 is_var_sz = (SA_REGISTERED_LEN(sa, attr_desc[i].sa_attr) == 0);
578 581 if (is_var_sz) {
579 582 var_size++;
580 583 }
581 584
582 585 if (is_var_sz && var_size > 1) {
583 586 /*
584 587 * Don't worry that the spill block might overflow.
585 588 * It will be resized if needed in sa_build_layouts().
586 589 */
587 590 if (buftype == SA_SPILL ||
588 591 P2ROUNDUP(hdrsize + sizeof (uint16_t), 8) +
589 592 *total < full_space) {
590 593 /*
591 594 * Account for header space used by array of
592 595 * optional sizes of variable-length attributes.
593 596 * Record the extra header size in case this
594 597 * increase needs to be reversed due to
595 598 * spill-over.
596 599 */
597 600 hdrsize += sizeof (uint16_t);
598 601 if (*index != -1)
599 602 extra_hdrsize += sizeof (uint16_t);
600 603 } else {
601 604 ASSERT(buftype == SA_BONUS);
602 605 if (*index == -1)
603 606 *index = i;
604 607 *will_spill = B_TRUE;
605 608 continue;
606 609 }
607 610 }
608 611
609 612 /*
610 613 * find index of where spill *could* occur.
611 614 * Then continue to count of remainder attribute
612 615 * space. The sum is used later for sizing bonus
613 616 * and spill buffer.
614 617 */
615 618 if (buftype == SA_BONUS && *index == -1 &&
616 619 *total + P2ROUNDUP(hdrsize, 8) >
617 620 (full_space - sizeof (blkptr_t))) {
618 621 *index = i;
619 622 }
620 623
621 624 if (*total + P2ROUNDUP(hdrsize, 8) > full_space &&
622 625 buftype == SA_BONUS)
623 626 *will_spill = B_TRUE;
624 627 }
625 628
626 629 if (*will_spill)
627 630 hdrsize -= extra_hdrsize;
628 631
629 632 hdrsize = P2ROUNDUP(hdrsize, 8);
630 633 return (hdrsize);
631 634 }
632 635
633 636 #define BUF_SPACE_NEEDED(total, header) (total + header)
634 637
635 638 /*
636 639 * Find layout that corresponds to ordering of attributes
637 640 * If not found a new layout number is created and added to
638 641 * persistent layout tables.
639 642 */
640 643 static int
641 644 sa_build_layouts(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc, int attr_count,
642 645 dmu_tx_t *tx)
643 646 {
644 647 sa_os_t *sa = hdl->sa_os->os_sa;
645 648 uint64_t hash;
646 649 sa_buf_type_t buftype;
647 650 sa_hdr_phys_t *sahdr;
648 651 void *data_start;
649 652 int buf_space;
650 653 sa_attr_type_t *attrs, *attrs_start;
651 654 int i, lot_count;
652 655 int dnodesize;
653 656 int hdrsize;
654 657 int spillhdrsize = 0;
655 658 int used;
656 659 dmu_object_type_t bonustype;
657 660 sa_lot_t *lot;
658 661 int len_idx;
659 662 int spill_used;
660 663 int bonuslen;
661 664 boolean_t spilling;
662 665
663 666 dmu_buf_will_dirty(hdl->sa_bonus, tx);
664 667 bonustype = SA_BONUSTYPE_FROM_DB(hdl->sa_bonus);
665 668
666 669 dmu_object_dnsize_from_db(hdl->sa_bonus, &dnodesize);
667 670 bonuslen = DN_BONUS_SIZE(dnodesize);
668 671
669 672 /* first determine bonus header size and sum of all attributes */
670 673 hdrsize = sa_find_sizes(sa, attr_desc, attr_count, hdl->sa_bonus,
671 674 SA_BONUS, bonuslen, &i, &used, &spilling);
672 675
673 676 if (used > SPA_OLD_MAXBLOCKSIZE)
674 677 return (SET_ERROR(EFBIG));
675 678
676 679 VERIFY(0 == dmu_set_bonus(hdl->sa_bonus, spilling ?
677 680 MIN(bonuslen - sizeof (blkptr_t), used + hdrsize) :
678 681 used + hdrsize, tx));
679 682
680 683 ASSERT((bonustype == DMU_OT_ZNODE && spilling == 0) ||
681 684 bonustype == DMU_OT_SA);
682 685
683 686 /* setup and size spill buffer when needed */
684 687 if (spilling) {
685 688 boolean_t dummy;
686 689
687 690 if (hdl->sa_spill == NULL) {
688 691 VERIFY(dmu_spill_hold_by_bonus(hdl->sa_bonus, 0, NULL,
689 692 &hdl->sa_spill) == 0);
690 693 }
691 694 dmu_buf_will_dirty(hdl->sa_spill, tx);
692 695
693 696 spillhdrsize = sa_find_sizes(sa, &attr_desc[i],
694 697 attr_count - i, hdl->sa_spill, SA_SPILL,
695 698 hdl->sa_spill->db_size, &i, &spill_used, &dummy);
696 699
697 700 if (spill_used > SPA_OLD_MAXBLOCKSIZE)
698 701 return (SET_ERROR(EFBIG));
699 702
700 703 buf_space = hdl->sa_spill->db_size - spillhdrsize;
701 704 if (BUF_SPACE_NEEDED(spill_used, spillhdrsize) >
702 705 hdl->sa_spill->db_size)
703 706 VERIFY(0 == sa_resize_spill(hdl,
704 707 BUF_SPACE_NEEDED(spill_used, spillhdrsize), tx));
705 708 }
706 709
707 710 /* setup starting pointers to lay down data */
708 711 data_start = (void *)((uintptr_t)hdl->sa_bonus->db_data + hdrsize);
709 712 sahdr = (sa_hdr_phys_t *)hdl->sa_bonus->db_data;
710 713 buftype = SA_BONUS;
711 714
712 715 if (spilling)
713 716 buf_space = (sa->sa_force_spill) ?
714 717 0 : SA_BLKPTR_SPACE - hdrsize;
715 718 else
716 719 buf_space = hdl->sa_bonus->db_size - hdrsize;
717 720
718 721 attrs_start = attrs = kmem_alloc(sizeof (sa_attr_type_t) * attr_count,
719 722 KM_SLEEP);
720 723 lot_count = 0;
721 724
722 725 for (i = 0, len_idx = 0, hash = -1ULL; i != attr_count; i++) {
723 726 uint16_t length;
724 727
725 728 ASSERT(IS_P2ALIGNED(data_start, 8));
726 729 ASSERT(IS_P2ALIGNED(buf_space, 8));
727 730 attrs[i] = attr_desc[i].sa_attr;
728 731 length = SA_REGISTERED_LEN(sa, attrs[i]);
729 732 if (length == 0)
730 733 length = attr_desc[i].sa_length;
731 734
732 735 if (buf_space < length) { /* switch to spill buffer */
733 736 VERIFY(spilling);
734 737 VERIFY(bonustype == DMU_OT_SA);
735 738 if (buftype == SA_BONUS && !sa->sa_force_spill) {
736 739 sa_find_layout(hdl->sa_os, hash, attrs_start,
737 740 lot_count, tx, &lot);
738 741 SA_SET_HDR(sahdr, lot->lot_num, hdrsize);
739 742 }
740 743
741 744 buftype = SA_SPILL;
742 745 hash = -1ULL;
743 746 len_idx = 0;
744 747
745 748 sahdr = (sa_hdr_phys_t *)hdl->sa_spill->db_data;
746 749 sahdr->sa_magic = SA_MAGIC;
747 750 data_start = (void *)((uintptr_t)sahdr +
748 751 spillhdrsize);
749 752 attrs_start = &attrs[i];
750 753 buf_space = hdl->sa_spill->db_size - spillhdrsize;
751 754 lot_count = 0;
752 755 }
753 756 hash ^= SA_ATTR_HASH(attrs[i]);
754 757 attr_desc[i].sa_addr = data_start;
755 758 attr_desc[i].sa_size = length;
756 759 SA_COPY_DATA(attr_desc[i].sa_data_func, attr_desc[i].sa_data,
757 760 data_start, length);
758 761 if (sa->sa_attr_table[attrs[i]].sa_length == 0) {
759 762 sahdr->sa_lengths[len_idx++] = length;
760 763 }
761 764 data_start = (void *)P2ROUNDUP(((uintptr_t)data_start +
762 765 length), 8);
763 766 buf_space -= P2ROUNDUP(length, 8);
764 767 lot_count++;
765 768 }
766 769
767 770 sa_find_layout(hdl->sa_os, hash, attrs_start, lot_count, tx, &lot);
768 771
769 772 /*
770 773 * Verify that old znodes always have layout number 0.
771 774 * Must be DMU_OT_SA for arbitrary layouts
772 775 */
773 776 VERIFY((bonustype == DMU_OT_ZNODE && lot->lot_num == 0) ||
774 777 (bonustype == DMU_OT_SA && lot->lot_num > 1));
775 778
776 779 if (bonustype == DMU_OT_SA) {
777 780 SA_SET_HDR(sahdr, lot->lot_num,
778 781 buftype == SA_BONUS ? hdrsize : spillhdrsize);
779 782 }
780 783
781 784 kmem_free(attrs, sizeof (sa_attr_type_t) * attr_count);
782 785 if (hdl->sa_bonus_tab) {
783 786 sa_idx_tab_rele(hdl->sa_os, hdl->sa_bonus_tab);
784 787 hdl->sa_bonus_tab = NULL;
785 788 }
786 789 if (!sa->sa_force_spill)
787 790 VERIFY(0 == sa_build_index(hdl, SA_BONUS));
788 791 if (hdl->sa_spill) {
789 792 sa_idx_tab_rele(hdl->sa_os, hdl->sa_spill_tab);
790 793 if (!spilling) {
791 794 /*
792 795 * remove spill block that is no longer needed.
793 796 */
794 797 dmu_buf_rele(hdl->sa_spill, NULL);
795 798 hdl->sa_spill = NULL;
796 799 hdl->sa_spill_tab = NULL;
797 800 VERIFY(0 == dmu_rm_spill(hdl->sa_os,
798 801 sa_handle_object(hdl), tx));
799 802 } else {
800 803 VERIFY(0 == sa_build_index(hdl, SA_SPILL));
801 804 }
802 805 }
803 806
804 807 return (0);
805 808 }
806 809
807 810 static void
808 811 sa_free_attr_table(sa_os_t *sa)
809 812 {
810 813 int i;
811 814
812 815 if (sa->sa_attr_table == NULL)
813 816 return;
814 817
815 818 for (i = 0; i != sa->sa_num_attrs; i++) {
816 819 if (sa->sa_attr_table[i].sa_name)
817 820 kmem_free(sa->sa_attr_table[i].sa_name,
818 821 strlen(sa->sa_attr_table[i].sa_name) + 1);
819 822 }
820 823
821 824 kmem_free(sa->sa_attr_table,
822 825 sizeof (sa_attr_table_t) * sa->sa_num_attrs);
823 826
824 827 sa->sa_attr_table = NULL;
825 828 }
826 829
827 830 static int
828 831 sa_attr_table_setup(objset_t *os, sa_attr_reg_t *reg_attrs, int count)
829 832 {
830 833 sa_os_t *sa = os->os_sa;
831 834 uint64_t sa_attr_count = 0;
832 835 uint64_t sa_reg_count = 0;
833 836 int error = 0;
834 837 uint64_t attr_value;
835 838 sa_attr_table_t *tb;
836 839 zap_cursor_t zc;
837 840 zap_attribute_t za;
838 841 int registered_count = 0;
839 842 int i;
840 843 dmu_objset_type_t ostype = dmu_objset_type(os);
841 844
842 845 sa->sa_user_table =
843 846 kmem_zalloc(count * sizeof (sa_attr_type_t), KM_SLEEP);
844 847 sa->sa_user_table_sz = count * sizeof (sa_attr_type_t);
845 848
846 849 if (sa->sa_reg_attr_obj != 0) {
847 850 error = zap_count(os, sa->sa_reg_attr_obj,
848 851 &sa_attr_count);
849 852
850 853 /*
851 854 * Make sure we retrieved a count and that it isn't zero
852 855 */
853 856 if (error || (error == 0 && sa_attr_count == 0)) {
854 857 if (error == 0)
855 858 error = SET_ERROR(EINVAL);
856 859 goto bail;
857 860 }
858 861 sa_reg_count = sa_attr_count;
859 862 }
860 863
861 864 if (ostype == DMU_OST_ZFS && sa_attr_count == 0)
862 865 sa_attr_count += sa_legacy_attr_count;
863 866
864 867 /* Allocate attribute numbers for attributes that aren't registered */
865 868 for (i = 0; i != count; i++) {
866 869 boolean_t found = B_FALSE;
867 870 int j;
868 871
869 872 if (ostype == DMU_OST_ZFS) {
870 873 for (j = 0; j != sa_legacy_attr_count; j++) {
871 874 if (strcmp(reg_attrs[i].sa_name,
872 875 sa_legacy_attrs[j].sa_name) == 0) {
873 876 sa->sa_user_table[i] =
874 877 sa_legacy_attrs[j].sa_attr;
875 878 found = B_TRUE;
876 879 }
877 880 }
878 881 }
879 882 if (found)
880 883 continue;
881 884
882 885 if (sa->sa_reg_attr_obj)
883 886 error = zap_lookup(os, sa->sa_reg_attr_obj,
884 887 reg_attrs[i].sa_name, 8, 1, &attr_value);
885 888 else
886 889 error = SET_ERROR(ENOENT);
887 890 switch (error) {
888 891 case ENOENT:
889 892 sa->sa_user_table[i] = (sa_attr_type_t)sa_attr_count;
890 893 sa_attr_count++;
891 894 break;
892 895 case 0:
893 896 sa->sa_user_table[i] = ATTR_NUM(attr_value);
894 897 break;
895 898 default:
896 899 goto bail;
897 900 }
898 901 }
899 902
900 903 sa->sa_num_attrs = sa_attr_count;
901 904 tb = sa->sa_attr_table =
902 905 kmem_zalloc(sizeof (sa_attr_table_t) * sa_attr_count, KM_SLEEP);
903 906
904 907 /*
905 908 * Attribute table is constructed from requested attribute list,
906 909 * previously foreign registered attributes, and also the legacy
907 910 * ZPL set of attributes.
908 911 */
909 912
910 913 if (sa->sa_reg_attr_obj) {
911 914 for (zap_cursor_init(&zc, os, sa->sa_reg_attr_obj);
912 915 (error = zap_cursor_retrieve(&zc, &za)) == 0;
913 916 zap_cursor_advance(&zc)) {
914 917 uint64_t value;
915 918 value = za.za_first_integer;
916 919
917 920 registered_count++;
918 921 tb[ATTR_NUM(value)].sa_attr = ATTR_NUM(value);
919 922 tb[ATTR_NUM(value)].sa_length = ATTR_LENGTH(value);
920 923 tb[ATTR_NUM(value)].sa_byteswap = ATTR_BSWAP(value);
921 924 tb[ATTR_NUM(value)].sa_registered = B_TRUE;
922 925
923 926 if (tb[ATTR_NUM(value)].sa_name) {
924 927 continue;
925 928 }
926 929 tb[ATTR_NUM(value)].sa_name =
927 930 kmem_zalloc(strlen(za.za_name) +1, KM_SLEEP);
928 931 (void) strlcpy(tb[ATTR_NUM(value)].sa_name, za.za_name,
929 932 strlen(za.za_name) +1);
930 933 }
931 934 zap_cursor_fini(&zc);
932 935 /*
933 936 * Make sure we processed the correct number of registered
934 937 * attributes
935 938 */
936 939 if (registered_count != sa_reg_count) {
937 940 ASSERT(error != 0);
938 941 goto bail;
939 942 }
940 943
941 944 }
942 945
943 946 if (ostype == DMU_OST_ZFS) {
944 947 for (i = 0; i != sa_legacy_attr_count; i++) {
945 948 if (tb[i].sa_name)
946 949 continue;
947 950 tb[i].sa_attr = sa_legacy_attrs[i].sa_attr;
948 951 tb[i].sa_length = sa_legacy_attrs[i].sa_length;
949 952 tb[i].sa_byteswap = sa_legacy_attrs[i].sa_byteswap;
950 953 tb[i].sa_registered = B_FALSE;
951 954 tb[i].sa_name =
952 955 kmem_zalloc(strlen(sa_legacy_attrs[i].sa_name) +1,
953 956 KM_SLEEP);
954 957 (void) strlcpy(tb[i].sa_name,
955 958 sa_legacy_attrs[i].sa_name,
956 959 strlen(sa_legacy_attrs[i].sa_name) + 1);
957 960 }
958 961 }
959 962
960 963 for (i = 0; i != count; i++) {
961 964 sa_attr_type_t attr_id;
962 965
963 966 attr_id = sa->sa_user_table[i];
964 967 if (tb[attr_id].sa_name)
965 968 continue;
966 969
967 970 tb[attr_id].sa_length = reg_attrs[i].sa_length;
968 971 tb[attr_id].sa_byteswap = reg_attrs[i].sa_byteswap;
969 972 tb[attr_id].sa_attr = attr_id;
970 973 tb[attr_id].sa_name =
971 974 kmem_zalloc(strlen(reg_attrs[i].sa_name) + 1, KM_SLEEP);
972 975 (void) strlcpy(tb[attr_id].sa_name, reg_attrs[i].sa_name,
973 976 strlen(reg_attrs[i].sa_name) + 1);
974 977 }
975 978
976 979 sa->sa_need_attr_registration =
977 980 (sa_attr_count != registered_count);
978 981
979 982 return (0);
980 983 bail:
981 984 kmem_free(sa->sa_user_table, count * sizeof (sa_attr_type_t));
982 985 sa->sa_user_table = NULL;
983 986 sa_free_attr_table(sa);
984 987 return ((error != 0) ? error : EINVAL);
985 988 }
986 989
987 990 int
988 991 sa_setup(objset_t *os, uint64_t sa_obj, sa_attr_reg_t *reg_attrs, int count,
989 992 sa_attr_type_t **user_table)
990 993 {
991 994 zap_cursor_t zc;
992 995 zap_attribute_t za;
993 996 sa_os_t *sa;
994 997 dmu_objset_type_t ostype = dmu_objset_type(os);
995 998 sa_attr_type_t *tb;
996 999 int error;
997 1000
998 1001 mutex_enter(&os->os_user_ptr_lock);
999 1002 if (os->os_sa) {
1000 1003 mutex_enter(&os->os_sa->sa_lock);
1001 1004 mutex_exit(&os->os_user_ptr_lock);
1002 1005 tb = os->os_sa->sa_user_table;
1003 1006 mutex_exit(&os->os_sa->sa_lock);
1004 1007 *user_table = tb;
1005 1008 return (0);
1006 1009 }
1007 1010
1008 1011 sa = kmem_zalloc(sizeof (sa_os_t), KM_SLEEP);
1009 1012 mutex_init(&sa->sa_lock, NULL, MUTEX_DEFAULT, NULL);
1010 1013 sa->sa_master_obj = sa_obj;
1011 1014
1012 1015 os->os_sa = sa;
1013 1016 mutex_enter(&sa->sa_lock);
1014 1017 mutex_exit(&os->os_user_ptr_lock);
1015 1018 avl_create(&sa->sa_layout_num_tree, layout_num_compare,
1016 1019 sizeof (sa_lot_t), offsetof(sa_lot_t, lot_num_node));
1017 1020 avl_create(&sa->sa_layout_hash_tree, layout_hash_compare,
1018 1021 sizeof (sa_lot_t), offsetof(sa_lot_t, lot_hash_node));
1019 1022
1020 1023 if (sa_obj) {
1021 1024 error = zap_lookup(os, sa_obj, SA_LAYOUTS,
1022 1025 8, 1, &sa->sa_layout_attr_obj);
1023 1026 if (error != 0 && error != ENOENT)
1024 1027 goto fail;
1025 1028 error = zap_lookup(os, sa_obj, SA_REGISTRY,
1026 1029 8, 1, &sa->sa_reg_attr_obj);
1027 1030 if (error != 0 && error != ENOENT)
1028 1031 goto fail;
1029 1032 }
1030 1033
1031 1034 if ((error = sa_attr_table_setup(os, reg_attrs, count)) != 0)
1032 1035 goto fail;
1033 1036
1034 1037 if (sa->sa_layout_attr_obj != 0) {
1035 1038 uint64_t layout_count;
1036 1039
1037 1040 error = zap_count(os, sa->sa_layout_attr_obj,
1038 1041 &layout_count);
1039 1042
1040 1043 /*
1041 1044 * Layout number count should be > 0
1042 1045 */
1043 1046 if (error || (error == 0 && layout_count == 0)) {
1044 1047 if (error == 0)
1045 1048 error = SET_ERROR(EINVAL);
1046 1049 goto fail;
1047 1050 }
1048 1051
1049 1052 for (zap_cursor_init(&zc, os, sa->sa_layout_attr_obj);
1050 1053 (error = zap_cursor_retrieve(&zc, &za)) == 0;
1051 1054 zap_cursor_advance(&zc)) {
1052 1055 sa_attr_type_t *lot_attrs;
1053 1056 uint64_t lot_num;
1054 1057
1055 1058 lot_attrs = kmem_zalloc(sizeof (sa_attr_type_t) *
1056 1059 za.za_num_integers, KM_SLEEP);
1057 1060
1058 1061 if ((error = (zap_lookup(os, sa->sa_layout_attr_obj,
1059 1062 za.za_name, 2, za.za_num_integers,
1060 1063 lot_attrs))) != 0) {
1061 1064 kmem_free(lot_attrs, sizeof (sa_attr_type_t) *
1062 1065 za.za_num_integers);
1063 1066 break;
1064 1067 }
1065 1068 VERIFY(ddi_strtoull(za.za_name, NULL, 10,
1066 1069 (unsigned long long *)&lot_num) == 0);
1067 1070
1068 1071 (void) sa_add_layout_entry(os, lot_attrs,
1069 1072 za.za_num_integers, lot_num,
1070 1073 sa_layout_info_hash(lot_attrs,
1071 1074 za.za_num_integers), B_FALSE, NULL);
1072 1075 kmem_free(lot_attrs, sizeof (sa_attr_type_t) *
1073 1076 za.za_num_integers);
1074 1077 }
1075 1078 zap_cursor_fini(&zc);
1076 1079
1077 1080 /*
1078 1081 * Make sure layout count matches number of entries added
1079 1082 * to AVL tree
1080 1083 */
1081 1084 if (avl_numnodes(&sa->sa_layout_num_tree) != layout_count) {
1082 1085 ASSERT(error != 0);
1083 1086 goto fail;
1084 1087 }
1085 1088 }
1086 1089
1087 1090 /* Add special layout number for old ZNODES */
1088 1091 if (ostype == DMU_OST_ZFS) {
1089 1092 (void) sa_add_layout_entry(os, sa_legacy_zpl_layout,
1090 1093 sa_legacy_attr_count, 0,
1091 1094 sa_layout_info_hash(sa_legacy_zpl_layout,
1092 1095 sa_legacy_attr_count), B_FALSE, NULL);
1093 1096
1094 1097 (void) sa_add_layout_entry(os, sa_dummy_zpl_layout, 0, 1,
1095 1098 0, B_FALSE, NULL);
1096 1099 }
1097 1100 *user_table = os->os_sa->sa_user_table;
1098 1101 mutex_exit(&sa->sa_lock);
1099 1102 return (0);
1100 1103 fail:
1101 1104 os->os_sa = NULL;
1102 1105 sa_free_attr_table(sa);
1103 1106 if (sa->sa_user_table)
1104 1107 kmem_free(sa->sa_user_table, sa->sa_user_table_sz);
1105 1108 mutex_exit(&sa->sa_lock);
1106 1109 avl_destroy(&sa->sa_layout_hash_tree);
1107 1110 avl_destroy(&sa->sa_layout_num_tree);
1108 1111 mutex_destroy(&sa->sa_lock);
1109 1112 kmem_free(sa, sizeof (sa_os_t));
1110 1113 return ((error == ECKSUM) ? EIO : error);
1111 1114 }
1112 1115
1113 1116 void
1114 1117 sa_tear_down(objset_t *os)
1115 1118 {
1116 1119 sa_os_t *sa = os->os_sa;
1117 1120 sa_lot_t *layout;
1118 1121 void *cookie;
1119 1122
1120 1123 kmem_free(sa->sa_user_table, sa->sa_user_table_sz);
1121 1124
1122 1125 /* Free up attr table */
1123 1126
1124 1127 sa_free_attr_table(sa);
1125 1128
1126 1129 cookie = NULL;
1127 1130 while (layout = avl_destroy_nodes(&sa->sa_layout_hash_tree, &cookie)) {
1128 1131 sa_idx_tab_t *tab;
1129 1132 while (tab = list_head(&layout->lot_idx_tab)) {
1130 1133 ASSERT(zfs_refcount_count(&tab->sa_refcount));
1131 1134 sa_idx_tab_rele(os, tab);
1132 1135 }
1133 1136 }
1134 1137
1135 1138 cookie = NULL;
1136 1139 while (layout = avl_destroy_nodes(&sa->sa_layout_num_tree, &cookie)) {
1137 1140 kmem_free(layout->lot_attrs,
1138 1141 sizeof (sa_attr_type_t) * layout->lot_attr_count);
1139 1142 kmem_free(layout, sizeof (sa_lot_t));
1140 1143 }
1141 1144
1142 1145 avl_destroy(&sa->sa_layout_hash_tree);
1143 1146 avl_destroy(&sa->sa_layout_num_tree);
1144 1147 mutex_destroy(&sa->sa_lock);
1145 1148
1146 1149 kmem_free(sa, sizeof (sa_os_t));
1147 1150 os->os_sa = NULL;
1148 1151 }
1149 1152
1150 1153 void
1151 1154 sa_build_idx_tab(void *hdr, void *attr_addr, sa_attr_type_t attr,
1152 1155 uint16_t length, int length_idx, boolean_t var_length, void *userp)
1153 1156 {
1154 1157 sa_idx_tab_t *idx_tab = userp;
1155 1158
1156 1159 if (var_length) {
1157 1160 ASSERT(idx_tab->sa_variable_lengths);
1158 1161 idx_tab->sa_variable_lengths[length_idx] = length;
1159 1162 }
1160 1163 TOC_ATTR_ENCODE(idx_tab->sa_idx_tab[attr], length_idx,
1161 1164 (uint32_t)((uintptr_t)attr_addr - (uintptr_t)hdr));
1162 1165 }
1163 1166
1164 1167 static void
1165 1168 sa_attr_iter(objset_t *os, sa_hdr_phys_t *hdr, dmu_object_type_t type,
1166 1169 sa_iterfunc_t func, sa_lot_t *tab, void *userp)
1167 1170 {
1168 1171 void *data_start;
1169 1172 sa_lot_t *tb = tab;
1170 1173 sa_lot_t search;
1171 1174 avl_index_t loc;
1172 1175 sa_os_t *sa = os->os_sa;
1173 1176 int i;
1174 1177 uint16_t *length_start = NULL;
1175 1178 uint8_t length_idx = 0;
1176 1179
1177 1180 if (tab == NULL) {
1178 1181 search.lot_num = SA_LAYOUT_NUM(hdr, type);
1179 1182 tb = avl_find(&sa->sa_layout_num_tree, &search, &loc);
1180 1183 ASSERT(tb);
1181 1184 }
1182 1185
1183 1186 if (IS_SA_BONUSTYPE(type)) {
1184 1187 data_start = (void *)P2ROUNDUP(((uintptr_t)hdr +
1185 1188 offsetof(sa_hdr_phys_t, sa_lengths) +
1186 1189 (sizeof (uint16_t) * tb->lot_var_sizes)), 8);
1187 1190 length_start = hdr->sa_lengths;
1188 1191 } else {
1189 1192 data_start = hdr;
1190 1193 }
1191 1194
1192 1195 for (i = 0; i != tb->lot_attr_count; i++) {
1193 1196 int attr_length, reg_length;
1194 1197 uint8_t idx_len;
1195 1198
1196 1199 reg_length = sa->sa_attr_table[tb->lot_attrs[i]].sa_length;
1197 1200 if (reg_length) {
1198 1201 attr_length = reg_length;
1199 1202 idx_len = 0;
1200 1203 } else {
1201 1204 attr_length = length_start[length_idx];
1202 1205 idx_len = length_idx++;
1203 1206 }
1204 1207
1205 1208 func(hdr, data_start, tb->lot_attrs[i], attr_length,
1206 1209 idx_len, reg_length == 0 ? B_TRUE : B_FALSE, userp);
1207 1210
1208 1211 data_start = (void *)P2ROUNDUP(((uintptr_t)data_start +
1209 1212 attr_length), 8);
1210 1213 }
1211 1214 }
1212 1215
1213 1216 /*ARGSUSED*/
1214 1217 void
1215 1218 sa_byteswap_cb(void *hdr, void *attr_addr, sa_attr_type_t attr,
1216 1219 uint16_t length, int length_idx, boolean_t variable_length, void *userp)
1217 1220 {
1218 1221 sa_handle_t *hdl = userp;
1219 1222 sa_os_t *sa = hdl->sa_os->os_sa;
1220 1223
1221 1224 sa_bswap_table[sa->sa_attr_table[attr].sa_byteswap](attr_addr, length);
1222 1225 }
1223 1226
1224 1227 void
1225 1228 sa_byteswap(sa_handle_t *hdl, sa_buf_type_t buftype)
1226 1229 {
1227 1230 sa_hdr_phys_t *sa_hdr_phys = SA_GET_HDR(hdl, buftype);
1228 1231 dmu_buf_impl_t *db;
1229 1232 sa_os_t *sa = hdl->sa_os->os_sa;
1230 1233 int num_lengths = 1;
1231 1234 int i;
1232 1235
1233 1236 ASSERT(MUTEX_HELD(&sa->sa_lock));
1234 1237 if (sa_hdr_phys->sa_magic == SA_MAGIC)
1235 1238 return;
1236 1239
1237 1240 db = SA_GET_DB(hdl, buftype);
1238 1241
1239 1242 if (buftype == SA_SPILL) {
1240 1243 arc_release(db->db_buf, NULL);
1241 1244 arc_buf_thaw(db->db_buf);
1242 1245 }
1243 1246
1244 1247 sa_hdr_phys->sa_magic = BSWAP_32(sa_hdr_phys->sa_magic);
1245 1248 sa_hdr_phys->sa_layout_info = BSWAP_16(sa_hdr_phys->sa_layout_info);
1246 1249
1247 1250 /*
1248 1251 * Determine number of variable lengths in the header
1249 1252 * The standard 8 byte header has one for free and a
1250 1253 * 16 byte header would have 4 + 1;
1251 1254 */
1252 1255 if (SA_HDR_SIZE(sa_hdr_phys) > 8)
1253 1256 num_lengths += (SA_HDR_SIZE(sa_hdr_phys) - 8) >> 1;
1254 1257 for (i = 0; i != num_lengths; i++)
1255 1258 sa_hdr_phys->sa_lengths[i] =
1256 1259 BSWAP_16(sa_hdr_phys->sa_lengths[i]);
1257 1260
1258 1261 sa_attr_iter(hdl->sa_os, sa_hdr_phys, DMU_OT_SA,
1259 1262 sa_byteswap_cb, NULL, hdl);
1260 1263
1261 1264 if (buftype == SA_SPILL)
1262 1265 arc_buf_freeze(((dmu_buf_impl_t *)hdl->sa_spill)->db_buf);
1263 1266 }
1264 1267
1265 1268 static int
1266 1269 sa_build_index(sa_handle_t *hdl, sa_buf_type_t buftype)
1267 1270 {
1268 1271 sa_hdr_phys_t *sa_hdr_phys;
1269 1272 dmu_buf_impl_t *db = SA_GET_DB(hdl, buftype);
1270 1273 dmu_object_type_t bonustype = SA_BONUSTYPE_FROM_DB(db);
1271 1274 sa_os_t *sa = hdl->sa_os->os_sa;
1272 1275 sa_idx_tab_t *idx_tab;
1273 1276
1274 1277 sa_hdr_phys = SA_GET_HDR(hdl, buftype);
1275 1278
1276 1279 mutex_enter(&sa->sa_lock);
1277 1280
1278 1281 /* Do we need to byteswap? */
1279 1282
1280 1283 /* only check if not old znode */
1281 1284 if (IS_SA_BONUSTYPE(bonustype) && sa_hdr_phys->sa_magic != SA_MAGIC &&
1282 1285 sa_hdr_phys->sa_magic != 0) {
1283 1286 VERIFY(BSWAP_32(sa_hdr_phys->sa_magic) == SA_MAGIC);
1284 1287 sa_byteswap(hdl, buftype);
1285 1288 }
1286 1289
1287 1290 idx_tab = sa_find_idx_tab(hdl->sa_os, bonustype, sa_hdr_phys);
1288 1291
1289 1292 if (buftype == SA_BONUS)
1290 1293 hdl->sa_bonus_tab = idx_tab;
1291 1294 else
1292 1295 hdl->sa_spill_tab = idx_tab;
1293 1296
1294 1297 mutex_exit(&sa->sa_lock);
1295 1298 return (0);
1296 1299 }
1297 1300
1298 1301 /*ARGSUSED*/
1299 1302 static void
1300 1303 sa_evict_sync(void *dbu)
1301 1304 {
1302 1305 panic("evicting sa dbuf\n");
1303 1306 }
1304 1307
1305 1308 static void
1306 1309 sa_idx_tab_rele(objset_t *os, void *arg)
1307 1310 {
1308 1311 sa_os_t *sa = os->os_sa;
1309 1312 sa_idx_tab_t *idx_tab = arg;
1310 1313
1311 1314 if (idx_tab == NULL)
1312 1315 return;
1313 1316
1314 1317 mutex_enter(&sa->sa_lock);
1315 1318 if (zfs_refcount_remove(&idx_tab->sa_refcount, NULL) == 0) {
1316 1319 list_remove(&idx_tab->sa_layout->lot_idx_tab, idx_tab);
1317 1320 if (idx_tab->sa_variable_lengths)
1318 1321 kmem_free(idx_tab->sa_variable_lengths,
1319 1322 sizeof (uint16_t) *
1320 1323 idx_tab->sa_layout->lot_var_sizes);
1321 1324 zfs_refcount_destroy(&idx_tab->sa_refcount);
1322 1325 kmem_free(idx_tab->sa_idx_tab,
1323 1326 sizeof (uint32_t) * sa->sa_num_attrs);
1324 1327 kmem_free(idx_tab, sizeof (sa_idx_tab_t));
1325 1328 }
1326 1329 mutex_exit(&sa->sa_lock);
1327 1330 }
1328 1331
1329 1332 static void
1330 1333 sa_idx_tab_hold(objset_t *os, sa_idx_tab_t *idx_tab)
1331 1334 {
1332 1335 sa_os_t *sa = os->os_sa;
1333 1336
1334 1337 ASSERT(MUTEX_HELD(&sa->sa_lock));
1335 1338 (void) zfs_refcount_add(&idx_tab->sa_refcount, NULL);
1336 1339 }
1337 1340
1338 1341 void
1339 1342 sa_handle_destroy(sa_handle_t *hdl)
1340 1343 {
1341 1344 dmu_buf_t *db = hdl->sa_bonus;
1342 1345
1343 1346 mutex_enter(&hdl->sa_lock);
1344 1347 (void) dmu_buf_remove_user(db, &hdl->sa_dbu);
1345 1348
1346 1349 if (hdl->sa_bonus_tab)
1347 1350 sa_idx_tab_rele(hdl->sa_os, hdl->sa_bonus_tab);
1348 1351
1349 1352 if (hdl->sa_spill_tab)
1350 1353 sa_idx_tab_rele(hdl->sa_os, hdl->sa_spill_tab);
1351 1354
1352 1355 dmu_buf_rele(hdl->sa_bonus, NULL);
1353 1356
1354 1357 if (hdl->sa_spill)
1355 1358 dmu_buf_rele((dmu_buf_t *)hdl->sa_spill, NULL);
1356 1359 mutex_exit(&hdl->sa_lock);
1357 1360
1358 1361 kmem_cache_free(sa_cache, hdl);
1359 1362 }
1360 1363
1361 1364 int
1362 1365 sa_handle_get_from_db(objset_t *os, dmu_buf_t *db, void *userp,
1363 1366 sa_handle_type_t hdl_type, sa_handle_t **handlepp)
1364 1367 {
1365 1368 int error = 0;
1366 1369 dmu_object_info_t doi;
1367 1370 sa_handle_t *handle = NULL;
1368 1371
1369 1372 #ifdef ZFS_DEBUG
1370 1373 dmu_object_info_from_db(db, &doi);
1371 1374 ASSERT(doi.doi_bonus_type == DMU_OT_SA ||
1372 1375 doi.doi_bonus_type == DMU_OT_ZNODE);
1373 1376 #endif
1374 1377 /* find handle, if it exists */
1375 1378 /* if one doesn't exist then create a new one, and initialize it */
1376 1379
1377 1380 if (hdl_type == SA_HDL_SHARED)
1378 1381 handle = dmu_buf_get_user(db);
1379 1382
1380 1383 if (handle == NULL) {
1381 1384 sa_handle_t *winner = NULL;
1382 1385
1383 1386 handle = kmem_cache_alloc(sa_cache, KM_SLEEP);
1384 1387 handle->sa_dbu.dbu_evict_func_sync = NULL;
1385 1388 handle->sa_dbu.dbu_evict_func_async = NULL;
1386 1389 handle->sa_userp = userp;
1387 1390 handle->sa_bonus = db;
1388 1391 handle->sa_os = os;
1389 1392 handle->sa_spill = NULL;
1390 1393 handle->sa_bonus_tab = NULL;
1391 1394 handle->sa_spill_tab = NULL;
1392 1395
1393 1396 error = sa_build_index(handle, SA_BONUS);
1394 1397
1395 1398 if (hdl_type == SA_HDL_SHARED) {
1396 1399 dmu_buf_init_user(&handle->sa_dbu, sa_evict_sync, NULL,
1397 1400 NULL);
1398 1401 winner = dmu_buf_set_user_ie(db, &handle->sa_dbu);
1399 1402 }
1400 1403
1401 1404 if (winner != NULL) {
1402 1405 kmem_cache_free(sa_cache, handle);
1403 1406 handle = winner;
1404 1407 }
1405 1408 }
1406 1409 *handlepp = handle;
1407 1410
1408 1411 return (error);
1409 1412 }
1410 1413
1411 1414 int
1412 1415 sa_handle_get(objset_t *objset, uint64_t objid, void *userp,
1413 1416 sa_handle_type_t hdl_type, sa_handle_t **handlepp)
1414 1417 {
1415 1418 dmu_buf_t *db;
1416 1419 int error;
1417 1420
1418 1421 if (error = dmu_bonus_hold(objset, objid, NULL, &db))
1419 1422 return (error);
1420 1423
1421 1424 return (sa_handle_get_from_db(objset, db, userp, hdl_type,
1422 1425 handlepp));
1423 1426 }
1424 1427
1425 1428 int
1426 1429 sa_buf_hold(objset_t *objset, uint64_t obj_num, void *tag, dmu_buf_t **db)
1427 1430 {
1428 1431 return (dmu_bonus_hold(objset, obj_num, tag, db));
1429 1432 }
1430 1433
1431 1434 void
1432 1435 sa_buf_rele(dmu_buf_t *db, void *tag)
1433 1436 {
1434 1437 dmu_buf_rele(db, tag);
1435 1438 }
1436 1439
1437 1440 int
1438 1441 sa_lookup_impl(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count)
1439 1442 {
1440 1443 ASSERT(hdl);
1441 1444 ASSERT(MUTEX_HELD(&hdl->sa_lock));
1442 1445 return (sa_attr_op(hdl, bulk, count, SA_LOOKUP, NULL));
1443 1446 }
1444 1447
1445 1448 static int
1446 1449 sa_lookup_locked(sa_handle_t *hdl, sa_attr_type_t attr, void *buf,
1447 1450 uint32_t buflen)
1448 1451 {
1449 1452 int error;
1450 1453 sa_bulk_attr_t bulk;
1451 1454
1452 1455 bulk.sa_attr = attr;
1453 1456 bulk.sa_data = buf;
1454 1457 bulk.sa_length = buflen;
1455 1458 bulk.sa_data_func = NULL;
1456 1459
1457 1460 ASSERT(hdl);
1458 1461 error = sa_lookup_impl(hdl, &bulk, 1);
1459 1462 return (error);
1460 1463 }
1461 1464
1462 1465 int
1463 1466 sa_lookup(sa_handle_t *hdl, sa_attr_type_t attr, void *buf, uint32_t buflen)
1464 1467 {
1465 1468 int error;
1466 1469
1467 1470 mutex_enter(&hdl->sa_lock);
1468 1471 error = sa_lookup_locked(hdl, attr, buf, buflen);
1469 1472 mutex_exit(&hdl->sa_lock);
1470 1473
1471 1474 return (error);
1472 1475 }
1473 1476
1474 1477 #ifdef _KERNEL
1475 1478 int
1476 1479 sa_lookup_uio(sa_handle_t *hdl, sa_attr_type_t attr, uio_t *uio)
1477 1480 {
1478 1481 int error;
1479 1482 sa_bulk_attr_t bulk;
1480 1483
1481 1484 bulk.sa_data = NULL;
1482 1485 bulk.sa_attr = attr;
1483 1486 bulk.sa_data_func = NULL;
1484 1487
1485 1488 ASSERT(hdl);
1486 1489
1487 1490 mutex_enter(&hdl->sa_lock);
1488 1491 if ((error = sa_attr_op(hdl, &bulk, 1, SA_LOOKUP, NULL)) == 0) {
1489 1492 error = uiomove((void *)bulk.sa_addr, MIN(bulk.sa_size,
1490 1493 uio->uio_resid), UIO_READ, uio);
1491 1494 }
1492 1495 mutex_exit(&hdl->sa_lock);
1493 1496 return (error);
1494 1497
1495 1498 }
1496 1499
1497 1500 /*
1498 1501 * For the existing object that is upgraded from old system, its ondisk layout
1499 1502 * has no slot for the project ID attribute. But quota accounting logic needs
1500 1503 * to access related slots by offset directly. So we need to adjust these old
1501 1504 * objects' layout to make the project ID to some unified and fixed offset.
1502 1505 */
1503 1506 int
1504 1507 sa_add_projid(sa_handle_t *hdl, dmu_tx_t *tx, uint64_t projid)
1505 1508 {
1506 1509 znode_t *zp = sa_get_userdata(hdl);
1507 1510 dmu_buf_t *db = sa_get_db(hdl);
1508 1511 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1509 1512 int count = 0, err = 0;
1510 1513 sa_bulk_attr_t *bulk, *attrs;
1511 1514 zfs_acl_locator_cb_t locate = { 0 };
1512 1515 uint64_t uid, gid, mode, rdev, xattr = 0, parent, gen, links;
1513 1516 uint64_t crtime[2], mtime[2], ctime[2], atime[2];
1514 1517 zfs_acl_phys_t znode_acl = { 0 };
1515 1518 char scanstamp[AV_SCANSTAMP_SZ];
1516 1519
1517 1520 if (zp->z_acl_cached == NULL) {
1518 1521 zfs_acl_t *aclp;
1519 1522
1520 1523 mutex_enter(&zp->z_acl_lock);
1521 1524 err = zfs_acl_node_read(zp, B_FALSE, &aclp, B_FALSE);
1522 1525 mutex_exit(&zp->z_acl_lock);
1523 1526 if (err != 0 && err != ENOENT)
1524 1527 return (err);
1525 1528 }
1526 1529
1527 1530 bulk = kmem_zalloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
1528 1531 attrs = kmem_zalloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
1529 1532 mutex_enter(&hdl->sa_lock);
1530 1533 mutex_enter(&zp->z_lock);
1531 1534
1532 1535 err = sa_lookup_locked(hdl, SA_ZPL_PROJID(zfsvfs), &projid,
1533 1536 sizeof (uint64_t));
1534 1537 if (unlikely(err == 0))
1535 1538 /* Someone has added project ID attr by race. */
1536 1539 err = EEXIST;
1537 1540 if (err != ENOENT)
1538 1541 goto out;
1539 1542
1540 1543 /* First do a bulk query of the attributes that aren't cached */
1541 1544 if (zp->z_is_sa) {
1542 1545 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
1543 1546 &mode, 8);
1544 1547 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
1545 1548 &gen, 8);
1546 1549 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
1547 1550 &uid, 8);
1548 1551 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
1549 1552 &gid, 8);
1550 1553 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
1551 1554 &parent, 8);
1552 1555 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
1553 1556 &atime, 16);
1554 1557 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
1555 1558 &mtime, 16);
1556 1559 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
1557 1560 &ctime, 16);
1558 1561 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL,
1559 1562 &crtime, 16);
1560 1563 if (S_ISBLK(zp->z_mode) || S_ISCHR(zp->z_mode))
1561 1564 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL,
1562 1565 &rdev, 8);
1563 1566 } else {
1564 1567 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
1565 1568 &atime, 16);
1566 1569 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
1567 1570 &mtime, 16);
1568 1571 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
1569 1572 &ctime, 16);
1570 1573 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL,
1571 1574 &crtime, 16);
1572 1575 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
1573 1576 &gen, 8);
1574 1577 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
1575 1578 &mode, 8);
1576 1579 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
1577 1580 &parent, 8);
1578 1581 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_XATTR(zfsvfs), NULL,
1579 1582 &xattr, 8);
1580 1583 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL,
1581 1584 &rdev, 8);
1582 1585 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
1583 1586 &uid, 8);
1584 1587 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
1585 1588 &gid, 8);
1586 1589 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
1587 1590 &znode_acl, 88);
1588 1591 }
1589 1592 err = sa_bulk_lookup_locked(hdl, bulk, count);
1590 1593 if (err != 0)
1591 1594 goto out;
1592 1595
1593 1596 err = sa_lookup_locked(hdl, SA_ZPL_XATTR(zfsvfs), &xattr, 8);
1594 1597 if (err != 0 && err != ENOENT)
1595 1598 goto out;
1596 1599
1597 1600 zp->z_projid = projid;
1598 1601 zp->z_pflags |= ZFS_PROJID;
1599 1602 links = zp->z_links;
1600 1603 count = 0;
1601 1604 err = 0;
1602 1605
1603 1606 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
1604 1607 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_SIZE(zfsvfs), NULL,
1605 1608 &zp->z_size, 8);
1606 1609 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_GEN(zfsvfs), NULL, &gen, 8);
1607 1610 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_UID(zfsvfs), NULL, &uid, 8);
1608 1611 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_GID(zfsvfs), NULL, &gid, 8);
1609 1612 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8);
1610 1613 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_FLAGS(zfsvfs), NULL,
1611 1614 &zp->z_pflags, 8);
1612 1615 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
1613 1616 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
1614 1617 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
1615 1618 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_CRTIME(zfsvfs), NULL,
1616 1619 &crtime, 16);
1617 1620 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
1618 1621 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_PROJID(zfsvfs), NULL, &projid, 8);
1619 1622
1620 1623 if (S_ISBLK(zp->z_mode) || S_ISCHR(zp->z_mode))
1621 1624 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_RDEV(zfsvfs), NULL,
1622 1625 &rdev, 8);
1623 1626
1624 1627 if (zp->z_acl_cached != NULL) {
1625 1628 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
1626 1629 &zp->z_acl_cached->z_acl_count, 8);
1627 1630 if (zp->z_acl_cached->z_version < ZFS_ACL_VERSION_FUID)
1628 1631 zfs_acl_xform(zp, zp->z_acl_cached, CRED());
1629 1632 locate.cb_aclp = zp->z_acl_cached;
1630 1633 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_DACL_ACES(zfsvfs),
1631 1634 zfs_acl_data_locator, &locate,
1632 1635 zp->z_acl_cached->z_acl_bytes);
1633 1636 }
1634 1637
1635 1638 if (xattr)
1636 1639 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_XATTR(zfsvfs), NULL,
1637 1640 &xattr, 8);
1638 1641
1639 1642 if (zp->z_pflags & ZFS_BONUS_SCANSTAMP) {
1640 1643 bcopy((caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
1641 1644 scanstamp, AV_SCANSTAMP_SZ);
1642 1645 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_SCANSTAMP(zfsvfs), NULL,
1643 1646 scanstamp, AV_SCANSTAMP_SZ);
1644 1647 zp->z_pflags &= ~ZFS_BONUS_SCANSTAMP;
1645 1648 }
1646 1649
1647 1650 VERIFY(dmu_set_bonustype(db, DMU_OT_SA, tx) == 0);
1648 1651 VERIFY(sa_replace_all_by_template_locked(hdl, attrs, count, tx) == 0);
1649 1652 if (znode_acl.z_acl_extern_obj) {
1650 1653 VERIFY(0 == dmu_object_free(zfsvfs->z_os,
1651 1654 znode_acl.z_acl_extern_obj, tx));
1652 1655 }
1653 1656
1654 1657 zp->z_is_sa = B_TRUE;
1655 1658
1656 1659 out:
1657 1660 mutex_exit(&zp->z_lock);
1658 1661 mutex_exit(&hdl->sa_lock);
1659 1662 kmem_free(attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
1660 1663 kmem_free(bulk, sizeof (sa_bulk_attr_t) * ZPL_END);
1661 1664 return (err);
1662 1665 }
1663 1666 #endif
1664 1667
1665 1668 static sa_idx_tab_t *
1666 1669 sa_find_idx_tab(objset_t *os, dmu_object_type_t bonustype, sa_hdr_phys_t *hdr)
1667 1670 {
1668 1671 sa_idx_tab_t *idx_tab;
1669 1672 sa_os_t *sa = os->os_sa;
1670 1673 sa_lot_t *tb, search;
1671 1674 avl_index_t loc;
1672 1675
1673 1676 /*
1674 1677	 * Determine the layout number.  If this is an SA node and the
1675 1678	 * header is 0, force the index table to the dummy "1" empty layout.
1676 1679 *
1677 1680 * The layout number would only be zero for a newly created file
1678 1681 * that has not added any attributes yet, or with crypto enabled which
1679 1682 * doesn't write any attributes to the bonus buffer.
1680 1683 */
1681 1684
1682 1685 search.lot_num = SA_LAYOUT_NUM(hdr, bonustype);
1683 1686
1684 1687 tb = avl_find(&sa->sa_layout_num_tree, &search, &loc);
1685 1688
1686 1689 /* Verify header size is consistent with layout information */
1687 1690 ASSERT(tb);
1688 1691 ASSERT(IS_SA_BONUSTYPE(bonustype) &&
1689 1692 SA_HDR_SIZE_MATCH_LAYOUT(hdr, tb) || !IS_SA_BONUSTYPE(bonustype) ||
1690 1693 (IS_SA_BONUSTYPE(bonustype) && hdr->sa_layout_info == 0));
1691 1694
1692 1695 /*
1693 1696	 * See if any of the already existing TOC entries can be reused.
1694 1697 */
1695 1698
1696 1699 for (idx_tab = list_head(&tb->lot_idx_tab); idx_tab;
1697 1700 idx_tab = list_next(&tb->lot_idx_tab, idx_tab)) {
1698 1701 boolean_t valid_idx = B_TRUE;
1699 1702 int i;
1700 1703
1701 1704 if (tb->lot_var_sizes != 0 &&
1702 1705 idx_tab->sa_variable_lengths != NULL) {
1703 1706 for (i = 0; i != tb->lot_var_sizes; i++) {
1704 1707 if (hdr->sa_lengths[i] !=
1705 1708 idx_tab->sa_variable_lengths[i]) {
1706 1709 valid_idx = B_FALSE;
1707 1710 break;
1708 1711 }
1709 1712 }
1710 1713 }
1711 1714 if (valid_idx) {
1712 1715 sa_idx_tab_hold(os, idx_tab);
1713 1716 return (idx_tab);
1714 1717 }
1715 1718 }
1716 1719
1717 1720 /* No such luck, create a new entry */
1718 1721 idx_tab = kmem_zalloc(sizeof (sa_idx_tab_t), KM_SLEEP);
1719 1722 idx_tab->sa_idx_tab =
1720 1723 kmem_zalloc(sizeof (uint32_t) * sa->sa_num_attrs, KM_SLEEP);
1721 1724 idx_tab->sa_layout = tb;
1722 1725 zfs_refcount_create(&idx_tab->sa_refcount);
1723 1726 if (tb->lot_var_sizes)
1724 1727 idx_tab->sa_variable_lengths = kmem_alloc(sizeof (uint16_t) *
1725 1728 tb->lot_var_sizes, KM_SLEEP);
1726 1729
1727 1730 sa_attr_iter(os, hdr, bonustype, sa_build_idx_tab,
1728 1731 tb, idx_tab);
1729 1732 sa_idx_tab_hold(os, idx_tab); /* one hold for consumer */
1730 1733 sa_idx_tab_hold(os, idx_tab); /* one for layout */
1731 1734 list_insert_tail(&tb->lot_idx_tab, idx_tab);
1732 1735 return (idx_tab);
1733 1736 }
1734 1737
1735 1738 void
1736 1739 sa_default_locator(void **dataptr, uint32_t *len, uint32_t total_len,
1737 1740 boolean_t start, void *userdata)
1738 1741 {
1739 1742 ASSERT(start);
1740 1743
1741 1744 *dataptr = userdata;
1742 1745 *len = total_len;
1743 1746 }
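
sa_default_locator() shows the simplest form of the data-locator contract: hand back the caller's buffer in a single chunk on the first call. Custom locators (for example zfs_acl_data_locator(), used in the znode conversion above) let a caller reshape how attribute bytes are produced when the layout is written. The following is a minimal sketch of that contract, not part of this change; the my_sa_data_t type and its fields are hypothetical.

typedef struct my_sa_data {
	void		*md_buf;	/* caller-owned attribute bytes */
	uint32_t	md_len;		/* length of md_buf */
} my_sa_data_t;

static void
my_sa_data_locator(void **dataptr, uint32_t *len, uint32_t total_len,
    boolean_t start, void *userdata)
{
	my_sa_data_t *md = userdata;

	/* Single-chunk locator: everything is returned on the first call. */
	ASSERT(start);
	ASSERT3U(md->md_len, ==, total_len);

	*dataptr = md->md_buf;
	*len = total_len;
}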
1744 1747
1745 1748 static void
1746 1749 sa_attr_register_sync(sa_handle_t *hdl, dmu_tx_t *tx)
1747 1750 {
1748 1751 uint64_t attr_value = 0;
1749 1752 sa_os_t *sa = hdl->sa_os->os_sa;
1750 1753 sa_attr_table_t *tb = sa->sa_attr_table;
1751 1754 int i;
1752 1755
1753 1756 mutex_enter(&sa->sa_lock);
1754 1757
1755 1758 if (!sa->sa_need_attr_registration || sa->sa_master_obj == 0) {
1756 1759 mutex_exit(&sa->sa_lock);
1757 1760 return;
1758 1761 }
1759 1762
1760 1763 if (sa->sa_reg_attr_obj == 0) {
1761 1764 sa->sa_reg_attr_obj = zap_create_link(hdl->sa_os,
1762 1765 DMU_OT_SA_ATTR_REGISTRATION,
1763 1766 sa->sa_master_obj, SA_REGISTRY, tx);
1764 1767 }
1765 1768 for (i = 0; i != sa->sa_num_attrs; i++) {
1766 1769 if (sa->sa_attr_table[i].sa_registered)
1767 1770 continue;
1768 1771 ATTR_ENCODE(attr_value, tb[i].sa_attr, tb[i].sa_length,
1769 1772 tb[i].sa_byteswap);
1770 1773 VERIFY(0 == zap_update(hdl->sa_os, sa->sa_reg_attr_obj,
1771 1774 tb[i].sa_name, 8, 1, &attr_value, tx));
1772 1775 tb[i].sa_registered = B_TRUE;
1773 1776 }
1774 1777 sa->sa_need_attr_registration = B_FALSE;
1775 1778 mutex_exit(&sa->sa_lock);
1776 1779 }
1777 1780
1778 1781 /*
1779 1782 * Replace all attributes with attributes specified in template.
1780 1783	 * If the dnode had a spill buffer, then those attributes will
1781 1784	 * also be replaced, possibly with just an empty spill block.
1782 1785 *
1783 1786 * This interface is intended to only be used for bulk adding of
1784 1787 * attributes for a new file. It will also be used by the ZPL
1785 1788	 * when converting an old-format znode to native SA support.
1786 1789 */
1787 1790 int
1788 1791 sa_replace_all_by_template_locked(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc,
1789 1792 int attr_count, dmu_tx_t *tx)
1790 1793 {
1791 1794 sa_os_t *sa = hdl->sa_os->os_sa;
1792 1795
1793 1796 if (sa->sa_need_attr_registration)
1794 1797 sa_attr_register_sync(hdl, tx);
1795 1798 return (sa_build_layouts(hdl, attr_desc, attr_count, tx));
1796 1799 }
1797 1800
1798 1801 int
1799 1802 sa_replace_all_by_template(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc,
1800 1803 int attr_count, dmu_tx_t *tx)
1801 1804 {
1802 1805 int error;
1803 1806
1804 1807 mutex_enter(&hdl->sa_lock);
1805 1808 error = sa_replace_all_by_template_locked(hdl, attr_desc,
1806 1809 attr_count, tx);
1807 1810 mutex_exit(&hdl->sa_lock);
1808 1811 return (error);
1809 1812 }
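
As the block comment above says, this interface is meant for bulk-adding the attributes of a new file or converting an old-format znode, as the conversion code earlier in this file does. A hedged usage sketch follows (not part of this change); the zfsvfs_t pointer and the SA_ZPL_* attribute handles mirror the ZPL usage above, and the values are placeholders.

static int
example_replace_all(sa_handle_t *hdl, zfsvfs_t *zfsvfs, dmu_tx_t *tx)
{
	sa_bulk_attr_t attrs[4];
	int count = 0;
	uint64_t mode = 0100644;	/* placeholder values */
	uint64_t size = 0;
	uint64_t parent = 0;
	uint64_t ctime[2] = { 0, 0 };

	SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
	SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_SIZE(zfsvfs), NULL, &size, 8);
	SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_PARENT(zfsvfs), NULL,
	    &parent, 8);
	SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);

	/* Rewrites the whole layout from the template in one transaction. */
	return (sa_replace_all_by_template(hdl, attrs, count, tx));
}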
1810 1813
1811 1814 /*
1812 1815 * Add/remove a single attribute or replace a variable-sized attribute value
1813 1816 * with a value of a different size, and then rewrite the entire set
1814 1817 * of attributes.
1815 1818 * Same-length attribute value replacement (including fixed-length attributes)
1816 1819 * is handled more efficiently by the upper layers.
1817 1820 */
1818 1821 static int
1819 1822 sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
1820 1823 sa_data_op_t action, sa_data_locator_t *locator, void *datastart,
1821 1824 uint16_t buflen, dmu_tx_t *tx)
1822 1825 {
1823 1826 sa_os_t *sa = hdl->sa_os->os_sa;
1824 1827 dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
1825 1828 dnode_t *dn;
1826 1829 sa_bulk_attr_t *attr_desc;
1827 1830 void *old_data[2];
1828 1831 int bonus_attr_count = 0;
1829 1832 int bonus_data_size = 0;
1830 1833 int spill_data_size = 0;
1831 1834 int spill_attr_count = 0;
1832 1835 int error;
1833 1836 uint16_t length, reg_length;
1834 1837 int i, j, k, length_idx;
1835 1838 sa_hdr_phys_t *hdr;
1836 1839 sa_idx_tab_t *idx_tab;
1837 1840 int attr_count;
1838 1841 int count;
1839 1842
1840 1843 ASSERT(MUTEX_HELD(&hdl->sa_lock));
1841 1844
1842 1845	/* First make a copy of the old data */
1843 1846
1844 1847 DB_DNODE_ENTER(db);
1845 1848 dn = DB_DNODE(db);
1846 1849 if (dn->dn_bonuslen != 0) {
1847 1850 bonus_data_size = hdl->sa_bonus->db_size;
1848 1851 old_data[0] = kmem_alloc(bonus_data_size, KM_SLEEP);
1849 1852 bcopy(hdl->sa_bonus->db_data, old_data[0],
1850 1853 hdl->sa_bonus->db_size);
1851 1854 bonus_attr_count = hdl->sa_bonus_tab->sa_layout->lot_attr_count;
1852 1855 } else {
1853 1856 old_data[0] = NULL;
1854 1857 }
1855 1858 DB_DNODE_EXIT(db);
1856 1859
1857 1860 /* Bring spill buffer online if it isn't currently */
1858 1861
1859 1862 if ((error = sa_get_spill(hdl)) == 0) {
1860 1863 spill_data_size = hdl->sa_spill->db_size;
1861 1864 old_data[1] = kmem_alloc(spill_data_size, KM_SLEEP);
1862 1865 bcopy(hdl->sa_spill->db_data, old_data[1],
1863 1866 hdl->sa_spill->db_size);
1864 1867 spill_attr_count =
1865 1868 hdl->sa_spill_tab->sa_layout->lot_attr_count;
1866 1869 } else if (error && error != ENOENT) {
1867 1870 if (old_data[0])
1868 1871 kmem_free(old_data[0], bonus_data_size);
1869 1872 return (error);
1870 1873 } else {
1871 1874 old_data[1] = NULL;
1872 1875 }
1873 1876
1874 1877 /* build descriptor of all attributes */
1875 1878
1876 1879 attr_count = bonus_attr_count + spill_attr_count;
1877 1880 if (action == SA_ADD)
1878 1881 attr_count++;
1879 1882 else if (action == SA_REMOVE)
1880 1883 attr_count--;
1881 1884
1882 1885 attr_desc = kmem_zalloc(sizeof (sa_bulk_attr_t) * attr_count, KM_SLEEP);
1883 1886
1884 1887 /*
1885 1888	 * Loop through the bonus buffer and the spill buffer (if one
1886 1889	 * exists), and build up a new attr_desc to reset the attributes.
1887 1890 */
1888 1891 k = j = 0;
1889 1892 count = bonus_attr_count;
1890 1893 hdr = SA_GET_HDR(hdl, SA_BONUS);
1891 1894 idx_tab = SA_IDX_TAB_GET(hdl, SA_BONUS);
1892 1895 for (; k != 2; k++) {
1893 1896 /*
1894 1897 * Iterate over each attribute in layout. Fetch the
1895 1898 * size of variable-length attributes needing rewrite
1896 1899 * from sa_lengths[].
1897 1900 */
1898 1901 for (i = 0, length_idx = 0; i != count; i++) {
1899 1902 sa_attr_type_t attr;
1900 1903
1901 1904 attr = idx_tab->sa_layout->lot_attrs[i];
1902 1905 reg_length = SA_REGISTERED_LEN(sa, attr);
1903 1906 if (reg_length == 0) {
1904 1907 length = hdr->sa_lengths[length_idx];
1905 1908 length_idx++;
1906 1909 } else {
1907 1910 length = reg_length;
1908 1911 }
1909 1912 if (attr == newattr) {
1910 1913 /*
1911 1914 * There is nothing to do for SA_REMOVE,
1912 1915 * so it is just skipped.
1913 1916 */
1914 1917 if (action == SA_REMOVE)
1915 1918 continue;
1916 1919
1917 1920 /*
1918 1921 * Duplicate attributes are not allowed, so the
1919 1922 * action can not be SA_ADD here.
1920 1923 */
1921 1924 ASSERT3S(action, ==, SA_REPLACE);
1922 1925
1923 1926 /*
1924 1927 * Only a variable-sized attribute can be
1925 1928 * replaced here, and its size must be changing.
1926 1929 */
1927 1930 ASSERT3U(reg_length, ==, 0);
1928 1931 ASSERT3U(length, !=, buflen);
1929 1932 SA_ADD_BULK_ATTR(attr_desc, j, attr,
1930 1933 locator, datastart, buflen);
1931 1934 } else {
1932 1935 SA_ADD_BULK_ATTR(attr_desc, j, attr,
1933 1936 NULL, (void *)
1934 1937 (TOC_OFF(idx_tab->sa_idx_tab[attr]) +
1935 1938 (uintptr_t)old_data[k]), length);
1936 1939 }
1937 1940 }
1938 1941 if (k == 0 && hdl->sa_spill) {
1939 1942 hdr = SA_GET_HDR(hdl, SA_SPILL);
1940 1943 idx_tab = SA_IDX_TAB_GET(hdl, SA_SPILL);
1941 1944 count = spill_attr_count;
1942 1945 } else {
1943 1946 break;
1944 1947 }
1945 1948 }
1946 1949 if (action == SA_ADD) {
1947 1950 reg_length = SA_REGISTERED_LEN(sa, newattr);
1948 1951 IMPLY(reg_length != 0, reg_length == buflen);
1949 1952 SA_ADD_BULK_ATTR(attr_desc, j, newattr, locator,
1950 1953 datastart, buflen);
1951 1954 }
1952 1955 ASSERT3U(j, ==, attr_count);
1953 1956
1954 1957 error = sa_build_layouts(hdl, attr_desc, attr_count, tx);
1955 1958
1956 1959 if (old_data[0])
1957 1960 kmem_free(old_data[0], bonus_data_size);
1958 1961 if (old_data[1])
1959 1962 kmem_free(old_data[1], spill_data_size);
1960 1963 kmem_free(attr_desc, sizeof (sa_bulk_attr_t) * attr_count);
1961 1964
1962 1965 return (error);
1963 1966 }
1964 1967
1965 1968 static int
1966 1969 sa_bulk_update_impl(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count,
1967 1970 dmu_tx_t *tx)
1968 1971 {
1969 1972 int error;
1970 1973 sa_os_t *sa = hdl->sa_os->os_sa;
1971 1974 dmu_object_type_t bonustype;
1972 1975
1973 1976 bonustype = SA_BONUSTYPE_FROM_DB(SA_GET_DB(hdl, SA_BONUS));
1974 1977
1975 1978 ASSERT(hdl);
1976 1979 ASSERT(MUTEX_HELD(&hdl->sa_lock));
1977 1980
1978 1981 /* sync out registration table if necessary */
1979 1982 if (sa->sa_need_attr_registration)
1980 1983 sa_attr_register_sync(hdl, tx);
1981 1984
1982 1985 error = sa_attr_op(hdl, bulk, count, SA_UPDATE, tx);
1983 1986 if (error == 0 && !IS_SA_BONUSTYPE(bonustype) && sa->sa_update_cb)
1984 1987 sa->sa_update_cb(hdl, tx);
1985 1988
1986 1989 return (error);
1987 1990 }
1988 1991
1989 1992 /*
1990 1993 * update or add new attribute
1991 1994 */
1992 1995 int
1993 1996 sa_update(sa_handle_t *hdl, sa_attr_type_t type,
1994 1997 void *buf, uint32_t buflen, dmu_tx_t *tx)
1995 1998 {
1996 1999 int error;
1997 2000 sa_bulk_attr_t bulk;
1998 2001
1999 2002 bulk.sa_attr = type;
2000 2003 bulk.sa_data_func = NULL;
2001 2004 bulk.sa_length = buflen;
2002 2005 bulk.sa_data = buf;
2003 2006
2004 2007 mutex_enter(&hdl->sa_lock);
2005 2008 error = sa_bulk_update_impl(hdl, &bulk, 1, tx);
2006 2009 mutex_exit(&hdl->sa_lock);
2007 2010 return (error);
2008 2011 }
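
A hedged single-attribute usage sketch (not part of this change): the handle, zfsvfs_t and assigned transaction are assumed to be supplied by the caller, and the link-count handle follows the ZPL usage elsewhere in this file.

static int
example_set_links(sa_handle_t *hdl, zfsvfs_t *zfsvfs, uint64_t links,
    dmu_tx_t *tx)
{
	/* Fixed-length 8-byte attribute; sa_update() takes the handle lock. */
	return (sa_update(hdl, SA_ZPL_LINKS(zfsvfs), &links,
	    sizeof (links), tx));
}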
2009 2012
2010 2013 int
2011 2014 sa_update_from_cb(sa_handle_t *hdl, sa_attr_type_t attr,
2012 2015 uint32_t buflen, sa_data_locator_t *locator, void *userdata, dmu_tx_t *tx)
2013 2016 {
2014 2017 int error;
2015 2018 sa_bulk_attr_t bulk;
2016 2019
2017 2020 bulk.sa_attr = attr;
2018 2021 bulk.sa_data = userdata;
2019 2022 bulk.sa_data_func = locator;
2020 2023 bulk.sa_length = buflen;
2021 2024
2022 2025 mutex_enter(&hdl->sa_lock);
2023 2026 error = sa_bulk_update_impl(hdl, &bulk, 1, tx);
2024 2027 mutex_exit(&hdl->sa_lock);
2025 2028 return (error);
2026 2029 }
2027 2030
2028 2031 /*
2029 2032 * Return size of an attribute
2030 2033 */
2031 2034
2032 2035 int
2033 2036 sa_size(sa_handle_t *hdl, sa_attr_type_t attr, int *size)
2034 2037 {
2035 2038 sa_bulk_attr_t bulk;
2036 2039 int error;
2037 2040
2038 2041 bulk.sa_data = NULL;
2039 2042 bulk.sa_attr = attr;
2040 2043 bulk.sa_data_func = NULL;
2041 2044
2042 2045 ASSERT(hdl);
2043 2046 mutex_enter(&hdl->sa_lock);
2044 2047 if ((error = sa_attr_op(hdl, &bulk, 1, SA_LOOKUP, NULL)) != 0) {
2045 2048 mutex_exit(&hdl->sa_lock);
2046 2049 return (error);
2047 2050 }
2048 2051 *size = bulk.sa_size;
2049 2052
2050 2053 mutex_exit(&hdl->sa_lock);
2051 2054 return (0);
2052 2055 }
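
A common pattern is to size a variable-length attribute before fetching it. The sketch below (not part of this change) assumes sa_lookup(hdl, attr, buf, buflen), which is declared in sys/sa.h but does not appear in this hunk, and uses the ACE-array handle purely as an example of a variable-length attribute.

static int
example_sized_lookup(sa_handle_t *hdl, zfsvfs_t *zfsvfs)
{
	int len, err;
	void *buf;

	if ((err = sa_size(hdl, SA_ZPL_DACL_ACES(zfsvfs), &len)) != 0)
		return (err);

	buf = kmem_alloc(len, KM_SLEEP);
	err = sa_lookup(hdl, SA_ZPL_DACL_ACES(zfsvfs), buf, len);
	/* consume buf on success */
	kmem_free(buf, len);
	return (err);
}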
2053 2056
2054 2057 int
2055 2058 sa_bulk_lookup_locked(sa_handle_t *hdl, sa_bulk_attr_t *attrs, int count)
2056 2059 {
2057 2060 ASSERT(hdl);
2058 2061 ASSERT(MUTEX_HELD(&hdl->sa_lock));
2059 2062 return (sa_lookup_impl(hdl, attrs, count));
2060 2063 }
2061 2064
2062 2065 int
2063 2066 sa_bulk_lookup(sa_handle_t *hdl, sa_bulk_attr_t *attrs, int count)
2064 2067 {
2065 2068 int error;
2066 2069
2067 2070 ASSERT(hdl);
2068 2071 mutex_enter(&hdl->sa_lock);
2069 2072 error = sa_bulk_lookup_locked(hdl, attrs, count);
2070 2073 mutex_exit(&hdl->sa_lock);
2071 2074 return (error);
2072 2075 }
2073 2076
2074 2077 int
2075 2078 sa_bulk_update(sa_handle_t *hdl, sa_bulk_attr_t *attrs, int count, dmu_tx_t *tx)
2076 2079 {
2077 2080 int error;
2078 2081
2079 2082 ASSERT(hdl);
2080 2083 mutex_enter(&hdl->sa_lock);
2081 2084 error = sa_bulk_update_impl(hdl, attrs, count, tx);
2082 2085 mutex_exit(&hdl->sa_lock);
2083 2086 return (error);
2084 2087 }
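
Unlike sa_replace_all_by_template(), sa_bulk_update() updates attributes that are already present, batching several of them under a single acquisition of the handle lock. A hedged sketch (not part of this change); handles and values are assumptions, as in the other sketches.

static int
example_bulk_update(sa_handle_t *hdl, zfsvfs_t *zfsvfs, uint64_t newsize,
    uint64_t mtime[2], dmu_tx_t *tx)
{
	sa_bulk_attr_t bulk[2];
	int count = 0;

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL, &newsize, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, mtime, 16);

	return (sa_bulk_update(hdl, bulk, count, tx));
}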
2085 2088
2086 2089 int
2087 2090 sa_remove(sa_handle_t *hdl, sa_attr_type_t attr, dmu_tx_t *tx)
2088 2091 {
2089 2092 int error;
2090 2093
2091 2094 mutex_enter(&hdl->sa_lock);
2092 2095 error = sa_modify_attrs(hdl, attr, SA_REMOVE, NULL,
2093 2096 NULL, 0, tx);
2094 2097 mutex_exit(&hdl->sa_lock);
2095 2098 return (error);
2096 2099 }
2097 2100
2098 2101 void
2099 2102 sa_object_info(sa_handle_t *hdl, dmu_object_info_t *doi)
2100 2103 {
2101 2104 dmu_object_info_from_db((dmu_buf_t *)hdl->sa_bonus, doi);
2102 2105 }
2103 2106
2104 2107 void
2105 2108 sa_object_size(sa_handle_t *hdl, uint32_t *blksize, u_longlong_t *nblocks)
2106 2109 {
2107 2110 dmu_object_size_from_db((dmu_buf_t *)hdl->sa_bonus,
2108 2111 blksize, nblocks);
2109 2112 }
2110 2113
2111 2114 void
2112 2115 sa_set_userp(sa_handle_t *hdl, void *ptr)
2113 2116 {
2114 2117 hdl->sa_userp = ptr;
2115 2118 }
2116 2119
2117 2120 dmu_buf_t *
2118 2121 sa_get_db(sa_handle_t *hdl)
2119 2122 {
2120 2123 return ((dmu_buf_t *)hdl->sa_bonus);
2121 2124 }
2122 2125
2123 2126 void *
2124 2127 sa_get_userdata(sa_handle_t *hdl)
2125 2128 {
2126 2129 return (hdl->sa_userp);
2127 2130 }
2128 2131
2129 2132 void
2130 2133 sa_register_update_callback_locked(objset_t *os, sa_update_cb_t *func)
2131 2134 {
2132 2135 ASSERT(MUTEX_HELD(&os->os_sa->sa_lock));
2133 2136 os->os_sa->sa_update_cb = func;
2134 2137 }
2135 2138
2136 2139 void
2137 2140 sa_register_update_callback(objset_t *os, sa_update_cb_t *func)
2138 2141 {
2139 2142
2140 2143 mutex_enter(&os->os_sa->sa_lock);
2141 2144 sa_register_update_callback_locked(os, func);
2142 2145 mutex_exit(&os->os_sa->sa_lock);
2143 2146 }
2144 2147
2145 2148 uint64_t
2146 2149 sa_handle_object(sa_handle_t *hdl)
2147 2150 {
2148 2151 return (hdl->sa_bonus->db_object);
2149 2152 }
2150 2153
2151 2154 boolean_t
2152 2155 sa_enabled(objset_t *os)
2153 2156 {
2154 2157 return (os->os_sa == NULL);
2155 2158 }
2156 2159
2157 2160 int
2158 2161 sa_set_sa_object(objset_t *os, uint64_t sa_object)
2159 2162 {
2160 2163 sa_os_t *sa = os->os_sa;
2161 2164
2162 2165 if (sa->sa_master_obj)
2163 2166 return (1);
2164 2167
2165 2168 sa->sa_master_obj = sa_object;
2166 2169
2167 2170 return (0);
2168 2171 }
2169 2172
2170 2173 int
2171 2174 sa_hdrsize(void *arg)
2172 2175 {
2173 2176 sa_hdr_phys_t *hdr = arg;
2174 2177
2175 2178 return (SA_HDR_SIZE(hdr));
2176 2179 }
2177 2180
2178 2181 void
2179 2182 sa_handle_lock(sa_handle_t *hdl)
2180 2183 {
2181 2184 ASSERT(hdl);
2182 2185 mutex_enter(&hdl->sa_lock);
2183 2186 }
2184 2187
2185 2188 void
2186 2189 sa_handle_unlock(sa_handle_t *hdl)
2187 2190 {
2188 2191 ASSERT(hdl);
2189 2192 mutex_exit(&hdl->sa_lock);
2190 2193 }