2619 asynchronous destruction of ZFS file systems
2747 SPA versioning with zfs feature flags
Reviewed by: Matt Ahrens <mahrens@delphix.com>
Reviewed by: George Wilson <gwilson@delphix.com>
Reviewed by: Richard Lowe <richlowe@richlowe.net>
Reviewed by: Dan Kruchinin <dan.kruchinin@gmail.com>
Approved by: Dan McDonald <danmcd@nexenta.com>
--- old/usr/src/cmd/ztest/ztest.c
+++ new/usr/src/cmd/ztest/ztest.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright (c) 2012 by Delphix. All rights reserved.
24 24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 25 */
26 26
27 27 /*
28 28 * The objective of this program is to provide a DMU/ZAP/SPA stress test
29 29 * that runs entirely in userland, is easy to use, and easy to extend.
30 30 *
31 31 * The overall design of the ztest program is as follows:
32 32 *
33 33 * (1) For each major functional area (e.g. adding vdevs to a pool,
34 34 * creating and destroying datasets, reading and writing objects, etc)
35 35 * we have a simple routine to test that functionality. These
36 36 * individual routines do not have to do anything "stressful".
37 37 *
38 38 * (2) We turn these simple functionality tests into a stress test by
39 39 * running them all in parallel, with as many threads as desired,
40 40 * and spread across as many datasets, objects, and vdevs as desired.
41 41 *
42 42 * (3) While all this is happening, we inject faults into the pool to
43 43 * verify that self-healing data really works.
44 44 *
45 45 * (4) Every time we open a dataset, we change its checksum and compression
46 46 * functions. Thus even individual objects vary from block to block
47 47 * in which checksum they use and whether they're compressed.
48 48 *
49 49 * (5) To verify that we never lose on-disk consistency after a crash,
50 50 * we run the entire test in a child of the main process.
51 51 * At random times, the child self-immolates with a SIGKILL.
52 52 * This is the software equivalent of pulling the power cord.
53 53 * The parent then runs the test again, using the existing
  54  54  *     storage pool, as many times as desired. If backwards compatibility
  55  55  *     testing is enabled, ztest will sometimes run the "older" version
56 56 * of ztest after a SIGKILL.
57 57 *
58 58 * (6) To verify that we don't have future leaks or temporal incursions,
59 59 * many of the functional tests record the transaction group number
60 60 * as part of their data. When reading old data, they verify that
61 61 * the transaction group number is less than the current, open txg.
62 62 * If you add a new test, please do this if applicable.
63 63 *
64 64 * When run with no arguments, ztest runs for about five minutes and
65 65 * produces no output if successful. To get a little bit of information,
66 66 * specify -V. To get more information, specify -VV, and so on.
67 67 *
68 68 * To turn this into an overnight stress test, use -T to specify run time.
69 69 *
  70  70  * You can ask for more vdevs [-v], datasets [-d], or threads [-t]
71 71 * to increase the pool capacity, fanout, and overall stress level.
72 72 *
73 73 * Use the -k option to set the desired frequency of kills.
74 74 *
75 75 * When ztest invokes itself it passes all relevant information through a
76 76 * temporary file which is mmap-ed in the child process. This allows shared
77 77 * memory to survive the exec syscall. The ztest_shared_hdr_t struct is always
78 78 * stored at offset 0 of this file and contains information on the size and
79 79 * number of shared structures in the file. The information stored in this file
80 80 * must remain backwards compatible with older versions of ztest so that
81 81 * ztest can invoke them during backwards compatibility testing (-B).
82 82 */
83 83
84 84 #include <sys/zfs_context.h>
85 85 #include <sys/spa.h>
86 86 #include <sys/dmu.h>
87 87 #include <sys/txg.h>
88 88 #include <sys/dbuf.h>
89 89 #include <sys/zap.h>
90 90 #include <sys/dmu_objset.h>
91 91 #include <sys/poll.h>
92 92 #include <sys/stat.h>
93 93 #include <sys/time.h>
94 94 #include <sys/wait.h>
95 95 #include <sys/mman.h>
96 96 #include <sys/resource.h>
97 97 #include <sys/zio.h>
98 98 #include <sys/zil.h>
99 99 #include <sys/zil_impl.h>
100 100 #include <sys/vdev_impl.h>
101 101 #include <sys/vdev_file.h>
102 102 #include <sys/spa_impl.h>
103 103 #include <sys/metaslab_impl.h>
104 104 #include <sys/dsl_prop.h>
105 105 #include <sys/dsl_dataset.h>
106 106 #include <sys/dsl_scan.h>
107 107 #include <sys/zio_checksum.h>
108 108 #include <sys/refcount.h>
109 +#include <sys/zfeature.h>
109 110 #include <stdio.h>
110 111 #include <stdio_ext.h>
111 112 #include <stdlib.h>
112 113 #include <unistd.h>
113 114 #include <signal.h>
114 115 #include <umem.h>
115 116 #include <dlfcn.h>
116 117 #include <ctype.h>
117 118 #include <math.h>
118 119 #include <sys/fs/zfs.h>
119 120 #include <libnvpair.h>
120 121
121 122 #define ZTEST_FD_DATA 3
122 123 #define ZTEST_FD_RAND 4
123 124
124 125 typedef struct ztest_shared_hdr {
125 126 uint64_t zh_hdr_size;
126 127 uint64_t zh_opts_size;
127 128 uint64_t zh_size;
128 129 uint64_t zh_stats_size;
129 130 uint64_t zh_stats_count;
130 131 uint64_t zh_ds_size;
131 132 uint64_t zh_ds_count;
132 133 } ztest_shared_hdr_t;
133 134
134 135 static ztest_shared_hdr_t *ztest_shared_hdr;
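
/*
 * Editorial sketch (not part of this diff): the block comment at the top of
 * the file describes how shared state survives the exec syscall. Because the
 * parent leaves ZTEST_FD_DATA open across exec, the child can mmap() the same
 * temporary file and both processes then share the pages. A minimal
 * illustration of the technique; setup_hdr_sketch() is a hypothetical name,
 * not a function from this change:
 */
static ztest_shared_hdr_t *
setup_hdr_sketch(void)
{
	ztest_shared_hdr_t *hdr;

	/* MAP_SHARED mappings of the same file alias the same pages. */
	hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
	    PROT_READ | PROT_WRITE, MAP_SHARED, ZTEST_FD_DATA, 0);
	if (hdr == MAP_FAILED) {
		perror("mmap");
		exit(3);
	}

	return (hdr);
}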
135 136
136 137 typedef struct ztest_shared_opts {
137 138 char zo_pool[MAXNAMELEN];
138 139 char zo_dir[MAXNAMELEN];
139 140 char zo_alt_ztest[MAXNAMELEN];
140 141 char zo_alt_libpath[MAXNAMELEN];
141 142 uint64_t zo_vdevs;
142 143 uint64_t zo_vdevtime;
143 144 size_t zo_vdev_size;
144 145 int zo_ashift;
145 146 int zo_mirrors;
146 147 int zo_raidz;
147 148 int zo_raidz_parity;
148 149 int zo_datasets;
149 150 int zo_threads;
150 151 uint64_t zo_passtime;
151 152 uint64_t zo_killrate;
152 153 int zo_verbose;
153 154 int zo_init;
154 155 uint64_t zo_time;
155 156 uint64_t zo_maxloops;
156 157 uint64_t zo_metaslab_gang_bang;
157 158 } ztest_shared_opts_t;
158 159
159 160 static const ztest_shared_opts_t ztest_opts_defaults = {
160 161 .zo_pool = { 'z', 't', 'e', 's', 't', '\0' },
161 162 .zo_dir = { '/', 't', 'm', 'p', '\0' },
162 163 .zo_alt_ztest = { '\0' },
163 164 .zo_alt_libpath = { '\0' },
164 165 .zo_vdevs = 5,
165 166 .zo_ashift = SPA_MINBLOCKSHIFT,
166 167 .zo_mirrors = 2,
167 168 .zo_raidz = 4,
168 169 .zo_raidz_parity = 1,
169 170 .zo_vdev_size = SPA_MINDEVSIZE,
170 171 .zo_datasets = 7,
171 172 .zo_threads = 23,
172 173 .zo_passtime = 60, /* 60 seconds */
173 174 .zo_killrate = 70, /* 70% kill rate */
174 175 .zo_verbose = 0,
175 176 .zo_init = 1,
176 177 .zo_time = 300, /* 5 minutes */
177 178 .zo_maxloops = 50, /* max loops during spa_freeze() */
178 179 .zo_metaslab_gang_bang = 32 << 10
179 180 };
180 181
181 182 extern uint64_t metaslab_gang_bang;
182 183 extern uint64_t metaslab_df_alloc_threshold;
183 184
184 185 static ztest_shared_opts_t *ztest_shared_opts;
185 186 static ztest_shared_opts_t ztest_opts;
186 187
187 188 typedef struct ztest_shared_ds {
188 189 uint64_t zd_seq;
189 190 } ztest_shared_ds_t;
190 191
191 192 static ztest_shared_ds_t *ztest_shared_ds;
192 193 #define ZTEST_GET_SHARED_DS(d) (&ztest_shared_ds[d])
193 194
194 195 #define BT_MAGIC 0x123456789abcdefULL
195 196 #define MAXFAULTS() \
196 197 (MAX(zs->zs_mirrors, 1) * (ztest_opts.zo_raidz_parity + 1) - 1)
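
/*
 * Worked example: with the defaults (zs_mirrors == 2, zo_raidz_parity == 1),
 * MAXFAULTS() is MAX(2, 1) * (1 + 1) - 1 == 3, so the fault-injection test
 * may damage up to three devices at once without risking data loss.
 */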
197 198
198 199 enum ztest_io_type {
199 200 ZTEST_IO_WRITE_TAG,
200 201 ZTEST_IO_WRITE_PATTERN,
201 202 ZTEST_IO_WRITE_ZEROES,
202 203 ZTEST_IO_TRUNCATE,
203 204 ZTEST_IO_SETATTR,
204 205 ZTEST_IO_TYPES
205 206 };
206 207
207 208 typedef struct ztest_block_tag {
208 209 uint64_t bt_magic;
209 210 uint64_t bt_objset;
210 211 uint64_t bt_object;
211 212 uint64_t bt_offset;
212 213 uint64_t bt_gen;
213 214 uint64_t bt_txg;
214 215 uint64_t bt_crtxg;
215 216 } ztest_block_tag_t;
216 217
217 218 typedef struct bufwad {
218 219 uint64_t bw_index;
219 220 uint64_t bw_txg;
220 221 uint64_t bw_data;
221 222 } bufwad_t;
222 223
223 224 /*
224 225 * XXX -- fix zfs range locks to be generic so we can use them here.
225 226 */
226 227 typedef enum {
227 228 RL_READER,
228 229 RL_WRITER,
229 230 RL_APPEND
230 231 } rl_type_t;
231 232
232 233 typedef struct rll {
233 234 void *rll_writer;
234 235 int rll_readers;
235 236 mutex_t rll_lock;
236 237 cond_t rll_cv;
237 238 } rll_t;
238 239
239 240 typedef struct rl {
240 241 uint64_t rl_object;
241 242 uint64_t rl_offset;
242 243 uint64_t rl_size;
243 244 rll_t *rl_lock;
244 245 } rl_t;
245 246
246 247 #define ZTEST_RANGE_LOCKS 64
247 248 #define ZTEST_OBJECT_LOCKS 64
248 249
249 250 /*
250 251 * Object descriptor. Used as a template for object lookup/create/remove.
251 252 */
252 253 typedef struct ztest_od {
253 254 uint64_t od_dir;
254 255 uint64_t od_object;
255 256 dmu_object_type_t od_type;
256 257 dmu_object_type_t od_crtype;
257 258 uint64_t od_blocksize;
258 259 uint64_t od_crblocksize;
259 260 uint64_t od_gen;
260 261 uint64_t od_crgen;
261 262 char od_name[MAXNAMELEN];
262 263 } ztest_od_t;
263 264
264 265 /*
265 266 * Per-dataset state.
266 267 */
267 268 typedef struct ztest_ds {
268 269 ztest_shared_ds_t *zd_shared;
269 270 objset_t *zd_os;
270 271 rwlock_t zd_zilog_lock;
271 272 zilog_t *zd_zilog;
272 273 ztest_od_t *zd_od; /* debugging aid */
273 274 char zd_name[MAXNAMELEN];
274 275 mutex_t zd_dirobj_lock;
275 276 rll_t zd_object_lock[ZTEST_OBJECT_LOCKS];
276 277 rll_t zd_range_lock[ZTEST_RANGE_LOCKS];
277 278 } ztest_ds_t;
278 279
279 280 /*
280 281 * Per-iteration state.
281 282 */
282 283 typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id);
283 284
284 285 typedef struct ztest_info {
285 286 ztest_func_t *zi_func; /* test function */
286 287 uint64_t zi_iters; /* iterations per execution */
287 288 uint64_t *zi_interval; /* execute every <interval> seconds */
288 289 } ztest_info_t;
289 290
290 291 typedef struct ztest_shared_callstate {
291 292 uint64_t zc_count; /* per-pass count */
292 293 uint64_t zc_time; /* per-pass time */
293 294 uint64_t zc_next; /* next time to call this function */
294 295 } ztest_shared_callstate_t;
295 296
296 297 static ztest_shared_callstate_t *ztest_shared_callstate;
297 298 #define ZTEST_GET_SHARED_CALLSTATE(c) (&ztest_shared_callstate[c])
298 299
299 300 /*
300 301 * Note: these aren't static because we want dladdr() to work.
301 302 */
302 303 ztest_func_t ztest_dmu_read_write;
303 304 ztest_func_t ztest_dmu_write_parallel;
304 305 ztest_func_t ztest_dmu_object_alloc_free;
305 306 ztest_func_t ztest_dmu_commit_callbacks;
306 307 ztest_func_t ztest_zap;
307 308 ztest_func_t ztest_zap_parallel;
308 309 ztest_func_t ztest_zil_commit;
309 310 ztest_func_t ztest_zil_remount;
310 311 ztest_func_t ztest_dmu_read_write_zcopy;
311 312 ztest_func_t ztest_dmu_objset_create_destroy;
312 313 ztest_func_t ztest_dmu_prealloc;
313 314 ztest_func_t ztest_fzap;
314 315 ztest_func_t ztest_dmu_snapshot_create_destroy;
315 316 ztest_func_t ztest_dsl_prop_get_set;
316 317 ztest_func_t ztest_spa_prop_get_set;
317 318 ztest_func_t ztest_spa_create_destroy;
318 319 ztest_func_t ztest_fault_inject;
319 320 ztest_func_t ztest_ddt_repair;
320 321 ztest_func_t ztest_dmu_snapshot_hold;
321 322 ztest_func_t ztest_spa_rename;
322 323 ztest_func_t ztest_scrub;
323 324 ztest_func_t ztest_dsl_dataset_promote_busy;
324 325 ztest_func_t ztest_vdev_attach_detach;
325 326 ztest_func_t ztest_vdev_LUN_growth;
326 327 ztest_func_t ztest_vdev_add_remove;
327 328 ztest_func_t ztest_vdev_aux_add_remove;
328 329 ztest_func_t ztest_split_pool;
329 330 ztest_func_t ztest_reguid;
330 331
331 332 uint64_t zopt_always = 0ULL * NANOSEC; /* all the time */
332 333 uint64_t zopt_incessant = 1ULL * NANOSEC / 10; /* every 1/10 second */
333 334 uint64_t zopt_often = 1ULL * NANOSEC; /* every second */
334 335 uint64_t zopt_sometimes = 10ULL * NANOSEC; /* every 10 seconds */
335 336 uint64_t zopt_rarely = 60ULL * NANOSEC; /* every 60 seconds */
336 337
337 338 ztest_info_t ztest_info[] = {
338 339 { ztest_dmu_read_write, 1, &zopt_always },
339 340 { ztest_dmu_write_parallel, 10, &zopt_always },
340 341 { ztest_dmu_object_alloc_free, 1, &zopt_always },
341 342 { ztest_dmu_commit_callbacks, 1, &zopt_always },
342 343 { ztest_zap, 30, &zopt_always },
343 344 { ztest_zap_parallel, 100, &zopt_always },
344 345 { ztest_split_pool, 1, &zopt_always },
345 346 { ztest_zil_commit, 1, &zopt_incessant },
346 347 { ztest_zil_remount, 1, &zopt_sometimes },
347 348 { ztest_dmu_read_write_zcopy, 1, &zopt_often },
348 349 { ztest_dmu_objset_create_destroy, 1, &zopt_often },
349 350 { ztest_dsl_prop_get_set, 1, &zopt_often },
350 351 { ztest_spa_prop_get_set, 1, &zopt_sometimes },
351 352 #if 0
352 353 { ztest_dmu_prealloc, 1, &zopt_sometimes },
353 354 #endif
354 355 { ztest_fzap, 1, &zopt_sometimes },
355 356 { ztest_dmu_snapshot_create_destroy, 1, &zopt_sometimes },
356 357 { ztest_spa_create_destroy, 1, &zopt_sometimes },
357 358 { ztest_fault_inject, 1, &zopt_sometimes },
358 359 { ztest_ddt_repair, 1, &zopt_sometimes },
359 360 { ztest_dmu_snapshot_hold, 1, &zopt_sometimes },
360 361 { ztest_reguid, 1, &zopt_sometimes },
361 362 { ztest_spa_rename, 1, &zopt_rarely },
362 363 { ztest_scrub, 1, &zopt_rarely },
363 364 { ztest_dsl_dataset_promote_busy, 1, &zopt_rarely },
364 365 { ztest_vdev_attach_detach, 1, &zopt_rarely },
365 366 { ztest_vdev_LUN_growth, 1, &zopt_rarely },
366 367 { ztest_vdev_add_remove, 1,
367 368 &ztest_opts.zo_vdevtime },
368 369 { ztest_vdev_aux_add_remove, 1,
369 370 &ztest_opts.zo_vdevtime },
370 371 };
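
/*
 * Example of how the intervals above pace a run: zopt_always entries are
 * eligible on every pass, while ztest_vdev_add_remove fires once every
 * zo_vdevtime nanoseconds; with the defaults (-T 300, -v 5) that is
 * 300 * NANOSEC / 5, i.e. about once a minute (see process_options() below).
 */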
371 372
372 373 #define ZTEST_FUNCS (sizeof (ztest_info) / sizeof (ztest_info_t))
373 374
374 375 /*
375 376 * The following struct is used to hold a list of uncalled commit callbacks.
376 377 * The callbacks are ordered by txg number.
377 378 */
378 379 typedef struct ztest_cb_list {
379 380 mutex_t zcl_callbacks_lock;
380 381 list_t zcl_callbacks;
381 382 } ztest_cb_list_t;
382 383
383 384 /*
384 385 * Stuff we need to share writably between parent and child.
385 386 */
386 387 typedef struct ztest_shared {
387 388 boolean_t zs_do_init;
388 389 hrtime_t zs_proc_start;
389 390 hrtime_t zs_proc_stop;
390 391 hrtime_t zs_thread_start;
391 392 hrtime_t zs_thread_stop;
392 393 hrtime_t zs_thread_kill;
393 394 uint64_t zs_enospc_count;
394 395 uint64_t zs_vdev_next_leaf;
395 396 uint64_t zs_vdev_aux;
396 397 uint64_t zs_alloc;
397 398 uint64_t zs_space;
398 399 uint64_t zs_splits;
399 400 uint64_t zs_mirrors;
400 401 uint64_t zs_metaslab_sz;
401 402 uint64_t zs_metaslab_df_alloc_threshold;
402 403 uint64_t zs_guid;
403 404 } ztest_shared_t;
404 405
405 406 #define ID_PARALLEL -1ULL
406 407
407 408 static char ztest_dev_template[] = "%s/%s.%llua";
408 409 static char ztest_aux_template[] = "%s/%s.%s.%llu";
409 410 ztest_shared_t *ztest_shared;
410 411
411 412 static spa_t *ztest_spa = NULL;
412 413 static ztest_ds_t *ztest_ds;
413 414
414 415 static mutex_t ztest_vdev_lock;
415 416 static rwlock_t ztest_name_lock;
416 417
417 418 static boolean_t ztest_dump_core = B_TRUE;
418 419 static boolean_t ztest_exiting;
419 420
420 421 /* Global commit callback list */
421 422 static ztest_cb_list_t zcl;
422 423
423 424 enum ztest_object {
424 425 ZTEST_META_DNODE = 0,
425 426 ZTEST_DIROBJ,
426 427 ZTEST_OBJECTS
427 428 };
428 429
429 430 static void usage(boolean_t) __NORETURN;
430 431
431 432 /*
432 433 * These libumem hooks provide a reasonable set of defaults for the allocator's
433 434 * debugging facilities.
434 435 */
435 436 const char *
 436  437 _umem_debug_init(void)
437 438 {
438 439 return ("default,verbose"); /* $UMEM_DEBUG setting */
439 440 }
440 441
441 442 const char *
442 443 _umem_logging_init(void)
443 444 {
444 445 return ("fail,contents"); /* $UMEM_LOGGING setting */
445 446 }
446 447
447 448 #define FATAL_MSG_SZ 1024
448 449
449 450 char *fatal_msg;
450 451
451 452 static void
452 453 fatal(int do_perror, char *message, ...)
453 454 {
454 455 va_list args;
455 456 int save_errno = errno;
456 457 char buf[FATAL_MSG_SZ];
457 458
458 459 (void) fflush(stdout);
459 460
460 461 va_start(args, message);
461 462 (void) sprintf(buf, "ztest: ");
462 463 /* LINTED */
463 464 (void) vsprintf(buf + strlen(buf), message, args);
464 465 va_end(args);
465 466 if (do_perror) {
466 467 (void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf),
467 468 ": %s", strerror(save_errno));
468 469 }
469 470 (void) fprintf(stderr, "%s\n", buf);
470 471 fatal_msg = buf; /* to ease debugging */
471 472 if (ztest_dump_core)
472 473 abort();
473 474 exit(3);
474 475 }
475 476
476 477 static int
477 478 str2shift(const char *buf)
478 479 {
479 480 const char *ends = "BKMGTPEZ";
480 481 int i;
481 482
482 483 if (buf[0] == '\0')
483 484 return (0);
484 485 for (i = 0; i < strlen(ends); i++) {
485 486 if (toupper(buf[0]) == ends[i])
486 487 break;
487 488 }
488 489 if (i == strlen(ends)) {
489 490 (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n",
490 491 buf);
491 492 usage(B_FALSE);
492 493 }
493 494 if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) {
494 495 return (10*i);
495 496 }
496 497 (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf);
497 498 usage(B_FALSE);
498 499 /* NOTREACHED */
499 500 }
500 501
501 502 static uint64_t
502 503 nicenumtoull(const char *buf)
503 504 {
504 505 char *end;
505 506 uint64_t val;
506 507
507 508 val = strtoull(buf, &end, 0);
508 509 if (end == buf) {
509 510 (void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf);
510 511 usage(B_FALSE);
511 512 } else if (end[0] == '.') {
512 513 double fval = strtod(buf, &end);
513 514 fval *= pow(2, str2shift(end));
514 515 if (fval > UINT64_MAX) {
515 516 (void) fprintf(stderr, "ztest: value too large: %s\n",
516 517 buf);
517 518 usage(B_FALSE);
518 519 }
519 520 val = (uint64_t)fval;
520 521 } else {
521 522 int shift = str2shift(end);
522 523 if (shift >= 64 || (val << shift) >> shift != val) {
523 524 (void) fprintf(stderr, "ztest: value too large: %s\n",
524 525 buf);
525 526 usage(B_FALSE);
526 527 }
527 528 val <<= shift;
528 529 }
529 530 return (val);
530 531 }
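
/*
 * Examples of the parsing above: nicenumtoull("300") == 300;
 * nicenumtoull("64K") == 64 << 10 via str2shift(); and a fractional value
 * such as "1.5G" takes the strtod() path, yielding 1.5 * 2^30.
 */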
531 532
532 533 static void
533 534 usage(boolean_t requested)
534 535 {
535 536 const ztest_shared_opts_t *zo = &ztest_opts_defaults;
536 537
537 538 char nice_vdev_size[10];
538 539 char nice_gang_bang[10];
539 540 FILE *fp = requested ? stdout : stderr;
540 541
541 542 nicenum(zo->zo_vdev_size, nice_vdev_size);
542 543 nicenum(zo->zo_metaslab_gang_bang, nice_gang_bang);
543 544
544 545 (void) fprintf(fp, "Usage: %s\n"
545 546 "\t[-v vdevs (default: %llu)]\n"
546 547 "\t[-s size_of_each_vdev (default: %s)]\n"
547 548 "\t[-a alignment_shift (default: %d)] use 0 for random\n"
548 549 "\t[-m mirror_copies (default: %d)]\n"
549 550 "\t[-r raidz_disks (default: %d)]\n"
550 551 "\t[-R raidz_parity (default: %d)]\n"
551 552 "\t[-d datasets (default: %d)]\n"
552 553 "\t[-t threads (default: %d)]\n"
553 554 "\t[-g gang_block_threshold (default: %s)]\n"
554 555 "\t[-i init_count (default: %d)] initialize pool i times\n"
555 556 "\t[-k kill_percentage (default: %llu%%)]\n"
556 557 "\t[-p pool_name (default: %s)]\n"
557 558 "\t[-f dir (default: %s)] file directory for vdev files\n"
558 559 "\t[-V] verbose (use multiple times for ever more blather)\n"
559 560 "\t[-E] use existing pool instead of creating new one\n"
560 561 "\t[-T time (default: %llu sec)] total run time\n"
561 562 "\t[-F freezeloops (default: %llu)] max loops in spa_freeze()\n"
562 563 "\t[-P passtime (default: %llu sec)] time per pass\n"
563 564 "\t[-B alt_ztest (default: <none>)] alternate ztest path\n"
564 565 "\t[-h] (print help)\n"
565 566 "",
566 567 zo->zo_pool,
567 568 (u_longlong_t)zo->zo_vdevs, /* -v */
568 569 nice_vdev_size, /* -s */
569 570 zo->zo_ashift, /* -a */
570 571 zo->zo_mirrors, /* -m */
571 572 zo->zo_raidz, /* -r */
572 573 zo->zo_raidz_parity, /* -R */
573 574 zo->zo_datasets, /* -d */
574 575 zo->zo_threads, /* -t */
575 576 nice_gang_bang, /* -g */
576 577 zo->zo_init, /* -i */
577 578 (u_longlong_t)zo->zo_killrate, /* -k */
578 579 zo->zo_pool, /* -p */
579 580 zo->zo_dir, /* -f */
580 581 (u_longlong_t)zo->zo_time, /* -T */
581 582 (u_longlong_t)zo->zo_maxloops, /* -F */
582 583 (u_longlong_t)zo->zo_passtime);
583 584 exit(requested ? 0 : 1);
584 585 }
585 586
586 587 static void
587 588 process_options(int argc, char **argv)
588 589 {
589 590 char *path;
590 591 ztest_shared_opts_t *zo = &ztest_opts;
591 592
592 593 int opt;
593 594 uint64_t value;
594 595 char altdir[MAXNAMELEN] = { 0 };
595 596
596 597 bcopy(&ztest_opts_defaults, zo, sizeof (*zo));
597 598
598 599 while ((opt = getopt(argc, argv,
599 600 "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:hF:B:")) != EOF) {
600 601 value = 0;
601 602 switch (opt) {
602 603 case 'v':
603 604 case 's':
604 605 case 'a':
605 606 case 'm':
606 607 case 'r':
607 608 case 'R':
608 609 case 'd':
609 610 case 't':
610 611 case 'g':
611 612 case 'i':
612 613 case 'k':
613 614 case 'T':
614 615 case 'P':
615 616 case 'F':
616 617 value = nicenumtoull(optarg);
617 618 }
618 619 switch (opt) {
619 620 case 'v':
620 621 zo->zo_vdevs = value;
621 622 break;
622 623 case 's':
623 624 zo->zo_vdev_size = MAX(SPA_MINDEVSIZE, value);
624 625 break;
625 626 case 'a':
626 627 zo->zo_ashift = value;
627 628 break;
628 629 case 'm':
629 630 zo->zo_mirrors = value;
630 631 break;
631 632 case 'r':
632 633 zo->zo_raidz = MAX(1, value);
633 634 break;
634 635 case 'R':
635 636 zo->zo_raidz_parity = MIN(MAX(value, 1), 3);
636 637 break;
637 638 case 'd':
638 639 zo->zo_datasets = MAX(1, value);
639 640 break;
640 641 case 't':
641 642 zo->zo_threads = MAX(1, value);
642 643 break;
643 644 case 'g':
644 645 zo->zo_metaslab_gang_bang = MAX(SPA_MINBLOCKSIZE << 1,
645 646 value);
646 647 break;
647 648 case 'i':
648 649 zo->zo_init = value;
649 650 break;
650 651 case 'k':
651 652 zo->zo_killrate = value;
652 653 break;
653 654 case 'p':
654 655 (void) strlcpy(zo->zo_pool, optarg,
655 656 sizeof (zo->zo_pool));
656 657 break;
657 658 case 'f':
658 659 path = realpath(optarg, NULL);
659 660 if (path == NULL) {
660 661 (void) fprintf(stderr, "error: %s: %s\n",
661 662 optarg, strerror(errno));
662 663 usage(B_FALSE);
663 664 } else {
664 665 (void) strlcpy(zo->zo_dir, path,
665 666 sizeof (zo->zo_dir));
666 667 }
667 668 break;
668 669 case 'V':
669 670 zo->zo_verbose++;
670 671 break;
671 672 case 'E':
672 673 zo->zo_init = 0;
673 674 break;
674 675 case 'T':
675 676 zo->zo_time = value;
676 677 break;
677 678 case 'P':
678 679 zo->zo_passtime = MAX(1, value);
679 680 break;
680 681 case 'F':
681 682 zo->zo_maxloops = MAX(1, value);
682 683 break;
683 684 case 'B':
684 685 (void) strlcpy(altdir, optarg, sizeof (altdir));
685 686 break;
686 687 case 'h':
687 688 usage(B_TRUE);
688 689 break;
689 690 case '?':
690 691 default:
691 692 usage(B_FALSE);
692 693 break;
693 694 }
694 695 }
695 696
696 697 zo->zo_raidz_parity = MIN(zo->zo_raidz_parity, zo->zo_raidz - 1);
697 698
698 699 zo->zo_vdevtime =
699 700 (zo->zo_vdevs > 0 ? zo->zo_time * NANOSEC / zo->zo_vdevs :
700 701 UINT64_MAX >> 2);
701 702
702 703 if (strlen(altdir) > 0) {
703 704 char cmd[MAXNAMELEN];
704 705 char realaltdir[MAXNAMELEN];
705 706 char *bin;
706 707 char *ztest;
707 708 char *isa;
708 709 int isalen;
709 710
710 711 (void) realpath(getexecname(), cmd);
711 712 if (0 != access(altdir, F_OK)) {
712 713 ztest_dump_core = B_FALSE;
713 714 fatal(B_TRUE, "invalid alternate ztest path: %s",
714 715 altdir);
715 716 }
716 717 VERIFY(NULL != realpath(altdir, realaltdir));
717 718
718 719 /*
719 720 * 'cmd' should be of the form "<anything>/usr/bin/<isa>/ztest".
720 721 * We want to extract <isa> to determine if we should use
721 722 * 32 or 64 bit binaries.
722 723 */
723 724 bin = strstr(cmd, "/usr/bin/");
724 725 ztest = strstr(bin, "/ztest");
725 726 isa = bin + 9;
726 727 isalen = ztest - isa;
727 728 (void) snprintf(zo->zo_alt_ztest, sizeof (zo->zo_alt_ztest),
728 729 "%s/usr/bin/%.*s/ztest", realaltdir, isalen, isa);
729 730 (void) snprintf(zo->zo_alt_libpath, sizeof (zo->zo_alt_libpath),
730 731 "%s/usr/lib/%.*s", realaltdir, isalen, isa);
731 732
732 733 if (0 != access(zo->zo_alt_ztest, X_OK)) {
733 734 ztest_dump_core = B_FALSE;
734 735 fatal(B_TRUE, "invalid alternate ztest: %s",
735 736 zo->zo_alt_ztest);
736 737 } else if (0 != access(zo->zo_alt_libpath, X_OK)) {
737 738 ztest_dump_core = B_FALSE;
738 739 fatal(B_TRUE, "invalid alternate lib directory %s",
739 740 zo->zo_alt_libpath);
740 741 }
741 742 }
742 743 }
743 744
744 745 static void
745 746 ztest_kill(ztest_shared_t *zs)
746 747 {
747 748 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(ztest_spa));
748 749 zs->zs_space = metaslab_class_get_space(spa_normal_class(ztest_spa));
749 750 (void) kill(getpid(), SIGKILL);
750 751 }
751 752
752 753 static uint64_t
753 754 ztest_random(uint64_t range)
754 755 {
755 756 uint64_t r;
756 757
757 758 if (range == 0)
758 759 return (0);
759 760
760 761 if (read(ZTEST_FD_RAND, &r, sizeof (r)) != sizeof (r))
761 762 fatal(1, "short read from /dev/urandom");
762 763
763 764 return (r % range);
764 765 }
765 766
766 767 /* ARGSUSED */
767 768 static void
768 769 ztest_record_enospc(const char *s)
769 770 {
770 771 ztest_shared->zs_enospc_count++;
771 772 }
772 773
773 774 static uint64_t
774 775 ztest_get_ashift(void)
775 776 {
776 777 if (ztest_opts.zo_ashift == 0)
777 778 return (SPA_MINBLOCKSHIFT + ztest_random(3));
778 779 return (ztest_opts.zo_ashift);
779 780 }
780 781
781 782 static nvlist_t *
782 783 make_vdev_file(char *path, char *aux, size_t size, uint64_t ashift)
783 784 {
784 785 char pathbuf[MAXPATHLEN];
785 786 uint64_t vdev;
786 787 nvlist_t *file;
787 788
788 789 if (ashift == 0)
789 790 ashift = ztest_get_ashift();
790 791
791 792 if (path == NULL) {
792 793 path = pathbuf;
793 794
794 795 if (aux != NULL) {
795 796 vdev = ztest_shared->zs_vdev_aux;
796 797 (void) snprintf(path, sizeof (pathbuf),
797 798 ztest_aux_template, ztest_opts.zo_dir,
798 799 ztest_opts.zo_pool, aux, vdev);
799 800 } else {
800 801 vdev = ztest_shared->zs_vdev_next_leaf++;
801 802 (void) snprintf(path, sizeof (pathbuf),
802 803 ztest_dev_template, ztest_opts.zo_dir,
803 804 ztest_opts.zo_pool, vdev);
804 805 }
805 806 }
806 807
807 808 if (size != 0) {
808 809 int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666);
809 810 if (fd == -1)
810 811 fatal(1, "can't open %s", path);
811 812 if (ftruncate(fd, size) != 0)
812 813 fatal(1, "can't ftruncate %s", path);
813 814 (void) close(fd);
814 815 }
815 816
816 817 VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0);
817 818 VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0);
818 819 VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path) == 0);
819 820 VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0);
820 821
821 822 return (file);
822 823 }
823 824
824 825 static nvlist_t *
825 826 make_vdev_raidz(char *path, char *aux, size_t size, uint64_t ashift, int r)
826 827 {
827 828 nvlist_t *raidz, **child;
828 829 int c;
829 830
830 831 if (r < 2)
831 832 return (make_vdev_file(path, aux, size, ashift));
832 833 child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL);
833 834
834 835 for (c = 0; c < r; c++)
835 836 child[c] = make_vdev_file(path, aux, size, ashift);
836 837
837 838 VERIFY(nvlist_alloc(&raidz, NV_UNIQUE_NAME, 0) == 0);
838 839 VERIFY(nvlist_add_string(raidz, ZPOOL_CONFIG_TYPE,
839 840 VDEV_TYPE_RAIDZ) == 0);
840 841 VERIFY(nvlist_add_uint64(raidz, ZPOOL_CONFIG_NPARITY,
841 842 ztest_opts.zo_raidz_parity) == 0);
842 843 VERIFY(nvlist_add_nvlist_array(raidz, ZPOOL_CONFIG_CHILDREN,
843 844 child, r) == 0);
844 845
845 846 for (c = 0; c < r; c++)
846 847 nvlist_free(child[c]);
847 848
848 849 umem_free(child, r * sizeof (nvlist_t *));
849 850
850 851 return (raidz);
851 852 }
852 853
853 854 static nvlist_t *
854 855 make_vdev_mirror(char *path, char *aux, size_t size, uint64_t ashift,
855 856 int r, int m)
856 857 {
857 858 nvlist_t *mirror, **child;
858 859 int c;
859 860
860 861 if (m < 1)
861 862 return (make_vdev_raidz(path, aux, size, ashift, r));
862 863
863 864 child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL);
864 865
865 866 for (c = 0; c < m; c++)
866 867 child[c] = make_vdev_raidz(path, aux, size, ashift, r);
867 868
868 869 VERIFY(nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0) == 0);
869 870 VERIFY(nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE,
870 871 VDEV_TYPE_MIRROR) == 0);
871 872 VERIFY(nvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN,
872 873 child, m) == 0);
873 874
874 875 for (c = 0; c < m; c++)
875 876 nvlist_free(child[c]);
876 877
877 878 umem_free(child, m * sizeof (nvlist_t *));
878 879
879 880 return (mirror);
880 881 }
881 882
882 883 static nvlist_t *
883 884 make_vdev_root(char *path, char *aux, size_t size, uint64_t ashift,
884 885 int log, int r, int m, int t)
885 886 {
886 887 nvlist_t *root, **child;
887 888 int c;
888 889
889 890 ASSERT(t > 0);
890 891
891 892 child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL);
892 893
893 894 for (c = 0; c < t; c++) {
894 895 child[c] = make_vdev_mirror(path, aux, size, ashift, r, m);
895 896 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
896 897 log) == 0);
897 898 }
898 899
899 900 VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
900 901 VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0);
901 902 VERIFY(nvlist_add_nvlist_array(root, aux ? aux : ZPOOL_CONFIG_CHILDREN,
902 903 child, t) == 0);
903 904
904 905 for (c = 0; c < t; c++)
905 906 nvlist_free(child[c]);
906 907
907 908 umem_free(child, t * sizeof (nvlist_t *));
908 909
909 910 return (root);
910 911 }
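
/*
 * Summary of the vdev constructors above: make_vdev_root(..., r, m, t)
 * builds t top-level vdevs, each an m-way mirror (m < 1 drops the mirror
 * layer) of r-wide raidz (r < 2 drops the raidz layer), optionally flagged
 * as log devices; this is how the -m and -r options translate into a
 * pool layout.
 */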
911 912
912 913 static int
913 914 ztest_random_blocksize(void)
914 915 {
915 916 return (1 << (SPA_MINBLOCKSHIFT +
916 917 ztest_random(SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1)));
917 918 }
918 919
919 920 static int
920 921 ztest_random_ibshift(void)
921 922 {
922 923 return (DN_MIN_INDBLKSHIFT +
923 924 ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1));
924 925 }
925 926
926 927 static uint64_t
927 928 ztest_random_vdev_top(spa_t *spa, boolean_t log_ok)
928 929 {
929 930 uint64_t top;
930 931 vdev_t *rvd = spa->spa_root_vdev;
931 932 vdev_t *tvd;
932 933
933 934 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
934 935
935 936 do {
936 937 top = ztest_random(rvd->vdev_children);
937 938 tvd = rvd->vdev_child[top];
938 939 } while (tvd->vdev_ishole || (tvd->vdev_islog && !log_ok) ||
939 940 tvd->vdev_mg == NULL || tvd->vdev_mg->mg_class == NULL);
940 941
941 942 return (top);
942 943 }
943 944
944 945 static uint64_t
945 946 ztest_random_dsl_prop(zfs_prop_t prop)
946 947 {
947 948 uint64_t value;
948 949
949 950 do {
950 951 value = zfs_prop_random_value(prop, ztest_random(-1ULL));
951 952 } while (prop == ZFS_PROP_CHECKSUM && value == ZIO_CHECKSUM_OFF);
952 953
953 954 return (value);
954 955 }
955 956
956 957 static int
957 958 ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value,
958 959 boolean_t inherit)
959 960 {
960 961 const char *propname = zfs_prop_to_name(prop);
961 962 const char *valname;
962 963 char setpoint[MAXPATHLEN];
963 964 uint64_t curval;
964 965 int error;
965 966
966 967 error = dsl_prop_set(osname, propname,
967 968 (inherit ? ZPROP_SRC_NONE : ZPROP_SRC_LOCAL),
968 969 sizeof (value), 1, &value);
969 970
970 971 if (error == ENOSPC) {
971 972 ztest_record_enospc(FTAG);
972 973 return (error);
973 974 }
974 975 ASSERT3U(error, ==, 0);
975 976
976 977 VERIFY3U(dsl_prop_get(osname, propname, sizeof (curval),
977 978 1, &curval, setpoint), ==, 0);
978 979
979 980 if (ztest_opts.zo_verbose >= 6) {
980 981 VERIFY(zfs_prop_index_to_string(prop, curval, &valname) == 0);
981 982 (void) printf("%s %s = %s at '%s'\n",
982 983 osname, propname, valname, setpoint);
983 984 }
984 985
985 986 return (error);
986 987 }
987 988
988 989 static int
989 990 ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value)
990 991 {
991 992 spa_t *spa = ztest_spa;
992 993 nvlist_t *props = NULL;
993 994 int error;
994 995
995 996 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
996 997 VERIFY(nvlist_add_uint64(props, zpool_prop_to_name(prop), value) == 0);
997 998
998 999 error = spa_prop_set(spa, props);
999 1000
1000 1001 nvlist_free(props);
1001 1002
1002 1003 if (error == ENOSPC) {
1003 1004 ztest_record_enospc(FTAG);
1004 1005 return (error);
1005 1006 }
1006 1007 ASSERT3U(error, ==, 0);
1007 1008
1008 1009 return (error);
1009 1010 }
1010 1011
1011 1012 static void
1012 1013 ztest_rll_init(rll_t *rll)
1013 1014 {
1014 1015 rll->rll_writer = NULL;
1015 1016 rll->rll_readers = 0;
1016 1017 VERIFY(_mutex_init(&rll->rll_lock, USYNC_THREAD, NULL) == 0);
1017 1018 VERIFY(cond_init(&rll->rll_cv, USYNC_THREAD, NULL) == 0);
1018 1019 }
1019 1020
1020 1021 static void
1021 1022 ztest_rll_destroy(rll_t *rll)
1022 1023 {
1023 1024 ASSERT(rll->rll_writer == NULL);
1024 1025 ASSERT(rll->rll_readers == 0);
1025 1026 VERIFY(_mutex_destroy(&rll->rll_lock) == 0);
1026 1027 VERIFY(cond_destroy(&rll->rll_cv) == 0);
1027 1028 }
1028 1029
1029 1030 static void
1030 1031 ztest_rll_lock(rll_t *rll, rl_type_t type)
1031 1032 {
1032 1033 VERIFY(mutex_lock(&rll->rll_lock) == 0);
1033 1034
1034 1035 if (type == RL_READER) {
1035 1036 while (rll->rll_writer != NULL)
1036 1037 (void) cond_wait(&rll->rll_cv, &rll->rll_lock);
1037 1038 rll->rll_readers++;
1038 1039 } else {
1039 1040 while (rll->rll_writer != NULL || rll->rll_readers)
1040 1041 (void) cond_wait(&rll->rll_cv, &rll->rll_lock);
1041 1042 rll->rll_writer = curthread;
1042 1043 }
1043 1044
1044 1045 VERIFY(mutex_unlock(&rll->rll_lock) == 0);
1045 1046 }
1046 1047
1047 1048 static void
1048 1049 ztest_rll_unlock(rll_t *rll)
1049 1050 {
1050 1051 VERIFY(mutex_lock(&rll->rll_lock) == 0);
1051 1052
1052 1053 if (rll->rll_writer) {
1053 1054 ASSERT(rll->rll_readers == 0);
1054 1055 rll->rll_writer = NULL;
1055 1056 } else {
1056 1057 ASSERT(rll->rll_readers != 0);
1057 1058 ASSERT(rll->rll_writer == NULL);
1058 1059 rll->rll_readers--;
1059 1060 }
1060 1061
1061 1062 if (rll->rll_writer == NULL && rll->rll_readers == 0)
1062 1063 VERIFY(cond_broadcast(&rll->rll_cv) == 0);
1063 1064
1064 1065 VERIFY(mutex_unlock(&rll->rll_lock) == 0);
1065 1066 }
1066 1067
1067 1068 static void
1068 1069 ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type)
1069 1070 {
1070 1071 rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
1071 1072
1072 1073 ztest_rll_lock(rll, type);
1073 1074 }
1074 1075
1075 1076 static void
1076 1077 ztest_object_unlock(ztest_ds_t *zd, uint64_t object)
1077 1078 {
1078 1079 rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
1079 1080
1080 1081 ztest_rll_unlock(rll);
1081 1082 }
1082 1083
1083 1084 static rl_t *
1084 1085 ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset,
1085 1086 uint64_t size, rl_type_t type)
1086 1087 {
1087 1088 uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1));
1088 1089 rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)];
1089 1090 rl_t *rl;
1090 1091
1091 1092 rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL);
1092 1093 rl->rl_object = object;
1093 1094 rl->rl_offset = offset;
1094 1095 rl->rl_size = size;
1095 1096 rl->rl_lock = rll;
1096 1097
1097 1098 ztest_rll_lock(rll, type);
1098 1099
1099 1100 return (rl);
1100 1101 }
1101 1102
1102 1103 static void
1103 1104 ztest_range_unlock(rl_t *rl)
1104 1105 {
1105 1106 rll_t *rll = rl->rl_lock;
1106 1107
1107 1108 ztest_rll_unlock(rll);
1108 1109
1109 1110 umem_free(rl, sizeof (*rl));
1110 1111 }
1111 1112
1112 1113 static void
1113 1114 ztest_zd_init(ztest_ds_t *zd, ztest_shared_ds_t *szd, objset_t *os)
1114 1115 {
1115 1116 zd->zd_os = os;
1116 1117 zd->zd_zilog = dmu_objset_zil(os);
1117 1118 zd->zd_shared = szd;
1118 1119 dmu_objset_name(os, zd->zd_name);
1119 1120
1120 1121 if (zd->zd_shared != NULL)
1121 1122 zd->zd_shared->zd_seq = 0;
1122 1123
1123 1124 VERIFY(rwlock_init(&zd->zd_zilog_lock, USYNC_THREAD, NULL) == 0);
1124 1125 VERIFY(_mutex_init(&zd->zd_dirobj_lock, USYNC_THREAD, NULL) == 0);
1125 1126
1126 1127 for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
1127 1128 ztest_rll_init(&zd->zd_object_lock[l]);
1128 1129
1129 1130 for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
1130 1131 ztest_rll_init(&zd->zd_range_lock[l]);
1131 1132 }
1132 1133
1133 1134 static void
1134 1135 ztest_zd_fini(ztest_ds_t *zd)
1135 1136 {
1136 1137 VERIFY(_mutex_destroy(&zd->zd_dirobj_lock) == 0);
1137 1138
1138 1139 for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
1139 1140 ztest_rll_destroy(&zd->zd_object_lock[l]);
1140 1141
1141 1142 for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
1142 1143 ztest_rll_destroy(&zd->zd_range_lock[l]);
1143 1144 }
1144 1145
1145 1146 #define TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)
1146 1147
1147 1148 static uint64_t
1148 1149 ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
1149 1150 {
1150 1151 uint64_t txg;
1151 1152 int error;
1152 1153
1153 1154 /*
1154 1155 * Attempt to assign tx to some transaction group.
1155 1156 */
1156 1157 error = dmu_tx_assign(tx, txg_how);
1157 1158 if (error) {
1158 1159 if (error == ERESTART) {
1159 1160 ASSERT(txg_how == TXG_NOWAIT);
1160 1161 dmu_tx_wait(tx);
1161 1162 } else {
1162 1163 ASSERT3U(error, ==, ENOSPC);
1163 1164 ztest_record_enospc(tag);
1164 1165 }
1165 1166 dmu_tx_abort(tx);
1166 1167 return (0);
1167 1168 }
1168 1169 txg = dmu_tx_get_txg(tx);
1169 1170 ASSERT(txg != 0);
1170 1171 return (txg);
1171 1172 }
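
/*
 * Typical caller pattern for ztest_tx_assign(), as used by the replay
 * functions below: a zero return means the tx was aborted (ERESTART after
 * dmu_tx_wait(), or ENOSPC) and the caller abandons the attempt.
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, length);
 *	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
 *	if (txg == 0)
 *		return (ENOSPC);
 *	...
 *	dmu_tx_commit(tx);
 */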
1172 1173
1173 1174 static void
1174 1175 ztest_pattern_set(void *buf, uint64_t size, uint64_t value)
1175 1176 {
1176 1177 uint64_t *ip = buf;
1177 1178 uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);
1178 1179
1179 1180 while (ip < ip_end)
1180 1181 *ip++ = value;
1181 1182 }
1182 1183
1183 1184 static boolean_t
1184 1185 ztest_pattern_match(void *buf, uint64_t size, uint64_t value)
1185 1186 {
1186 1187 uint64_t *ip = buf;
1187 1188 uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);
1188 1189 uint64_t diff = 0;
1189 1190
1190 1191 while (ip < ip_end)
1191 1192 diff |= (value - *ip++);
1192 1193
1193 1194 return (diff == 0);
1194 1195 }
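
/*
 * Note on ztest_pattern_match() above: OR-accumulating (value - *ip) is a
 * branch-free way to test that every 64-bit word equals value; diff remains
 * zero only if each subtraction is zero.
 */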
1195 1196
1196 1197 static void
1197 1198 ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
1198 1199 uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
1199 1200 {
1200 1201 bt->bt_magic = BT_MAGIC;
1201 1202 bt->bt_objset = dmu_objset_id(os);
1202 1203 bt->bt_object = object;
1203 1204 bt->bt_offset = offset;
1204 1205 bt->bt_gen = gen;
1205 1206 bt->bt_txg = txg;
1206 1207 bt->bt_crtxg = crtxg;
1207 1208 }
1208 1209
1209 1210 static void
1210 1211 ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
1211 1212 uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
1212 1213 {
1213 1214 ASSERT(bt->bt_magic == BT_MAGIC);
1214 1215 ASSERT(bt->bt_objset == dmu_objset_id(os));
1215 1216 ASSERT(bt->bt_object == object);
1216 1217 ASSERT(bt->bt_offset == offset);
1217 1218 ASSERT(bt->bt_gen <= gen);
1218 1219 ASSERT(bt->bt_txg <= txg);
1219 1220 ASSERT(bt->bt_crtxg == crtxg);
1220 1221 }
1221 1222
1222 1223 static ztest_block_tag_t *
1223 1224 ztest_bt_bonus(dmu_buf_t *db)
1224 1225 {
1225 1226 dmu_object_info_t doi;
1226 1227 ztest_block_tag_t *bt;
1227 1228
1228 1229 dmu_object_info_from_db(db, &doi);
1229 1230 ASSERT3U(doi.doi_bonus_size, <=, db->db_size);
1230 1231 ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt));
1231 1232 bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt));
1232 1233
1233 1234 return (bt);
1234 1235 }
1235 1236
1236 1237 /*
1237 1238 * ZIL logging ops
1238 1239 */
1239 1240
1240 1241 #define lrz_type lr_mode
1241 1242 #define lrz_blocksize lr_uid
1242 1243 #define lrz_ibshift lr_gid
1243 1244 #define lrz_bonustype lr_rdev
1244 1245 #define lrz_bonuslen lr_crtime[1]
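
/*
 * The aliases above overload lr_create_t fields that ztest does not
 * otherwise use (mode, uid, gid, rdev, crtime[1]) to carry the DMU object
 * parameters (type, blocksize, indirect block shift, bonus type and length)
 * through the ZIL record.
 */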
1245 1246
1246 1247 static void
1247 1248 ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr)
1248 1249 {
1249 1250 char *name = (void *)(lr + 1); /* name follows lr */
1250 1251 size_t namesize = strlen(name) + 1;
1251 1252 itx_t *itx;
1252 1253
1253 1254 if (zil_replaying(zd->zd_zilog, tx))
1254 1255 return;
1255 1256
1256 1257 itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize);
1257 1258 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1258 1259 sizeof (*lr) + namesize - sizeof (lr_t));
1259 1260
1260 1261 zil_itx_assign(zd->zd_zilog, itx, tx);
1261 1262 }
1262 1263
1263 1264 static void
1264 1265 ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object)
1265 1266 {
1266 1267 char *name = (void *)(lr + 1); /* name follows lr */
1267 1268 size_t namesize = strlen(name) + 1;
1268 1269 itx_t *itx;
1269 1270
1270 1271 if (zil_replaying(zd->zd_zilog, tx))
1271 1272 return;
1272 1273
1273 1274 itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize);
1274 1275 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1275 1276 sizeof (*lr) + namesize - sizeof (lr_t));
1276 1277
1277 1278 itx->itx_oid = object;
1278 1279 zil_itx_assign(zd->zd_zilog, itx, tx);
1279 1280 }
1280 1281
1281 1282 static void
1282 1283 ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr)
1283 1284 {
1284 1285 itx_t *itx;
1285 1286 itx_wr_state_t write_state = ztest_random(WR_NUM_STATES);
1286 1287
1287 1288 if (zil_replaying(zd->zd_zilog, tx))
1288 1289 return;
1289 1290
1290 1291 if (lr->lr_length > ZIL_MAX_LOG_DATA)
1291 1292 write_state = WR_INDIRECT;
1292 1293
1293 1294 itx = zil_itx_create(TX_WRITE,
1294 1295 sizeof (*lr) + (write_state == WR_COPIED ? lr->lr_length : 0));
1295 1296
1296 1297 if (write_state == WR_COPIED &&
1297 1298 dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length,
1298 1299 ((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) {
1299 1300 zil_itx_destroy(itx);
1300 1301 itx = zil_itx_create(TX_WRITE, sizeof (*lr));
1301 1302 write_state = WR_NEED_COPY;
1302 1303 }
1303 1304 itx->itx_private = zd;
1304 1305 itx->itx_wr_state = write_state;
1305 1306 itx->itx_sync = (ztest_random(8) == 0);
1306 1307 itx->itx_sod += (write_state == WR_NEED_COPY ? lr->lr_length : 0);
1307 1308
1308 1309 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1309 1310 sizeof (*lr) - sizeof (lr_t));
1310 1311
1311 1312 zil_itx_assign(zd->zd_zilog, itx, tx);
1312 1313 }
1313 1314
1314 1315 static void
1315 1316 ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr)
1316 1317 {
1317 1318 itx_t *itx;
1318 1319
1319 1320 if (zil_replaying(zd->zd_zilog, tx))
1320 1321 return;
1321 1322
1322 1323 itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
1323 1324 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1324 1325 sizeof (*lr) - sizeof (lr_t));
1325 1326
1326 1327 itx->itx_sync = B_FALSE;
1327 1328 zil_itx_assign(zd->zd_zilog, itx, tx);
1328 1329 }
1329 1330
1330 1331 static void
1331 1332 ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr)
1332 1333 {
1333 1334 itx_t *itx;
1334 1335
1335 1336 if (zil_replaying(zd->zd_zilog, tx))
1336 1337 return;
1337 1338
1338 1339 itx = zil_itx_create(TX_SETATTR, sizeof (*lr));
1339 1340 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1340 1341 sizeof (*lr) - sizeof (lr_t));
1341 1342
1342 1343 itx->itx_sync = B_FALSE;
1343 1344 zil_itx_assign(zd->zd_zilog, itx, tx);
1344 1345 }
1345 1346
1346 1347 /*
1347 1348 * ZIL replay ops
1348 1349 */
1349 1350 static int
1350 1351 ztest_replay_create(ztest_ds_t *zd, lr_create_t *lr, boolean_t byteswap)
1351 1352 {
1352 1353 char *name = (void *)(lr + 1); /* name follows lr */
1353 1354 objset_t *os = zd->zd_os;
1354 1355 ztest_block_tag_t *bbt;
1355 1356 dmu_buf_t *db;
1356 1357 dmu_tx_t *tx;
1357 1358 uint64_t txg;
1358 1359 int error = 0;
1359 1360
1360 1361 if (byteswap)
1361 1362 byteswap_uint64_array(lr, sizeof (*lr));
1362 1363
1363 1364 ASSERT(lr->lr_doid == ZTEST_DIROBJ);
1364 1365 ASSERT(name[0] != '\0');
1365 1366
1366 1367 tx = dmu_tx_create(os);
1367 1368
1368 1369 dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name);
1369 1370
1370 1371 if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
1371 1372 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1372 1373 } else {
1373 1374 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1374 1375 }
1375 1376
1376 1377 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1377 1378 if (txg == 0)
1378 1379 return (ENOSPC);
1379 1380
1380 1381 ASSERT(dmu_objset_zil(os)->zl_replay == !!lr->lr_foid);
1381 1382
1382 1383 if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
1383 1384 if (lr->lr_foid == 0) {
1384 1385 lr->lr_foid = zap_create(os,
1385 1386 lr->lrz_type, lr->lrz_bonustype,
1386 1387 lr->lrz_bonuslen, tx);
1387 1388 } else {
1388 1389 error = zap_create_claim(os, lr->lr_foid,
1389 1390 lr->lrz_type, lr->lrz_bonustype,
1390 1391 lr->lrz_bonuslen, tx);
1391 1392 }
1392 1393 } else {
1393 1394 if (lr->lr_foid == 0) {
1394 1395 lr->lr_foid = dmu_object_alloc(os,
1395 1396 lr->lrz_type, 0, lr->lrz_bonustype,
1396 1397 lr->lrz_bonuslen, tx);
1397 1398 } else {
1398 1399 error = dmu_object_claim(os, lr->lr_foid,
1399 1400 lr->lrz_type, 0, lr->lrz_bonustype,
1400 1401 lr->lrz_bonuslen, tx);
1401 1402 }
1402 1403 }
1403 1404
1404 1405 if (error) {
1405 1406 ASSERT3U(error, ==, EEXIST);
1406 1407 ASSERT(zd->zd_zilog->zl_replay);
1407 1408 dmu_tx_commit(tx);
1408 1409 return (error);
1409 1410 }
1410 1411
1411 1412 ASSERT(lr->lr_foid != 0);
1412 1413
1413 1414 if (lr->lrz_type != DMU_OT_ZAP_OTHER)
1414 1415 VERIFY3U(0, ==, dmu_object_set_blocksize(os, lr->lr_foid,
1415 1416 lr->lrz_blocksize, lr->lrz_ibshift, tx));
1416 1417
1417 1418 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1418 1419 bbt = ztest_bt_bonus(db);
1419 1420 dmu_buf_will_dirty(db, tx);
1420 1421 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_gen, txg, txg);
1421 1422 dmu_buf_rele(db, FTAG);
1422 1423
1423 1424 VERIFY3U(0, ==, zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1,
1424 1425 &lr->lr_foid, tx));
1425 1426
1426 1427 (void) ztest_log_create(zd, tx, lr);
1427 1428
1428 1429 dmu_tx_commit(tx);
1429 1430
1430 1431 return (0);
1431 1432 }
1432 1433
1433 1434 static int
1434 1435 ztest_replay_remove(ztest_ds_t *zd, lr_remove_t *lr, boolean_t byteswap)
1435 1436 {
1436 1437 char *name = (void *)(lr + 1); /* name follows lr */
1437 1438 objset_t *os = zd->zd_os;
1438 1439 dmu_object_info_t doi;
1439 1440 dmu_tx_t *tx;
1440 1441 uint64_t object, txg;
1441 1442
1442 1443 if (byteswap)
1443 1444 byteswap_uint64_array(lr, sizeof (*lr));
1444 1445
1445 1446 ASSERT(lr->lr_doid == ZTEST_DIROBJ);
1446 1447 ASSERT(name[0] != '\0');
1447 1448
1448 1449 VERIFY3U(0, ==,
1449 1450 zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object));
1450 1451 ASSERT(object != 0);
1451 1452
1452 1453 ztest_object_lock(zd, object, RL_WRITER);
1453 1454
1454 1455 VERIFY3U(0, ==, dmu_object_info(os, object, &doi));
1455 1456
1456 1457 tx = dmu_tx_create(os);
1457 1458
1458 1459 dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
1459 1460 dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
1460 1461
1461 1462 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1462 1463 if (txg == 0) {
1463 1464 ztest_object_unlock(zd, object);
1464 1465 return (ENOSPC);
1465 1466 }
1466 1467
1467 1468 if (doi.doi_type == DMU_OT_ZAP_OTHER) {
1468 1469 VERIFY3U(0, ==, zap_destroy(os, object, tx));
1469 1470 } else {
1470 1471 VERIFY3U(0, ==, dmu_object_free(os, object, tx));
1471 1472 }
1472 1473
1473 1474 VERIFY3U(0, ==, zap_remove(os, lr->lr_doid, name, tx));
1474 1475
1475 1476 (void) ztest_log_remove(zd, tx, lr, object);
1476 1477
1477 1478 dmu_tx_commit(tx);
1478 1479
1479 1480 ztest_object_unlock(zd, object);
1480 1481
1481 1482 return (0);
1482 1483 }
1483 1484
1484 1485 static int
1485 1486 ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
1486 1487 {
1487 1488 objset_t *os = zd->zd_os;
1488 1489 void *data = lr + 1; /* data follows lr */
1489 1490 uint64_t offset, length;
1490 1491 ztest_block_tag_t *bt = data;
1491 1492 ztest_block_tag_t *bbt;
1492 1493 uint64_t gen, txg, lrtxg, crtxg;
1493 1494 dmu_object_info_t doi;
1494 1495 dmu_tx_t *tx;
1495 1496 dmu_buf_t *db;
1496 1497 arc_buf_t *abuf = NULL;
1497 1498 rl_t *rl;
1498 1499
1499 1500 if (byteswap)
1500 1501 byteswap_uint64_array(lr, sizeof (*lr));
1501 1502
1502 1503 offset = lr->lr_offset;
1503 1504 length = lr->lr_length;
1504 1505
1505 1506 /* If it's a dmu_sync() block, write the whole block */
1506 1507 if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
1507 1508 uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
1508 1509 if (length < blocksize) {
1509 1510 offset -= offset % blocksize;
1510 1511 length = blocksize;
1511 1512 }
1512 1513 }
1513 1514
1514 1515 if (bt->bt_magic == BSWAP_64(BT_MAGIC))
1515 1516 byteswap_uint64_array(bt, sizeof (*bt));
1516 1517
1517 1518 if (bt->bt_magic != BT_MAGIC)
1518 1519 bt = NULL;
1519 1520
1520 1521 ztest_object_lock(zd, lr->lr_foid, RL_READER);
1521 1522 rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER);
1522 1523
1523 1524 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1524 1525
1525 1526 dmu_object_info_from_db(db, &doi);
1526 1527
1527 1528 bbt = ztest_bt_bonus(db);
1528 1529 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1529 1530 gen = bbt->bt_gen;
1530 1531 crtxg = bbt->bt_crtxg;
1531 1532 lrtxg = lr->lr_common.lrc_txg;
1532 1533
1533 1534 tx = dmu_tx_create(os);
1534 1535
1535 1536 dmu_tx_hold_write(tx, lr->lr_foid, offset, length);
1536 1537
1537 1538 if (ztest_random(8) == 0 && length == doi.doi_data_block_size &&
1538 1539 P2PHASE(offset, length) == 0)
1539 1540 abuf = dmu_request_arcbuf(db, length);
1540 1541
1541 1542 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1542 1543 if (txg == 0) {
1543 1544 if (abuf != NULL)
1544 1545 dmu_return_arcbuf(abuf);
1545 1546 dmu_buf_rele(db, FTAG);
1546 1547 ztest_range_unlock(rl);
1547 1548 ztest_object_unlock(zd, lr->lr_foid);
1548 1549 return (ENOSPC);
1549 1550 }
1550 1551
1551 1552 if (bt != NULL) {
1552 1553 /*
1553 1554 * Usually, verify the old data before writing new data --
1554 1555 * but not always, because we also want to verify correct
1555 1556 * behavior when the data was not recently read into cache.
1556 1557 */
1557 1558 ASSERT(offset % doi.doi_data_block_size == 0);
1558 1559 if (ztest_random(4) != 0) {
1559 1560 int prefetch = ztest_random(2) ?
1560 1561 DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH;
1561 1562 ztest_block_tag_t rbt;
1562 1563
1563 1564 VERIFY(dmu_read(os, lr->lr_foid, offset,
1564 1565 sizeof (rbt), &rbt, prefetch) == 0);
1565 1566 if (rbt.bt_magic == BT_MAGIC) {
1566 1567 ztest_bt_verify(&rbt, os, lr->lr_foid,
1567 1568 offset, gen, txg, crtxg);
1568 1569 }
1569 1570 }
1570 1571
1571 1572 /*
1572 1573 * Writes can appear to be newer than the bonus buffer because
1573 1574 * the ztest_get_data() callback does a dmu_read() of the
1574 1575 * open-context data, which may be different than the data
1575 1576 * as it was when the write was generated.
1576 1577 */
1577 1578 if (zd->zd_zilog->zl_replay) {
1578 1579 ztest_bt_verify(bt, os, lr->lr_foid, offset,
1579 1580 MAX(gen, bt->bt_gen), MAX(txg, lrtxg),
1580 1581 bt->bt_crtxg);
1581 1582 }
1582 1583
1583 1584 /*
1584 1585 * Set the bt's gen/txg to the bonus buffer's gen/txg
1585 1586 * so that all of the usual ASSERTs will work.
1586 1587 */
1587 1588 ztest_bt_generate(bt, os, lr->lr_foid, offset, gen, txg, crtxg);
1588 1589 }
1589 1590
1590 1591 if (abuf == NULL) {
1591 1592 dmu_write(os, lr->lr_foid, offset, length, data, tx);
1592 1593 } else {
1593 1594 bcopy(data, abuf->b_data, length);
1594 1595 dmu_assign_arcbuf(db, offset, abuf, tx);
1595 1596 }
1596 1597
1597 1598 (void) ztest_log_write(zd, tx, lr);
1598 1599
1599 1600 dmu_buf_rele(db, FTAG);
1600 1601
1601 1602 dmu_tx_commit(tx);
1602 1603
1603 1604 ztest_range_unlock(rl);
1604 1605 ztest_object_unlock(zd, lr->lr_foid);
1605 1606
1606 1607 return (0);
1607 1608 }
1608 1609
1609 1610 static int
1610 1611 ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap)
1611 1612 {
1612 1613 objset_t *os = zd->zd_os;
1613 1614 dmu_tx_t *tx;
1614 1615 uint64_t txg;
1615 1616 rl_t *rl;
1616 1617
1617 1618 if (byteswap)
1618 1619 byteswap_uint64_array(lr, sizeof (*lr));
1619 1620
1620 1621 ztest_object_lock(zd, lr->lr_foid, RL_READER);
1621 1622 rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length,
1622 1623 RL_WRITER);
1623 1624
1624 1625 tx = dmu_tx_create(os);
1625 1626
1626 1627 dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length);
1627 1628
1628 1629 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1629 1630 if (txg == 0) {
1630 1631 ztest_range_unlock(rl);
1631 1632 ztest_object_unlock(zd, lr->lr_foid);
1632 1633 return (ENOSPC);
1633 1634 }
1634 1635
1635 1636 VERIFY(dmu_free_range(os, lr->lr_foid, lr->lr_offset,
1636 1637 lr->lr_length, tx) == 0);
1637 1638
1638 1639 (void) ztest_log_truncate(zd, tx, lr);
1639 1640
1640 1641 dmu_tx_commit(tx);
1641 1642
1642 1643 ztest_range_unlock(rl);
1643 1644 ztest_object_unlock(zd, lr->lr_foid);
1644 1645
1645 1646 return (0);
1646 1647 }
1647 1648
1648 1649 static int
1649 1650 ztest_replay_setattr(ztest_ds_t *zd, lr_setattr_t *lr, boolean_t byteswap)
1650 1651 {
1651 1652 objset_t *os = zd->zd_os;
1652 1653 dmu_tx_t *tx;
1653 1654 dmu_buf_t *db;
1654 1655 ztest_block_tag_t *bbt;
1655 1656 uint64_t txg, lrtxg, crtxg;
1656 1657
1657 1658 if (byteswap)
1658 1659 byteswap_uint64_array(lr, sizeof (*lr));
1659 1660
1660 1661 ztest_object_lock(zd, lr->lr_foid, RL_WRITER);
1661 1662
1662 1663 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1663 1664
1664 1665 tx = dmu_tx_create(os);
1665 1666 dmu_tx_hold_bonus(tx, lr->lr_foid);
1666 1667
1667 1668 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1668 1669 if (txg == 0) {
1669 1670 dmu_buf_rele(db, FTAG);
1670 1671 ztest_object_unlock(zd, lr->lr_foid);
1671 1672 return (ENOSPC);
1672 1673 }
1673 1674
1674 1675 bbt = ztest_bt_bonus(db);
1675 1676 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1676 1677 crtxg = bbt->bt_crtxg;
1677 1678 lrtxg = lr->lr_common.lrc_txg;
1678 1679
1679 1680 if (zd->zd_zilog->zl_replay) {
1680 1681 ASSERT(lr->lr_size != 0);
1681 1682 ASSERT(lr->lr_mode != 0);
1682 1683 ASSERT(lrtxg != 0);
1683 1684 } else {
1684 1685 /*
1685 1686 * Randomly change the size and increment the generation.
1686 1687 */
1687 1688 lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) *
1688 1689 sizeof (*bbt);
1689 1690 lr->lr_mode = bbt->bt_gen + 1;
1690 1691 ASSERT(lrtxg == 0);
1691 1692 }
1692 1693
1693 1694 /*
1694 1695 * Verify that the current bonus buffer is not newer than our txg.
1695 1696 */
1696 1697 ztest_bt_verify(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode,
1697 1698 MAX(txg, lrtxg), crtxg);
1698 1699
1699 1700 dmu_buf_will_dirty(db, tx);
1700 1701
1701 1702 ASSERT3U(lr->lr_size, >=, sizeof (*bbt));
1702 1703 ASSERT3U(lr->lr_size, <=, db->db_size);
1703 1704 VERIFY3U(dmu_set_bonus(db, lr->lr_size, tx), ==, 0);
1704 1705 bbt = ztest_bt_bonus(db);
1705 1706
1706 1707 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, txg, crtxg);
1707 1708
1708 1709 dmu_buf_rele(db, FTAG);
1709 1710
1710 1711 (void) ztest_log_setattr(zd, tx, lr);
1711 1712
1712 1713 dmu_tx_commit(tx);
1713 1714
1714 1715 ztest_object_unlock(zd, lr->lr_foid);
1715 1716
1716 1717 return (0);
1717 1718 }
1718 1719
1719 1720 zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = {
1720 1721 NULL, /* 0 no such transaction type */
1721 1722 ztest_replay_create, /* TX_CREATE */
1722 1723 NULL, /* TX_MKDIR */
1723 1724 NULL, /* TX_MKXATTR */
1724 1725 NULL, /* TX_SYMLINK */
1725 1726 ztest_replay_remove, /* TX_REMOVE */
1726 1727 NULL, /* TX_RMDIR */
1727 1728 NULL, /* TX_LINK */
1728 1729 NULL, /* TX_RENAME */
1729 1730 ztest_replay_write, /* TX_WRITE */
1730 1731 ztest_replay_truncate, /* TX_TRUNCATE */
1731 1732 ztest_replay_setattr, /* TX_SETATTR */
1732 1733 NULL, /* TX_ACL */
1733 1734 NULL, /* TX_CREATE_ACL */
1734 1735 NULL, /* TX_CREATE_ATTR */
1735 1736 NULL, /* TX_CREATE_ACL_ATTR */
1736 1737 NULL, /* TX_MKDIR_ACL */
1737 1738 NULL, /* TX_MKDIR_ATTR */
1738 1739 NULL, /* TX_MKDIR_ACL_ATTR */
1739 1740 NULL, /* TX_WRITE2 */
1740 1741 };
1741 1742
1742 1743 /*
1743 1744 * ZIL get_data callbacks
1744 1745 */
1745 1746
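/*
 * Done callback for ztest_get_data(): release the dbuf, range lock,
 * and object lock taken below, and on success tell the ZIL about the
 * block that dmu_sync() wrote.
 */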
1746 1747 static void
1747 1748 ztest_get_done(zgd_t *zgd, int error)
1748 1749 {
1749 1750 ztest_ds_t *zd = zgd->zgd_private;
1750 1751 uint64_t object = zgd->zgd_rl->rl_object;
1751 1752
1752 1753 if (zgd->zgd_db)
1753 1754 dmu_buf_rele(zgd->zgd_db, zgd);
1754 1755
1755 1756 ztest_range_unlock(zgd->zgd_rl);
1756 1757 ztest_object_unlock(zd, object);
1757 1758
1758 1759 if (error == 0 && zgd->zgd_bp)
1759 1760 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
1760 1761
1761 1762 umem_free(zgd, sizeof (*zgd));
1762 1763 }
1763 1764
1764 1765 static int
1765 1766 ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
1766 1767 {
1767 1768 ztest_ds_t *zd = arg;
1768 1769 objset_t *os = zd->zd_os;
1769 1770 uint64_t object = lr->lr_foid;
1770 1771 uint64_t offset = lr->lr_offset;
1771 1772 uint64_t size = lr->lr_length;
1772 1773 blkptr_t *bp = &lr->lr_blkptr;
1773 1774 uint64_t txg = lr->lr_common.lrc_txg;
1774 1775 uint64_t crtxg;
1775 1776 dmu_object_info_t doi;
1776 1777 dmu_buf_t *db;
1777 1778 zgd_t *zgd;
1778 1779 int error;
1779 1780
1780 1781 ztest_object_lock(zd, object, RL_READER);
1781 1782 error = dmu_bonus_hold(os, object, FTAG, &db);
1782 1783 if (error) {
1783 1784 ztest_object_unlock(zd, object);
1784 1785 return (error);
1785 1786 }
1786 1787
1787 1788 crtxg = ztest_bt_bonus(db)->bt_crtxg;
1788 1789
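	/*
	 * If the object's creation txg is unset or later than this log
	 * record's txg, the record belongs to an earlier incarnation of
	 * the object and there is nothing to fetch.
	 */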
1789 1790 if (crtxg == 0 || crtxg > txg) {
1790 1791 dmu_buf_rele(db, FTAG);
1791 1792 ztest_object_unlock(zd, object);
1792 1793 return (ENOENT);
1793 1794 }
1794 1795
1795 1796 dmu_object_info_from_db(db, &doi);
1796 1797 dmu_buf_rele(db, FTAG);
1797 1798 db = NULL;
1798 1799
1799 1800 zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL);
1800 1801 zgd->zgd_zilog = zd->zd_zilog;
1801 1802 zgd->zgd_private = zd;
1802 1803
1803 1804 if (buf != NULL) { /* immediate write */
1804 1805 zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
1805 1806 RL_READER);
1806 1807
1807 1808 error = dmu_read(os, object, offset, size, buf,
1808 1809 DMU_READ_NO_PREFETCH);
1809 1810 ASSERT(error == 0);
1810 1811 } else {
1811 1812 size = doi.doi_data_block_size;
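		/*
		 * Indirect write: fetch the entire block.  For a
		 * power-of-2 block size, round the offset down to a
		 * block boundary; otherwise the object is a single
		 * block and the data starts at offset 0.
		 */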
1812 1813 if (ISP2(size)) {
1813 1814 offset = P2ALIGN(offset, size);
1814 1815 } else {
1815 1816 ASSERT(offset < size);
1816 1817 offset = 0;
1817 1818 }
1818 1819
1819 1820 zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
1820 1821 RL_READER);
1821 1822
1822 1823 error = dmu_buf_hold(os, object, offset, zgd, &db,
1823 1824 DMU_READ_NO_PREFETCH);
1824 1825
1825 1826 if (error == 0) {
1826 1827 zgd->zgd_db = db;
1827 1828 zgd->zgd_bp = bp;
1828 1829
1829 1830 ASSERT(db->db_offset == offset);
1830 1831 ASSERT(db->db_size == size);
1831 1832
1832 1833 error = dmu_sync(zio, lr->lr_common.lrc_txg,
1833 1834 ztest_get_done, zgd);
1834 1835
1835 1836 if (error == 0)
1836 1837 return (0);
1837 1838 }
1838 1839 }
1839 1840
1840 1841 ztest_get_done(zgd, error);
1841 1842
1842 1843 return (error);
1843 1844 }
1844 1845
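/*
 * Allocate a log record with the object name (if any) appended directly
 * after the record, matching the variable-length layout that real
 * name-carrying ZIL records (e.g. TX_CREATE, TX_REMOVE) use.
 */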
1845 1846 static void *
1846 1847 ztest_lr_alloc(size_t lrsize, char *name)
1847 1848 {
1848 1849 char *lr;
1849 1850 size_t namesize = name ? strlen(name) + 1 : 0;
1850 1851
1851 1852 lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL);
1852 1853
1853 1854 if (name)
1854 1855 bcopy(name, lr + lrsize, namesize);
1855 1856
1856 1857 return (lr);
1857 1858 }
1858 1859
1859 1860 void
1860 1861 ztest_lr_free(void *lr, size_t lrsize, char *name)
1861 1862 {
1862 1863 size_t namesize = name ? strlen(name) + 1 : 0;
1863 1864
1864 1865 umem_free(lr, lrsize + namesize);
1865 1866 }
1866 1867
1867 1868 /*
1868 1869 * Look up a bunch of objects. Returns the number of objects not found.
1869 1870 */
1870 1871 static int
1871 1872 ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count)
1872 1873 {
1873 1874 int missing = 0;
1874 1875 int error;
1875 1876
1876 1877 ASSERT(_mutex_held(&zd->zd_dirobj_lock));
1877 1878
1878 1879 for (int i = 0; i < count; i++, od++) {
1879 1880 od->od_object = 0;
1880 1881 error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
1881 1882 sizeof (uint64_t), 1, &od->od_object);
1882 1883 if (error) {
1883 1884 ASSERT(error == ENOENT);
1884 1885 ASSERT(od->od_object == 0);
1885 1886 missing++;
1886 1887 } else {
1887 1888 dmu_buf_t *db;
1888 1889 ztest_block_tag_t *bbt;
1889 1890 dmu_object_info_t doi;
1890 1891
1891 1892 ASSERT(od->od_object != 0);
1892 1893 ASSERT(missing == 0); /* there should be no gaps */
1893 1894
1894 1895 ztest_object_lock(zd, od->od_object, RL_READER);
1895 1896 VERIFY3U(0, ==, dmu_bonus_hold(zd->zd_os,
1896 1897 od->od_object, FTAG, &db));
1897 1898 dmu_object_info_from_db(db, &doi);
1898 1899 bbt = ztest_bt_bonus(db);
1899 1900 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1900 1901 od->od_type = doi.doi_type;
1901 1902 od->od_blocksize = doi.doi_data_block_size;
1902 1903 od->od_gen = bbt->bt_gen;
1903 1904 dmu_buf_rele(db, FTAG);
1904 1905 ztest_object_unlock(zd, od->od_object);
1905 1906 }
1906 1907 }
1907 1908
1908 1909 return (missing);
1909 1910 }
1910 1911
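/*
 * Create the objects described by the od templates, in order.  After
 * the first failure the remaining entries are marked missing too, so
 * only a gapless prefix of the batch can ever exist.
 */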
1911 1912 static int
1912 1913 ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
1913 1914 {
1914 1915 int missing = 0;
1915 1916
1916 1917 ASSERT(_mutex_held(&zd->zd_dirobj_lock));
1917 1918
1918 1919 for (int i = 0; i < count; i++, od++) {
1919 1920 if (missing) {
1920 1921 od->od_object = 0;
1921 1922 missing++;
1922 1923 continue;
1923 1924 }
1924 1925
1925 1926 lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
1926 1927
1927 1928 lr->lr_doid = od->od_dir;
1928 1929 lr->lr_foid = 0; /* 0 to allocate, > 0 to claim */
1929 1930 lr->lrz_type = od->od_crtype;
1930 1931 lr->lrz_blocksize = od->od_crblocksize;
1931 1932 lr->lrz_ibshift = ztest_random_ibshift();
1932 1933 lr->lrz_bonustype = DMU_OT_UINT64_OTHER;
1933 1934 lr->lrz_bonuslen = dmu_bonus_max();
1934 1935 lr->lr_gen = od->od_crgen;
1935 1936 lr->lr_crtime[0] = time(NULL);
1936 1937
1937 1938 if (ztest_replay_create(zd, lr, B_FALSE) != 0) {
1938 1939 ASSERT(missing == 0);
1939 1940 od->od_object = 0;
1940 1941 missing++;
1941 1942 } else {
1942 1943 od->od_object = lr->lr_foid;
1943 1944 od->od_type = od->od_crtype;
1944 1945 od->od_blocksize = od->od_crblocksize;
1945 1946 od->od_gen = od->od_crgen;
1946 1947 ASSERT(od->od_object != 0);
1947 1948 }
1948 1949
1949 1950 ztest_lr_free(lr, sizeof (*lr), od->od_name);
1950 1951 }
1951 1952
1952 1953 return (missing);
1953 1954 }
1954 1955
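/*
 * Remove the objects described by the od templates, in reverse order,
 * so that whatever survives a failure is still a gapless prefix of the
 * batch (ztest_lookup() asserts that there are no gaps).
 */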
1955 1956 static int
1956 1957 ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count)
1957 1958 {
1958 1959 int missing = 0;
1959 1960 int error;
1960 1961
1961 1962 ASSERT(_mutex_held(&zd->zd_dirobj_lock));
1962 1963
1963 1964 od += count - 1;
1964 1965
1965 1966 for (int i = count - 1; i >= 0; i--, od--) {
1966 1967 if (missing) {
1967 1968 missing++;
1968 1969 continue;
1969 1970 }
1970 1971
1971 1972 if (od->od_object == 0)
1972 1973 continue;
1973 1974
1974 1975 lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
1975 1976
1976 1977 lr->lr_doid = od->od_dir;
1977 1978
1978 1979 if ((error = ztest_replay_remove(zd, lr, B_FALSE)) != 0) {
1979 1980 ASSERT3U(error, ==, ENOSPC);
1980 1981 missing++;
1981 1982 } else {
1982 1983 od->od_object = 0;
1983 1984 }
1984 1985 ztest_lr_free(lr, sizeof (*lr), od->od_name);
1985 1986 }
1986 1987
1987 1988 return (missing);
1988 1989 }
1989 1990
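/*
 * The ztest_write(), ztest_truncate(), and ztest_setattr() helpers
 * below drive the replay functions directly: each builds the matching
 * log record by hand and passes it to its ztest_replay_*() handler.
 */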
1990 1991 static int
1991 1992 ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size,
1992 1993 void *data)
1993 1994 {
1994 1995 lr_write_t *lr;
1995 1996 int error;
1996 1997
1997 1998 lr = ztest_lr_alloc(sizeof (*lr) + size, NULL);
1998 1999
1999 2000 lr->lr_foid = object;
2000 2001 lr->lr_offset = offset;
2001 2002 lr->lr_length = size;
2002 2003 lr->lr_blkoff = 0;
2003 2004 BP_ZERO(&lr->lr_blkptr);
2004 2005
2005 2006 bcopy(data, lr + 1, size);
2006 2007
2007 2008 error = ztest_replay_write(zd, lr, B_FALSE);
2008 2009
2009 2010 ztest_lr_free(lr, sizeof (*lr) + size, NULL);
2010 2011
2011 2012 return (error);
2012 2013 }
2013 2014
2014 2015 static int
2015 2016 ztest_truncate(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
2016 2017 {
2017 2018 lr_truncate_t *lr;
2018 2019 int error;
2019 2020
2020 2021 lr = ztest_lr_alloc(sizeof (*lr), NULL);
2021 2022
2022 2023 lr->lr_foid = object;
2023 2024 lr->lr_offset = offset;
2024 2025 lr->lr_length = size;
2025 2026
2026 2027 error = ztest_replay_truncate(zd, lr, B_FALSE);
2027 2028
2028 2029 ztest_lr_free(lr, sizeof (*lr), NULL);
2029 2030
2030 2031 return (error);
2031 2032 }
2032 2033
2033 2034 static int
2034 2035 ztest_setattr(ztest_ds_t *zd, uint64_t object)
2035 2036 {
2036 2037 lr_setattr_t *lr;
2037 2038 int error;
2038 2039
2039 2040 lr = ztest_lr_alloc(sizeof (*lr), NULL);
2040 2041
2041 2042 lr->lr_foid = object;
2042 2043 lr->lr_size = 0;
2043 2044 lr->lr_mode = 0;
2044 2045
2045 2046 error = ztest_replay_setattr(zd, lr, B_FALSE);
2046 2047
2047 2048 ztest_lr_free(lr, sizeof (*lr), NULL);
2048 2049
2049 2050 return (error);
2050 2051 }
2051 2052
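/*
 * Preallocate a range of an object, waiting for the result to sync.
 * If the tx cannot be assigned (e.g. ENOSPC), fall back to freeing
 * the range, presumably so callers never see a partial prealloc.
 */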
2052 2053 static void
2053 2054 ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
2054 2055 {
2055 2056 objset_t *os = zd->zd_os;
2056 2057 dmu_tx_t *tx;
2057 2058 uint64_t txg;
2058 2059 rl_t *rl;
2059 2060
2060 2061 txg_wait_synced(dmu_objset_pool(os), 0);
2061 2062
2062 2063 ztest_object_lock(zd, object, RL_READER);
2063 2064 rl = ztest_range_lock(zd, object, offset, size, RL_WRITER);
2064 2065
2065 2066 tx = dmu_tx_create(os);
2066 2067
2067 2068 dmu_tx_hold_write(tx, object, offset, size);
2068 2069
2069 2070 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
2070 2071
2071 2072 if (txg != 0) {
2072 2073 dmu_prealloc(os, object, offset, size, tx);
2073 2074 dmu_tx_commit(tx);
2074 2075 txg_wait_synced(dmu_objset_pool(os), txg);
2075 2076 } else {
2076 2077 (void) dmu_free_long_range(os, object, offset, size);
2077 2078 }
2078 2079
2079 2080 ztest_range_unlock(rl);
2080 2081 ztest_object_unlock(zd, object);
2081 2082 }
2082 2083
2083 2084 static void
2084 2085 ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset)
2085 2086 {
2086 2087 ztest_block_tag_t wbt;
2087 2088 dmu_object_info_t doi;
2088 2089 enum ztest_io_type io_type;
2089 2090 uint64_t blocksize;
2090 2091 void *data;
2091 2092
2092 2093 VERIFY(dmu_object_info(zd->zd_os, object, &doi) == 0);
2093 2094 blocksize = doi.doi_data_block_size;
2094 2095 data = umem_alloc(blocksize, UMEM_NOFAIL);
2095 2096
2096 2097 /*
2097 2098 * Pick an i/o type at random, biased toward writing block tags.
2098 2099 */
2099 2100 io_type = ztest_random(ZTEST_IO_TYPES);
2100 2101 if (ztest_random(2) == 0)
2101 2102 io_type = ZTEST_IO_WRITE_TAG;
2102 2103
2103 2104 (void) rw_rdlock(&zd->zd_zilog_lock);
2104 2105
2105 2106 switch (io_type) {
2106 2107
2107 2108 case ZTEST_IO_WRITE_TAG:
2108 2109 ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0);
2109 2110 (void) ztest_write(zd, object, offset, sizeof (wbt), &wbt);
2110 2111 break;
2111 2112
2112 2113 case ZTEST_IO_WRITE_PATTERN:
2113 2114 (void) memset(data, 'a' + (object + offset) % 5, blocksize);
2114 2115 if (ztest_random(2) == 0) {
2115 2116 /*
2116 2117 * Induce fletcher2 collisions to ensure that
2117 2118 * zio_ddt_collision() detects and resolves them
2118 2119 * when using fletcher2-verify for deduplication.
2119 2120 */
2120 2121 ((uint64_t *)data)[0] ^= 1ULL << 63;
2121 2122 ((uint64_t *)data)[4] ^= 1ULL << 63;
2122 2123 }
2123 2124 (void) ztest_write(zd, object, offset, blocksize, data);
2124 2125 break;
2125 2126
2126 2127 case ZTEST_IO_WRITE_ZEROES:
2127 2128 bzero(data, blocksize);
2128 2129 (void) ztest_write(zd, object, offset, blocksize, data);
2129 2130 break;
2130 2131
2131 2132 case ZTEST_IO_TRUNCATE:
2132 2133 (void) ztest_truncate(zd, object, offset, blocksize);
2133 2134 break;
2134 2135
2135 2136 case ZTEST_IO_SETATTR:
2136 2137 (void) ztest_setattr(zd, object);
2137 2138 break;
2138 2139 }
2139 2140
2140 2141 (void) rw_unlock(&zd->zd_zilog_lock);
2141 2142
2142 2143 umem_free(data, blocksize);
2143 2144 }
2144 2145
2145 2146 /*
2146 2147 * Initialize an object description template.
2147 2148 */
2148 2149 static void
2149 2150 ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index,
2150 2151 dmu_object_type_t type, uint64_t blocksize, uint64_t gen)
2151 2152 {
2152 2153 od->od_dir = ZTEST_DIROBJ;
2153 2154 od->od_object = 0;
2154 2155
2155 2156 od->od_crtype = type;
2156 2157 od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize();
2157 2158 od->od_crgen = gen;
2158 2159
2159 2160 od->od_type = DMU_OT_NONE;
2160 2161 od->od_blocksize = 0;
2161 2162 od->od_gen = 0;
2162 2163
2163 2164 (void) snprintf(od->od_name, sizeof (od->od_name), "%s(%lld)[%llu]",
2164 2165 tag, (int64_t)id, index);
2165 2166 }
2166 2167
2167 2168 /*
2168 2169 * Look up or create the objects for a test using the od template.
2169 2170 * If the objects do not all exist, or if 'remove' is specified,
2170 2171 * remove any existing objects and create new ones. Otherwise,
2171 2172 * use the existing objects.
2172 2173 */
2173 2174 static int
2174 2175 ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove)
2175 2176 {
2176 2177 int count = size / sizeof (*od);
2177 2178 int rv = 0;
2178 2179
2179 2180 VERIFY(mutex_lock(&zd->zd_dirobj_lock) == 0);
2180 2181 if ((ztest_lookup(zd, od, count) != 0 || remove) &&
2181 2182 (ztest_remove(zd, od, count) != 0 ||
2182 2183 ztest_create(zd, od, count) != 0))
2183 2184 rv = -1;
2184 2185 zd->zd_od = od;
2185 2186 VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0);
2186 2187
2187 2188 return (rv);
2188 2189 }
2189 2190
2190 2191 /* ARGSUSED */
2191 2192 void
2192 2193 ztest_zil_commit(ztest_ds_t *zd, uint64_t id)
2193 2194 {
2194 2195 zilog_t *zilog = zd->zd_zilog;
2195 2196
2196 2197 (void) rw_rdlock(&zd->zd_zilog_lock);
2197 2198
2198 2199 zil_commit(zilog, ztest_random(ZTEST_OBJECTS));
2199 2200
2200 2201 /*
2201 2202 * Remember the committed values in zd, which is in parent/child
2202 2203 * shared memory. If we die, the next iteration of ztest_run()
2203 2204 * will verify that the log really does contain this record.
2204 2205 */
2205 2206 mutex_enter(&zilog->zl_lock);
2206 2207 ASSERT(zd->zd_shared != NULL);
2207 2208 ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq);
2208 2209 zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq;
2209 2210 mutex_exit(&zilog->zl_lock);
2210 2211
2211 2212 (void) rw_unlock(&zd->zd_zilog_lock);
2212 2213 }
2213 2214
2214 2215 /*
2215 2216 * This function simulates the operations that occur during a mount/unmount
2216 2217 * cycle. We hold the dataset across these operations in an attempt to
2217 2218 * expose any implicit assumptions about ZIL management.
2218 2219 */
2219 2220 /* ARGSUSED */
2220 2221 void
2221 2222 ztest_zil_remount(ztest_ds_t *zd, uint64_t id)
2222 2223 {
2223 2224 objset_t *os = zd->zd_os;
2224 2225
2225 2226 (void) rw_wrlock(&zd->zd_zilog_lock);
2226 2227
2227 2228 /* zfsvfs_teardown() */
2228 2229 zil_close(zd->zd_zilog);
2229 2230
2230 2231 /* zfsvfs_setup() */
2231 2232 VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog);
2232 2233 zil_replay(os, zd, ztest_replay_vector);
2233 2234
2234 2235 (void) rw_unlock(&zd->zd_zilog_lock);
2235 2236 }
2236 2237
2237 2238 /*
2238 2239 * Verify that we can't destroy an active pool, create an existing pool,
2239 2240 * or create a pool with a bad vdev spec.
2240 2241 */
2241 2242 /* ARGSUSED */
2242 2243 void
2243 2244 ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id)
2244 2245 {
2245 2246 ztest_shared_opts_t *zo = &ztest_opts;
2246 2247 spa_t *spa;
2247 2248 nvlist_t *nvroot;
2248 2249
2249 2250 /*
2250 2251 * Attempt to create using a bad file.
2251 2252 */
2252 2253 nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1);
2253 2254 VERIFY3U(ENOENT, ==,
2254 2255 spa_create("ztest_bad_file", nvroot, NULL, NULL, NULL));
2255 2256 nvlist_free(nvroot);
2256 2257
2257 2258 /*
2258 2259 * Attempt to create using a bad mirror.
2259 2260 */
2260 2261 nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 2, 1);
2261 2262 VERIFY3U(ENOENT, ==,
2262 2263 spa_create("ztest_bad_mirror", nvroot, NULL, NULL, NULL));
2263 2264 nvlist_free(nvroot);
2264 2265
2265 2266 /*
2266 2267 * Attempt to create an existing pool. It shouldn't matter
2267 2268 * what's in the nvroot; we should fail with EEXIST.
2268 2269 */
2269 2270 (void) rw_rdlock(&ztest_name_lock);
2270 2271 nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1);
2271 2272 VERIFY3U(EEXIST, ==, spa_create(zo->zo_pool, nvroot, NULL, NULL, NULL));
2272 2273 nvlist_free(nvroot);
2273 2274 VERIFY3U(0, ==, spa_open(zo->zo_pool, &spa, FTAG));
2274 2275 VERIFY3U(EBUSY, ==, spa_destroy(zo->zo_pool));
2275 2276 spa_close(spa, FTAG);
2276 2277
2277 2278 (void) rw_unlock(&ztest_name_lock);
2278 2279 }
2279 2280
2280 2281 static vdev_t *
2281 2282 vdev_lookup_by_path(vdev_t *vd, const char *path)
2282 2283 {
2283 2284 vdev_t *mvd;
2284 2285
2285 2286 if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0)
2286 2287 return (vd);
2287 2288
2288 2289 for (int c = 0; c < vd->vdev_children; c++)
2289 2290 if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) !=
2290 2291 NULL)
2291 2292 return (mvd);
2292 2293
2293 2294 return (NULL);
2294 2295 }
2295 2296
2296 2297 /*
2297 2298 * Find the first available hole which can be used as a top-level vdev.
2298 2299 */
2299 2300 int
2300 2301 find_vdev_hole(spa_t *spa)
2301 2302 {
2302 2303 vdev_t *rvd = spa->spa_root_vdev;
2303 2304 int c;
2304 2305
2305 2306 ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) == SCL_VDEV);
2306 2307
2307 2308 for (c = 0; c < rvd->vdev_children; c++) {
2308 2309 vdev_t *cvd = rvd->vdev_child[c];
2309 2310
2310 2311 if (cvd->vdev_ishole)
2311 2312 break;
2312 2313 }
2313 2314 return (c);
2314 2315 }
2315 2316
2316 2317 /*
2317 2318 * Verify that vdev_add() works as expected.
2318 2319 */
2319 2320 /* ARGSUSED */
2320 2321 void
2321 2322 ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id)
2322 2323 {
2323 2324 ztest_shared_t *zs = ztest_shared;
2324 2325 spa_t *spa = ztest_spa;
2325 2326 uint64_t leaves;
2326 2327 uint64_t guid;
2327 2328 nvlist_t *nvroot;
2328 2329 int error;
2329 2330
2330 2331 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
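	/*
	 * Each mirror copy (and each split-off copy) contributes zo_raidz
	 * leaf devices per top-level vdev; zs_splits is included here,
	 * presumably so leaf paths handed out before a split aren't reused.
	 */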
2331 2332 leaves =
2332 2333 MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz;
2333 2334
2334 2335 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2335 2336
2336 2337 ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves;
2337 2338
2338 2339 /*
2339 2340 * If we have slogs, remove one of them 1/4 of the time.
2340 2341 */
2341 2342 if (spa_has_slogs(spa) && ztest_random(4) == 0) {
2342 2343 /*
2343 2344 * Grab the guid from the head of the log class rotor.
2344 2345 */
2345 2346 guid = spa_log_class(spa)->mc_rotor->mg_vd->vdev_guid;
2346 2347
2347 2348 spa_config_exit(spa, SCL_VDEV, FTAG);
2348 2349
2349 2350 /*
2350 2351 * We have to grab the zs_name_lock as writer to
2351 2352 * prevent a race between removing a slog (dmu_objset_find)
2352 2353 * and destroying a dataset. Removing the slog will
2353 2354 * grab a reference on the dataset which may cause
2354 2355 * dmu_objset_destroy() to fail with EBUSY thus
2355 2356 * leaving the dataset in an inconsistent state.
2356 2357 */
2357 2358 VERIFY(rw_wrlock(&ztest_name_lock) == 0);
2358 2359 error = spa_vdev_remove(spa, guid, B_FALSE);
2359 2360 VERIFY(rw_unlock(&ztest_name_lock) == 0);
2360 2361
2361 2362 if (error && error != EEXIST)
2362 2363 fatal(0, "spa_vdev_remove() = %d", error);
2363 2364 } else {
2364 2365 spa_config_exit(spa, SCL_VDEV, FTAG);
2365 2366
2366 2367 /*
2367 2368 * Make 1/4 of the devices be log devices.
2368 2369 */
2369 2370 nvroot = make_vdev_root(NULL, NULL,
2370 2371 ztest_opts.zo_vdev_size, 0,
2371 2372 ztest_random(4) == 0, ztest_opts.zo_raidz,
2372 2373 zs->zs_mirrors, 1);
2373 2374
2374 2375 error = spa_vdev_add(spa, nvroot);
2375 2376 nvlist_free(nvroot);
2376 2377
2377 2378 if (error == ENOSPC)
2378 2379 ztest_record_enospc("spa_vdev_add");
2379 2380 else if (error != 0)
2380 2381 fatal(0, "spa_vdev_add() = %d", error);
2381 2382 }
2382 2383
2383 2384 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2384 2385 }
2385 2386
2386 2387 /*
2387 2388 * Verify that adding/removing aux devices (l2arc, hot spare) works as expected.
2388 2389 */
2389 2390 /* ARGSUSED */
2390 2391 void
2391 2392 ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id)
2392 2393 {
2393 2394 ztest_shared_t *zs = ztest_shared;
2394 2395 spa_t *spa = ztest_spa;
2395 2396 vdev_t *rvd = spa->spa_root_vdev;
2396 2397 spa_aux_vdev_t *sav;
2397 2398 char *aux;
2398 2399 uint64_t guid = 0;
2399 2400 int error;
2400 2401
2401 2402 if (ztest_random(2) == 0) {
2402 2403 sav = &spa->spa_spares;
2403 2404 aux = ZPOOL_CONFIG_SPARES;
2404 2405 } else {
2405 2406 sav = &spa->spa_l2cache;
2406 2407 aux = ZPOOL_CONFIG_L2CACHE;
2407 2408 }
2408 2409
2409 2410 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2410 2411
2411 2412 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2412 2413
2413 2414 if (sav->sav_count != 0 && ztest_random(4) == 0) {
2414 2415 /*
2415 2416 * Pick a random device to remove.
2416 2417 */
2417 2418 guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid;
2418 2419 } else {
2419 2420 /*
2420 2421 * Find an unused device we can add.
2421 2422 */
2422 2423 zs->zs_vdev_aux = 0;
2423 2424 for (;;) {
2424 2425 char path[MAXPATHLEN];
2425 2426 int c;
2426 2427 (void) snprintf(path, sizeof (path), ztest_aux_template,
2427 2428 ztest_opts.zo_dir, ztest_opts.zo_pool, aux,
2428 2429 zs->zs_vdev_aux);
2429 2430 for (c = 0; c < sav->sav_count; c++)
2430 2431 if (strcmp(sav->sav_vdevs[c]->vdev_path,
2431 2432 path) == 0)
2432 2433 break;
2433 2434 if (c == sav->sav_count &&
2434 2435 vdev_lookup_by_path(rvd, path) == NULL)
2435 2436 break;
2436 2437 zs->zs_vdev_aux++;
2437 2438 }
2438 2439 }
2439 2440
2440 2441 spa_config_exit(spa, SCL_VDEV, FTAG);
2441 2442
2442 2443 if (guid == 0) {
2443 2444 /*
2444 2445 * Add a new device.
2445 2446 */
2446 2447 nvlist_t *nvroot = make_vdev_root(NULL, aux,
2447 2448 (ztest_opts.zo_vdev_size * 5) / 4, 0, 0, 0, 0, 1);
2448 2449 error = spa_vdev_add(spa, nvroot);
2449 2450 if (error != 0)
2450 2451 fatal(0, "spa_vdev_add(%p) = %d", nvroot, error);
2451 2452 nvlist_free(nvroot);
2452 2453 } else {
2453 2454 /*
2454 2455 * Remove an existing device. Sometimes, dirty its
2455 2456 * vdev state first to make sure we handle removal
2456 2457 * of devices that have pending state changes.
2457 2458 */
2458 2459 if (ztest_random(2) == 0)
2459 2460 (void) vdev_online(spa, guid, 0, NULL);
2460 2461
2461 2462 error = spa_vdev_remove(spa, guid, B_FALSE);
2462 2463 if (error != 0 && error != EBUSY)
2463 2464 fatal(0, "spa_vdev_remove(%llu) = %d", guid, error);
2464 2465 }
2465 2466
2466 2467 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2467 2468 }
2468 2469
2469 2470 /*
2470 2471 * Split a pool if it has mirror top-level vdevs (tlvdevs).
2471 2472 */
2472 2473 /* ARGSUSED */
2473 2474 void
2474 2475 ztest_split_pool(ztest_ds_t *zd, uint64_t id)
2475 2476 {
2476 2477 ztest_shared_t *zs = ztest_shared;
2477 2478 spa_t *spa = ztest_spa;
2478 2479 vdev_t *rvd = spa->spa_root_vdev;
2479 2480 nvlist_t *tree, **child, *config, *split, **schild;
2480 2481 uint_t c, children, schildren = 0, lastlogid = 0;
2481 2482 int error = 0;
2482 2483
2483 2484 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2484 2485
2485 2486 /* ensure we have a usable config; mirrors of raidz aren't supported */
2486 2487 if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) {
2487 2488 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2488 2489 return;
2489 2490 }
2490 2491
2491 2492 /* clean up the old pool, if any */
2492 2493 (void) spa_destroy("splitp");
2493 2494
2494 2495 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2495 2496
2496 2497 /* generate a config from the existing config */
2497 2498 mutex_enter(&spa->spa_props_lock);
2498 2499 VERIFY(nvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE,
2499 2500 &tree) == 0);
2500 2501 mutex_exit(&spa->spa_props_lock);
2501 2502
2502 2503 VERIFY(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2503 2504 &children) == 0);
2504 2505
2505 2506 schild = malloc(rvd->vdev_children * sizeof (nvlist_t *));
2506 2507 for (c = 0; c < children; c++) {
2507 2508 vdev_t *tvd = rvd->vdev_child[c];
2508 2509 nvlist_t **mchild;
2509 2510 uint_t mchildren;
2510 2511
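		/*
		 * Log and hole top-level vdevs can't be split off; emit
		 * a hole in their place so child vdev ids stay aligned.
		 * lastlogid marks where a trailing run of holes begins
		 * so it can be trimmed from the split config below.
		 */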
2511 2512 if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) {
2512 2513 VERIFY(nvlist_alloc(&schild[schildren], NV_UNIQUE_NAME,
2513 2514 0) == 0);
2514 2515 VERIFY(nvlist_add_string(schild[schildren],
2515 2516 ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE) == 0);
2516 2517 VERIFY(nvlist_add_uint64(schild[schildren],
2517 2518 ZPOOL_CONFIG_IS_HOLE, 1) == 0);
2518 2519 if (lastlogid == 0)
2519 2520 lastlogid = schildren;
2520 2521 ++schildren;
2521 2522 continue;
2522 2523 }
2523 2524 lastlogid = 0;
2524 2525 VERIFY(nvlist_lookup_nvlist_array(child[c],
2525 2526 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2526 2527 VERIFY(nvlist_dup(mchild[0], &schild[schildren++], 0) == 0);
2527 2528 }
2528 2529
2529 2530 /* OK, create a config that can be used to split */
2530 2531 VERIFY(nvlist_alloc(&split, NV_UNIQUE_NAME, 0) == 0);
2531 2532 VERIFY(nvlist_add_string(split, ZPOOL_CONFIG_TYPE,
2532 2533 VDEV_TYPE_ROOT) == 0);
2533 2534 VERIFY(nvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN, schild,
2534 2535 lastlogid != 0 ? lastlogid : schildren) == 0);
2535 2536
2536 2537 VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0);
2537 2538 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split) == 0);
2538 2539
2539 2540 for (c = 0; c < schildren; c++)
2540 2541 nvlist_free(schild[c]);
2541 2542 free(schild);
2542 2543 nvlist_free(split);
2543 2544
2544 2545 spa_config_exit(spa, SCL_VDEV, FTAG);
2545 2546
2546 2547 (void) rw_wrlock(&ztest_name_lock);
2547 2548 error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE);
2548 2549 (void) rw_unlock(&ztest_name_lock);
2549 2550
2550 2551 nvlist_free(config);
2551 2552
2552 2553 if (error == 0) {
2553 2554 (void) printf("successful split - results:\n");
2554 2555 mutex_enter(&spa_namespace_lock);
2555 2556 show_pool_stats(spa);
2556 2557 show_pool_stats(spa_lookup("splitp"));
2557 2558 mutex_exit(&spa_namespace_lock);
2558 2559 ++zs->zs_splits;
2559 2560 --zs->zs_mirrors;
2560 2561 }
2561 2562 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2563 2564 }
2564 2565
2565 2566 /*
2566 2567 * Verify that we can attach and detach devices.
2567 2568 */
2568 2569 /* ARGSUSED */
2569 2570 void
2570 2571 ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
2571 2572 {
2572 2573 ztest_shared_t *zs = ztest_shared;
2573 2574 spa_t *spa = ztest_spa;
2574 2575 spa_aux_vdev_t *sav = &spa->spa_spares;
2575 2576 vdev_t *rvd = spa->spa_root_vdev;
2576 2577 vdev_t *oldvd, *newvd, *pvd;
2577 2578 nvlist_t *root;
2578 2579 uint64_t leaves;
2579 2580 uint64_t leaf, top;
2580 2581 uint64_t ashift = ztest_get_ashift();
2581 2582 uint64_t oldguid, pguid;
2582 2583 size_t oldsize, newsize;
2583 2584 char oldpath[MAXPATHLEN], newpath[MAXPATHLEN];
2584 2585 int replacing;
2585 2586 int oldvd_has_siblings = B_FALSE;
2586 2587 int newvd_is_spare = B_FALSE;
2587 2588 int oldvd_is_log;
2588 2589 int error, expected_error;
2589 2590
2590 2591 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2591 2592 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
2592 2593
2593 2594 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2594 2595
2595 2596 /*
2596 2597 * Decide whether to do an attach or a replace.
2597 2598 */
2598 2599 replacing = ztest_random(2);
2599 2600
2600 2601 /*
2601 2602 * Pick a random top-level vdev.
2602 2603 */
2603 2604 top = ztest_random_vdev_top(spa, B_TRUE);
2604 2605
2605 2606 /*
2606 2607 * Pick a random leaf within it.
2607 2608 */
2608 2609 leaf = ztest_random(leaves);
2609 2610
2610 2611 /*
2611 2612 * Locate this vdev.
2612 2613 */
2613 2614 oldvd = rvd->vdev_child[top];
2614 2615 if (zs->zs_mirrors >= 1) {
2615 2616 ASSERT(oldvd->vdev_ops == &vdev_mirror_ops);
2616 2617 ASSERT(oldvd->vdev_children >= zs->zs_mirrors);
2617 2618 oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raidz];
2618 2619 }
2619 2620 if (ztest_opts.zo_raidz > 1) {
2620 2621 ASSERT(oldvd->vdev_ops == &vdev_raidz_ops);
2621 2622 ASSERT(oldvd->vdev_children == ztest_opts.zo_raidz);
2622 2623 oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raidz];
2623 2624 }
2624 2625
2625 2626 /*
2626 2627 * If we're already doing an attach or replace, oldvd may be a
2627 2628 * mirror vdev -- in which case, pick a random child.
2628 2629 */
2629 2630 while (oldvd->vdev_children != 0) {
2630 2631 oldvd_has_siblings = B_TRUE;
2631 2632 ASSERT(oldvd->vdev_children >= 2);
2632 2633 oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)];
2633 2634 }
2634 2635
2635 2636 oldguid = oldvd->vdev_guid;
2636 2637 oldsize = vdev_get_min_asize(oldvd);
2637 2638 oldvd_is_log = oldvd->vdev_top->vdev_islog;
2638 2639 (void) strcpy(oldpath, oldvd->vdev_path);
2639 2640 pvd = oldvd->vdev_parent;
2640 2641 pguid = pvd->vdev_guid;
2641 2642
2642 2643 /*
2643 2644 * If oldvd has siblings, then half of the time, detach it.
2644 2645 */
2645 2646 if (oldvd_has_siblings && ztest_random(2) == 0) {
2646 2647 spa_config_exit(spa, SCL_VDEV, FTAG);
2647 2648 error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE);
2648 2649 if (error != 0 && error != ENODEV && error != EBUSY &&
2649 2650 error != ENOTSUP)
2650 2651 fatal(0, "detach (%s) returned %d", oldpath, error);
2651 2652 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2652 2653 return;
2653 2654 }
2654 2655
2655 2656 /*
2656 2657 * For the new vdev, choose with equal probability between the two
2657 2658 * standard paths (ending in either 'a' or 'b') or a random hot spare.
2658 2659 */
2659 2660 if (sav->sav_count != 0 && ztest_random(3) == 0) {
2660 2661 newvd = sav->sav_vdevs[ztest_random(sav->sav_count)];
2661 2662 newvd_is_spare = B_TRUE;
2662 2663 (void) strcpy(newpath, newvd->vdev_path);
2663 2664 } else {
2664 2665 (void) snprintf(newpath, sizeof (newpath), ztest_dev_template,
2665 2666 ztest_opts.zo_dir, ztest_opts.zo_pool,
2666 2667 top * leaves + leaf);
2667 2668 if (ztest_random(2) == 0)
2668 2669 newpath[strlen(newpath) - 1] = 'b';
2669 2670 newvd = vdev_lookup_by_path(rvd, newpath);
2670 2671 }
2671 2672
2672 2673 if (newvd) {
2673 2674 newsize = vdev_get_min_asize(newvd);
2674 2675 } else {
2675 2676 /*
2676 2677 * Make newsize a little bigger or smaller than oldsize.
2677 2678 * If it's smaller, the attach should fail.
2678 2679 * If it's larger, and we're doing a replace,
2679 2680 * we should get dynamic LUN growth when we're done.
2680 2681 */
2681 2682 newsize = 10 * oldsize / (9 + ztest_random(3));
2682 2683 }
2683 2684
2684 2685 /*
2685 2686 * If pvd is not a mirror or root, the attach should fail with ENOTSUP,
2686 2687 * unless it's a replace; in that case any non-replacing parent is OK.
2687 2688 *
2688 2689 * If newvd is already part of the pool, it should fail with EBUSY.
2689 2690 *
2690 2691 * If newvd is too small, it should fail with EOVERFLOW.
2691 2692 */
2692 2693 if (pvd->vdev_ops != &vdev_mirror_ops &&
2693 2694 pvd->vdev_ops != &vdev_root_ops && (!replacing ||
2694 2695 pvd->vdev_ops == &vdev_replacing_ops ||
2695 2696 pvd->vdev_ops == &vdev_spare_ops))
2696 2697 expected_error = ENOTSUP;
2697 2698 else if (newvd_is_spare && (!replacing || oldvd_is_log))
2698 2699 expected_error = ENOTSUP;
2699 2700 else if (newvd == oldvd)
2700 2701 expected_error = replacing ? 0 : EBUSY;
2701 2702 else if (vdev_lookup_by_path(rvd, newpath) != NULL)
2702 2703 expected_error = EBUSY;
2703 2704 else if (newsize < oldsize)
2704 2705 expected_error = EOVERFLOW;
2705 2706 else if (ashift > oldvd->vdev_top->vdev_ashift)
2706 2707 expected_error = EDOM;
2707 2708 else
2708 2709 expected_error = 0;
2709 2710
2710 2711 spa_config_exit(spa, SCL_VDEV, FTAG);
2711 2712
2712 2713 /*
2713 2714 * Build the nvlist describing newpath.
2714 2715 */
2715 2716 root = make_vdev_root(newpath, NULL, newvd == NULL ? newsize : 0,
2716 2717 ashift, 0, 0, 0, 1);
2717 2718
2718 2719 error = spa_vdev_attach(spa, oldguid, root, replacing);
2719 2720
2720 2721 nvlist_free(root);
2721 2722
2722 2723 /*
2723 2724 * If our parent was the replacing vdev, but the replace completed,
2724 2725 * then instead of failing with ENOTSUP we may either succeed,
2725 2726 * fail with ENODEV, or fail with EOVERFLOW.
2726 2727 */
2727 2728 if (expected_error == ENOTSUP &&
2728 2729 (error == 0 || error == ENODEV || error == EOVERFLOW))
2729 2730 expected_error = error;
2730 2731
2731 2732 /*
2732 2733 * If someone grew the LUN, the replacement may be too small.
2733 2734 */
2734 2735 if (error == EOVERFLOW || error == EBUSY)
2735 2736 expected_error = error;
2736 2737
2737 2738 /* XXX workaround 6690467 */
2738 2739 if (error != expected_error && expected_error != EBUSY) {
2739 2740 fatal(0, "attach (%s %llu, %s %llu, %d) "
2740 2741 "returned %d, expected %d",
2741 2742 oldpath, (longlong_t)oldsize, newpath,
2742 2743 (longlong_t)newsize, replacing, error, expected_error);
2743 2744 }
2744 2745
2745 2746 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2746 2747 }
2747 2748
2748 2749 /*
2749 2750 * Callback that expands the vdev's physical size; returns NULL on success.
2750 2751 */
2751 2752 vdev_t *
2752 2753 grow_vdev(vdev_t *vd, void *arg)
2753 2754 {
2754 2755 spa_t *spa = vd->vdev_spa;
2755 2756 size_t *newsize = arg;
2756 2757 size_t fsize;
2757 2758 int fd;
2758 2759
2759 2760 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
2760 2761 ASSERT(vd->vdev_ops->vdev_op_leaf);
2761 2762
2762 2763 if ((fd = open(vd->vdev_path, O_RDWR)) == -1)
2763 2764 return (vd);
2764 2765
2765 2766 fsize = lseek(fd, 0, SEEK_END);
2766 2767 (void) ftruncate(fd, *newsize);
2767 2768
2768 2769 if (ztest_opts.zo_verbose >= 6) {
2769 2770 (void) printf("%s grew from %lu to %lu bytes\n",
2770 2771 vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize);
2771 2772 }
2772 2773 (void) close(fd);
2773 2774 return (NULL);
2774 2775 }
2775 2776
2776 2777 /*
2777 2778 * Callback function which expands a given vdev by calling vdev_online().
2778 2779 */
2779 2780 /* ARGSUSED */
2780 2781 vdev_t *
2781 2782 online_vdev(vdev_t *vd, void *arg)
2782 2783 {
2783 2784 spa_t *spa = vd->vdev_spa;
2784 2785 vdev_t *tvd = vd->vdev_top;
2785 2786 uint64_t guid = vd->vdev_guid;
2786 2787 uint64_t generation = spa->spa_config_generation + 1;
2787 2788 vdev_state_t newstate = VDEV_STATE_UNKNOWN;
2788 2789 int error;
2789 2790
2790 2791 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
2791 2792 ASSERT(vd->vdev_ops->vdev_op_leaf);
2792 2793
2793 2794 /* Calling vdev_online will initialize the new metaslabs */
2794 2795 spa_config_exit(spa, SCL_STATE, spa);
2795 2796 error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate);
2796 2797 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
2797 2798
2798 2799 /*
2799 2800 * If vdev_online returned an error or the underlying vdev_open
2800 2801 * failed then we abort the expand. The only way to know that
2801 2802 * vdev_open fails is by checking the returned newstate.
2802 2803 */
2803 2804 if (error || newstate != VDEV_STATE_HEALTHY) {
2804 2805 if (ztest_opts.zo_verbose >= 5) {
2805 2806 (void) printf("Unable to expand vdev, state %llu, "
2806 2807 "error %d\n", (u_longlong_t)newstate, error);
2807 2808 }
2808 2809 return (vd);
2809 2810 }
2810 2811 ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY);
2811 2812
2812 2813 /*
2813 2814 * Since we dropped the lock we need to ensure that we're
2814 2815 * still talking to the original vdev. It's possible this
2815 2816 * vdev may have been detached/replaced while we were
2816 2817 * trying to online it.
2817 2818 */
2818 2819 if (generation != spa->spa_config_generation) {
2819 2820 if (ztest_opts.zo_verbose >= 5) {
2820 2821 (void) printf("vdev configuration has changed, "
2821 2822 "guid %llu, state %llu, expected gen %llu, "
2822 2823 "got gen %llu\n",
2823 2824 (u_longlong_t)guid,
2824 2825 (u_longlong_t)tvd->vdev_state,
2825 2826 (u_longlong_t)generation,
2826 2827 (u_longlong_t)spa->spa_config_generation);
2827 2828 }
2828 2829 return (vd);
2829 2830 }
2830 2831 return (NULL);
2831 2832 }
2832 2833
2833 2834 /*
2834 2835 * Traverse the vdev tree calling the supplied function.
2835 2836 * We continue to walk the tree until we either have walked all
2836 2837 * children or we receive a non-NULL return from the callback.
2837 2838 * If a NULL callback is passed, then we just return back the first
2838 2839 * leaf vdev we encounter.
2839 2840 */
2840 2841 vdev_t *
2841 2842 vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg)
2842 2843 {
2843 2844 if (vd->vdev_ops->vdev_op_leaf) {
2844 2845 if (func == NULL)
2845 2846 return (vd);
2846 2847 else
2847 2848 return (func(vd, arg));
2848 2849 }
2849 2850
2850 2851 for (uint_t c = 0; c < vd->vdev_children; c++) {
2851 2852 vdev_t *cvd = vd->vdev_child[c];
2852 2853 if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL)
2853 2854 return (cvd);
2854 2855 }
2855 2856 return (NULL);
2856 2857 }
2857 2858
2858 2859 /*
2859 2860 * Verify that dynamic LUN growth works as expected.
2860 2861 */
2861 2862 /* ARGSUSED */
2862 2863 void
2863 2864 ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id)
2864 2865 {
2865 2866 spa_t *spa = ztest_spa;
2866 2867 vdev_t *vd, *tvd;
2867 2868 metaslab_class_t *mc;
2868 2869 metaslab_group_t *mg;
2869 2870 size_t psize, newsize;
2870 2871 uint64_t top;
2871 2872 uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count;
2872 2873
2873 2874 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2874 2875 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
2875 2876
2876 2877 top = ztest_random_vdev_top(spa, B_TRUE);
2877 2878
2878 2879 tvd = spa->spa_root_vdev->vdev_child[top];
2879 2880 mg = tvd->vdev_mg;
2880 2881 mc = mg->mg_class;
2881 2882 old_ms_count = tvd->vdev_ms_count;
2882 2883 old_class_space = metaslab_class_get_space(mc);
2883 2884
2884 2885 /*
2885 2886 * Determine the size of the first leaf vdev associated with
2886 2887 * our top-level device.
2887 2888 */
2888 2889 vd = vdev_walk_tree(tvd, NULL, NULL);
2889 2890 ASSERT3P(vd, !=, NULL);
2890 2891 ASSERT(vd->vdev_ops->vdev_op_leaf);
2891 2892
2892 2893 psize = vd->vdev_psize;
2893 2894
2894 2895 /*
2895 2896 * We only try to expand the vdev if it's healthy, less than 4x its
2896 2897 * original size, and it has a valid psize.
2897 2898 */
2898 2899 if (tvd->vdev_state != VDEV_STATE_HEALTHY ||
2899 2900 psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) {
2900 2901 spa_config_exit(spa, SCL_STATE, spa);
2901 2902 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2902 2903 return;
2903 2904 }
2904 2905 ASSERT(psize > 0);
2905 2906 newsize = psize + psize / 8;
2906 2907 ASSERT3U(newsize, >, psize);
2907 2908
2908 2909 if (ztest_opts.zo_verbose >= 6) {
2909 2910 (void) printf("Expanding LUN %s from %lu to %lu\n",
2910 2911 vd->vdev_path, (ulong_t)psize, (ulong_t)newsize);
2911 2912 }
2912 2913
2913 2914 /*
2914 2915 * Growing the vdev is a two-step process:
2915 2916 * 1) expand the physical size (i.e. relabel)
2916 2917 * 2) online the vdev to create the new metaslabs
2917 2918 */
2918 2919 if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL ||
2919 2920 vdev_walk_tree(tvd, online_vdev, NULL) != NULL ||
2920 2921 tvd->vdev_state != VDEV_STATE_HEALTHY) {
2921 2922 if (ztest_opts.zo_verbose >= 5) {
2922 2923 (void) printf("Could not expand LUN because "
2923 2924 "the vdev configuration changed.\n");
2924 2925 }
2925 2926 spa_config_exit(spa, SCL_STATE, spa);
2926 2927 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2927 2928 return;
2928 2929 }
2929 2930
2930 2931 spa_config_exit(spa, SCL_STATE, spa);
2931 2932
2932 2933 /*
2933 2934 * Expanding the LUN will update the config asynchronously,
2934 2935 * thus we must wait for the async thread to complete any
2935 2936 * pending tasks before proceeding.
2936 2937 */
2937 2938 for (;;) {
2938 2939 boolean_t done;
2939 2940 mutex_enter(&spa->spa_async_lock);
2940 2941 done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks);
2941 2942 mutex_exit(&spa->spa_async_lock);
2942 2943 if (done)
2943 2944 break;
2944 2945 txg_wait_synced(spa_get_dsl(spa), 0);
2945 2946 (void) poll(NULL, 0, 100);
2946 2947 }
2947 2948
2948 2949 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
2949 2950
2950 2951 tvd = spa->spa_root_vdev->vdev_child[top];
2951 2952 new_ms_count = tvd->vdev_ms_count;
2952 2953 new_class_space = metaslab_class_get_space(mc);
2953 2954
2954 2955 if (tvd->vdev_mg != mg || mg->mg_class != mc) {
2955 2956 if (ztest_opts.zo_verbose >= 5) {
2956 2957 (void) printf("Could not verify LUN expansion due to "
2957 2958 "intervening vdev offline or remove.\n");
2958 2959 }
2959 2960 spa_config_exit(spa, SCL_STATE, spa);
2960 2961 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2961 2962 return;
2962 2963 }
2963 2964
2964 2965 /*
2965 2966 * Make sure we were able to grow the vdev.
2966 2967 */
2967 2968 if (new_ms_count <= old_ms_count)
2968 2969 fatal(0, "LUN expansion failed: ms_count %llu <= %llu\n",
2969 2970 old_ms_count, new_ms_count);
2970 2971
2971 2972 /*
2972 2973 * Make sure we were able to grow the pool.
2973 2974 */
2974 2975 if (new_class_space <= old_class_space)
2975 2976 fatal(0, "LUN expansion failed: class_space %llu <= %llu\n",
2976 2977 old_class_space, new_class_space);
2977 2978
2978 2979 if (ztest_opts.zo_verbose >= 5) {
2979 2980 char oldnumbuf[6], newnumbuf[6];
2980 2981
2981 2982 nicenum(old_class_space, oldnumbuf);
2982 2983 nicenum(new_class_space, newnumbuf);
2983 2984 (void) printf("%s grew from %s to %s\n",
2984 2985 spa->spa_name, oldnumbuf, newnumbuf);
2985 2986 }
2986 2987
2987 2988 spa_config_exit(spa, SCL_STATE, spa);
2988 2989 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2989 2990 }
2990 2991
2991 2992 /*
2992 2993 * Verify that dmu_objset_{create,destroy,open,close} work as expected.
2993 2994 */
2994 2995 /* ARGSUSED */
2995 2996 static void
2996 2997 ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
2997 2998 {
2998 2999 /*
2999 3000 * Create the objects common to all ztest datasets.
3000 3001 */
3001 3002 VERIFY(zap_create_claim(os, ZTEST_DIROBJ,
3002 3003 DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0);
3003 3004 }
3004 3005
3005 3006 static int
3006 3007 ztest_dataset_create(char *dsname)
3007 3008 {
3008 3009 uint64_t zilset = ztest_random(100);
3009 3010 int err = dmu_objset_create(dsname, DMU_OST_OTHER, 0,
3010 3011 ztest_objset_create_cb, NULL);
3011 3012
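	/*
	 * 80% of the time (zilset < 80) leave the sync policy alone;
	 * otherwise force sync=always so the dataset stresses the ZIL.
	 */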
3012 3013 if (err || zilset < 80)
3013 3014 return (err);
3014 3015
3015 3016 if (ztest_opts.zo_verbose >= 6)
3016 3017 (void) printf("Setting dataset %s to sync always\n", dsname);
3017 3018 return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC,
3018 3019 ZFS_SYNC_ALWAYS, B_FALSE));
3019 3020 }
3020 3021
3021 3022 /* ARGSUSED */
3022 3023 static int
3023 3024 ztest_objset_destroy_cb(const char *name, void *arg)
3024 3025 {
3025 3026 objset_t *os;
3026 3027 dmu_object_info_t doi;
3027 3028 int error;
3028 3029
3029 3030 /*
3030 3031 * Verify that the dataset contains a directory object.
3031 3032 */
3032 3033 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os));
3033 3034 error = dmu_object_info(os, ZTEST_DIROBJ, &doi);
3034 3035 if (error != ENOENT) {
3035 3036 /* We could have crashed in the middle of destroying it */
3036 3037 ASSERT3U(error, ==, 0);
3037 3038 ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER);
3038 3039 ASSERT3S(doi.doi_physical_blocks_512, >=, 0);
3039 3040 }
3040 3041 dmu_objset_rele(os, FTAG);
3041 3042
3042 3043 /*
3043 3044 * Destroy the dataset.
3044 3045 */
3045 3046 VERIFY3U(0, ==, dmu_objset_destroy(name, B_FALSE));
3046 3047 return (0);
3047 3048 }
3048 3049
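/*
 * Create snapshot "<osname>@<id>".  ENOSPC is recorded and reported as
 * B_FALSE, EEXIST is tolerated, and any other error is fatal.
 */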
3049 3050 static boolean_t
3050 3051 ztest_snapshot_create(char *osname, uint64_t id)
3051 3052 {
3052 3053 char snapname[MAXNAMELEN];
3053 3054 int error;
3054 3055
3055 3056 (void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname,
3056 3057 (u_longlong_t)id);
3057 3058
3058 3059 error = dmu_objset_snapshot(osname, strchr(snapname, '@') + 1,
3059 3060 NULL, NULL, B_FALSE, B_FALSE, -1);
3060 3061 if (error == ENOSPC) {
3061 3062 ztest_record_enospc(FTAG);
3062 3063 return (B_FALSE);
3063 3064 }
3064 3065 if (error != 0 && error != EEXIST)
3065 3066 fatal(0, "ztest_snapshot_create(%s) = %d", snapname, error);
3066 3067 return (B_TRUE);
3067 3068 }
3068 3069
3069 3070 static boolean_t
3070 3071 ztest_snapshot_destroy(char *osname, uint64_t id)
3071 3072 {
3072 3073 char snapname[MAXNAMELEN];
3073 3074 int error;
3074 3075
3075 3076 (void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname,
3076 3077 (u_longlong_t)id);
3077 3078
3078 3079 error = dmu_objset_destroy(snapname, B_FALSE);
3079 3080 if (error != 0 && error != ENOENT)
3080 3081 fatal(0, "ztest_snapshot_destroy(%s) = %d", snapname, error);
3081 3082 return (B_TRUE);
3082 3083 }
3083 3084
3084 3085 /* ARGSUSED */
3085 3086 void
3086 3087 ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id)
3087 3088 {
3088 3089 ztest_ds_t zdtmp;
3089 3090 int iters;
3090 3091 int error;
3091 3092 objset_t *os, *os2;
3092 3093 char name[MAXNAMELEN];
3093 3094 zilog_t *zilog;
3094 3095
3095 3096 (void) rw_rdlock(&ztest_name_lock);
3096 3097
3097 3098 (void) snprintf(name, MAXNAMELEN, "%s/temp_%llu",
3098 3099 ztest_opts.zo_pool, (u_longlong_t)id);
3099 3100
3100 3101 /*
3101 3102 * If this dataset exists from a previous run, process its replay log
3102 3103 * half of the time. If we don't replay it, then dmu_objset_destroy()
3103 3104 * (invoked from ztest_objset_destroy_cb()) should just throw it away.
3104 3105 */
3105 3106 if (ztest_random(2) == 0 &&
3106 3107 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os) == 0) {
3107 3108 ztest_zd_init(&zdtmp, NULL, os);
3108 3109 zil_replay(os, &zdtmp, ztest_replay_vector);
3109 3110 ztest_zd_fini(&zdtmp);
3110 3111 dmu_objset_disown(os, FTAG);
3111 3112 }
3112 3113
3113 3114 /*
3114 3115 * There may be an old instance of the dataset we're about to
3115 3116 * create lying around from a previous run. If so, destroy it
3116 3117 * and all of its snapshots.
3117 3118 */
3118 3119 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
3119 3120 DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
3120 3121
3121 3122 /*
3122 3123 * Verify that the destroyed dataset is no longer in the namespace.
3123 3124 */
3124 3125 VERIFY3U(ENOENT, ==, dmu_objset_hold(name, FTAG, &os));
3125 3126
3126 3127 /*
3127 3128 * Verify that we can create a new dataset.
3128 3129 */
3129 3130 error = ztest_dataset_create(name);
3130 3131 if (error) {
3131 3132 if (error == ENOSPC) {
3132 3133 ztest_record_enospc(FTAG);
3133 3134 (void) rw_unlock(&ztest_name_lock);
3134 3135 return;
3135 3136 }
3136 3137 fatal(0, "dmu_objset_create(%s) = %d", name, error);
3137 3138 }
3138 3139
3139 3140 VERIFY3U(0, ==,
3140 3141 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os));
3141 3142
3142 3143 ztest_zd_init(&zdtmp, NULL, os);
3143 3144
3144 3145 /*
3145 3146 * Open the intent log for it.
3146 3147 */
3147 3148 zilog = zil_open(os, ztest_get_data);
3148 3149
3149 3150 /*
3150 3151 * Put some objects in there, do a little I/O to them,
3151 3152 * and randomly take a couple of snapshots along the way.
3152 3153 */
3153 3154 iters = ztest_random(5);
3154 3155 for (int i = 0; i < iters; i++) {
3155 3156 ztest_dmu_object_alloc_free(&zdtmp, id);
3156 3157 if (ztest_random(iters) == 0)
3157 3158 (void) ztest_snapshot_create(name, i);
3158 3159 }
3159 3160
3160 3161 /*
3161 3162 * Verify that we cannot create an existing dataset.
3162 3163 */
3163 3164 VERIFY3U(EEXIST, ==,
3164 3165 dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL));
3165 3166
3166 3167 /*
3167 3168 * Verify that we can hold an objset that is also owned.
3168 3169 */
3169 3170 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os2));
3170 3171 dmu_objset_rele(os2, FTAG);
3171 3172
3172 3173 /*
3173 3174 * Verify that we cannot own an objset that is already owned.
3174 3175 */
3175 3176 VERIFY3U(EBUSY, ==,
3176 3177 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os2));
3177 3178
3178 3179 zil_close(zilog);
3179 3180 dmu_objset_disown(os, FTAG);
3180 3181 ztest_zd_fini(&zdtmp);
3181 3182
3182 3183 (void) rw_unlock(&ztest_name_lock);
3183 3184 }
3184 3185
3185 3186 /*
3186 3187 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected.
3187 3188 */
3188 3189 void
3189 3190 ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id)
3190 3191 {
3191 3192 (void) rw_rdlock(&ztest_name_lock);
3192 3193 (void) ztest_snapshot_destroy(zd->zd_name, id);
3193 3194 (void) ztest_snapshot_create(zd->zd_name, id);
3194 3195 (void) rw_unlock(&ztest_name_lock);
3195 3196 }
3196 3197
3197 3198 /*
3198 3199 * Clean up non-standard snapshots and clones.
3199 3200 */
3200 3201 void
3201 3202 ztest_dsl_dataset_cleanup(char *osname, uint64_t id)
3202 3203 {
3203 3204 char snap1name[MAXNAMELEN];
3204 3205 char clone1name[MAXNAMELEN];
3205 3206 char snap2name[MAXNAMELEN];
3206 3207 char clone2name[MAXNAMELEN];
3207 3208 char snap3name[MAXNAMELEN];
3208 3209 int error;
3209 3210
3210 3211 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id);
3211 3212 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id);
3212 3213 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id);
3213 3214 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id);
3214 3215 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id);
3215 3216
3216 3217 error = dmu_objset_destroy(clone2name, B_FALSE);
3217 3218 if (error && error != ENOENT)
3218 3219 fatal(0, "dmu_objset_destroy(%s) = %d", clone2name, error);
3219 3220 error = dmu_objset_destroy(snap3name, B_FALSE);
3220 3221 if (error && error != ENOENT)
3221 3222 fatal(0, "dmu_objset_destroy(%s) = %d", snap3name, error);
3222 3223 error = dmu_objset_destroy(snap2name, B_FALSE);
3223 3224 if (error && error != ENOENT)
3224 3225 fatal(0, "dmu_objset_destroy(%s) = %d", snap2name, error);
3225 3226 error = dmu_objset_destroy(clone1name, B_FALSE);
3226 3227 if (error && error != ENOENT)
3227 3228 fatal(0, "dmu_objset_destroy(%s) = %d", clone1name, error);
3228 3229 error = dmu_objset_destroy(snap1name, B_FALSE);
3229 3230 if (error && error != ENOENT)
3230 3231 fatal(0, "dmu_objset_destroy(%s) = %d", snap1name, error);
3231 3232 }
3232 3233
3233 3234 /*
3234 3235 * Verify dsl_dataset_promote handles EBUSY
3235 3236 */
3236 3237 void
3237 3238 ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id)
3238 3239 {
3239 3240 objset_t *clone;
3240 3241 dsl_dataset_t *ds;
3241 3242 char snap1name[MAXNAMELEN];
3242 3243 char clone1name[MAXNAMELEN];
3243 3244 char snap2name[MAXNAMELEN];
3244 3245 char clone2name[MAXNAMELEN];
3245 3246 char snap3name[MAXNAMELEN];
3246 3247 char *osname = zd->zd_name;
3247 3248 int error;
3248 3249
3249 3250 (void) rw_rdlock(&ztest_name_lock);
3250 3251
3251 3252 ztest_dsl_dataset_cleanup(osname, id);
3252 3253
3253 3254 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id);
3254 3255 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id);
3255 3256 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id);
3256 3257 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id);
3257 3258 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id);
3258 3259
3259 3260 error = dmu_objset_snapshot(osname, strchr(snap1name, '@')+1,
3260 3261 NULL, NULL, B_FALSE, B_FALSE, -1);
3261 3262 if (error && error != EEXIST) {
3262 3263 if (error == ENOSPC) {
3263 3264 ztest_record_enospc(FTAG);
3264 3265 goto out;
3265 3266 }
3266 3267 fatal(0, "dmu_objset_snapshot(%s) = %d", snap1name, error);
3267 3268 }
3268 3269
3269 3270 error = dmu_objset_hold(snap1name, FTAG, &clone);
3270 3271 if (error)
3271 3272 fatal(0, "dmu_objset_hold(%s) = %d", snap1name, error);
3272 3273
3273 3274 error = dmu_objset_clone(clone1name, dmu_objset_ds(clone), 0);
3274 3275 dmu_objset_rele(clone, FTAG);
3275 3276 if (error) {
3276 3277 if (error == ENOSPC) {
3277 3278 ztest_record_enospc(FTAG);
3278 3279 goto out;
3279 3280 }
3280 3281 fatal(0, "dmu_objset_clone(%s) = %d", clone1name, error);
3281 3282 }
3282 3283
3283 3284 error = dmu_objset_snapshot(clone1name, strchr(snap2name, '@')+1,
3284 3285 NULL, NULL, B_FALSE, B_FALSE, -1);
3285 3286 if (error && error != EEXIST) {
3286 3287 if (error == ENOSPC) {
3287 3288 ztest_record_enospc(FTAG);
3288 3289 goto out;
3289 3290 }
3290 3291 fatal(0, "dmu_objset_snapshot(%s) = %d", snap2name, error);
3291 3292 }
3292 3293
3293 3294 error = dmu_objset_snapshot(clone1name, strchr(snap3name, '@')+1,
3294 3295 NULL, NULL, B_FALSE, B_FALSE, -1);
3295 3296 if (error && error != EEXIST) {
3296 3297 if (error == ENOSPC) {
3297 3298 ztest_record_enospc(FTAG);
3298 3299 goto out;
3299 3300 }
3300 3301 fatal(0, "dmu_objset_snapshot(%s) = %d", snap3name, error);
3301 3302 }
3302 3303
3303 3304 error = dmu_objset_hold(snap3name, FTAG, &clone);
3304 3305 if (error)
3305 3306 fatal(0, "dmu_objset_hold(%s) = %d", snap3name, error);
3306 3307
3307 3308 error = dmu_objset_clone(clone2name, dmu_objset_ds(clone), 0);
3308 3309 dmu_objset_rele(clone, FTAG);
3309 3310 if (error) {
3310 3311 if (error == ENOSPC) {
3311 3312 ztest_record_enospc(FTAG);
3312 3313 goto out;
3313 3314 }
3314 3315 fatal(0, "dmu_objset_clone(%s) = %d", clone2name, error);
3315 3316 }
3316 3317
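	/*
	 * Promoting clone2 would transfer clone1's older snapshots
	 * (including snap2) to clone2, so with snap2 owned the promote
	 * must fail with EBUSY.
	 */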
3317 3318 error = dsl_dataset_own(snap2name, B_FALSE, FTAG, &ds);
3318 3319 if (error)
3319 3320 fatal(0, "dsl_dataset_own(%s) = %d", snap2name, error);
3320 3321 error = dsl_dataset_promote(clone2name, NULL);
3321 3322 if (error != EBUSY)
3322 3323 fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name,
3323 3324 error);
3324 3325 dsl_dataset_disown(ds, FTAG);
3325 3326
3326 3327 out:
3327 3328 ztest_dsl_dataset_cleanup(osname, id);
3328 3329
3329 3330 (void) rw_unlock(&ztest_name_lock);
3330 3331 }
3331 3332
3332 3333 /*
3333 3334 * Verify that dmu_object_{alloc,free} work as expected.
3334 3335 */
3335 3336 void
3336 3337 ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id)
3337 3338 {
3338 3339 ztest_od_t od[4];
3339 3340 int batchsize = sizeof (od) / sizeof (od[0]);
3340 3341
3341 3342 for (int b = 0; b < batchsize; b++)
3342 3343 ztest_od_init(&od[b], id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0);
3343 3344
3344 3345 /*
3345 3346 * Destroy the previous batch of objects, create a new batch,
3346 3347 * and do some I/O on the new objects.
3347 3348 */
3348 3349 if (ztest_object_init(zd, od, sizeof (od), B_TRUE) != 0)
3349 3350 return;
3350 3351
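	/*
	 * Do a geometrically distributed number of I/Os (expected count
	 * 4 * batchsize - 1) to random objects in the batch, at random
	 * SPA_MAXBLOCKSIZE-aligned offsets.
	 */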
3351 3352 while (ztest_random(4 * batchsize) != 0)
3352 3353 ztest_io(zd, od[ztest_random(batchsize)].od_object,
3353 3354 ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
3354 3355 }
3355 3356
3356 3357 /*
3357 3358 * Verify that dmu_{read,write} work as expected.
3358 3359 */
3359 3360 void
3360 3361 ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
3361 3362 {
3362 3363 objset_t *os = zd->zd_os;
3363 3364 ztest_od_t od[2];
3364 3365 dmu_tx_t *tx;
3365 3366 int i, freeit, error;
3366 3367 uint64_t n, s, txg;
3367 3368 bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT;
3368 3369 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
3369 3370 uint64_t chunksize = (1000 + ztest_random(1000)) * sizeof (uint64_t);
3370 3371 uint64_t regions = 997;
3371 3372 uint64_t stride = 123456789ULL;
3372 3373 uint64_t width = 40;
3373 3374 int free_percent = 5;
3374 3375
3375 3376 /*
3376 3377 * This test uses two objects, packobj and bigobj, that are always
3377 3378 * updated together (i.e. in the same tx) so that their contents are
3378 3379 * in sync and can be compared. Their contents relate to each other
3379 3380 * in a simple way: packobj is a dense array of 'bufwad' structures,
3380 3381 * while bigobj is a sparse array of the same bufwads. Specifically,
3381 3382 * for any index n, there are three bufwads that should be identical:
3382 3383 *
3383 3384 * packobj, at offset n * sizeof (bufwad_t)
3384 3385 * bigobj, at the head of the nth chunk
3385 3386 * bigobj, at the tail of the nth chunk
3386 3387 *
3387 3388 * The chunk size is arbitrary. It doesn't have to be a power of two,
3388 3389 * and it doesn't have any relation to the object blocksize.
3389 3390 * The only requirement is that it can hold at least two bufwads.
3390 3391 *
3391 3392 * Normally, we write the bufwad to each of these locations.
3392 3393 * However, free_percent of the time we instead write zeroes to
3393 3394 * packobj and perform a dmu_free_range() on bigobj. By comparing
3394 3395 * bigobj to packobj, we can verify that the DMU is correctly
3395 3396 * tracking which parts of an object are allocated and free,
3396 3397 * and that the contents of the allocated blocks are correct.
3397 3398 */
3398 3399
3399 3400 /*
3400 3401 * Read the directory info. If it's the first time, set things up.
3401 3402 */
3402 3403 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize);
3403 3404 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
3404 3405
3405 3406 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
3406 3407 return;
3407 3408
3408 3409 bigobj = od[0].od_object;
3409 3410 packobj = od[1].od_object;
3410 3411 chunksize = od[0].od_gen;
3411 3412 ASSERT(chunksize == od[1].od_gen);
3412 3413
3413 3414 /*
3414 3415 * Prefetch a random chunk of the big object.
3415 3416 * Our aim here is to get some async reads in flight
3416 3417 * for blocks that we may free below; the DMU should
3417 3418 * handle this race correctly.
3418 3419 */
3419 3420 n = ztest_random(regions) * stride + ztest_random(width);
3420 3421 s = 1 + ztest_random(2 * width - 1);
3421 3422 dmu_prefetch(os, bigobj, n * chunksize, s * chunksize);
3422 3423
3423 3424 /*
3424 3425 * Pick a random index and compute the offsets into packobj and bigobj.
3425 3426 */
3426 3427 n = ztest_random(regions) * stride + ztest_random(width);
3427 3428 s = 1 + ztest_random(width - 1);
3428 3429
3429 3430 packoff = n * sizeof (bufwad_t);
3430 3431 packsize = s * sizeof (bufwad_t);
3431 3432
3432 3433 bigoff = n * chunksize;
3433 3434 bigsize = s * chunksize;
3434 3435
3435 3436 packbuf = umem_alloc(packsize, UMEM_NOFAIL);
3436 3437 bigbuf = umem_alloc(bigsize, UMEM_NOFAIL);
3437 3438
3438 3439 /*
3439 3440 * free_percent of the time, free a range of bigobj rather than
3440 3441 * overwriting it.
3441 3442 */
3442 3443 freeit = (ztest_random(100) < free_percent);
3443 3444
3444 3445 /*
3445 3446 * Read the current contents of our objects.
3446 3447 */
3447 3448 error = dmu_read(os, packobj, packoff, packsize, packbuf,
3448 3449 DMU_READ_PREFETCH);
3449 3450 ASSERT3U(error, ==, 0);
3450 3451 error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf,
3451 3452 DMU_READ_PREFETCH);
3452 3453 ASSERT3U(error, ==, 0);
3453 3454
3454 3455 /*
3455 3456 * Get a tx for the mods to both packobj and bigobj.
3456 3457 */
3457 3458 tx = dmu_tx_create(os);
3458 3459
3459 3460 dmu_tx_hold_write(tx, packobj, packoff, packsize);
3460 3461
3461 3462 if (freeit)
3462 3463 dmu_tx_hold_free(tx, bigobj, bigoff, bigsize);
3463 3464 else
3464 3465 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
3465 3466
3466 3467 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
3467 3468 if (txg == 0) {
3468 3469 umem_free(packbuf, packsize);
3469 3470 umem_free(bigbuf, bigsize);
3470 3471 return;
3471 3472 }
3472 3473
3473 3474 dmu_object_set_checksum(os, bigobj,
3474 3475 (enum zio_checksum)ztest_random_dsl_prop(ZFS_PROP_CHECKSUM), tx);
3475 3476
3476 3477 dmu_object_set_compress(os, bigobj,
3477 3478 (enum zio_compress)ztest_random_dsl_prop(ZFS_PROP_COMPRESSION), tx);
3478 3479
3479 3480 /*
3480 3481 * For each index from n to n + s, verify that the existing bufwad
3481 3482 * in packobj matches the bufwads at the head and tail of the
3482 3483 * corresponding chunk in bigobj. Then update all three bufwads
3483 3484 * with the new values we want to write out.
3484 3485 */
3485 3486 for (i = 0; i < s; i++) {
3486 3487 /* LINTED */
3487 3488 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
3488 3489 /* LINTED */
3489 3490 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
3490 3491 /* LINTED */
3491 3492 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
3492 3493
3493 3494 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize);
3494 3495 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize);
3495 3496
3496 3497 if (pack->bw_txg > txg)
3497 3498 fatal(0, "future leak: got %llx, open txg is %llx",
3498 3499 pack->bw_txg, txg);
3499 3500
3500 3501 if (pack->bw_data != 0 && pack->bw_index != n + i)
3501 3502 fatal(0, "wrong index: got %llx, wanted %llx+%llx",
3502 3503 pack->bw_index, n, i);
3503 3504
3504 3505 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
3505 3506 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH);
3506 3507
3507 3508 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
3508 3509 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT);
3509 3510
3510 3511 if (freeit) {
3511 3512 bzero(pack, sizeof (bufwad_t));
3512 3513 } else {
3513 3514 pack->bw_index = n + i;
3514 3515 pack->bw_txg = txg;
3515 3516 pack->bw_data = 1 + ztest_random(-2ULL);
3516 3517 }
3517 3518 *bigH = *pack;
3518 3519 *bigT = *pack;
3519 3520 }
3520 3521
3521 3522 /*
3522 3523 * We've verified all the old bufwads, and made new ones.
3523 3524 * Now write them out.
3524 3525 */
3525 3526 dmu_write(os, packobj, packoff, packsize, packbuf, tx);
3526 3527
3527 3528 if (freeit) {
3528 3529 if (ztest_opts.zo_verbose >= 7) {
3529 3530 (void) printf("freeing offset %llx size %llx"
3530 3531 " txg %llx\n",
3531 3532 (u_longlong_t)bigoff,
3532 3533 (u_longlong_t)bigsize,
3533 3534 (u_longlong_t)txg);
3534 3535 }
3535 3536 VERIFY(0 == dmu_free_range(os, bigobj, bigoff, bigsize, tx));
3536 3537 } else {
3537 3538 if (ztest_opts.zo_verbose >= 7) {
3538 3539 (void) printf("writing offset %llx size %llx"
3539 3540 " txg %llx\n",
3540 3541 (u_longlong_t)bigoff,
3541 3542 (u_longlong_t)bigsize,
3542 3543 (u_longlong_t)txg);
3543 3544 }
3544 3545 dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx);
3545 3546 }
3546 3547
3547 3548 dmu_tx_commit(tx);
3548 3549
3549 3550 /*
3550 3551 * Sanity check the stuff we just wrote.
3551 3552 */
3552 3553 {
3553 3554 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
3554 3555 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
3555 3556
3556 3557 VERIFY(0 == dmu_read(os, packobj, packoff,
3557 3558 packsize, packcheck, DMU_READ_PREFETCH));
3558 3559 VERIFY(0 == dmu_read(os, bigobj, bigoff,
3559 3560 bigsize, bigcheck, DMU_READ_PREFETCH));
3560 3561
3561 3562 ASSERT(bcmp(packbuf, packcheck, packsize) == 0);
3562 3563 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0);
3563 3564
3564 3565 umem_free(packcheck, packsize);
3565 3566 umem_free(bigcheck, bigsize);
3566 3567 }
3567 3568
3568 3569 umem_free(packbuf, packsize);
3569 3570 umem_free(bigbuf, bigsize);
3570 3571 }
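The packobj/bigobj relationship described at the top of ztest_dmu_read_write() can be exercised entirely in plain memory. The following standalone sketch (with a local stand-in for bufwad_t, whose real definition lives earlier in this file) builds the dense and sparse layouts and checks the three-copies invariant:

	#include <assert.h>
	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	typedef struct bufwad {
		uint64_t bw_index;
		uint64_t bw_txg;
		uint64_t bw_data;
	} bufwad_t;

	int
	main(void)
	{
		uint64_t chunksize = 1000 * sizeof (uint64_t);	/* >= 2 bufwads */
		uint64_t s = 4;					/* chunks */
		char *packbuf = calloc(s, sizeof (bufwad_t));	/* dense */
		char *bigbuf = calloc(s, chunksize);		/* sparse */

		for (uint64_t i = 0; i < s; i++) {
			bufwad_t w = { i, 7, 42 };	/* index, txg, data */
			bufwad_t *pack = (bufwad_t *)(packbuf +
			    i * sizeof (bufwad_t));
			bufwad_t *bigH = (bufwad_t *)(bigbuf + i * chunksize);
			bufwad_t *bigT = (bufwad_t *)((char *)bigH +
			    chunksize) - 1;

			*pack = w;	/* packobj, offset i * sizeof (bufwad_t) */
			*bigH = w;	/* bigobj, head of chunk i */
			*bigT = w;	/* bigobj, tail of chunk i */
		}

		/* The invariant ztest verifies: all three copies agree. */
		for (uint64_t i = 0; i < s; i++) {
			bufwad_t *pack = (bufwad_t *)(packbuf +
			    i * sizeof (bufwad_t));
			bufwad_t *bigH = (bufwad_t *)(bigbuf + i * chunksize);
			bufwad_t *bigT = (bufwad_t *)((char *)bigH +
			    chunksize) - 1;

			assert(memcmp(pack, bigH, sizeof (bufwad_t)) == 0);
			assert(memcmp(pack, bigT, sizeof (bufwad_t)) == 0);
		}

		free(packbuf);
		free(bigbuf);
		return (0);
	}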
3571 3572
3572 3573 void
3573 3574 compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf,
3574 3575 uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg)
3575 3576 {
3576 3577 uint64_t i;
3577 3578 bufwad_t *pack;
3578 3579 bufwad_t *bigH;
3579 3580 bufwad_t *bigT;
3580 3581
3581 3582 /*
3582 3583 * For each index from n to n + s, verify that the existing bufwad
3583 3584 * in packobj matches the bufwads at the head and tail of the
3584 3585 * corresponding chunk in bigobj. Then update all three bufwads
3585 3586 * with the new values we want to write out.
3586 3587 */
3587 3588 for (i = 0; i < s; i++) {
3588 3589 /* LINTED */
3589 3590 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
3590 3591 /* LINTED */
3591 3592 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
3592 3593 /* LINTED */
3593 3594 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
3594 3595
3595 3596 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize);
3596 3597 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize);
3597 3598
3598 3599 if (pack->bw_txg > txg)
3599 3600 fatal(0, "future leak: got %llx, open txg is %llx",
3600 3601 pack->bw_txg, txg);
3601 3602
3602 3603 if (pack->bw_data != 0 && pack->bw_index != n + i)
3603 3604 fatal(0, "wrong index: got %llx, wanted %llx+%llx",
3604 3605 pack->bw_index, n, i);
3605 3606
3606 3607 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
3607 3608 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH);
3608 3609
3609 3610 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
3610 3611 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT);
3611 3612
3612 3613 pack->bw_index = n + i;
3613 3614 pack->bw_txg = txg;
3614 3615 pack->bw_data = 1 + ztest_random(-2ULL);
3615 3616
3616 3617 *bigH = *pack;
3617 3618 *bigT = *pack;
3618 3619 }
3619 3620 }
3620 3621
3621 3622 void
3622 3623 ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
3623 3624 {
3624 3625 objset_t *os = zd->zd_os;
3625 3626 ztest_od_t od[2];
3626 3627 dmu_tx_t *tx;
3627 3628 uint64_t i;
3628 3629 int error;
3629 3630 uint64_t n, s, txg;
3630 3631 bufwad_t *packbuf, *bigbuf;
3631 3632 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
3632 3633 uint64_t blocksize = ztest_random_blocksize();
3633 3634 uint64_t chunksize = blocksize;
3634 3635 uint64_t regions = 997;
3635 3636 uint64_t stride = 123456789ULL;
3636 3637 uint64_t width = 9;
3637 3638 dmu_buf_t *bonus_db;
3638 3639 arc_buf_t **bigbuf_arcbufs;
3639 3640 dmu_object_info_t doi;
3640 3641
3641 3642 /*
3642 3643 * This test uses two objects, packobj and bigobj, that are always
3643 3644 * updated together (i.e. in the same tx) so that their contents are
3644 3645 * in sync and can be compared. Their contents relate to each other
3645 3646 * in a simple way: packobj is a dense array of 'bufwad' structures,
3646 3647 * while bigobj is a sparse array of the same bufwads. Specifically,
3647 3648 * for any index n, there are three bufwads that should be identical:
3648 3649 *
3649 3650 * packobj, at offset n * sizeof (bufwad_t)
3650 3651 * bigobj, at the head of the nth chunk
3651 3652 * bigobj, at the tail of the nth chunk
3652 3653 *
3653 3654 * The chunk size is set equal to bigobj block size so that
3654 3655 * dmu_assign_arcbuf() can be tested for object updates.
3655 3656 */
3656 3657
3657 3658 /*
3658 3659 * Read the directory info. If it's the first time, set things up.
3659 3660 */
3660 3661 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
3661 3662 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
3662 3663
3663 3664 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
3664 3665 return;
3665 3666
3666 3667 bigobj = od[0].od_object;
3667 3668 packobj = od[1].od_object;
3668 3669 blocksize = od[0].od_blocksize;
3669 3670 chunksize = blocksize;
3670 3671 ASSERT(chunksize == od[1].od_gen);
3671 3672
3672 3673 VERIFY(dmu_object_info(os, bigobj, &doi) == 0);
3673 3674 VERIFY(ISP2(doi.doi_data_block_size));
3674 3675 VERIFY(chunksize == doi.doi_data_block_size);
3675 3676 VERIFY(chunksize >= 2 * sizeof (bufwad_t));
3676 3677
3677 3678 /*
3678 3679 * Pick a random index and compute the offsets into packobj and bigobj.
3679 3680 */
3680 3681 n = ztest_random(regions) * stride + ztest_random(width);
3681 3682 s = 1 + ztest_random(width - 1);
3682 3683
3683 3684 packoff = n * sizeof (bufwad_t);
3684 3685 packsize = s * sizeof (bufwad_t);
3685 3686
3686 3687 bigoff = n * chunksize;
3687 3688 bigsize = s * chunksize;
3688 3689
3689 3690 packbuf = umem_zalloc(packsize, UMEM_NOFAIL);
3690 3691 bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL);
3691 3692
3692 3693 VERIFY3U(0, ==, dmu_bonus_hold(os, bigobj, FTAG, &bonus_db));
3693 3694
3694 3695 bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL);
3695 3696
3696 3697 /*
3697 3698 * Iteration 0 tests zcopy for DB_UNCACHED dbufs.
3698 3699 * Iteration 1 tests zcopy to already referenced dbufs.
3699 3700 * Iteration 2 tests zcopy to a dirty dbuf in the same txg.
3700 3701 * Iteration 3 tests zcopy to a dbuf dirtied in a previous txg.
3701 3702 * Iteration 4 tests zcopy when the dbuf is no longer dirty.
3702 3703 * Iteration 5 tests zcopy when it can't be done.
3703 3704 * Iteration 6 does one more zcopy write.
3704 3705 */
3705 3706 for (i = 0; i < 7; i++) {
3706 3707 uint64_t j;
3707 3708 uint64_t off;
3708 3709
3709 3710 /*
3710 3711 * In iteration 5 (i == 5) use arcbufs
3711 3712 * that don't match bigobj blksz to test
3712 3713 * dmu_assign_arcbuf() when it can't directly
3713 3714 * assign an arcbuf to a dbuf.
3714 3715 */
3715 3716 for (j = 0; j < s; j++) {
3716 3717 if (i != 5) {
3717 3718 bigbuf_arcbufs[j] =
3718 3719 dmu_request_arcbuf(bonus_db, chunksize);
3719 3720 } else {
3720 3721 bigbuf_arcbufs[2 * j] =
3721 3722 dmu_request_arcbuf(bonus_db, chunksize / 2);
3722 3723 bigbuf_arcbufs[2 * j + 1] =
3723 3724 dmu_request_arcbuf(bonus_db, chunksize / 2);
3724 3725 }
3725 3726 }
3726 3727
3727 3728 /*
3728 3729 * Get a tx for the mods to both packobj and bigobj.
3729 3730 */
3730 3731 tx = dmu_tx_create(os);
3731 3732
3732 3733 dmu_tx_hold_write(tx, packobj, packoff, packsize);
3733 3734 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
3734 3735
3735 3736 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
3736 3737 if (txg == 0) {
3737 3738 umem_free(packbuf, packsize);
3738 3739 umem_free(bigbuf, bigsize);
3739 3740 for (j = 0; j < s; j++) {
3740 3741 if (i != 5) {
3741 3742 dmu_return_arcbuf(bigbuf_arcbufs[j]);
3742 3743 } else {
3743 3744 dmu_return_arcbuf(
3744 3745 bigbuf_arcbufs[2 * j]);
3745 3746 dmu_return_arcbuf(
3746 3747 bigbuf_arcbufs[2 * j + 1]);
3747 3748 }
3748 3749 }
3749 3750 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
3750 3751 dmu_buf_rele(bonus_db, FTAG);
3751 3752 return;
3752 3753 }
3753 3754
3754 3755 /*
3755 3756 * 50% of the time, don't read the objects in the 1st iteration,
3756 3757 * to test dmu_assign_arcbuf() for the case when there are no
3757 3758 * existing dbufs for the specified offsets.
3758 3759 */
3759 3760 if (i != 0 || ztest_random(2) != 0) {
3760 3761 error = dmu_read(os, packobj, packoff,
3761 3762 packsize, packbuf, DMU_READ_PREFETCH);
3762 3763 ASSERT3U(error, ==, 0);
3763 3764 error = dmu_read(os, bigobj, bigoff, bigsize,
3764 3765 bigbuf, DMU_READ_PREFETCH);
3765 3766 ASSERT3U(error, ==, 0);
3766 3767 }
3767 3768 compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize,
3768 3769 n, chunksize, txg);
3769 3770
3770 3771 /*
3771 3772 * We've verified all the old bufwads, and made new ones.
3772 3773 * Now write them out.
3773 3774 */
3774 3775 dmu_write(os, packobj, packoff, packsize, packbuf, tx);
3775 3776 if (ztest_opts.zo_verbose >= 7) {
3776 3777 (void) printf("writing offset %llx size %llx"
3777 3778 " txg %llx\n",
3778 3779 (u_longlong_t)bigoff,
3779 3780 (u_longlong_t)bigsize,
3780 3781 (u_longlong_t)txg);
3781 3782 }
3782 3783 for (off = bigoff, j = 0; j < s; j++, off += chunksize) {
3783 3784 dmu_buf_t *dbt;
3784 3785 if (i != 5) {
3785 3786 bcopy((caddr_t)bigbuf + (off - bigoff),
3786 3787 bigbuf_arcbufs[j]->b_data, chunksize);
3787 3788 } else {
3788 3789 bcopy((caddr_t)bigbuf + (off - bigoff),
3789 3790 bigbuf_arcbufs[2 * j]->b_data,
3790 3791 chunksize / 2);
3791 3792 bcopy((caddr_t)bigbuf + (off - bigoff) +
3792 3793 chunksize / 2,
3793 3794 bigbuf_arcbufs[2 * j + 1]->b_data,
3794 3795 chunksize / 2);
3795 3796 }
3796 3797
3797 3798 if (i == 1) {
3798 3799 VERIFY(dmu_buf_hold(os, bigobj, off,
3799 3800 FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0);
3800 3801 }
3801 3802 if (i != 5) {
3802 3803 dmu_assign_arcbuf(bonus_db, off,
3803 3804 bigbuf_arcbufs[j], tx);
3804 3805 } else {
3805 3806 dmu_assign_arcbuf(bonus_db, off,
3806 3807 bigbuf_arcbufs[2 * j], tx);
3807 3808 dmu_assign_arcbuf(bonus_db,
3808 3809 off + chunksize / 2,
3809 3810 bigbuf_arcbufs[2 * j + 1], tx);
3810 3811 }
3811 3812 if (i == 1) {
3812 3813 dmu_buf_rele(dbt, FTAG);
3813 3814 }
3814 3815 }
3815 3816 dmu_tx_commit(tx);
3816 3817
3817 3818 /*
3818 3819 * Sanity check the stuff we just wrote.
3819 3820 */
3820 3821 {
3821 3822 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
3822 3823 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
3823 3824
3824 3825 VERIFY(0 == dmu_read(os, packobj, packoff,
3825 3826 packsize, packcheck, DMU_READ_PREFETCH));
3826 3827 VERIFY(0 == dmu_read(os, bigobj, bigoff,
3827 3828 bigsize, bigcheck, DMU_READ_PREFETCH));
3828 3829
3829 3830 ASSERT(bcmp(packbuf, packcheck, packsize) == 0);
3830 3831 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0);
3831 3832
3832 3833 umem_free(packcheck, packsize);
3833 3834 umem_free(bigcheck, bigsize);
3834 3835 }
3835 3836 if (i == 2) {
3836 3837 txg_wait_open(dmu_objset_pool(os), 0);
3837 3838 } else if (i == 3) {
3838 3839 txg_wait_synced(dmu_objset_pool(os), 0);
3839 3840 }
3840 3841 }
3841 3842
3842 3843 dmu_buf_rele(bonus_db, FTAG);
3843 3844 umem_free(packbuf, packsize);
3844 3845 umem_free(bigbuf, bigsize);
3845 3846 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
3846 3847 }
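Stripped of the iteration machinery, one zero-copy block write in the loop above reduces to three DMU calls. A hedged fragment (assuming bonus_db is a held bonus buffer for the target object, tx an assigned transaction covering the range, and src caller-supplied data of one block):

	arc_buf_t *abuf = dmu_request_arcbuf(bonus_db, blocksize);

	bcopy(src, abuf->b_data, blocksize);		/* fill the loaned buffer */
	dmu_assign_arcbuf(bonus_db, off, abuf, tx);	/* DMU takes ownership */

Once dmu_assign_arcbuf() succeeds, the DMU owns the buffer; if the write is abandoned before that point (as in the txg == 0 path above), the buffer must go back via dmu_return_arcbuf(abuf).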
3847 3848
3848 3849 /* ARGSUSED */
3849 3850 void
3850 3851 ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id)
3851 3852 {
3852 3853 ztest_od_t od[1];
3853 3854 uint64_t offset = (1ULL << (ztest_random(20) + 43)) +
3854 3855 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
3855 3856
3856 3857 /*
3857 3858 * Have multiple threads write to large offsets in an object
3858 3859 * to verify that parallel writes to an object -- even to the
3859 3860 * same blocks within the object -- don't cause any trouble.
3860 3861 */
3861 3862 ztest_od_init(&od[0], ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
3862 3863
3863 3864 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
3864 3865 return;
3865 3866
3866 3867 while (ztest_random(10) != 0)
3867 3868 ztest_io(zd, od[0].od_object, offset);
3868 3869 }
3869 3870
3870 3871 void
3871 3872 ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id)
3872 3873 {
3873 3874 ztest_od_t od[1];
3874 3875 uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) +
3875 3876 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
3876 3877 uint64_t count = ztest_random(20) + 1;
3877 3878 uint64_t blocksize = ztest_random_blocksize();
3878 3879 void *data;
3879 3880
3880 3881 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
3881 3882
3882 3883 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
3883 3884 return;
3884 3885
3885 3886 if (ztest_truncate(zd, od[0].od_object, offset, count * blocksize) != 0)
3886 3887 return;
3887 3888
3888 3889 ztest_prealloc(zd, od[0].od_object, offset, count * blocksize);
3889 3890
3890 3891 data = umem_zalloc(blocksize, UMEM_NOFAIL);
3891 3892
3892 3893 while (ztest_random(count) != 0) {
3893 3894 uint64_t randoff = offset + (ztest_random(count) * blocksize);
3894 3895 if (ztest_write(zd, od[0].od_object, randoff, blocksize,
3895 3896 data) != 0)
3896 3897 break;
3897 3898 while (ztest_random(4) != 0)
3898 3899 ztest_io(zd, od[0].od_object, randoff);
3899 3900 }
3900 3901
3901 3902 umem_free(data, blocksize);
3902 3903 }
3903 3904
3904 3905 /*
3905 3906 * Verify that zap_{create,destroy,add,remove,update} work as expected.
3906 3907 */
3907 3908 #define ZTEST_ZAP_MIN_INTS 1
3908 3909 #define ZTEST_ZAP_MAX_INTS 4
3909 3910 #define ZTEST_ZAP_MAX_PROPS 1000
3910 3911
3911 3912 void
3912 3913 ztest_zap(ztest_ds_t *zd, uint64_t id)
3913 3914 {
3914 3915 objset_t *os = zd->zd_os;
3915 3916 ztest_od_t od[1];
3916 3917 uint64_t object;
3917 3918 uint64_t txg, last_txg;
3918 3919 uint64_t value[ZTEST_ZAP_MAX_INTS];
3919 3920 uint64_t zl_ints, zl_intsize, prop;
3920 3921 int i, ints;
3921 3922 dmu_tx_t *tx;
3922 3923 char propname[100], txgname[100];
3923 3924 int error;
3924 3925 char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" };
3925 3926
3926 3927 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
3927 3928
3928 3929 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
3929 3930 return;
3930 3931
3931 3932 object = od[0].od_object;
3932 3933
3933 3934 /*
3934 3935 * Generate a known hash collision, and verify that
3935 3936 * we can lookup and remove both entries.
3936 3937 */
3937 3938 tx = dmu_tx_create(os);
3938 3939 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
3939 3940 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
3940 3941 if (txg == 0)
3941 3942 return;
3942 3943 for (i = 0; i < 2; i++) {
3943 3944 value[i] = i;
3944 3945 VERIFY3U(0, ==, zap_add(os, object, hc[i], sizeof (uint64_t),
3945 3946 1, &value[i], tx));
3946 3947 }
3947 3948 for (i = 0; i < 2; i++) {
3948 3949 VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i],
3949 3950 sizeof (uint64_t), 1, &value[i], tx));
3950 3951 VERIFY3U(0, ==,
3951 3952 zap_length(os, object, hc[i], &zl_intsize, &zl_ints));
3952 3953 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
3953 3954 ASSERT3U(zl_ints, ==, 1);
3954 3955 }
3955 3956 for (i = 0; i < 2; i++) {
3956 3957 VERIFY3U(0, ==, zap_remove(os, object, hc[i], tx));
3957 3958 }
3958 3959 dmu_tx_commit(tx);
3959 3960
3960 3961 /*
3961 3962 * Generate a bunch of random entries.
3962 3963 */
3963 3964 ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS);
3964 3965
3965 3966 prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
3966 3967 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
3967 3968 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
3968 3969 bzero(value, sizeof (value));
3969 3970 last_txg = 0;
3970 3971
3971 3972 /*
3972 3973 * If these zap entries already exist, validate their contents.
3973 3974 */
3974 3975 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
3975 3976 if (error == 0) {
3976 3977 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
3977 3978 ASSERT3U(zl_ints, ==, 1);
3978 3979
3979 3980 VERIFY(zap_lookup(os, object, txgname, zl_intsize,
3980 3981 zl_ints, &last_txg) == 0);
3981 3982
3982 3983 VERIFY(zap_length(os, object, propname, &zl_intsize,
3983 3984 &zl_ints) == 0);
3984 3985
3985 3986 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
3986 3987 ASSERT3U(zl_ints, ==, ints);
3987 3988
3988 3989 VERIFY(zap_lookup(os, object, propname, zl_intsize,
3989 3990 zl_ints, value) == 0);
3990 3991
3991 3992 for (i = 0; i < ints; i++) {
3992 3993 ASSERT3U(value[i], ==, last_txg + object + i);
3993 3994 }
3994 3995 } else {
3995 3996 ASSERT3U(error, ==, ENOENT);
3996 3997 }
3997 3998
3998 3999 /*
3999 4000 * Atomically update two entries in our zap object.
4000 4001 * The first is named txg_%llu, and contains the txg
4001 4002 * in which the property was last updated. The second
4002 4003 * is named prop_%llu, and the nth element of its value
4003 4004 * should be txg + object + n.
4004 4005 */
4005 4006 tx = dmu_tx_create(os);
4006 4007 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4007 4008 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4008 4009 if (txg == 0)
4009 4010 return;
4010 4011
4011 4012 if (last_txg > txg)
4012 4013 fatal(0, "zap future leak: old %llu new %llu", last_txg, txg);
4013 4014
4014 4015 for (i = 0; i < ints; i++)
4015 4016 value[i] = txg + object + i;
4016 4017
4017 4018 VERIFY3U(0, ==, zap_update(os, object, txgname, sizeof (uint64_t),
4018 4019 1, &txg, tx));
4019 4020 VERIFY3U(0, ==, zap_update(os, object, propname, sizeof (uint64_t),
4020 4021 ints, value, tx));
4021 4022
4022 4023 dmu_tx_commit(tx);
4023 4024
4024 4025 /*
4025 4026 * Remove a random pair of entries.
4026 4027 */
4027 4028 prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
4028 4029 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
4029 4030 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
4030 4031
4031 4032 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
4032 4033
4033 4034 if (error == ENOENT)
4034 4035 return;
4035 4036
4036 4037 ASSERT3U(error, ==, 0);
4037 4038
4038 4039 tx = dmu_tx_create(os);
4039 4040 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4040 4041 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4041 4042 if (txg == 0)
4042 4043 return;
4043 4044 VERIFY3U(0, ==, zap_remove(os, object, txgname, tx));
4044 4045 VERIFY3U(0, ==, zap_remove(os, object, propname, tx));
4045 4046 dmu_tx_commit(tx);
4046 4047 }
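The atomic two-entry update above is just two zap_update() calls riding one assigned transaction. A minimal hedged sketch (os and zapobj are assumed to name an existing ZAP object; the entry names are placeholders):

	uint64_t txgval, vals[4];
	dmu_tx_t *tx = dmu_tx_create(os);

	dmu_tx_hold_zap(tx, zapobj, B_TRUE, NULL);
	VERIFY(dmu_tx_assign(tx, TXG_WAIT) == 0);
	txgval = dmu_tx_get_txg(tx);

	for (int i = 0; i < 4; i++)
		vals[i] = txgval + zapobj + i;

	VERIFY(zap_update(os, zapobj, "txg_7", sizeof (uint64_t),
	    1, &txgval, tx) == 0);
	VERIFY(zap_update(os, zapobj, "prop_7", sizeof (uint64_t),
	    4, vals, tx) == 0);

	dmu_tx_commit(tx);	/* both entries become visible in the same txg */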
4047 4048
4048 4049 /*
4049 4050 * Testcase to test the upgrading of a microzap to fatzap.
4050 4051 */
4051 4052 void
4052 4053 ztest_fzap(ztest_ds_t *zd, uint64_t id)
4053 4054 {
4054 4055 objset_t *os = zd->zd_os;
4055 4056 ztest_od_t od[1];
4056 4057 uint64_t object, txg;
4057 4058
4058 4059 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
4059 4060
4060 4061 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
4061 4062 return;
4062 4063
4063 4064 object = od[0].od_object;
4064 4065
4065 4066 /*
4066 4067 * Add entries to this ZAP and make sure it spills over
4067 4068 * and gets upgraded to a fatzap. Also, since we are adding
4068 4069 * 2050 entries, we should see ptrtbl growth and leaf-block splits.
4069 4070 */
4070 4071 for (int i = 0; i < 2050; i++) {
4071 4072 char name[MAXNAMELEN];
4072 4073 uint64_t value = i;
4073 4074 dmu_tx_t *tx;
4074 4075 int error;
4075 4076
4076 4077 (void) snprintf(name, sizeof (name), "fzap-%llu-%llu",
4077 4078 id, value);
4078 4079
4079 4080 tx = dmu_tx_create(os);
4080 4081 dmu_tx_hold_zap(tx, object, B_TRUE, name);
4081 4082 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4082 4083 if (txg == 0)
4083 4084 return;
4084 4085 error = zap_add(os, object, name, sizeof (uint64_t), 1,
4085 4086 &value, tx);
4086 4087 ASSERT(error == 0 || error == EEXIST);
4087 4088 dmu_tx_commit(tx);
4088 4089 }
4089 4090 }
4090 4091
4091 4092 /* ARGSUSED */
4092 4093 void
4093 4094 ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
4094 4095 {
4095 4096 objset_t *os = zd->zd_os;
4096 4097 ztest_od_t od[1];
4097 4098 uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc;
4098 4099 dmu_tx_t *tx;
4099 4100 int i, namelen, error;
4100 4101 int micro = ztest_random(2);
4101 4102 char name[20], string_value[20];
4102 4103 void *data;
4103 4104
4104 4105 ztest_od_init(&od[0], ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0);
4105 4106
4106 4107 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
4107 4108 return;
4108 4109
4109 4110 object = od[0].od_object;
4110 4111
4111 4112 /*
4112 4113 * Generate a random name of the form 'xxx.....' where each
4113 4114 * x is a random printable character and the dots are dots.
4114 4115 * There are 94 such characters, and the name length goes from
4115 4116 * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names.
4116 4117 */
4117 4118 namelen = ztest_random(sizeof (name) - 5) + 5 + 1;
4118 4119
4119 4120 for (i = 0; i < 3; i++)
4120 4121 name[i] = '!' + ztest_random('~' - '!' + 1);
4121 4122 for (; i < namelen - 1; i++)
4122 4123 name[i] = '.';
4123 4124 name[i] = '\0';
4124 4125
4125 4126 if ((namelen & 1) || micro) {
4126 4127 wsize = sizeof (txg);
4127 4128 wc = 1;
4128 4129 data = &txg;
4129 4130 } else {
4130 4131 wsize = 1;
4131 4132 wc = namelen;
4132 4133 data = string_value;
4133 4134 }
4134 4135
4135 4136 count = -1ULL;
4136 4137 VERIFY(zap_count(os, object, &count) == 0);
4137 4138 ASSERT(count != -1ULL);
4138 4139
4139 4140 /*
4140 4141 * Select an operation: length, lookup, add, update, remove.
4141 4142 */
4142 4143 i = ztest_random(5);
4143 4144
4144 4145 if (i >= 2) {
4145 4146 tx = dmu_tx_create(os);
4146 4147 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4147 4148 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4148 4149 if (txg == 0)
4149 4150 return;
4150 4151 bcopy(name, string_value, namelen);
4151 4152 } else {
4152 4153 tx = NULL;
4153 4154 txg = 0;
4154 4155 bzero(string_value, namelen);
4155 4156 }
4156 4157
4157 4158 switch (i) {
4158 4159
4159 4160 case 0:
4160 4161 error = zap_length(os, object, name, &zl_wsize, &zl_wc);
4161 4162 if (error == 0) {
4162 4163 ASSERT3U(wsize, ==, zl_wsize);
4163 4164 ASSERT3U(wc, ==, zl_wc);
4164 4165 } else {
4165 4166 ASSERT3U(error, ==, ENOENT);
4166 4167 }
4167 4168 break;
4168 4169
4169 4170 case 1:
4170 4171 error = zap_lookup(os, object, name, wsize, wc, data);
4171 4172 if (error == 0) {
4172 4173 if (data == string_value &&
4173 4174 bcmp(name, data, namelen) != 0)
4174 4175 fatal(0, "name '%s' != val '%s' len %d",
4175 4176 name, data, namelen);
4176 4177 } else {
4177 4178 ASSERT3U(error, ==, ENOENT);
4178 4179 }
4179 4180 break;
4180 4181
4181 4182 case 2:
4182 4183 error = zap_add(os, object, name, wsize, wc, data, tx);
4183 4184 ASSERT(error == 0 || error == EEXIST);
4184 4185 break;
4185 4186
4186 4187 case 3:
4187 4188 VERIFY(zap_update(os, object, name, wsize, wc, data, tx) == 0);
4188 4189 break;
4189 4190
4190 4191 case 4:
4191 4192 error = zap_remove(os, object, name, tx);
4192 4193 ASSERT(error == 0 || error == ENOENT);
4193 4194 break;
4194 4195 }
4195 4196
4196 4197 if (tx != NULL)
4197 4198 dmu_tx_commit(tx);
4198 4199 }
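The name generator above is self-contained enough to lift out whole. A standalone sketch, compilable with any C99 compiler:

	#include <stdio.h>
	#include <stdlib.h>

	/*
	 * Three random printable characters ('!' .. '~', 94 choices)
	 * followed by dots; 15 possible lengths, so 94^3 * 15 =
	 * 12,458,760 distinct names.
	 */
	static void
	make_name(char name[20])
	{
		int namelen = rand() % 15 + 6;	/* 6..20, counting the NUL */
		int i;

		for (i = 0; i < 3; i++)
			name[i] = '!' + rand() % ('~' - '!' + 1);
		for (; i < namelen - 1; i++)
			name[i] = '.';
		name[i] = '\0';
	}

	int
	main(void)
	{
		char name[20];

		for (int i = 0; i < 5; i++) {
			make_name(name);
			(void) printf("%s\n", name);
		}
		return (0);
	}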
4199 4200
4200 4201 /*
4201 4202 * Commit callback data.
4202 4203 */
4203 4204 typedef struct ztest_cb_data {
4204 4205 list_node_t zcd_node;
4205 4206 uint64_t zcd_txg;
4206 4207 int zcd_expected_err;
4207 4208 boolean_t zcd_added;
4208 4209 boolean_t zcd_called;
4209 4210 spa_t *zcd_spa;
4210 4211 } ztest_cb_data_t;
4211 4212
4212 4213 /* This is the actual commit callback function */
4213 4214 static void
4214 4215 ztest_commit_callback(void *arg, int error)
4215 4216 {
4216 4217 ztest_cb_data_t *data = arg;
4217 4218 uint64_t synced_txg;
4218 4219
4219 4220 VERIFY(data != NULL);
4220 4221 VERIFY3S(data->zcd_expected_err, ==, error);
4221 4222 VERIFY(!data->zcd_called);
4222 4223
4223 4224 synced_txg = spa_last_synced_txg(data->zcd_spa);
4224 4225 if (data->zcd_txg > synced_txg)
4225 4226 fatal(0, "commit callback of txg %" PRIu64 " called prematurely"
4226 4227 ", last synced txg = %" PRIu64 "\n", data->zcd_txg,
4227 4228 synced_txg);
4228 4229
4229 4230 data->zcd_called = B_TRUE;
4230 4231
4231 4232 if (error == ECANCELED) {
4232 4233 ASSERT3U(data->zcd_txg, ==, 0);
4233 4234 ASSERT(!data->zcd_added);
4234 4235
4235 4236 /*
4236 4237 * The private callback data should be destroyed here, but
4237 4238 * since we are going to check the zcd_called field after
4238 4239 * dmu_tx_abort(), we will destroy it there.
4239 4240 */
4240 4241 return;
4241 4242 }
4242 4243
4243 4244 /* Was this callback added to the global callback list? */
4244 4245 if (!data->zcd_added)
4245 4246 goto out;
4246 4247
4247 4248 ASSERT3U(data->zcd_txg, !=, 0);
4248 4249
4249 4250 /* Remove our callback from the list */
4250 4251 (void) mutex_lock(&zcl.zcl_callbacks_lock);
4251 4252 list_remove(&zcl.zcl_callbacks, data);
4252 4253 (void) mutex_unlock(&zcl.zcl_callbacks_lock);
4253 4254
4254 4255 out:
4255 4256 umem_free(data, sizeof (ztest_cb_data_t));
4256 4257 }
4257 4258
4258 4259 /* Allocate and initialize callback data structure */
4259 4260 static ztest_cb_data_t *
4260 4261 ztest_create_cb_data(objset_t *os, uint64_t txg)
4261 4262 {
4262 4263 ztest_cb_data_t *cb_data;
4263 4264
4264 4265 cb_data = umem_zalloc(sizeof (ztest_cb_data_t), UMEM_NOFAIL);
4265 4266
4266 4267 cb_data->zcd_txg = txg;
4267 4268 cb_data->zcd_spa = dmu_objset_spa(os);
4268 4269
4269 4270 return (cb_data);
4270 4271 }
4271 4272
4272 4273 /*
4273 4274 * If a number of txgs equal to this threshold have been created after a commit
4274 4275 * callback has been registered but not called, then we assume there is an
4275 4276 * implementation bug.
4276 4277 */
4277 4278 #define ZTEST_COMMIT_CALLBACK_THRESH (TXG_CONCURRENT_STATES + 2)
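Before the test itself, it helps to see the bare registration flow that ztest_dmu_commit_callbacks() exercises below. A hedged fragment (my_callback and my_data are hypothetical; the callback receives 0 once the txg syncs, or ECANCELED if the tx is aborted):

	dmu_tx_t *tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, object, 0, sizeof (uint64_t));
	dmu_tx_callback_register(tx, my_callback, my_data);

	if (dmu_tx_assign(tx, TXG_NOWAIT) != 0)
		dmu_tx_abort(tx);	/* fires my_callback(my_data, ECANCELED) */
	else
		dmu_tx_commit(tx);	/* fires my_callback(my_data, 0) after sync */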
4278 4279
4279 4280 /*
4280 4281 * Commit callback test.
4281 4282 */
4282 4283 void
4283 4284 ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
4284 4285 {
4285 4286 objset_t *os = zd->zd_os;
4286 4287 ztest_od_t od[1];
4287 4288 dmu_tx_t *tx;
4288 4289 ztest_cb_data_t *cb_data[3], *tmp_cb;
4289 4290 uint64_t old_txg, txg;
4290 4291 int i, error = 0;
4291 4292
4292 4293 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
4293 4294
4294 4295 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
4295 4296 return;
4296 4297
4297 4298 tx = dmu_tx_create(os);
4298 4299
4299 4300 cb_data[0] = ztest_create_cb_data(os, 0);
4300 4301 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]);
4301 4302
4302 4303 dmu_tx_hold_write(tx, od[0].od_object, 0, sizeof (uint64_t));
4303 4304
4304 4305 /* Every once in a while, abort the transaction on purpose */
4305 4306 if (ztest_random(100) == 0)
4306 4307 error = -1;
4307 4308
4308 4309 if (!error)
4309 4310 error = dmu_tx_assign(tx, TXG_NOWAIT);
4310 4311
4311 4312 txg = error ? 0 : dmu_tx_get_txg(tx);
4312 4313
4313 4314 cb_data[0]->zcd_txg = txg;
4314 4315 cb_data[1] = ztest_create_cb_data(os, txg);
4315 4316 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[1]);
4316 4317
4317 4318 if (error) {
4318 4319 /*
4319 4320 * It's not a strict requirement to call the registered
4320 4321 * callbacks from inside dmu_tx_abort(), but that's what
4321 4322 * is supposed to happen in the current implementation,
4322 4323 * so we will check for that.
4323 4324 */
4324 4325 for (i = 0; i < 2; i++) {
4325 4326 cb_data[i]->zcd_expected_err = ECANCELED;
4326 4327 VERIFY(!cb_data[i]->zcd_called);
4327 4328 }
4328 4329
4329 4330 dmu_tx_abort(tx);
4330 4331
4331 4332 for (i = 0; i < 2; i++) {
4332 4333 VERIFY(cb_data[i]->zcd_called);
4333 4334 umem_free(cb_data[i], sizeof (ztest_cb_data_t));
4334 4335 }
4335 4336
4336 4337 return;
4337 4338 }
4338 4339
4339 4340 cb_data[2] = ztest_create_cb_data(os, txg);
4340 4341 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[2]);
4341 4342
4342 4343 /*
4343 4344 * Read existing data to make sure there isn't a future leak.
4344 4345 */
4345 4346 VERIFY(0 == dmu_read(os, od[0].od_object, 0, sizeof (uint64_t),
4346 4347 &old_txg, DMU_READ_PREFETCH));
4347 4348
4348 4349 if (old_txg > txg)
4349 4350 fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64,
4350 4351 old_txg, txg);
4351 4352
4352 4353 dmu_write(os, od[0].od_object, 0, sizeof (uint64_t), &txg, tx);
4353 4354
4354 4355 (void) mutex_lock(&zcl.zcl_callbacks_lock);
4355 4356
4356 4357 /*
4357 4358 * Since commit callbacks don't have any ordering requirement and since
4358 4359 * it is theoretically possible for a commit callback to be called
4359 4360 * after an arbitrary amount of time has elapsed since its txg has been
4360 4361 * synced, it is difficult to reliably determine whether a commit
4361 4362 * callback hasn't been called due to high load or due to a flawed
4362 4363 * implementation.
4363 4364 *
4364 4365 * In practice, we will assume that if after a certain number of txgs a
4365 4366 * commit callback hasn't been called, then most likely there's an
4366 4367 * implementation bug.
4367 4368 */
4368 4369 tmp_cb = list_head(&zcl.zcl_callbacks);
4369 4370 if (tmp_cb != NULL &&
4370 4371 tmp_cb->zcd_txg > txg - ZTEST_COMMIT_CALLBACK_THRESH) {
4371 4372 fatal(0, "Commit callback threshold exceeded, oldest txg: %"
4372 4373 PRIu64 ", open txg: %" PRIu64 "\n", tmp_cb->zcd_txg, txg);
4373 4374 }
4374 4375
4375 4376 /*
4376 4377 * Let's find the place to insert our callbacks.
4377 4378 *
4378 4379 * Even though the list is ordered by txg, it is possible for the
4379 4380 * insertion point to not be the end because our txg may already be
4380 4381 * quiescing at this point and other callbacks in the open txg
4381 4382 * (from other objsets) may have sneaked in.
4382 4383 */
4383 4384 tmp_cb = list_tail(&zcl.zcl_callbacks);
4384 4385 while (tmp_cb != NULL && tmp_cb->zcd_txg > txg)
4385 4386 tmp_cb = list_prev(&zcl.zcl_callbacks, tmp_cb);
4386 4387
4387 4388 /* Add the 3 callbacks to the list */
4388 4389 for (i = 0; i < 3; i++) {
4389 4390 if (tmp_cb == NULL)
4390 4391 list_insert_head(&zcl.zcl_callbacks, cb_data[i]);
4391 4392 else
4392 4393 list_insert_after(&zcl.zcl_callbacks, tmp_cb,
4393 4394 cb_data[i]);
4394 4395
4395 4396 cb_data[i]->zcd_added = B_TRUE;
4396 4397 VERIFY(!cb_data[i]->zcd_called);
4397 4398
4398 4399 tmp_cb = cb_data[i];
4399 4400 }
4400 4401
4401 4402 (void) mutex_unlock(&zcl.zcl_callbacks_lock);
4402 4403
4403 4404 dmu_tx_commit(tx);
4404 4405 }
4405 4406
4406 4407 /* ARGSUSED */
4407 4408 void
4408 4409 ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id)
4409 4410 {
4410 4411 zfs_prop_t proplist[] = {
4411 4412 ZFS_PROP_CHECKSUM,
4412 4413 ZFS_PROP_COMPRESSION,
4413 4414 ZFS_PROP_COPIES,
4414 4415 ZFS_PROP_DEDUP
4415 4416 };
4416 4417
4417 4418 (void) rw_rdlock(&ztest_name_lock);
4418 4419
4419 4420 for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++)
4420 4421 (void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p],
4421 4422 ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2));
4422 4423
4423 4424 (void) rw_unlock(&ztest_name_lock);
4424 4425 }
4425 4426
4426 4427 /* ARGSUSED */
4427 4428 void
4428 4429 ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id)
4429 4430 {
4430 4431 nvlist_t *props = NULL;
4431 4432
4432 4433 (void) rw_rdlock(&ztest_name_lock);
4433 4434
4434 4435 (void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO,
4435 4436 ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN));
4436 4437
4437 4438 VERIFY3U(spa_prop_get(ztest_spa, &props), ==, 0);
4438 4439
4439 4440 if (ztest_opts.zo_verbose >= 6)
4440 4441 dump_nvlist(props, 4);
4441 4442
4442 4443 nvlist_free(props);
4443 4444
4444 4445 (void) rw_unlock(&ztest_name_lock);
4445 4446 }
4446 4447
4447 4448 /*
4448 4449 * Test snapshot hold/release and deferred destroy.
4449 4450 */
4450 4451 void
4451 4452 ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id)
4452 4453 {
4453 4454 int error;
4454 4455 objset_t *os = zd->zd_os;
4455 4456 objset_t *origin;
4456 4457 char snapname[100];
4457 4458 char fullname[100];
4458 4459 char clonename[100];
4459 4460 char tag[100];
4460 4461 char osname[MAXNAMELEN];
4461 4462
4462 4463 (void) rw_rdlock(&ztest_name_lock);
4463 4464
4464 4465 dmu_objset_name(os, osname);
4465 4466
4466 4467 (void) snprintf(snapname, 100, "sh1_%llu", id);
4467 4468 (void) snprintf(fullname, 100, "%s@%s", osname, snapname);
4468 4469 (void) snprintf(clonename, 100, "%s/ch1_%llu", osname, id);
4469 4470 (void) snprintf(tag, 100, "tag_%llu", id);
4470 4471
4471 4472 /*
4472 4473 * Clean up from any previous run.
4473 4474 */
4474 4475 (void) dmu_objset_destroy(clonename, B_FALSE);
4475 4476 (void) dsl_dataset_user_release(osname, snapname, tag, B_FALSE);
4476 4477 (void) dmu_objset_destroy(fullname, B_FALSE);
4477 4478
4478 4479 /*
4479 4480 * Create snapshot, clone it, mark snap for deferred destroy,
4480 4481 * destroy clone, verify snap was also destroyed.
4481 4482 */
4482 4483 error = dmu_objset_snapshot(osname, snapname, NULL, NULL, FALSE,
4483 4484 FALSE, -1);
4484 4485 if (error) {
4485 4486 if (error == ENOSPC) {
4486 4487 ztest_record_enospc("dmu_objset_snapshot");
4487 4488 goto out;
4488 4489 }
4489 4490 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
4490 4491 }
4491 4492
4492 4493 error = dmu_objset_hold(fullname, FTAG, &origin);
4493 4494 if (error)
4494 4495 fatal(0, "dmu_objset_hold(%s) = %d", fullname, error);
4495 4496
4496 4497 error = dmu_objset_clone(clonename, dmu_objset_ds(origin), 0);
4497 4498 dmu_objset_rele(origin, FTAG);
4498 4499 if (error) {
4499 4500 if (error == ENOSPC) {
4500 4501 ztest_record_enospc("dmu_objset_clone");
4501 4502 goto out;
4502 4503 }
4503 4504 fatal(0, "dmu_objset_clone(%s) = %d", clonename, error);
4504 4505 }
4505 4506
4506 4507 error = dmu_objset_destroy(fullname, B_TRUE);
4507 4508 if (error) {
4508 4509 fatal(0, "dmu_objset_destroy(%s, B_TRUE) = %d",
4509 4510 fullname, error);
4510 4511 }
4511 4512
4512 4513 error = dmu_objset_destroy(clonename, B_FALSE);
4513 4514 if (error)
4514 4515 fatal(0, "dmu_objset_destroy(%s) = %d", clonename, error);
4515 4516
4516 4517 error = dmu_objset_hold(fullname, FTAG, &origin);
4517 4518 if (error != ENOENT)
4518 4519 fatal(0, "dmu_objset_hold(%s) = %d", fullname, error);
4519 4520
4520 4521 /*
4521 4522 * Create snapshot, add temporary hold, verify that we can't
4522 4523 * destroy a held snapshot, mark for deferred destroy,
4523 4524 * release hold, verify snapshot was destroyed.
4524 4525 */
4525 4526 error = dmu_objset_snapshot(osname, snapname, NULL, NULL, FALSE,
4526 4527 FALSE, -1);
4527 4528 if (error) {
4528 4529 if (error == ENOSPC) {
4529 4530 ztest_record_enospc("dmu_objset_snapshot");
4530 4531 goto out;
4531 4532 }
4532 4533 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
4533 4534 }
4534 4535
4535 4536 error = dsl_dataset_user_hold(osname, snapname, tag, B_FALSE,
4536 4537 B_TRUE, -1);
4537 4538 if (error)
4538 4539 fatal(0, "dsl_dataset_user_hold(%s)", fullname, tag);
4539 4540
4540 4541 error = dmu_objset_destroy(fullname, B_FALSE);
4541 4542 if (error != EBUSY) {
4542 4543 fatal(0, "dmu_objset_destroy(%s, B_FALSE) = %d",
4543 4544 fullname, error);
4544 4545 }
4545 4546
4546 4547 error = dmu_objset_destroy(fullname, B_TRUE);
4547 4548 if (error) {
4548 4549 fatal(0, "dmu_objset_destroy(%s, B_TRUE) = %d",
4549 4550 fullname, error);
4550 4551 }
4551 4552
4552 4553 error = dsl_dataset_user_release(osname, snapname, tag, B_FALSE);
4553 4554 if (error)
4554 4555 fatal(0, "dsl_dataset_user_release(%s)", fullname, tag);
4555 4556
4556 4557 VERIFY(dmu_objset_hold(fullname, FTAG, &origin) == ENOENT);
4557 4558
4558 4559 out:
4559 4560 (void) rw_unlock(&ztest_name_lock);
4560 4561 }
4561 4562
4562 4563 /*
4563 4564 * Inject random faults into the on-disk data.
4564 4565 */
4565 4566 /* ARGSUSED */
4566 4567 void
4567 4568 ztest_fault_inject(ztest_ds_t *zd, uint64_t id)
4568 4569 {
4569 4570 ztest_shared_t *zs = ztest_shared;
4570 4571 spa_t *spa = ztest_spa;
4571 4572 int fd;
4572 4573 uint64_t offset;
4573 4574 uint64_t leaves;
4574 4575 uint64_t bad = 0x1990c0ffeedecade;
4575 4576 uint64_t top, leaf;
4576 4577 char path0[MAXPATHLEN];
4577 4578 char pathrand[MAXPATHLEN];
4578 4579 size_t fsize;
4579 4580 int bshift = SPA_MAXBLOCKSHIFT + 2; /* don't scrog all labels */
4580 4581 int iters = 1000;
4581 4582 int maxfaults;
4582 4583 int mirror_save;
4583 4584 vdev_t *vd0 = NULL;
4584 4585 uint64_t guid0 = 0;
4585 4586 boolean_t islog = B_FALSE;
4586 4587
4587 4588 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
4588 4589 maxfaults = MAXFAULTS();
4589 4590 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
4590 4591 mirror_save = zs->zs_mirrors;
4591 4592 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
4592 4593
4593 4594 ASSERT(leaves >= 1);
4594 4595
4595 4596 /*
4596 4597 * We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
4597 4598 */
4598 4599 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
4599 4600
4600 4601 if (ztest_random(2) == 0) {
4601 4602 /*
4602 4603 * Inject errors on a normal data device or slog device.
4603 4604 */
4604 4605 top = ztest_random_vdev_top(spa, B_TRUE);
4605 4606 leaf = ztest_random(leaves) + zs->zs_splits;
4606 4607
4607 4608 /*
4608 4609 * Generate paths to the first leaf in this top-level vdev,
4609 4610 * and to the random leaf we selected. We'll induce transient
4610 4611 * write failures and random online/offline activity on leaf 0,
4611 4612 * and we'll write random garbage to the randomly chosen leaf.
4612 4613 */
4613 4614 (void) snprintf(path0, sizeof (path0), ztest_dev_template,
4614 4615 ztest_opts.zo_dir, ztest_opts.zo_pool,
4615 4616 top * leaves + zs->zs_splits);
4616 4617 (void) snprintf(pathrand, sizeof (pathrand), ztest_dev_template,
4617 4618 ztest_opts.zo_dir, ztest_opts.zo_pool,
4618 4619 top * leaves + leaf);
4619 4620
4620 4621 vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0);
4621 4622 if (vd0 != NULL && vd0->vdev_top->vdev_islog)
4622 4623 islog = B_TRUE;
4623 4624
4624 4625 if (vd0 != NULL && maxfaults != 1) {
4625 4626 /*
4626 4627 * Make vd0 explicitly claim to be unreadable,
4627 4628 * or unwriteable, or reach behind its back
4628 4629 * and close the underlying fd. We can do this if
4629 4630 * maxfaults == 0 because we'll fail and reexecute,
4630 4631 * and we can do it if maxfaults >= 2 because we'll
4631 4632 * have enough redundancy. If maxfaults == 1, the
4632 4633 * combination of this with injection of random data
4633 4634 * corruption below exceeds the pool's fault tolerance.
4634 4635 */
4635 4636 vdev_file_t *vf = vd0->vdev_tsd;
4636 4637
4637 4638 if (vf != NULL && ztest_random(3) == 0) {
4638 4639 (void) close(vf->vf_vnode->v_fd);
4639 4640 vf->vf_vnode->v_fd = -1;
4640 4641 } else if (ztest_random(2) == 0) {
4641 4642 vd0->vdev_cant_read = B_TRUE;
4642 4643 } else {
4643 4644 vd0->vdev_cant_write = B_TRUE;
4644 4645 }
4645 4646 guid0 = vd0->vdev_guid;
4646 4647 }
4647 4648 } else {
4648 4649 /*
4649 4650 * Inject errors on an l2cache device.
4650 4651 */
4651 4652 spa_aux_vdev_t *sav = &spa->spa_l2cache;
4652 4653
4653 4654 if (sav->sav_count == 0) {
4654 4655 spa_config_exit(spa, SCL_STATE, FTAG);
4655 4656 return;
4656 4657 }
4657 4658 vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)];
4658 4659 guid0 = vd0->vdev_guid;
4659 4660 (void) strcpy(path0, vd0->vdev_path);
4660 4661 (void) strcpy(pathrand, vd0->vdev_path);
4661 4662
4662 4663 leaf = 0;
4663 4664 leaves = 1;
4664 4665 maxfaults = INT_MAX; /* no limit on cache devices */
4665 4666 }
4666 4667
4667 4668 spa_config_exit(spa, SCL_STATE, FTAG);
4668 4669
4669 4670 /*
4670 4671 * If we can tolerate two or more faults, or we're dealing
4671 4672 * with a slog, randomly online/offline vd0.
4672 4673 */
4673 4674 if ((maxfaults >= 2 || islog) && guid0 != 0) {
4674 4675 if (ztest_random(10) < 6) {
4675 4676 int flags = (ztest_random(2) == 0 ?
4676 4677 ZFS_OFFLINE_TEMPORARY : 0);
4677 4678
4678 4679 /*
4679 4680 * We have to grab the zs_name_lock as writer to
4680 4681 * prevent a race between offlining a slog and
4681 4682 * destroying a dataset. Offlining the slog will
4682 4683 * grab a reference on the dataset, which may cause
4683 4684 * dmu_objset_destroy() to fail with EBUSY, thus
4684 4685 * leaving the dataset in an inconsistent state.
4685 4686 */
4686 4687 if (islog)
4687 4688 (void) rw_wrlock(&ztest_name_lock);
4688 4689
4689 4690 VERIFY(vdev_offline(spa, guid0, flags) != EBUSY);
4690 4691
4691 4692 if (islog)
4692 4693 (void) rw_unlock(&ztest_name_lock);
4693 4694 } else {
4694 4695 (void) vdev_online(spa, guid0, 0, NULL);
4695 4696 }
4696 4697 }
4697 4698
4698 4699 if (maxfaults == 0)
4699 4700 return;
4700 4701
4701 4702 /*
4702 4703 * We have at least single-fault tolerance, so inject data corruption.
4703 4704 */
4704 4705 fd = open(pathrand, O_RDWR);
4705 4706
4706 4707 if (fd == -1) /* we hit a gap in the device namespace */
4707 4708 return;
4708 4709
4709 4710 fsize = lseek(fd, 0, SEEK_END);
4710 4711
4711 4712 while (--iters != 0) {
4712 4713 offset = ztest_random(fsize / (leaves << bshift)) *
4713 4714 (leaves << bshift) + (leaf << bshift) +
4714 4715 (ztest_random(1ULL << (bshift - 1)) & -8ULL);
4715 4716
4716 4717 if (offset >= fsize)
4717 4718 continue;
4718 4719
4719 4720 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
4720 4721 if (mirror_save != zs->zs_mirrors) {
4721 4722 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
4722 4723 (void) close(fd);
4723 4724 return;
4724 4725 }
4725 4726
4726 4727 if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad))
4727 4728 fatal(1, "can't inject bad word at 0x%llx in %s",
4728 4729 offset, pathrand);
4729 4730
4730 4731 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
4731 4732
4732 4733 if (ztest_opts.zo_verbose >= 7)
4733 4734 (void) printf("injected bad word into %s,"
4734 4735 " offset 0x%llx\n", pathrand, (u_longlong_t)offset);
4735 4736 }
4736 4737
4737 4738 (void) close(fd);
4738 4739 }
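The offset arithmetic in the injection loop above packs three decisions into one expression: pick a random row of slices, step to this leaf's slice within the row, and pick an 8-byte-aligned offset in the lower half of the slice. A standalone sketch with example values (ztest itself uses bshift = SPA_MAXBLOCKSHIFT + 2 to stay clear of the vdev labels):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	int
	main(void)
	{
		uint64_t fsize = 1ULL << 34;	/* example device size */
		uint64_t leaves = 4, leaf = 2;	/* example vdev geometry */
		int bshift = 19;		/* example slice shift */

		for (int i = 0; i < 5; i++) {
			uint64_t row = (uint64_t)rand() %
			    (fsize / (leaves << bshift));
			uint64_t offset = row * (leaves << bshift) +
			    (leaf << bshift) +
			    (((uint64_t)rand() % (1ULL << (bshift - 1))) &
			    -8ULL);

			assert(offset % 8 == 0);	/* word-aligned */
			assert(offset < fsize);		/* inside the device */
			(void) printf("would damage offset 0x%llx\n",
			    (unsigned long long)offset);
		}
		return (0);
	}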
4739 4740
4740 4741 /*
4741 4742 * Verify that DDT repair works as expected.
4742 4743 */
4743 4744 void
4744 4745 ztest_ddt_repair(ztest_ds_t *zd, uint64_t id)
4745 4746 {
4746 4747 ztest_shared_t *zs = ztest_shared;
4747 4748 spa_t *spa = ztest_spa;
4748 4749 objset_t *os = zd->zd_os;
4749 4750 ztest_od_t od[1];
4750 4751 uint64_t object, blocksize, txg, pattern, psize;
4751 4752 enum zio_checksum checksum = spa_dedup_checksum(spa);
4752 4753 dmu_buf_t *db;
4753 4754 dmu_tx_t *tx;
4754 4755 void *buf;
4755 4756 blkptr_t blk;
4756 4757 int copies = 2 * ZIO_DEDUPDITTO_MIN;
4757 4758
4758 4759 blocksize = ztest_random_blocksize();
4759 4760 blocksize = MIN(blocksize, 2048); /* because we write so many */
4760 4761
4761 4762 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
4762 4763
4763 4764 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
4764 4765 return;
4765 4766
4766 4767 /*
4767 4768 * Take the name lock as writer to prevent anyone else from changing
4768 4769 * the pool and dataset properties we need to maintain during this test.
4769 4770 */
4770 4771 (void) rw_wrlock(&ztest_name_lock);
4771 4772
4772 4773 if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum,
4773 4774 B_FALSE) != 0 ||
4774 4775 ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1,
4775 4776 B_FALSE) != 0) {
4776 4777 (void) rw_unlock(&ztest_name_lock);
4777 4778 return;
4778 4779 }
4779 4780
4780 4781 object = od[0].od_object;
4781 4782 blocksize = od[0].od_blocksize;
4782 4783 pattern = zs->zs_guid ^ dmu_objset_fsid_guid(os);
4783 4784
4784 4785 ASSERT(object != 0);
4785 4786
4786 4787 tx = dmu_tx_create(os);
4787 4788 dmu_tx_hold_write(tx, object, 0, copies * blocksize);
4788 4789 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
4789 4790 if (txg == 0) {
4790 4791 (void) rw_unlock(&ztest_name_lock);
4791 4792 return;
4792 4793 }
4793 4794
4794 4795 /*
4795 4796 * Write all the copies of our block.
4796 4797 */
4797 4798 for (int i = 0; i < copies; i++) {
4798 4799 uint64_t offset = i * blocksize;
4799 4800 VERIFY(dmu_buf_hold(os, object, offset, FTAG, &db,
4800 4801 DMU_READ_NO_PREFETCH) == 0);
4801 4802 ASSERT(db->db_offset == offset);
4802 4803 ASSERT(db->db_size == blocksize);
4803 4804 ASSERT(ztest_pattern_match(db->db_data, db->db_size, pattern) ||
4804 4805 ztest_pattern_match(db->db_data, db->db_size, 0ULL));
4805 4806 dmu_buf_will_fill(db, tx);
4806 4807 ztest_pattern_set(db->db_data, db->db_size, pattern);
4807 4808 dmu_buf_rele(db, FTAG);
4808 4809 }
4809 4810
4810 4811 dmu_tx_commit(tx);
4811 4812 txg_wait_synced(spa_get_dsl(spa), txg);
4812 4813
4813 4814 /*
4814 4815 * Find out what block we got.
4815 4816 */
4816 4817 VERIFY(dmu_buf_hold(os, object, 0, FTAG, &db,
4817 4818 DMU_READ_NO_PREFETCH) == 0);
4818 4819 blk = *((dmu_buf_impl_t *)db)->db_blkptr;
4819 4820 dmu_buf_rele(db, FTAG);
4820 4821
4821 4822 /*
4822 4823 * Damage the block. Dedup-ditto will save us when we read it later.
4823 4824 */
4824 4825 psize = BP_GET_PSIZE(&blk);
4825 4826 buf = zio_buf_alloc(psize);
4826 4827 ztest_pattern_set(buf, psize, ~pattern);
4827 4828
4828 4829 (void) zio_wait(zio_rewrite(NULL, spa, 0, &blk,
4829 4830 buf, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE,
4830 4831 ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL));
4831 4832
4832 4833 zio_buf_free(buf, psize);
4833 4834
4834 4835 (void) rw_unlock(&ztest_name_lock);
4835 4836 }
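ztest_pattern_set() and ztest_pattern_match(), used above to stamp and verify the dedup blocks, live earlier in this file. Standalone equivalents (a sketch, not the originals) show the mechanics: fill a buffer with a repeating 64-bit word, verify it, then "damage" it with the complement, just as the zio_rewrite() above damages the on-disk copy:

	#include <assert.h>
	#include <stdint.h>
	#include <stdlib.h>

	static void
	pattern_set(void *buf, uint64_t size, uint64_t value)
	{
		uint64_t *ip = buf;
		uint64_t *end = (uint64_t *)((char *)buf + size);

		while (ip < end)
			*ip++ = value;
	}

	static int
	pattern_match(const void *buf, uint64_t size, uint64_t value)
	{
		const uint64_t *ip = buf;
		const uint64_t *end =
		    (const uint64_t *)((const char *)buf + size);

		while (ip < end)
			if (*ip++ != value)
				return (0);
		return (1);
	}

	int
	main(void)
	{
		uint64_t psize = 4096;
		uint64_t pattern = 0x1990c0ffeedecadeULL;
		void *buf = malloc(psize);

		pattern_set(buf, psize, pattern);
		assert(pattern_match(buf, psize, pattern));

		pattern_set(buf, psize, ~pattern);	/* damage the block */
		assert(!pattern_match(buf, psize, pattern));

		free(buf);
		return (0);
	}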
4836 4837
4837 4838 /*
4838 4839 * Scrub the pool.
4839 4840 */
4840 4841 /* ARGSUSED */
4841 4842 void
4842 4843 ztest_scrub(ztest_ds_t *zd, uint64_t id)
4843 4844 {
4844 4845 spa_t *spa = ztest_spa;
4845 4846
4846 4847 (void) spa_scan(spa, POOL_SCAN_SCRUB);
4847 4848 (void) poll(NULL, 0, 100); /* wait a moment, then force a restart */
4848 4849 (void) spa_scan(spa, POOL_SCAN_SCRUB);
4849 4850 }
4850 4851
4851 4852 /*
4852 4853 * Change the guid for the pool.
4853 4854 */
4854 4855 /* ARGSUSED */
4855 4856 void
4856 4857 ztest_reguid(ztest_ds_t *zd, uint64_t id)
4857 4858 {
4858 4859 spa_t *spa = ztest_spa;
4859 4860 uint64_t orig, load;
4860 4861
4861 4862 orig = spa_guid(spa);
4862 4863 load = spa_load_guid(spa);
4863 4864 if (spa_change_guid(spa) != 0)
4864 4865 return;
4865 4866
4866 4867 if (ztest_opts.zo_verbose >= 3) {
4867 4868 (void) printf("Changed guid old %llu -> %llu\n",
4868 4869 (u_longlong_t)orig, (u_longlong_t)spa_guid(spa));
4869 4870 }
4870 4871
4871 4872 VERIFY3U(orig, !=, spa_guid(spa));
4872 4873 VERIFY3U(load, ==, spa_load_guid(spa));
4873 4874 }
4874 4875
4875 4876 /*
4876 4877 * Rename the pool to a different name and then rename it back.
4877 4878 */
4878 4879 /* ARGSUSED */
4879 4880 void
4880 4881 ztest_spa_rename(ztest_ds_t *zd, uint64_t id)
4881 4882 {
4882 4883 char *oldname, *newname;
4883 4884 spa_t *spa;
4884 4885
4885 4886 (void) rw_wrlock(&ztest_name_lock);
4886 4887
4887 4888 oldname = ztest_opts.zo_pool;
4888 4889 newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
4889 4890 (void) strcpy(newname, oldname);
4890 4891 (void) strcat(newname, "_tmp");
4891 4892
4892 4893 /*
4893 4894 * Do the rename
4894 4895 */
4895 4896 VERIFY3U(0, ==, spa_rename(oldname, newname));
4896 4897
4897 4898 /*
4898 4899 * Try to open it under the old name, which shouldn't exist
4899 4900 */
4900 4901 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
4901 4902
4902 4903 /*
4903 4904 * Open it under the new name and make sure it's still the same spa_t.
4904 4905 */
4905 4906 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
4906 4907
4907 4908 ASSERT(spa == ztest_spa);
4908 4909 spa_close(spa, FTAG);
4909 4910
4910 4911 /*
4911 4912 * Rename it back to the original
4912 4913 */
4913 4914 VERIFY3U(0, ==, spa_rename(newname, oldname));
4914 4915
4915 4916 /*
4916 4917 * Make sure it can still be opened
4917 4918 */
4918 4919 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));
4919 4920
4920 4921 ASSERT(spa == ztest_spa);
4921 4922 spa_close(spa, FTAG);
4922 4923
4923 4924 umem_free(newname, strlen(newname) + 1);
4924 4925
4925 4926 (void) rw_unlock(&ztest_name_lock);
4926 4927 }
4927 4928
4928 4929 /*
4929 4930 * Verify pool integrity by running zdb.
4930 4931 */
4931 4932 static void
4932 4933 ztest_run_zdb(char *pool)
4933 4934 {
4934 4935 int status;
4935 4936 char zdb[MAXPATHLEN + MAXNAMELEN + 20];
4936 4937 char zbuf[1024];
4937 4938 char *bin;
4938 4939 char *ztest;
4939 4940 char *isa;
4940 4941 int isalen;
4941 4942 FILE *fp;
4942 4943
4943 4944 (void) realpath(getexecname(), zdb);
4944 4945
4945 4946 /* zdb lives in /usr/sbin, while ztest lives in /usr/bin */
4946 4947 bin = strstr(zdb, "/usr/bin/");
4947 4948 ztest = strstr(bin, "/ztest");
4948 4949 isa = bin + 8;
4949 4950 isalen = ztest - isa;
4950 4951 isa = strdup(isa);
4951 4952 /* LINTED */
4952 4953 (void) sprintf(bin,
4953 4954 "/usr/sbin%.*s/zdb -bcc%s%s -U %s %s",
4954 4955 isalen,
4955 4956 isa,
4956 4957 ztest_opts.zo_verbose >= 3 ? "s" : "",
4957 4958 ztest_opts.zo_verbose >= 4 ? "v" : "",
4958 4959 spa_config_path,
4959 4960 pool);
4960 4961 free(isa);
4961 4962
4962 4963 if (ztest_opts.zo_verbose >= 5)
4963 4964 (void) printf("Executing %s\n", strstr(zdb, "zdb "));
4964 4965
4965 4966 fp = popen(zdb, "r");
4966 4967
4967 4968 while (fgets(zbuf, sizeof (zbuf), fp) != NULL)
4968 4969 if (ztest_opts.zo_verbose >= 3)
4969 4970 (void) printf("%s", zbuf);
4970 4971
4971 4972 status = pclose(fp);
4972 4973
4973 4974 if (status == 0)
4974 4975 return;
4975 4976
4976 4977 ztest_dump_core = 0;
4977 4978 if (WIFEXITED(status))
4978 4979 fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status));
4979 4980 else
4980 4981 fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status));
4981 4982 }
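The sprintf() surgery above rewrites the running binary's own path into the matching zdb invocation. A standalone sketch with example inputs (the cache file and pool name are placeholders):

	#include <stdio.h>
	#include <string.h>

	int
	main(void)
	{
		char path[] = "/usr/bin/amd64/ztest";	/* example getexecname() */
		char cmd[256];
		char *bin = strstr(path, "/usr/bin/");
		char *zt = strstr(bin, "/ztest");
		char *isa = bin + strlen("/usr/bin");	/* "/amd64"; empty for 32-bit */
		int isalen = (int)(zt - isa);

		(void) snprintf(cmd, sizeof (cmd),
		    "/usr/sbin%.*s/zdb -bcc -U %s %s",
		    isalen, isa, "/tmp/zpool.cache", "ztest");
		(void) printf("%s\n", cmd);
		/* prints: /usr/sbin/amd64/zdb -bcc -U /tmp/zpool.cache ztest */
		return (0);
	}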
4982 4983
4983 4984 static void
4984 4985 ztest_walk_pool_directory(char *header)
4985 4986 {
4986 4987 spa_t *spa = NULL;
4987 4988
4988 4989 if (ztest_opts.zo_verbose >= 6)
4989 4990 (void) printf("%s\n", header);
4990 4991
4991 4992 mutex_enter(&spa_namespace_lock);
4992 4993 while ((spa = spa_next(spa)) != NULL)
4993 4994 if (ztest_opts.zo_verbose >= 6)
4994 4995 (void) printf("\t%s\n", spa_name(spa));
4995 4996 mutex_exit(&spa_namespace_lock);
4996 4997 }
4997 4998
4998 4999 static void
4999 5000 ztest_spa_import_export(char *oldname, char *newname)
5000 5001 {
5001 5002 nvlist_t *config, *newconfig;
5002 5003 uint64_t pool_guid;
5003 5004 spa_t *spa;
5004 5005
5005 5006 if (ztest_opts.zo_verbose >= 4) {
5006 5007 (void) printf("import/export: old = %s, new = %s\n",
5007 5008 oldname, newname);
5008 5009 }
5009 5010
5010 5011 /*
5011 5012 * Clean up from previous runs.
5012 5013 */
5013 5014 (void) spa_destroy(newname);
5014 5015
5015 5016 /*
5016 5017 * Get the pool's configuration and guid.
5017 5018 */
5018 5019 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));
5019 5020
5020 5021 /*
5021 5022 * Kick off a scrub to tickle scrub/export races.
5022 5023 */
5023 5024 if (ztest_random(2) == 0)
5024 5025 (void) spa_scan(spa, POOL_SCAN_SCRUB);
5025 5026
5026 5027 pool_guid = spa_guid(spa);
5027 5028 spa_close(spa, FTAG);
5028 5029
5029 5030 ztest_walk_pool_directory("pools before export");
5030 5031
5031 5032 /*
5032 5033 * Export it.
5033 5034 */
5034 5035 VERIFY3U(0, ==, spa_export(oldname, &config, B_FALSE, B_FALSE));
5035 5036
5036 5037 ztest_walk_pool_directory("pools after export");
5037 5038
5038 5039 /*
5039 5040 * Try to import it.
5040 5041 */
5041 5042 newconfig = spa_tryimport(config);
5042 5043 ASSERT(newconfig != NULL);
5043 5044 nvlist_free(newconfig);
5044 5045
5045 5046 /*
5046 5047 * Import it under the new name.
5047 5048 */
5048 5049 VERIFY3U(0, ==, spa_import(newname, config, NULL, 0));
5049 5050
5050 5051 ztest_walk_pool_directory("pools after import");
5051 5052
5052 5053 /*
5053 5054 * Try to import it again -- should fail with EEXIST.
5054 5055 */
5055 5056 VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0));
5056 5057
5057 5058 /*
5058 5059 * Try to import it under a different name -- should fail with EEXIST.
5059 5060 */
5060 5061 VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0));
5061 5062
5062 5063 /*
5063 5064 * Verify that the pool is no longer visible under the old name.
5064 5065 */
5065 5066 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
5066 5067
5067 5068 /*
5068 5069 * Verify that we can open and close the pool using the new name.
5069 5070 */
5070 5071 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
5071 5072 ASSERT(pool_guid == spa_guid(spa));
5072 5073 spa_close(spa, FTAG);
5073 5074
5074 5075 nvlist_free(config);
5075 5076 }
5076 5077
5077 5078 static void
5078 5079 ztest_resume(spa_t *spa)
5079 5080 {
5080 5081 if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6)
5081 5082 (void) printf("resuming from suspended state\n");
5082 5083 spa_vdev_state_enter(spa, SCL_NONE);
5083 5084 vdev_clear(spa, NULL);
5084 5085 (void) spa_vdev_state_exit(spa, NULL, 0);
5085 5086 (void) zio_resume(spa);
5086 5087 }
5087 5088
5088 5089 static void *
5089 5090 ztest_resume_thread(void *arg)
5090 5091 {
5091 5092 spa_t *spa = arg;
5092 5093
5093 5094 while (!ztest_exiting) {
5094 5095 if (spa_suspended(spa))
5095 5096 ztest_resume(spa);
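		/* poll() with no fds is simply a ~100ms sleep between checks. */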
5096 5097 (void) poll(NULL, 0, 100);
5097 5098 }
5098 5099 return (NULL);
5099 5100 }
5100 5101
5101 5102 static void *
5102 5103 ztest_deadman_thread(void *arg)
5103 5104 {
5104 5105 ztest_shared_t *zs = arg;
5105 5106 int grace = 300;
5106 5107 hrtime_t delta;
5107 5108
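	/*
	 * Sleep for the entire scheduled run plus a 300-second grace
	 * period (delta is in seconds; poll() takes milliseconds), then
	 * declare the run hung.  A healthy run exits or is killed long
	 * before this thread wakes up.
	 */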
5108 5109 delta = (zs->zs_thread_stop - zs->zs_thread_start) / NANOSEC + grace;
5109 5110
5110 5111 (void) poll(NULL, 0, (int)(1000 * delta));
5111 5112
5112 5113 fatal(0, "failed to complete within %d seconds of deadline", grace);
5113 5114
5114 5115 return (NULL);
5115 5116 }
5116 5117
5117 5118 static void
5118 5119 ztest_execute(int test, ztest_info_t *zi, uint64_t id)
5119 5120 {
5120 5121 ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets];
5121 5122 ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test);
5122 5123 hrtime_t functime = gethrtime();
5123 5124
5124 5125 for (int i = 0; i < zi->zi_iters; i++)
5125 5126 zi->zi_func(zd, id);
5126 5127
5127 5128 functime = gethrtime() - functime;
5128 5129
5129 5130 atomic_add_64(&zc->zc_count, 1);
5130 5131 atomic_add_64(&zc->zc_time, functime);
5131 5132
5132 5133 if (ztest_opts.zo_verbose >= 4) {
5133 5134 Dl_info dli;
5134 5135 (void) dladdr((void *)zi->zi_func, &dli);
5135 5136 (void) printf("%6.2f sec in %s\n",
5136 5137 (double)functime / NANOSEC, dli.dli_sname);
5137 5138 }
5138 5139 }
5139 5140
5140 5141 static void *
5141 5142 ztest_thread(void *arg)
5142 5143 {
5143 5144 int rand;
5144 5145 uint64_t id = (uintptr_t)arg;
5145 5146 ztest_shared_t *zs = ztest_shared;
5146 5147 uint64_t call_next;
5147 5148 hrtime_t now;
5148 5149 ztest_info_t *zi;
5149 5150 ztest_shared_callstate_t *zc;
5150 5151
5151 5152 while ((now = gethrtime()) < zs->zs_thread_stop) {
5152 5153 /*
5153 5154 * See if it's time to force a crash.
5154 5155 */
5155 5156 if (now > zs->zs_thread_kill)
5156 5157 ztest_kill(zs);
5157 5158
5158 5159 /*
5159 5160 * If we're getting ENOSPC with some regularity, stop.
5160 5161 */
5161 5162 if (zs->zs_enospc_count > 10)
5162 5163 break;
5163 5164
5164 5165 /*
5165 5166 * Pick a random function to execute.
5166 5167 */
5167 5168 rand = ztest_random(ZTEST_FUNCS);
5168 5169 zi = &ztest_info[rand];
5169 5170 zc = ZTEST_GET_SHARED_CALLSTATE(rand);
5170 5171 call_next = zc->zc_next;
5171 5172
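		/*
		 * Claim this call slot atomically: only the thread whose
		 * compare-and-swap advances zc_next runs the function,
		 * and the next call is pushed a random interval
		 * (averaging zi_interval[0]) into the future.
		 */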
5172 5173 if (now >= call_next &&
5173 5174 atomic_cas_64(&zc->zc_next, call_next, call_next +
5174 5175 ztest_random(2 * zi->zi_interval[0] + 1)) == call_next) {
5175 5176 ztest_execute(rand, zi, id);
5176 5177 }
5177 5178 }
5178 5179
5179 5180 return (NULL);
5180 5181 }
5181 5182
5182 5183 static void
5183 5184 ztest_dataset_name(char *dsname, char *pool, int d)
5184 5185 {
5185 5186 (void) snprintf(dsname, MAXNAMELEN, "%s/ds_%d", pool, d);
5186 5187 }
5187 5188
5188 5189 static void
5189 5190 ztest_dataset_destroy(int d)
5190 5191 {
5191 5192 char name[MAXNAMELEN];
5192 5193
5193 5194 ztest_dataset_name(name, ztest_opts.zo_pool, d);
5194 5195
5195 5196 if (ztest_opts.zo_verbose >= 3)
5196 5197 (void) printf("Destroying %s to free up space\n", name);
5197 5198
5198 5199 /*
5199 5200 	 * Clean up any non-standard clones and snapshots. In general,
5200 5201 	 * ztest thread t operates on dataset (t % zo_datasets),
5201 5202 * so there may be more than one thing to clean up.
5202 5203 */
5203 5204 for (int t = d; t < ztest_opts.zo_threads;
5204 5205 t += ztest_opts.zo_datasets) {
5205 5206 ztest_dsl_dataset_cleanup(name, t);
5206 5207 }
5207 5208
5208 5209 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
5209 5210 DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
5210 5211 }
5211 5212
5212 5213 static void
5213 5214 ztest_dataset_dirobj_verify(ztest_ds_t *zd)
5214 5215 {
5215 5216 uint64_t usedobjs, dirobjs, scratch;
5216 5217
5217 5218 /*
5218 5219 * ZTEST_DIROBJ is the object directory for the entire dataset.
5219 5220 * Therefore, the number of objects in use should equal the
5220 5221 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself.
5221 5222 * If not, we have an object leak.
5222 5223 *
5223 5224 * Note that we can only check this in ztest_dataset_open(),
5224 5225 * when the open-context and syncing-context values agree.
5225 5226 * That's because zap_count() returns the open-context value,
5226 5227 * while dmu_objset_space() returns the rootbp fill count.
5227 5228 */
5228 5229 VERIFY3U(0, ==, zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs));
5229 5230 dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch);
5230 5231 ASSERT3U(dirobjs + 1, ==, usedobjs);
5231 5232 }
5232 5233
5233 5234 static int
5234 5235 ztest_dataset_open(int d)
5235 5236 {
5236 5237 ztest_ds_t *zd = &ztest_ds[d];
5237 5238 uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq;
5238 5239 objset_t *os;
5239 5240 zilog_t *zilog;
5240 5241 char name[MAXNAMELEN];
5241 5242 int error;
5242 5243
5243 5244 ztest_dataset_name(name, ztest_opts.zo_pool, d);
5244 5245
5245 5246 (void) rw_rdlock(&ztest_name_lock);
5246 5247
5247 5248 error = ztest_dataset_create(name);
5248 5249 if (error == ENOSPC) {
5249 5250 (void) rw_unlock(&ztest_name_lock);
5250 5251 ztest_record_enospc(FTAG);
5251 5252 return (error);
5252 5253 }
5253 5254 ASSERT(error == 0 || error == EEXIST);
5254 5255
5255 5256 VERIFY3U(dmu_objset_hold(name, zd, &os), ==, 0);
5256 5257 (void) rw_unlock(&ztest_name_lock);
5257 5258
5258 5259 ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os);
5259 5260
5260 5261 zilog = zd->zd_zilog;
5261 5262
5262 5263 if (zilog->zl_header->zh_claim_lr_seq != 0 &&
5263 5264 zilog->zl_header->zh_claim_lr_seq < committed_seq)
5264 5265 fatal(0, "missing log records: claimed %llu < committed %llu",
5265 5266 zilog->zl_header->zh_claim_lr_seq, committed_seq);
5266 5267
5267 5268 ztest_dataset_dirobj_verify(zd);
5268 5269
5269 5270 zil_replay(os, zd, ztest_replay_vector);
5270 5271
5271 5272 ztest_dataset_dirobj_verify(zd);
5272 5273
5273 5274 if (ztest_opts.zo_verbose >= 6)
5274 5275 (void) printf("%s replay %llu blocks, %llu records, seq %llu\n",
5275 5276 zd->zd_name,
5276 5277 (u_longlong_t)zilog->zl_parse_blk_count,
5277 5278 (u_longlong_t)zilog->zl_parse_lr_count,
5278 5279 (u_longlong_t)zilog->zl_replaying_seq);
5279 5280
5280 5281 zilog = zil_open(os, ztest_get_data);
5281 5282
5282 5283 if (zilog->zl_replaying_seq != 0 &&
5283 5284 zilog->zl_replaying_seq < committed_seq)
5284 5285 fatal(0, "missing log records: replayed %llu < committed %llu",
5285 5286 zilog->zl_replaying_seq, committed_seq);
5286 5287
5287 5288 return (0);
5288 5289 }
5289 5290
5290 5291 static void
5291 5292 ztest_dataset_close(int d)
5292 5293 {
5293 5294 ztest_ds_t *zd = &ztest_ds[d];
5294 5295
5295 5296 zil_close(zd->zd_zilog);
5296 5297 dmu_objset_rele(zd->zd_os, zd);
5297 5298
5298 5299 ztest_zd_fini(zd);
5299 5300 }
5300 5301
5301 5302 /*
5302 5303 * Kick off threads to run tests on all datasets in parallel.
5303 5304 */
5304 5305 static void
5305 5306 ztest_run(ztest_shared_t *zs)
5306 5307 {
5307 5308 thread_t *tid;
5308 5309 spa_t *spa;
5309 5310 objset_t *os;
5310 5311 thread_t resume_tid;
5311 5312 int error;
5312 5313
5313 5314 ztest_exiting = B_FALSE;
5314 5315
5315 5316 /*
5316 5317 * Initialize parent/child shared state.
5317 5318 */
5318 5319 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0);
5319 5320 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
5320 5321
5321 5322 zs->zs_thread_start = gethrtime();
5322 5323 zs->zs_thread_stop =
5323 5324 zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC;
5324 5325 zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop);
5325 5326 zs->zs_thread_kill = zs->zs_thread_stop;
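	/*
	 * With probability zo_killrate percent, move the kill time to a
	 * random point within the pass so the child dies by SIGKILL.
	 */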
5326 5327 if (ztest_random(100) < ztest_opts.zo_killrate) {
5327 5328 zs->zs_thread_kill -=
5328 5329 ztest_random(ztest_opts.zo_passtime * NANOSEC);
5329 5330 }
5330 5331
5331 5332 (void) _mutex_init(&zcl.zcl_callbacks_lock, USYNC_THREAD, NULL);
5332 5333
5333 5334 list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t),
5334 5335 offsetof(ztest_cb_data_t, zcd_node));
5335 5336
5336 5337 /*
5337 5338 * Open our pool.
5338 5339 */
5339 5340 kernel_init(FREAD | FWRITE);
5340 5341 VERIFY(spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0);
5341 5342 spa->spa_debug = B_TRUE;
5342 5343 ztest_spa = spa;
5343 5344
5344 5345 VERIFY3U(0, ==, dmu_objset_hold(ztest_opts.zo_pool, FTAG, &os));
5345 5346 zs->zs_guid = dmu_objset_fsid_guid(os);
5346 5347 dmu_objset_rele(os, FTAG);
5347 5348
5348 5349 spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN;
5349 5350
5350 5351 /*
5351 5352 * We don't expect the pool to suspend unless maxfaults == 0,
5352 5353 * in which case ztest_fault_inject() temporarily takes away
5353 5354 * the only valid replica.
5354 5355 */
5355 5356 if (MAXFAULTS() == 0)
5356 5357 spa->spa_failmode = ZIO_FAILURE_MODE_WAIT;
5357 5358 else
5358 5359 spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
5359 5360
5360 5361 /*
5361 5362 * Create a thread to periodically resume suspended I/O.
5362 5363 */
5363 5364 VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND,
5364 5365 &resume_tid) == 0);
5365 5366
5366 5367 /*
5367 5368 * Create a deadman thread to abort() if we hang.
5368 5369 */
5369 5370 VERIFY(thr_create(0, 0, ztest_deadman_thread, zs, THR_BOUND,
5370 5371 NULL) == 0);
5371 5372
5372 5373 /*
5373 5374 	 * Verify that we can safely inquire about any object,
5374 5375 * whether it's allocated or not. To make it interesting,
5375 5376 	 * we probe a window five wide on either side of each power of two.
5376 5377 * This hits all edge cases, including zero and the max.
5377 5378 */
5378 5379 for (int t = 0; t < 64; t++) {
5379 5380 for (int d = -5; d <= 5; d++) {
5380 5381 error = dmu_object_info(spa->spa_meta_objset,
5381 5382 (1ULL << t) + d, NULL);
5382 5383 ASSERT(error == 0 || error == ENOENT ||
5383 5384 error == EINVAL);
5384 5385 }
5385 5386 }
5386 5387
5387 5388 /*
5388 5389 * If we got any ENOSPC errors on the previous run, destroy something.
5389 5390 */
5390 5391 if (zs->zs_enospc_count != 0) {
5391 5392 int d = ztest_random(ztest_opts.zo_datasets);
5392 5393 ztest_dataset_destroy(d);
5393 5394 }
5394 5395 zs->zs_enospc_count = 0;
5395 5396
5396 5397 tid = umem_zalloc(ztest_opts.zo_threads * sizeof (thread_t),
5397 5398 UMEM_NOFAIL);
5398 5399
5399 5400 if (ztest_opts.zo_verbose >= 4)
5400 5401 (void) printf("starting main threads...\n");
5401 5402
5402 5403 /*
5403 5404 * Kick off all the tests that run in parallel.
5404 5405 */
5405 5406 for (int t = 0; t < ztest_opts.zo_threads; t++) {
5406 5407 if (t < ztest_opts.zo_datasets &&
5407 5408 ztest_dataset_open(t) != 0)
5408 5409 return;
5409 5410 VERIFY(thr_create(0, 0, ztest_thread, (void *)(uintptr_t)t,
5410 5411 THR_BOUND, &tid[t]) == 0);
5411 5412 }
5412 5413
5413 5414 /*
5414 5415 * Wait for all of the tests to complete. We go in reverse order
5415 5416 * so we don't close datasets while threads are still using them.
5416 5417 */
5417 5418 for (int t = ztest_opts.zo_threads - 1; t >= 0; t--) {
5418 5419 VERIFY(thr_join(tid[t], NULL, NULL) == 0);
5419 5420 if (t < ztest_opts.zo_datasets)
5420 5421 ztest_dataset_close(t);
5421 5422 }
5422 5423
5423 5424 txg_wait_synced(spa_get_dsl(spa), 0);
5424 5425
5425 5426 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
5426 5427 zs->zs_space = metaslab_class_get_space(spa_normal_class(spa));
5427 5428
5428 5429 umem_free(tid, ztest_opts.zo_threads * sizeof (thread_t));
5429 5430
5430 5431 /* Kill the resume thread */
5431 5432 ztest_exiting = B_TRUE;
5432 5433 VERIFY(thr_join(resume_tid, NULL, NULL) == 0);
5433 5434 ztest_resume(spa);
5434 5435
5435 5436 /*
5436 5437 * Right before closing the pool, kick off a bunch of async I/O;
5437 5438 * spa_close() should wait for it to complete.
5438 5439 */
5439 5440 for (uint64_t object = 1; object < 50; object++)
5440 5441 dmu_prefetch(spa->spa_meta_objset, object, 0, 1ULL << 20);
5441 5442
5442 5443 spa_close(spa, FTAG);
5443 5444
5444 5445 /*
5445 5446 * Verify that we can loop over all pools.
5446 5447 */
5447 5448 mutex_enter(&spa_namespace_lock);
5448 5449 for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa))
5449 5450 if (ztest_opts.zo_verbose > 3)
5450 5451 (void) printf("spa_next: found %s\n", spa_name(spa));
5451 5452 mutex_exit(&spa_namespace_lock);
5452 5453
5453 5454 /*
5454 5455 * Verify that we can export the pool and reimport it under a
5455 5456 * different name.
5456 5457 */
5457 5458 if (ztest_random(2) == 0) {
5458 5459 char name[MAXNAMELEN];
5459 5460 (void) snprintf(name, MAXNAMELEN, "%s_import",
5460 5461 ztest_opts.zo_pool);
5461 5462 ztest_spa_import_export(ztest_opts.zo_pool, name);
5462 5463 ztest_spa_import_export(name, ztest_opts.zo_pool);
5463 5464 }
5464 5465
5465 5466 kernel_fini();
5466 5467
5467 5468 list_destroy(&zcl.zcl_callbacks);
5468 5469
5469 5470 (void) _mutex_destroy(&zcl.zcl_callbacks_lock);
5470 5471
5471 5472 (void) rwlock_destroy(&ztest_name_lock);
5472 5473 (void) _mutex_destroy(&ztest_vdev_lock);
5473 5474 }
5474 5475
5475 5476 static void
5476 5477 ztest_freeze(void)
5477 5478 {
5478 5479 ztest_ds_t *zd = &ztest_ds[0];
5479 5480 spa_t *spa;
5480 5481 int numloops = 0;
5481 5482
5482 5483 if (ztest_opts.zo_verbose >= 3)
5483 5484 (void) printf("testing spa_freeze()...\n");
5484 5485
5485 5486 kernel_init(FREAD | FWRITE);
5486 5487 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
5487 5488 VERIFY3U(0, ==, ztest_dataset_open(0));
5488 5489
5489 5490 /*
5490 5491 * Force the first log block to be transactionally allocated.
5491 5492 * We have to do this before we freeze the pool -- otherwise
5492 5493 * the log chain won't be anchored.
5493 5494 */
5494 5495 while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) {
5495 5496 ztest_dmu_object_alloc_free(zd, 0);
5496 5497 zil_commit(zd->zd_zilog, 0);
5497 5498 }
5498 5499
5499 5500 txg_wait_synced(spa_get_dsl(spa), 0);
5500 5501
5501 5502 /*
5502 5503 * Freeze the pool. This stops spa_sync() from doing anything,
5503 5504 * so that the only way to record changes from now on is the ZIL.
5504 5505 */
5505 5506 spa_freeze(spa);
5506 5507
5507 5508 /*
5508 5509 * Run tests that generate log records but don't alter the pool config
5509 5510 * or depend on DSL sync tasks (snapshots, objset create/destroy, etc).
5510 5511 * We do a txg_wait_synced() after each iteration to force the txg
5511 5512 * to increase well beyond the last synced value in the uberblock.
5512 5513 * The ZIL should be OK with that.
5513 5514 */
5514 5515 while (ztest_random(10) != 0 &&
5515 5516 numloops++ < ztest_opts.zo_maxloops) {
5516 5517 ztest_dmu_write_parallel(zd, 0);
5517 5518 ztest_dmu_object_alloc_free(zd, 0);
5518 5519 txg_wait_synced(spa_get_dsl(spa), 0);
5519 5520 }
5520 5521
5521 5522 /*
5522 5523 * Commit all of the changes we just generated.
5523 5524 */
5524 5525 zil_commit(zd->zd_zilog, 0);
5525 5526 txg_wait_synced(spa_get_dsl(spa), 0);
5526 5527
5527 5528 /*
5528 5529 * Close our dataset and close the pool.
5529 5530 */
5530 5531 ztest_dataset_close(0);
5531 5532 spa_close(spa, FTAG);
5532 5533 kernel_fini();
5533 5534
5534 5535 /*
5535 5536 * Open and close the pool and dataset to induce log replay.
5536 5537 */
5537 5538 kernel_init(FREAD | FWRITE);
5538 5539 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
5539 5540 VERIFY3U(0, ==, ztest_dataset_open(0));
5540 5541 ztest_dataset_close(0);
5541 5542 spa_close(spa, FTAG);
5542 5543 kernel_fini();
5543 5544 }
5544 5545
5545 5546 void
5546 5547 print_time(hrtime_t t, char *timebuf)
5547 5548 {
5548 5549 hrtime_t s = t / NANOSEC;
5549 5550 hrtime_t m = s / 60;
5550 5551 hrtime_t h = m / 60;
5551 5552 hrtime_t d = h / 24;
5552 5553
5553 5554 s -= m * 60;
5554 5555 m -= h * 60;
5555 5556 h -= d * 24;
5556 5557
5557 5558 timebuf[0] = '\0';
5558 5559
5559 5560 if (d)
5560 5561 (void) sprintf(timebuf,
5561 5562 "%llud%02lluh%02llum%02llus", d, h, m, s);
5562 5563 else if (h)
5563 5564 (void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s);
5564 5565 else if (m)
5565 5566 (void) sprintf(timebuf, "%llum%02llus", m, s);
5566 5567 else
5567 5568 (void) sprintf(timebuf, "%llus", s);
5568 5569 }
5569 5570
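/*
 * Build a property list for pool creation.  Half the time it is left
 * empty, but an allocated nvlist is always returned so the caller can
 * append to it (ztest_init() adds feature@ properties unconditionally).
 */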
5570 5571 static nvlist_t *
5571 5572 make_random_props()
5572 5573 {
5573 5574 nvlist_t *props;
5574 5575
5575 - if (ztest_random(2) == 0)
5576 - return (NULL);
5577 -
5578 5576 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
5577 + if (ztest_random(2) == 0)
5578 + return (props);
5579 5579 VERIFY(nvlist_add_uint64(props, "autoreplace", 1) == 0);
5580 5580
5581 5581 return (props);
5582 5582 }
5583 5583
5584 5584 /*
5585 5585 * Create a storage pool with the given name and initial vdev size.
5586 5586 * Then test spa_freeze() functionality.
5587 5587 */
5588 5588 static void
5589 5589 ztest_init(ztest_shared_t *zs)
5590 5590 {
5591 5591 spa_t *spa;
5592 5592 nvlist_t *nvroot, *props;
5593 5593
5594 5594 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0);
5595 5595 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
5596 5596
5597 5597 kernel_init(FREAD | FWRITE);
5598 5598
5599 5599 /*
5600 5600 * Create the storage pool.
5601 5601 */
5602 5602 (void) spa_destroy(ztest_opts.zo_pool);
5603 5603 ztest_shared->zs_vdev_next_leaf = 0;
5604 5604 zs->zs_splits = 0;
5605 5605 zs->zs_mirrors = ztest_opts.zo_mirrors;
5606 5606 nvroot = make_vdev_root(NULL, NULL, ztest_opts.zo_vdev_size, 0,
5607 5607 0, ztest_opts.zo_raidz, zs->zs_mirrors, 1);
5608 5608 props = make_random_props();
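	/*
	 * Add a feature@<name> property for every feature this build
	 * knows about, asking that each be enabled at creation time.
	 */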
5609 + for (int i = 0; i < SPA_FEATURES; i++) {
5610 + char buf[1024];
5611 + (void) snprintf(buf, sizeof (buf), "feature@%s",
5612 + spa_feature_table[i].fi_uname);
5613 + VERIFY3U(0, ==, nvlist_add_uint64(props, buf, 0));
5614 + }
5609 5615 VERIFY3U(0, ==, spa_create(ztest_opts.zo_pool, nvroot, props,
5610 5616 NULL, NULL));
5611 5617 nvlist_free(nvroot);
5612 5618
5613 5619 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
5614 5620 zs->zs_metaslab_sz =
5615 5621 1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
5622 +
5616 5623 spa_close(spa, FTAG);
5617 5624
5618 5625 kernel_fini();
5619 5626
5620 5627 ztest_run_zdb(ztest_opts.zo_pool);
5621 5628
5622 5629 ztest_freeze();
5623 5630
5624 5631 ztest_run_zdb(ztest_opts.zo_pool);
5625 5632
5626 5633 (void) rwlock_destroy(&ztest_name_lock);
5627 5634 (void) _mutex_destroy(&ztest_vdev_lock);
5628 5635 }
5629 5636
5630 5637 static void
5631 5638 setup_fds(void)
5632 5639 {
5633 5640 int fd;
5634 5641
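	/*
	 * open() returns the lowest available file descriptor, so with
	 * only stdio open these two calls land on the fixed numbers
	 * ZTEST_FD_DATA and ZTEST_FD_RAND, where exec'd children expect
	 * to find them.
	 */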
5635 5642 char *tmp = tempnam(NULL, NULL);
5636 5643 fd = open(tmp, O_RDWR | O_CREAT, 0700);
5637 5644 ASSERT3U(fd, ==, ZTEST_FD_DATA);
5638 5645 (void) unlink(tmp);
5639 5646 free(tmp);
5640 5647
5641 5648 fd = open("/dev/urandom", O_RDONLY);
5642 5649 ASSERT3U(fd, ==, ZTEST_FD_RAND);
5643 5650 }
5644 5651
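/*
 * Total size of the shared mmap region: header, options, shared state,
 * one call-state slot per test function, and one slot per dataset.
 */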
5652 +static int
5653 +shared_data_size(ztest_shared_hdr_t *hdr)
5654 +{
5655 + int size;
5656 +
5657 + size = hdr->zh_hdr_size;
5658 + size += hdr->zh_opts_size;
5659 + size += hdr->zh_size;
5660 + size += hdr->zh_stats_size * hdr->zh_stats_count;
5661 + size += hdr->zh_ds_size * hdr->zh_ds_count;
5662 +
5663 + return (size);
5664 +}
5665 +
5645 5666 static void
5646 5667 setup_hdr(void)
5647 5668 {
5669 + int size;
5648 5670 ztest_shared_hdr_t *hdr;
5649 5671
5650 5672 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
5651 5673 PROT_READ | PROT_WRITE, MAP_SHARED, ZTEST_FD_DATA, 0);
5652 5674 ASSERT(hdr != MAP_FAILED);
5653 5675
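	/*
	 * Grow the data file just enough to back the header, fill in the
	 * sizing fields below, then grow it to the full size they imply.
	 */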
5676 + VERIFY3U(0, ==, ftruncate(ZTEST_FD_DATA, sizeof (ztest_shared_hdr_t)));
5677 +
5654 5678 hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t);
5655 5679 hdr->zh_opts_size = sizeof (ztest_shared_opts_t);
5656 5680 hdr->zh_size = sizeof (ztest_shared_t);
5657 5681 hdr->zh_stats_size = sizeof (ztest_shared_callstate_t);
5658 5682 hdr->zh_stats_count = ZTEST_FUNCS;
5659 5683 hdr->zh_ds_size = sizeof (ztest_shared_ds_t);
5660 5684 hdr->zh_ds_count = ztest_opts.zo_datasets;
5661 5685
5686 + size = shared_data_size(hdr);
5687 + VERIFY3U(0, ==, ftruncate(ZTEST_FD_DATA, size));
5688 +
5662 5689 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
5663 5690 }
5664 5691
5665 5692 static void
5666 5693 setup_data(void)
5667 5694 {
5668 5695 int size, offset;
5669 5696 ztest_shared_hdr_t *hdr;
5670 5697 uint8_t *buf;
5671 5698
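	/*
	 * Map just the header read-only to learn the region's total size,
	 * then remap the whole file read-write and carve it into the
	 * shared structures at their recorded offsets.
	 */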
5672 5699 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
5673 5700 PROT_READ, MAP_SHARED, ZTEST_FD_DATA, 0);
5674 5701 ASSERT(hdr != MAP_FAILED);
5675 5702
5676 - size = hdr->zh_hdr_size;
5677 - size += hdr->zh_opts_size;
5678 - size += hdr->zh_size;
5679 - size += hdr->zh_stats_size * hdr->zh_stats_count;
5680 - size += hdr->zh_ds_size * hdr->zh_ds_count;
5703 + size = shared_data_size(hdr);
5681 5704
5682 5705 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
5683 5706 hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()),
5684 5707 PROT_READ | PROT_WRITE, MAP_SHARED, ZTEST_FD_DATA, 0);
5685 5708 ASSERT(hdr != MAP_FAILED);
5686 5709 buf = (uint8_t *)hdr;
5687 5710
5688 5711 offset = hdr->zh_hdr_size;
5689 5712 ztest_shared_opts = (void *)&buf[offset];
5690 5713 offset += hdr->zh_opts_size;
5691 5714 ztest_shared = (void *)&buf[offset];
5692 5715 offset += hdr->zh_size;
5693 5716 ztest_shared_callstate = (void *)&buf[offset];
5694 5717 offset += hdr->zh_stats_size * hdr->zh_stats_count;
5695 5718 ztest_shared_ds = (void *)&buf[offset];
5696 5719 }
5697 5720
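/*
 * Fork and exec a ztest binary (the current one when cmd is NULL),
 * optionally with an alternate LD_LIBRARY_PATH, and wait for it.
 * Returns B_TRUE if the child died by SIGKILL while ignorekill is set
 * (the expected self-immolation); any other abnormal exit is fatal.
 */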
5698 5721 static boolean_t
5699 5722 exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp)
5700 5723 {
5701 5724 pid_t pid;
5702 5725 int status;
5703 5726 char cmdbuf[MAXPATHLEN];
5704 5727
5705 5728 pid = fork();
5706 5729
5707 5730 if (cmd == NULL) {
5708 5731 (void) strlcpy(cmdbuf, getexecname(), sizeof (cmdbuf));
5709 5732 cmd = cmdbuf;
5710 5733 }
5711 5734
5712 5735 if (pid == -1)
5713 5736 fatal(1, "fork failed");
5714 5737
5715 5738 if (pid == 0) { /* child */
5716 5739 char *emptyargv[2] = { cmd, NULL };
5717 5740
5718 5741 struct rlimit rl = { 1024, 1024 };
5719 5742 (void) setrlimit(RLIMIT_NOFILE, &rl);
5720 5743 (void) enable_extended_FILE_stdio(-1, -1);
5721 5744 if (libpath != NULL)
5722 5745 VERIFY(0 == setenv("LD_LIBRARY_PATH", libpath, 1));
5723 5746 (void) execv(cmd, emptyargv);
5724 5747 ztest_dump_core = B_FALSE;
5725 5748 fatal(B_TRUE, "exec failed: %s", cmd);
5726 5749 }
5727 5750
5728 5751 while (waitpid(pid, &status, 0) != pid)
5729 5752 continue;
5730 5753 if (statusp != NULL)
5731 5754 *statusp = status;
5732 5755
5733 5756 if (WIFEXITED(status)) {
5734 5757 if (WEXITSTATUS(status) != 0) {
5735 5758 (void) fprintf(stderr, "child exited with code %d\n",
5736 5759 WEXITSTATUS(status));
5737 5760 exit(2);
5738 5761 }
5739 5762 return (B_FALSE);
5740 5763 } else if (WIFSIGNALED(status)) {
5741 5764 if (!ignorekill || WTERMSIG(status) != SIGKILL) {
5742 5765 (void) fprintf(stderr, "child died with signal %d\n",
5743 5766 WTERMSIG(status));
5744 5767 exit(3);
5745 5768 }
5746 5769 return (B_TRUE);
5747 5770 } else {
5748 5771 (void) fprintf(stderr, "something strange happened to child\n");
5749 5772 exit(4);
5750 5773 /* NOTREACHED */
5751 5774 }
5752 5775 }
5753 5776
5754 5777 static void
5755 5778 ztest_run_init(void)
5756 5779 {
5757 5780 ztest_shared_t *zs = ztest_shared;
5758 5781
5759 5782 ASSERT(ztest_opts.zo_init != 0);
5760 5783
5761 5784 /*
5762 5785 * Blow away any existing copy of zpool.cache
5763 5786 */
5764 5787 (void) remove(spa_config_path);
5765 5788
5766 5789 /*
5767 5790 * Create and initialize our storage pool.
5768 5791 */
5769 5792 for (int i = 1; i <= ztest_opts.zo_init; i++) {
5770 5793 bzero(zs, sizeof (ztest_shared_t));
5771 5794 if (ztest_opts.zo_verbose >= 3 &&
5772 5795 ztest_opts.zo_init != 1) {
5773 5796 (void) printf("ztest_init(), pass %d\n", i);
5774 5797 }
5775 5798 ztest_init(zs);
5776 5799 }
5777 5800 }
5778 5801
5779 5802 int
5780 5803 main(int argc, char **argv)
5781 5804 {
5782 5805 int kills = 0;
5783 5806 int iters = 0;
5784 5807 int older = 0;
5785 5808 int newer = 0;
5786 5809 ztest_shared_t *zs;
5787 5810 ztest_info_t *zi;
5788 5811 ztest_shared_callstate_t *zc;
5789 5812 char timebuf[100];
5790 5813 char numbuf[6];
5791 5814 spa_t *spa;
5792 5815 char cmd[MAXNAMELEN];
5793 5816 boolean_t hasalt;
5794 5817
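	/*
	 * If ZTEST_FD_DATA is already open, we were exec'd by a parent
	 * ztest and should attach to its shared state; otherwise the
	 * lseek() fails with EBADF and we are the top-level process.
	 */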
5795 5818 boolean_t ischild = (0 == lseek(ZTEST_FD_DATA, 0, SEEK_CUR));
5796 5819 ASSERT(ischild || errno == EBADF);
5797 5820
5798 5821 (void) setvbuf(stdout, NULL, _IOLBF, 0);
5799 5822
5800 5823 if (!ischild) {
5801 5824 process_options(argc, argv);
5802 5825
5803 5826 setup_fds();
5804 5827 setup_hdr();
5805 5828 setup_data();
5806 5829 bcopy(&ztest_opts, ztest_shared_opts,
5807 5830 sizeof (*ztest_shared_opts));
5808 5831 } else {
5809 5832 setup_data();
5810 5833 bcopy(ztest_shared_opts, &ztest_opts, sizeof (ztest_opts));
5811 5834 }
5812 5835 ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count);
5813 5836
5814 5837 /* Override location of zpool.cache */
5815 5838 (void) asprintf((char **)&spa_config_path, "%s/zpool.cache",
5816 5839 ztest_opts.zo_dir);
5817 5840
5818 5841 ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t),
5819 5842 UMEM_NOFAIL);
5820 5843 zs = ztest_shared;
5821 5844
5822 5845 if (ischild) {
5823 5846 metaslab_gang_bang = ztest_opts.zo_metaslab_gang_bang;
5824 5847 metaslab_df_alloc_threshold =
5825 5848 zs->zs_metaslab_df_alloc_threshold;
5826 5849
5827 5850 if (zs->zs_do_init)
5828 5851 ztest_run_init();
5829 5852 else
5830 5853 ztest_run(zs);
5831 5854 exit(0);
5832 5855 }
5833 5856
5834 5857 hasalt = (strlen(ztest_opts.zo_alt_ztest) != 0);
5835 5858
5836 5859 if (ztest_opts.zo_verbose >= 1) {
5837 5860 (void) printf("%llu vdevs, %d datasets, %d threads,"
5838 5861 " %llu seconds...\n",
5839 5862 (u_longlong_t)ztest_opts.zo_vdevs,
5840 5863 ztest_opts.zo_datasets,
5841 5864 ztest_opts.zo_threads,
5842 5865 (u_longlong_t)ztest_opts.zo_time);
5843 5866 }
5844 5867
5845 5868 (void) strlcpy(cmd, getexecname(), sizeof (cmd));
5846 5869
5847 5870 zs->zs_do_init = B_TRUE;
5848 5871 if (strlen(ztest_opts.zo_alt_ztest) != 0) {
5849 5872 if (ztest_opts.zo_verbose >= 1) {
5850 5873 (void) printf("Executing older ztest for "
5851 5874 "initialization: %s\n", ztest_opts.zo_alt_ztest);
5852 5875 }
5853 5876 VERIFY(!exec_child(ztest_opts.zo_alt_ztest,
5854 5877 ztest_opts.zo_alt_libpath, B_FALSE, NULL));
5855 5878 } else {
5856 5879 VERIFY(!exec_child(NULL, NULL, B_FALSE, NULL));
5857 5880 }
5858 5881 zs->zs_do_init = B_FALSE;
5859 5882
5860 5883 zs->zs_proc_start = gethrtime();
5861 5884 zs->zs_proc_stop = zs->zs_proc_start + ztest_opts.zo_time * NANOSEC;
5862 5885
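	/*
	 * Seed each function's first call time; a function whose minimum
	 * interval does not fit in the run is disabled via UINT64_MAX.
	 */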
5863 5886 for (int f = 0; f < ZTEST_FUNCS; f++) {
5864 5887 zi = &ztest_info[f];
5865 5888 zc = ZTEST_GET_SHARED_CALLSTATE(f);
5866 5889 if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop)
5867 5890 zc->zc_next = UINT64_MAX;
5868 5891 else
5869 5892 zc->zc_next = zs->zs_proc_start +
5870 5893 ztest_random(2 * zi->zi_interval[0] + 1);
5871 5894 }
5872 5895
5873 5896 /*
5874 5897 * Run the tests in a loop. These tests include fault injection
5875 5898 * to verify that self-healing data works, and forced crashes
5876 5899 * to verify that we never lose on-disk consistency.
5877 5900 */
5878 5901 while (gethrtime() < zs->zs_proc_stop) {
5879 5902 int status;
5880 5903 boolean_t killed;
5881 5904
5882 5905 /*
5883 5906 * Initialize the workload counters for each function.
5884 5907 */
5885 5908 for (int f = 0; f < ZTEST_FUNCS; f++) {
5886 5909 zc = ZTEST_GET_SHARED_CALLSTATE(f);
5887 5910 zc->zc_count = 0;
5888 5911 zc->zc_time = 0;
5889 5912 }
5890 5913
5891 5914 /* Set the allocation switch size */
5892 5915 zs->zs_metaslab_df_alloc_threshold =
5893 5916 ztest_random(zs->zs_metaslab_sz / 4) + 1;
5894 5917
5895 5918 if (!hasalt || ztest_random(2) == 0) {
5896 5919 if (hasalt && ztest_opts.zo_verbose >= 1) {
5897 5920 (void) printf("Executing newer ztest: %s\n",
5898 5921 cmd);
5899 5922 }
5900 5923 newer++;
5901 5924 killed = exec_child(cmd, NULL, B_TRUE, &status);
5902 5925 } else {
5903 5926 if (hasalt && ztest_opts.zo_verbose >= 1) {
5904 5927 (void) printf("Executing older ztest: %s\n",
5905 5928 ztest_opts.zo_alt_ztest);
5906 5929 }
5907 5930 older++;
5908 5931 killed = exec_child(ztest_opts.zo_alt_ztest,
5909 5932 ztest_opts.zo_alt_libpath, B_TRUE, &status);
5910 5933 }
5911 5934
5912 5935 if (killed)
5913 5936 kills++;
5914 5937 iters++;
5915 5938
5916 5939 if (ztest_opts.zo_verbose >= 1) {
5917 5940 hrtime_t now = gethrtime();
5918 5941
5919 5942 now = MIN(now, zs->zs_proc_stop);
5920 5943 print_time(zs->zs_proc_stop - now, timebuf);
5921 5944 nicenum(zs->zs_space, numbuf);
5922 5945
5923 5946 (void) printf("Pass %3d, %8s, %3llu ENOSPC, "
5924 5947 "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n",
5925 5948 iters,
5926 5949 WIFEXITED(status) ? "Complete" : "SIGKILL",
5927 5950 (u_longlong_t)zs->zs_enospc_count,
5928 5951 100.0 * zs->zs_alloc / zs->zs_space,
5929 5952 numbuf,
5930 5953 100.0 * (now - zs->zs_proc_start) /
5931 5954 (ztest_opts.zo_time * NANOSEC), timebuf);
5932 5955 }
5933 5956
5934 5957 if (ztest_opts.zo_verbose >= 2) {
5935 5958 (void) printf("\nWorkload summary:\n\n");
5936 5959 (void) printf("%7s %9s %s\n",
5937 5960 "Calls", "Time", "Function");
5938 5961 (void) printf("%7s %9s %s\n",
5939 5962 "-----", "----", "--------");
5940 5963 for (int f = 0; f < ZTEST_FUNCS; f++) {
5941 5964 Dl_info dli;
5942 5965
5943 5966 zi = &ztest_info[f];
5944 5967 zc = ZTEST_GET_SHARED_CALLSTATE(f);
5945 5968 print_time(zc->zc_time, timebuf);
5946 5969 (void) dladdr((void *)zi->zi_func, &dli);
5947 5970 (void) printf("%7llu %9s %s\n",
5948 5971 (u_longlong_t)zc->zc_count, timebuf,
5949 5972 dli.dli_sname);
5950 5973 }
5951 5974 (void) printf("\n");
5952 5975 }
5953 5976
5954 5977 /*
5955 5978 * It's possible that we killed a child during a rename test,
5956 5979 * in which case we'll have a 'ztest_tmp' pool lying around
5957 5980 * instead of 'ztest'. Do a blind rename in case this happened.
5958 5981 */
5959 5982 kernel_init(FREAD);
5960 5983 if (spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0) {
5961 5984 spa_close(spa, FTAG);
5962 5985 } else {
5963 5986 char tmpname[MAXNAMELEN];
5964 5987 kernel_fini();
5965 5988 kernel_init(FREAD | FWRITE);
5966 5989 (void) snprintf(tmpname, sizeof (tmpname), "%s_tmp",
5967 5990 ztest_opts.zo_pool);
5968 5991 (void) spa_rename(tmpname, ztest_opts.zo_pool);
5969 5992 }
5970 5993 kernel_fini();
5971 5994
5972 5995 ztest_run_zdb(ztest_opts.zo_pool);
5973 5996 }
5974 5997
5975 5998 if (ztest_opts.zo_verbose >= 1) {
5976 5999 if (hasalt) {
5977 6000 (void) printf("%d runs of older ztest: %s\n", older,
5978 6001 ztest_opts.zo_alt_ztest);
5979 6002 (void) printf("%d runs of newer ztest: %s\n", newer,
5980 6003 cmd);
5981 6004 }
5982 6005 (void) printf("%d killed, %d completed, %.0f%% kill rate\n",
5983 6006 kills, iters - kills, (100.0 * kills) / MAX(1, iters));
5984 6007 }
5985 6008
5986 6009 return (0);
5987 6010 }