1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2012 by Delphix. All rights reserved.
24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 */
26
27 /*
28 * The objective of this program is to provide a DMU/ZAP/SPA stress test
29 * that runs entirely in userland, is easy to use, and easy to extend.
30 *
31 * The overall design of the ztest program is as follows:
32 *
33 * (1) For each major functional area (e.g. adding vdevs to a pool,
34 * creating and destroying datasets, reading and writing objects, etc)
35 * we have a simple routine to test that functionality. These
36 * individual routines do not have to do anything "stressful".
37 *
38 * (2) We turn these simple functionality tests into a stress test by
39 * running them all in parallel, with as many threads as desired,
40 * and spread across as many datasets, objects, and vdevs as desired.
41 *
42 * (3) While all this is happening, we inject faults into the pool to
43 * verify that self-healing data really works.
44 *
45 * (4) Every time we open a dataset, we change its checksum and compression
46 * functions. Thus even individual objects vary from block to block
47 * in which checksum they use and whether they're compressed.
48 *
49 * (5) To verify that we never lose on-disk consistency after a crash,
50 * we run the entire test in a child of the main process.
51 * At random times, the child self-immolates with a SIGKILL.
52 * This is the software equivalent of pulling the power cord.
53 * The parent then runs the test again, using the existing
54 * storage pool, as many times as desired. If backwards compatibility
55 * testing is enabled, ztest will sometimes run the "older" version
56 * of ztest after a SIGKILL.
57 *
58 * (6) To verify that we don't have future leaks or temporal incursions,
59 * many of the functional tests record the transaction group number
60 * as part of their data. When reading old data, they verify that
61 * the transaction group number is less than the current, open txg.
62 * If you add a new test, please do this if applicable.
63 *
64 * When run with no arguments, ztest runs for about five minutes and
65 * produces no output if successful. To get a little bit of information,
66 * specify -V. To get more information, specify -VV, and so on.
67 *
68 * To turn this into an overnight stress test, use -T to specify run time.
69 *
70 * You can ask for more vdevs [-v], datasets [-d], or threads [-t]
71 * to increase the pool capacity, fanout, and overall stress level.
72 *
73 * Use the -k option to set the desired frequency of kills.
74 *
75 * When ztest invokes itself, it passes all relevant information through a
76 * temporary file which is mmap-ed in the child process. This allows shared
77 * memory to survive the exec syscall. The ztest_shared_hdr_t struct is always
78 * stored at offset 0 of this file and contains information on the size and
79 * number of shared structures in the file. The information stored in this file
80 * must remain backwards compatible with older versions of ztest so that
81 * ztest can invoke them during backwards compatibility testing (-B).
82 */
83
84 #include <sys/zfs_context.h>
85 #include <sys/spa.h>
86 #include <sys/dmu.h>
87 #include <sys/txg.h>
88 #include <sys/dbuf.h>
89 #include <sys/zap.h>
90 #include <sys/dmu_objset.h>
91 #include <sys/poll.h>
92 #include <sys/stat.h>
93 #include <sys/time.h>
94 #include <sys/wait.h>
95 #include <sys/mman.h>
96 #include <sys/resource.h>
97 #include <sys/zio.h>
98 #include <sys/zil.h>
99 #include <sys/zil_impl.h>
100 #include <sys/vdev_impl.h>
101 #include <sys/vdev_file.h>
102 #include <sys/spa_impl.h>
103 #include <sys/metaslab_impl.h>
104 #include <sys/dsl_prop.h>
105 #include <sys/dsl_dataset.h>
106 #include <sys/dsl_scan.h>
107 #include <sys/zio_checksum.h>
108 #include <sys/refcount.h>
109 #include <stdio.h>
110 #include <stdio_ext.h>
111 #include <stdlib.h>
112 #include <unistd.h>
113 #include <signal.h>
114 #include <umem.h>
115 #include <dlfcn.h>
116 #include <ctype.h>
117 #include <math.h>
118 #include <sys/fs/zfs.h>
119 #include <libnvpair.h>
120
121 #define ZTEST_FD_DATA 3
122 #define ZTEST_FD_RAND 4
123
124 typedef struct ztest_shared_hdr {
125 uint64_t zh_hdr_size;
126 uint64_t zh_opts_size;
127 uint64_t zh_size;
128 uint64_t zh_stats_size;
129 uint64_t zh_stats_count;
130 uint64_t zh_ds_size;
131 uint64_t zh_ds_count;
132 } ztest_shared_hdr_t;
133
134 static ztest_shared_hdr_t *ztest_shared_hdr;
135
136 typedef struct ztest_shared_opts {
137 char zo_pool[MAXNAMELEN];
138 char zo_dir[MAXNAMELEN];
139 char zo_alt_ztest[MAXNAMELEN];
140 char zo_alt_libpath[MAXNAMELEN];
141 uint64_t zo_vdevs;
142 uint64_t zo_vdevtime;
143 size_t zo_vdev_size;
144 int zo_ashift;
145 int zo_mirrors;
146 int zo_raidz;
147 int zo_raidz_parity;
148 int zo_datasets;
149 int zo_threads;
150 uint64_t zo_passtime;
151 uint64_t zo_killrate;
152 int zo_verbose;
153 int zo_init;
154 uint64_t zo_time;
155 uint64_t zo_maxloops;
156 uint64_t zo_metaslab_gang_bang;
157 } ztest_shared_opts_t;
158
159 static const ztest_shared_opts_t ztest_opts_defaults = {
160 .zo_pool = { 'z', 't', 'e', 's', 't', '\0' },
161 .zo_dir = { '/', 't', 'm', 'p', '\0' },
162 .zo_alt_ztest = { '\0' },
163 .zo_alt_libpath = { '\0' },
164 .zo_vdevs = 5,
165 .zo_ashift = SPA_MINBLOCKSHIFT,
166 .zo_mirrors = 2,
167 .zo_raidz = 4,
168 .zo_raidz_parity = 1,
169 .zo_vdev_size = SPA_MINDEVSIZE,
170 .zo_datasets = 7,
171 .zo_threads = 23,
172 .zo_passtime = 60, /* 60 seconds */
173 .zo_killrate = 70, /* 70% kill rate */
174 .zo_verbose = 0,
175 .zo_init = 1,
176 .zo_time = 300, /* 5 minutes */
177 .zo_maxloops = 50, /* max loops during spa_freeze() */
178 .zo_metaslab_gang_bang = 32 << 10
179 };
180
181 extern uint64_t metaslab_gang_bang;
182 extern uint64_t metaslab_df_alloc_threshold;
183
184 static ztest_shared_opts_t *ztest_shared_opts;
185 static ztest_shared_opts_t ztest_opts;
186
187 typedef struct ztest_shared_ds {
188 uint64_t zd_seq;
189 } ztest_shared_ds_t;
190
191 static ztest_shared_ds_t *ztest_shared_ds;
192 #define ZTEST_GET_SHARED_DS(d) (&ztest_shared_ds[d])
193
194 #define BT_MAGIC 0x123456789abcdefULL
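/*
 * Maximum number of leaf-device faults the current mirror/raidz
 * configuration is guaranteed to tolerate without losing data.
 */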
195 #define MAXFAULTS() \
196 (MAX(zs->zs_mirrors, 1) * (ztest_opts.zo_raidz_parity + 1) - 1)
197
198 enum ztest_io_type {
199 ZTEST_IO_WRITE_TAG,
200 ZTEST_IO_WRITE_PATTERN,
201 ZTEST_IO_WRITE_ZEROES,
202 ZTEST_IO_TRUNCATE,
203 ZTEST_IO_SETATTR,
204 ZTEST_IO_TYPES
205 };
206
207 typedef struct ztest_block_tag {
208 uint64_t bt_magic;
209 uint64_t bt_objset;
210 uint64_t bt_object;
211 uint64_t bt_offset;
212 uint64_t bt_gen;
213 uint64_t bt_txg;
214 uint64_t bt_crtxg;
215 } ztest_block_tag_t;
216
217 typedef struct bufwad {
218 uint64_t bw_index;
219 uint64_t bw_txg;
220 uint64_t bw_data;
221 } bufwad_t;
222
223 /*
224 * XXX -- fix zfs range locks to be generic so we can use them here.
225 */
226 typedef enum {
227 RL_READER,
228 RL_WRITER,
229 RL_APPEND
230 } rl_type_t;
231
232 typedef struct rll {
233 void *rll_writer;
234 int rll_readers;
235 mutex_t rll_lock;
236 cond_t rll_cv;
237 } rll_t;
238
239 typedef struct rl {
240 uint64_t rl_object;
241 uint64_t rl_offset;
242 uint64_t rl_size;
243 rll_t *rl_lock;
244 } rl_t;
245
246 #define ZTEST_RANGE_LOCKS 64
247 #define ZTEST_OBJECT_LOCKS 64
248
249 /*
250 * Object descriptor. Used as a template for object lookup/create/remove.
251 */
252 typedef struct ztest_od {
253 uint64_t od_dir;
254 uint64_t od_object;
255 dmu_object_type_t od_type;
256 dmu_object_type_t od_crtype;
257 uint64_t od_blocksize;
258 uint64_t od_crblocksize;
259 uint64_t od_gen;
260 uint64_t od_crgen;
261 char od_name[MAXNAMELEN];
262 } ztest_od_t;
263
264 /*
265 * Per-dataset state.
266 */
267 typedef struct ztest_ds {
268 ztest_shared_ds_t *zd_shared;
269 objset_t *zd_os;
270 rwlock_t zd_zilog_lock;
271 zilog_t *zd_zilog;
272 ztest_od_t *zd_od; /* debugging aid */
273 char zd_name[MAXNAMELEN];
274 mutex_t zd_dirobj_lock;
275 rll_t zd_object_lock[ZTEST_OBJECT_LOCKS];
276 rll_t zd_range_lock[ZTEST_RANGE_LOCKS];
277 } ztest_ds_t;
278
279 /*
280 * Per-iteration state.
281 */
282 typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id);
283
284 typedef struct ztest_info {
285 ztest_func_t *zi_func; /* test function */
286 uint64_t zi_iters; /* iterations per execution */
287 uint64_t *zi_interval; /* execute every <interval> seconds */
288 } ztest_info_t;
289
290 typedef struct ztest_shared_callstate {
291 uint64_t zc_count; /* per-pass count */
292 uint64_t zc_time; /* per-pass time */
293 uint64_t zc_next; /* next time to call this function */
294 } ztest_shared_callstate_t;
295
296 static ztest_shared_callstate_t *ztest_shared_callstate;
297 #define ZTEST_GET_SHARED_CALLSTATE(c) (&ztest_shared_callstate[c])
298
299 /*
300 * Note: these aren't static because we want dladdr() to work.
301 */
302 ztest_func_t ztest_dmu_read_write;
303 ztest_func_t ztest_dmu_write_parallel;
304 ztest_func_t ztest_dmu_object_alloc_free;
305 ztest_func_t ztest_dmu_commit_callbacks;
306 ztest_func_t ztest_zap;
307 ztest_func_t ztest_zap_parallel;
308 ztest_func_t ztest_zil_commit;
309 ztest_func_t ztest_zil_remount;
310 ztest_func_t ztest_dmu_read_write_zcopy;
311 ztest_func_t ztest_dmu_objset_create_destroy;
312 ztest_func_t ztest_dmu_prealloc;
313 ztest_func_t ztest_fzap;
314 ztest_func_t ztest_dmu_snapshot_create_destroy;
315 ztest_func_t ztest_dsl_prop_get_set;
316 ztest_func_t ztest_spa_prop_get_set;
317 ztest_func_t ztest_spa_create_destroy;
318 ztest_func_t ztest_fault_inject;
319 ztest_func_t ztest_ddt_repair;
320 ztest_func_t ztest_dmu_snapshot_hold;
321 ztest_func_t ztest_spa_rename;
322 ztest_func_t ztest_scrub;
323 ztest_func_t ztest_dsl_dataset_promote_busy;
324 ztest_func_t ztest_vdev_attach_detach;
325 ztest_func_t ztest_vdev_LUN_growth;
326 ztest_func_t ztest_vdev_add_remove;
327 ztest_func_t ztest_vdev_aux_add_remove;
328 ztest_func_t ztest_split_pool;
329 ztest_func_t ztest_reguid;
330
331 uint64_t zopt_always = 0ULL * NANOSEC; /* all the time */
332 uint64_t zopt_incessant = 1ULL * NANOSEC / 10; /* every 1/10 second */
333 uint64_t zopt_often = 1ULL * NANOSEC; /* every second */
334 uint64_t zopt_sometimes = 10ULL * NANOSEC; /* every 10 seconds */
335 uint64_t zopt_rarely = 60ULL * NANOSEC; /* every 60 seconds */
336
337 ztest_info_t ztest_info[] = {
338 { ztest_dmu_read_write, 1, &zopt_always },
339 { ztest_dmu_write_parallel, 10, &zopt_always },
340 { ztest_dmu_object_alloc_free, 1, &zopt_always },
341 { ztest_dmu_commit_callbacks, 1, &zopt_always },
342 { ztest_zap, 30, &zopt_always },
343 { ztest_zap_parallel, 100, &zopt_always },
344 { ztest_split_pool, 1, &zopt_always },
345 { ztest_zil_commit, 1, &zopt_incessant },
346 { ztest_zil_remount, 1, &zopt_sometimes },
347 { ztest_dmu_read_write_zcopy, 1, &zopt_often },
348 { ztest_dmu_objset_create_destroy, 1, &zopt_often },
349 { ztest_dsl_prop_get_set, 1, &zopt_often },
350 { ztest_spa_prop_get_set, 1, &zopt_sometimes },
351 #if 0
352 { ztest_dmu_prealloc, 1, &zopt_sometimes },
353 #endif
354 { ztest_fzap, 1, &zopt_sometimes },
355 { ztest_dmu_snapshot_create_destroy, 1, &zopt_sometimes },
356 { ztest_spa_create_destroy, 1, &zopt_sometimes },
357 { ztest_fault_inject, 1, &zopt_sometimes },
358 { ztest_ddt_repair, 1, &zopt_sometimes },
359 { ztest_dmu_snapshot_hold, 1, &zopt_sometimes },
360 { ztest_reguid, 1, &zopt_sometimes },
361 { ztest_spa_rename, 1, &zopt_rarely },
362 { ztest_scrub, 1, &zopt_rarely },
363 { ztest_dsl_dataset_promote_busy, 1, &zopt_rarely },
364 { ztest_vdev_attach_detach, 1, &zopt_rarely },
365 { ztest_vdev_LUN_growth, 1, &zopt_rarely },
366 { ztest_vdev_add_remove, 1,
367 &ztest_opts.zo_vdevtime },
368 { ztest_vdev_aux_add_remove, 1,
369 &ztest_opts.zo_vdevtime },
370 };
371
372 #define ZTEST_FUNCS (sizeof (ztest_info) / sizeof (ztest_info_t))
373
374 /*
375 * The following struct is used to hold a list of uncalled commit callbacks.
376 * The callbacks are ordered by txg number.
377 */
378 typedef struct ztest_cb_list {
379 mutex_t zcl_callbacks_lock;
380 list_t zcl_callbacks;
381 } ztest_cb_list_t;
382
383 /*
384 * Stuff we need to share writably between parent and child.
385 */
386 typedef struct ztest_shared {
387 boolean_t zs_do_init;
388 hrtime_t zs_proc_start;
389 hrtime_t zs_proc_stop;
390 hrtime_t zs_thread_start;
391 hrtime_t zs_thread_stop;
392 hrtime_t zs_thread_kill;
393 uint64_t zs_enospc_count;
394 uint64_t zs_vdev_next_leaf;
395 uint64_t zs_vdev_aux;
396 uint64_t zs_alloc;
397 uint64_t zs_space;
398 uint64_t zs_splits;
399 uint64_t zs_mirrors;
400 uint64_t zs_metaslab_sz;
401 uint64_t zs_metaslab_df_alloc_threshold;
402 uint64_t zs_guid;
403 } ztest_shared_t;
404
405 #define ID_PARALLEL -1ULL
406
407 static char ztest_dev_template[] = "%s/%s.%llua";
408 static char ztest_aux_template[] = "%s/%s.%s.%llu";
409 ztest_shared_t *ztest_shared;
410
411 static spa_t *ztest_spa = NULL;
412 static ztest_ds_t *ztest_ds;
413
414 static mutex_t ztest_vdev_lock;
415 static rwlock_t ztest_name_lock;
416
417 static boolean_t ztest_dump_core = B_TRUE;
418 static boolean_t ztest_exiting;
419
420 /* Global commit callback list */
421 static ztest_cb_list_t zcl;
422
423 enum ztest_object {
424 ZTEST_META_DNODE = 0,
425 ZTEST_DIROBJ,
426 ZTEST_OBJECTS
427 };
428
429 static void usage(boolean_t) __NORETURN;
430
431 /*
432 * These libumem hooks provide a reasonable set of defaults for the allocator's
433 * debugging facilities.
434 */
435 const char *
436 _umem_debug_init(void)
437 {
438 return ("default,verbose"); /* $UMEM_DEBUG setting */
439 }
440
441 const char *
442 _umem_logging_init(void)
443 {
444 return ("fail,contents"); /* $UMEM_LOGGING setting */
445 }
446
447 #define FATAL_MSG_SZ 1024
448
449 char *fatal_msg;
450
451 static void
452 fatal(int do_perror, char *message, ...)
453 {
454 va_list args;
455 int save_errno = errno;
456 char buf[FATAL_MSG_SZ];
457
458 (void) fflush(stdout);
459
460 va_start(args, message);
461 (void) sprintf(buf, "ztest: ");
462 /* LINTED */
463 (void) vsprintf(buf + strlen(buf), message, args);
464 va_end(args);
465 if (do_perror) {
466 (void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf),
467 ": %s", strerror(save_errno));
468 }
469 (void) fprintf(stderr, "%s\n", buf);
470 fatal_msg = buf; /* to ease debugging */
471 if (ztest_dump_core)
472 abort();
473 exit(3);
474 }
475
476 static int
477 str2shift(const char *buf)
478 {
479 const char *ends = "BKMGTPEZ";
480 int i;
481
482 if (buf[0] == '\0')
483 return (0);
484 for (i = 0; i < strlen(ends); i++) {
485 if (toupper(buf[0]) == ends[i])
486 break;
487 }
488 if (i == strlen(ends)) {
489 (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n",
490 buf);
491 usage(B_FALSE);
492 }
493 if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) {
494 return (10*i);
495 }
496 (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf);
497 usage(B_FALSE);
498 /* NOTREACHED */
499 }
500
501 static uint64_t
502 nicenumtoull(const char *buf)
503 {
504 char *end;
505 uint64_t val;
506
507 val = strtoull(buf, &end, 0);
508 if (end == buf) {
509 (void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf);
510 usage(B_FALSE);
511 } else if (end[0] == '.') {
512 double fval = strtod(buf, &end);
513 fval *= pow(2, str2shift(end));
514 if (fval > UINT64_MAX) {
515 (void) fprintf(stderr, "ztest: value too large: %s\n",
516 buf);
517 usage(B_FALSE);
518 }
519 val = (uint64_t)fval;
520 } else {
521 int shift = str2shift(end);
522 if (shift >= 64 || (val << shift) >> shift != val) {
523 (void) fprintf(stderr, "ztest: value too large: %s\n",
524 buf);
525 usage(B_FALSE);
526 }
527 val <<= shift;
528 }
529 return (val);
530 }
531
532 static void
533 usage(boolean_t requested)
534 {
535 const ztest_shared_opts_t *zo = &ztest_opts_defaults;
536
537 char nice_vdev_size[10];
538 char nice_gang_bang[10];
539 FILE *fp = requested ? stdout : stderr;
540
541 nicenum(zo->zo_vdev_size, nice_vdev_size);
542 nicenum(zo->zo_metaslab_gang_bang, nice_gang_bang);
543
544 (void) fprintf(fp, "Usage: %s\n"
545 "\t[-v vdevs (default: %llu)]\n"
546 "\t[-s size_of_each_vdev (default: %s)]\n"
547 "\t[-a alignment_shift (default: %d)] use 0 for random\n"
548 "\t[-m mirror_copies (default: %d)]\n"
549 "\t[-r raidz_disks (default: %d)]\n"
550 "\t[-R raidz_parity (default: %d)]\n"
551 "\t[-d datasets (default: %d)]\n"
552 "\t[-t threads (default: %d)]\n"
553 "\t[-g gang_block_threshold (default: %s)]\n"
554 "\t[-i init_count (default: %d)] initialize pool i times\n"
555 "\t[-k kill_percentage (default: %llu%%)]\n"
556 "\t[-p pool_name (default: %s)]\n"
557 "\t[-f dir (default: %s)] file directory for vdev files\n"
558 "\t[-V] verbose (use multiple times for ever more blather)\n"
559 "\t[-E] use existing pool instead of creating new one\n"
560 "\t[-T time (default: %llu sec)] total run time\n"
561 "\t[-F freezeloops (default: %llu)] max loops in spa_freeze()\n"
562 "\t[-P passtime (default: %llu sec)] time per pass\n"
563 "\t[-B alt_ztest (default: <none>)] alternate ztest path\n"
564 "\t[-h] (print help)\n"
565 "",
566 zo->zo_pool,
567 (u_longlong_t)zo->zo_vdevs, /* -v */
568 nice_vdev_size, /* -s */
569 zo->zo_ashift, /* -a */
570 zo->zo_mirrors, /* -m */
571 zo->zo_raidz, /* -r */
572 zo->zo_raidz_parity, /* -R */
573 zo->zo_datasets, /* -d */
574 zo->zo_threads, /* -t */
575 nice_gang_bang, /* -g */
576 zo->zo_init, /* -i */
577 (u_longlong_t)zo->zo_killrate, /* -k */
578 zo->zo_pool, /* -p */
579 zo->zo_dir, /* -f */
580 (u_longlong_t)zo->zo_time, /* -T */
581 (u_longlong_t)zo->zo_maxloops, /* -F */
582 (u_longlong_t)zo->zo_passtime);
583 exit(requested ? 0 : 1);
584 }
585
586 static void
587 process_options(int argc, char **argv)
588 {
589 char *path;
590 ztest_shared_opts_t *zo = &ztest_opts;
591
592 int opt;
593 uint64_t value;
594 char altdir[MAXNAMELEN] = { 0 };
595
596 bcopy(&ztest_opts_defaults, zo, sizeof (*zo));
597
598 while ((opt = getopt(argc, argv,
599 "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:hF:B:")) != EOF) {
600 value = 0;
601 switch (opt) {
602 case 'v':
603 case 's':
604 case 'a':
605 case 'm':
606 case 'r':
607 case 'R':
608 case 'd':
609 case 't':
610 case 'g':
611 case 'i':
612 case 'k':
613 case 'T':
614 case 'P':
615 case 'F':
616 value = nicenumtoull(optarg);
617 }
618 switch (opt) {
619 case 'v':
620 zo->zo_vdevs = value;
621 break;
622 case 's':
623 zo->zo_vdev_size = MAX(SPA_MINDEVSIZE, value);
624 break;
625 case 'a':
626 zo->zo_ashift = value;
627 break;
628 case 'm':
629 zo->zo_mirrors = value;
630 break;
631 case 'r':
632 zo->zo_raidz = MAX(1, value);
633 break;
634 case 'R':
635 zo->zo_raidz_parity = MIN(MAX(value, 1), 3);
636 break;
637 case 'd':
638 zo->zo_datasets = MAX(1, value);
639 break;
640 case 't':
641 zo->zo_threads = MAX(1, value);
642 break;
643 case 'g':
644 zo->zo_metaslab_gang_bang = MAX(SPA_MINBLOCKSIZE << 1,
645 value);
646 break;
647 case 'i':
648 zo->zo_init = value;
649 break;
650 case 'k':
651 zo->zo_killrate = value;
652 break;
653 case 'p':
654 (void) strlcpy(zo->zo_pool, optarg,
655 sizeof (zo->zo_pool));
656 break;
657 case 'f':
658 path = realpath(optarg, NULL);
659 if (path == NULL) {
660 (void) fprintf(stderr, "error: %s: %s\n",
661 optarg, strerror(errno));
662 usage(B_FALSE);
663 } else {
664 (void) strlcpy(zo->zo_dir, path,
665 sizeof (zo->zo_dir));
666 }
667 break;
668 case 'V':
669 zo->zo_verbose++;
670 break;
671 case 'E':
672 zo->zo_init = 0;
673 break;
674 case 'T':
675 zo->zo_time = value;
676 break;
677 case 'P':
678 zo->zo_passtime = MAX(1, value);
679 break;
680 case 'F':
681 zo->zo_maxloops = MAX(1, value);
682 break;
683 case 'B':
684 (void) strlcpy(altdir, optarg, sizeof (altdir));
685 break;
686 case 'h':
687 usage(B_TRUE);
688 break;
689 case '?':
690 default:
691 usage(B_FALSE);
692 break;
693 }
694 }
695
696 zo->zo_raidz_parity = MIN(zo->zo_raidz_parity, zo->zo_raidz - 1);
697
698 zo->zo_vdevtime =
699 (zo->zo_vdevs > 0 ? zo->zo_time * NANOSEC / zo->zo_vdevs :
700 UINT64_MAX >> 2);
701
702 if (strlen(altdir) > 0) {
703 char cmd[MAXNAMELEN];
704 char realaltdir[MAXNAMELEN];
705 char *bin;
706 char *ztest;
707 char *isa;
708 int isalen;
709
710 (void) realpath(getexecname(), cmd);
711 if (0 != access(altdir, F_OK)) {
712 ztest_dump_core = B_FALSE;
713 fatal(B_TRUE, "invalid alternate ztest path: %s",
714 altdir);
715 }
716 VERIFY(NULL != realpath(altdir, realaltdir));
717
718 /*
719 * 'cmd' should be of the form "<anything>/usr/bin/<isa>/ztest".
720 * We want to extract <isa> to determine if we should use
721 * 32 or 64 bit binaries.
722 */
723 bin = strstr(cmd, "/usr/bin/");
724 ztest = strstr(bin, "/ztest");
725 isa = bin + 9;
726 isalen = ztest - isa;
727 (void) snprintf(zo->zo_alt_ztest, sizeof (zo->zo_alt_ztest),
728 "%s/usr/bin/%.*s/ztest", realaltdir, isalen, isa);
729 (void) snprintf(zo->zo_alt_libpath, sizeof (zo->zo_alt_libpath),
730 "%s/usr/lib/%.*s", realaltdir, isalen, isa);
731
732 if (0 != access(zo->zo_alt_ztest, X_OK)) {
733 ztest_dump_core = B_FALSE;
734 fatal(B_TRUE, "invalid alternate ztest: %s",
735 zo->zo_alt_ztest);
736 } else if (0 != access(zo->zo_alt_libpath, X_OK)) {
737 ztest_dump_core = B_FALSE;
738 fatal(B_TRUE, "invalid alternate lib directory %s",
739 zo->zo_alt_libpath);
740 }
741 }
742 }
743
744 static void
745 ztest_kill(ztest_shared_t *zs)
746 {
747 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(ztest_spa));
748 zs->zs_space = metaslab_class_get_space(spa_normal_class(ztest_spa));
749 (void) kill(getpid(), SIGKILL);
750 }
751
752 static uint64_t
753 ztest_random(uint64_t range)
754 {
755 uint64_t r;
756
757 if (range == 0)
758 return (0);
759
760 if (read(ZTEST_FD_RAND, &r, sizeof (r)) != sizeof (r))
761 fatal(1, "short read from /dev/urandom");
762
763 return (r % range);
764 }
765
766 /* ARGSUSED */
767 static void
768 ztest_record_enospc(const char *s)
769 {
770 ztest_shared->zs_enospc_count++;
771 }
772
773 static uint64_t
774 ztest_get_ashift(void)
775 {
776 if (ztest_opts.zo_ashift == 0)
777 return (SPA_MINBLOCKSHIFT + ztest_random(3));
778 return (ztest_opts.zo_ashift);
779 }
780
781 static nvlist_t *
782 make_vdev_file(char *path, char *aux, size_t size, uint64_t ashift)
783 {
784 char pathbuf[MAXPATHLEN];
785 uint64_t vdev;
786 nvlist_t *file;
787
788 if (ashift == 0)
789 ashift = ztest_get_ashift();
790
791 if (path == NULL) {
792 path = pathbuf;
793
794 if (aux != NULL) {
795 vdev = ztest_shared->zs_vdev_aux;
796 (void) snprintf(path, sizeof (pathbuf),
797 ztest_aux_template, ztest_opts.zo_dir,
798 ztest_opts.zo_pool, aux, vdev);
799 } else {
800 vdev = ztest_shared->zs_vdev_next_leaf++;
801 (void) snprintf(path, sizeof (pathbuf),
802 ztest_dev_template, ztest_opts.zo_dir,
803 ztest_opts.zo_pool, vdev);
804 }
805 }
806
807 if (size != 0) {
808 int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666);
809 if (fd == -1)
810 fatal(1, "can't open %s", path);
811 if (ftruncate(fd, size) != 0)
812 fatal(1, "can't ftruncate %s", path);
813 (void) close(fd);
814 }
815
816 VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0);
817 VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0);
818 VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path) == 0);
819 VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0);
820
821 return (file);
822 }
823
824 static nvlist_t *
825 make_vdev_raidz(char *path, char *aux, size_t size, uint64_t ashift, int r)
826 {
827 nvlist_t *raidz, **child;
828 int c;
829
830 if (r < 2)
831 return (make_vdev_file(path, aux, size, ashift));
832 child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL);
833
834 for (c = 0; c < r; c++)
835 child[c] = make_vdev_file(path, aux, size, ashift);
836
837 VERIFY(nvlist_alloc(&raidz, NV_UNIQUE_NAME, 0) == 0);
838 VERIFY(nvlist_add_string(raidz, ZPOOL_CONFIG_TYPE,
839 VDEV_TYPE_RAIDZ) == 0);
840 VERIFY(nvlist_add_uint64(raidz, ZPOOL_CONFIG_NPARITY,
841 ztest_opts.zo_raidz_parity) == 0);
842 VERIFY(nvlist_add_nvlist_array(raidz, ZPOOL_CONFIG_CHILDREN,
843 child, r) == 0);
844
845 for (c = 0; c < r; c++)
846 nvlist_free(child[c]);
847
848 umem_free(child, r * sizeof (nvlist_t *));
849
850 return (raidz);
851 }
852
853 static nvlist_t *
854 make_vdev_mirror(char *path, char *aux, size_t size, uint64_t ashift,
855 int r, int m)
856 {
857 nvlist_t *mirror, **child;
858 int c;
859
860 if (m < 1)
861 return (make_vdev_raidz(path, aux, size, ashift, r));
862
863 child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL);
864
865 for (c = 0; c < m; c++)
866 child[c] = make_vdev_raidz(path, aux, size, ashift, r);
867
868 VERIFY(nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0) == 0);
869 VERIFY(nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE,
870 VDEV_TYPE_MIRROR) == 0);
871 VERIFY(nvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN,
872 child, m) == 0);
873
874 for (c = 0; c < m; c++)
875 nvlist_free(child[c]);
876
877 umem_free(child, m * sizeof (nvlist_t *));
878
879 return (mirror);
880 }
881
882 static nvlist_t *
883 make_vdev_root(char *path, char *aux, size_t size, uint64_t ashift,
884 int log, int r, int m, int t)
885 {
886 nvlist_t *root, **child;
887 int c;
888
889 ASSERT(t > 0);
890
891 child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL);
892
893 for (c = 0; c < t; c++) {
894 child[c] = make_vdev_mirror(path, aux, size, ashift, r, m);
895 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
896 log) == 0);
897 }
898
899 VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
900 VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0);
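/*
 * When building an aux (e.g. spare) config, store the children under the
 * caller-supplied key instead of the normal "children" key.
 */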
901 VERIFY(nvlist_add_nvlist_array(root, aux ? aux : ZPOOL_CONFIG_CHILDREN,
902 child, t) == 0);
903
904 for (c = 0; c < t; c++)
905 nvlist_free(child[c]);
906
907 umem_free(child, t * sizeof (nvlist_t *));
908
909 return (root);
910 }
911
912 static int
913 ztest_random_blocksize(void)
914 {
915 return (1 << (SPA_MINBLOCKSHIFT +
916 ztest_random(SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1)));
917 }
918
919 static int
920 ztest_random_ibshift(void)
921 {
922 return (DN_MIN_INDBLKSHIFT +
923 ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1));
924 }
925
926 static uint64_t
927 ztest_random_vdev_top(spa_t *spa, boolean_t log_ok)
928 {
929 uint64_t top;
930 vdev_t *rvd = spa->spa_root_vdev;
931 vdev_t *tvd;
932
933 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
934
935 do {
936 top = ztest_random(rvd->vdev_children);
937 tvd = rvd->vdev_child[top];
938 } while (tvd->vdev_ishole || (tvd->vdev_islog && !log_ok) ||
939 tvd->vdev_mg == NULL || tvd->vdev_mg->mg_class == NULL);
940
941 return (top);
942 }
943
944 static uint64_t
945 ztest_random_dsl_prop(zfs_prop_t prop)
946 {
947 uint64_t value;
948
949 do {
950 value = zfs_prop_random_value(prop, ztest_random(-1ULL));
951 } while (prop == ZFS_PROP_CHECKSUM && value == ZIO_CHECKSUM_OFF);
952
953 return (value);
954 }
955
956 static int
957 ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value,
958 boolean_t inherit)
959 {
960 const char *propname = zfs_prop_to_name(prop);
961 const char *valname;
962 char setpoint[MAXPATHLEN];
963 uint64_t curval;
964 int error;
965
966 error = dsl_prop_set(osname, propname,
967 (inherit ? ZPROP_SRC_NONE : ZPROP_SRC_LOCAL),
968 sizeof (value), 1, &value);
969
970 if (error == ENOSPC) {
971 ztest_record_enospc(FTAG);
972 return (error);
973 }
974 ASSERT3U(error, ==, 0);
975
976 VERIFY3U(dsl_prop_get(osname, propname, sizeof (curval),
977 1, &curval, setpoint), ==, 0);
978
979 if (ztest_opts.zo_verbose >= 6) {
980 VERIFY(zfs_prop_index_to_string(prop, curval, &valname) == 0);
981 (void) printf("%s %s = %s at '%s'\n",
982 osname, propname, valname, setpoint);
983 }
984
985 return (error);
986 }
987
988 static int
989 ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value)
990 {
991 spa_t *spa = ztest_spa;
992 nvlist_t *props = NULL;
993 int error;
994
995 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
996 VERIFY(nvlist_add_uint64(props, zpool_prop_to_name(prop), value) == 0);
997
998 error = spa_prop_set(spa, props);
999
1000 nvlist_free(props);
1001
1002 if (error == ENOSPC) {
1003 ztest_record_enospc(FTAG);
1004 return (error);
1005 }
1006 ASSERT3U(error, ==, 0);
1007
1008 return (error);
1009 }
1010
1011 static void
1012 ztest_rll_init(rll_t *rll)
1013 {
1014 rll->rll_writer = NULL;
1015 rll->rll_readers = 0;
1016 VERIFY(_mutex_init(&rll->rll_lock, USYNC_THREAD, NULL) == 0);
1017 VERIFY(cond_init(&rll->rll_cv, USYNC_THREAD, NULL) == 0);
1018 }
1019
1020 static void
1021 ztest_rll_destroy(rll_t *rll)
1022 {
1023 ASSERT(rll->rll_writer == NULL);
1024 ASSERT(rll->rll_readers == 0);
1025 VERIFY(_mutex_destroy(&rll->rll_lock) == 0);
1026 VERIFY(cond_destroy(&rll->rll_cv) == 0);
1027 }
1028
1029 static void
1030 ztest_rll_lock(rll_t *rll, rl_type_t type)
1031 {
1032 VERIFY(mutex_lock(&rll->rll_lock) == 0);
1033
1034 if (type == RL_READER) {
1035 while (rll->rll_writer != NULL)
1036 (void) cond_wait(&rll->rll_cv, &rll->rll_lock);
1037 rll->rll_readers++;
1038 } else {
1039 while (rll->rll_writer != NULL || rll->rll_readers)
1040 (void) cond_wait(&rll->rll_cv, &rll->rll_lock);
1041 rll->rll_writer = curthread;
1042 }
1043
1044 VERIFY(mutex_unlock(&rll->rll_lock) == 0);
1045 }
1046
1047 static void
1048 ztest_rll_unlock(rll_t *rll)
1049 {
1050 VERIFY(mutex_lock(&rll->rll_lock) == 0);
1051
1052 if (rll->rll_writer) {
1053 ASSERT(rll->rll_readers == 0);
1054 rll->rll_writer = NULL;
1055 } else {
1056 ASSERT(rll->rll_readers != 0);
1057 ASSERT(rll->rll_writer == NULL);
1058 rll->rll_readers--;
1059 }
1060
1061 if (rll->rll_writer == NULL && rll->rll_readers == 0)
1062 VERIFY(cond_broadcast(&rll->rll_cv) == 0);
1063
1064 VERIFY(mutex_unlock(&rll->rll_lock) == 0);
1065 }
1066
1067 static void
1068 ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type)
1069 {
1070 rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
1071
1072 ztest_rll_lock(rll, type);
1073 }
1074
1075 static void
1076 ztest_object_unlock(ztest_ds_t *zd, uint64_t object)
1077 {
1078 rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
1079
1080 ztest_rll_unlock(rll);
1081 }
1082
1083 static rl_t *
1084 ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset,
1085 uint64_t size, rl_type_t type)
1086 {
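/* Hash the (object, offset) pair onto one of the per-dataset range locks. */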
1087 uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1));
1088 rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)];
1089 rl_t *rl;
1090
1091 rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL);
1092 rl->rl_object = object;
1093 rl->rl_offset = offset;
1094 rl->rl_size = size;
1095 rl->rl_lock = rll;
1096
1097 ztest_rll_lock(rll, type);
1098
1099 return (rl);
1100 }
1101
1102 static void
1103 ztest_range_unlock(rl_t *rl)
1104 {
1105 rll_t *rll = rl->rl_lock;
1106
1107 ztest_rll_unlock(rll);
1108
1109 umem_free(rl, sizeof (*rl));
1110 }
1111
1112 static void
1113 ztest_zd_init(ztest_ds_t *zd, ztest_shared_ds_t *szd, objset_t *os)
1114 {
1115 zd->zd_os = os;
1116 zd->zd_zilog = dmu_objset_zil(os);
1117 zd->zd_shared = szd;
1118 dmu_objset_name(os, zd->zd_name);
1119
1120 if (zd->zd_shared != NULL)
1121 zd->zd_shared->zd_seq = 0;
1122
1123 VERIFY(rwlock_init(&zd->zd_zilog_lock, USYNC_THREAD, NULL) == 0);
1124 VERIFY(_mutex_init(&zd->zd_dirobj_lock, USYNC_THREAD, NULL) == 0);
1125
1126 for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
1127 ztest_rll_init(&zd->zd_object_lock[l]);
1128
1129 for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
1130 ztest_rll_init(&zd->zd_range_lock[l]);
1131 }
1132
1133 static void
1134 ztest_zd_fini(ztest_ds_t *zd)
1135 {
1136 VERIFY(_mutex_destroy(&zd->zd_dirobj_lock) == 0);
1137
1138 for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
1139 ztest_rll_destroy(&zd->zd_object_lock[l]);
1140
1141 for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
1142 ztest_rll_destroy(&zd->zd_range_lock[l]);
1143 }
1144
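/* Randomly use TXG_NOWAIT so that both tx assignment paths get exercised. */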
1145 #define TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)
1146
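/*
 * Returns the assigned txg, or 0 if the assignment failed (in which case
 * the tx has already been aborted and any ENOSPC recorded).
 */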
1147 static uint64_t
1148 ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
1149 {
1150 uint64_t txg;
1151 int error;
1152
1153 /*
1154 * Attempt to assign tx to some transaction group.
1155 */
1156 error = dmu_tx_assign(tx, txg_how);
1157 if (error) {
1158 if (error == ERESTART) {
1159 ASSERT(txg_how == TXG_NOWAIT);
1160 dmu_tx_wait(tx);
1161 } else {
1162 ASSERT3U(error, ==, ENOSPC);
1163 ztest_record_enospc(tag);
1164 }
1165 dmu_tx_abort(tx);
1166 return (0);
1167 }
1168 txg = dmu_tx_get_txg(tx);
1169 ASSERT(txg != 0);
1170 return (txg);
1171 }
1172
1173 static void
1174 ztest_pattern_set(void *buf, uint64_t size, uint64_t value)
1175 {
1176 uint64_t *ip = buf;
1177 uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);
1178
1179 while (ip < ip_end)
1180 *ip++ = value;
1181 }
1182
1183 static boolean_t
1184 ztest_pattern_match(void *buf, uint64_t size, uint64_t value)
1185 {
1186 uint64_t *ip = buf;
1187 uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);
1188 uint64_t diff = 0;
1189
1190 while (ip < ip_end)
1191 diff |= (value - *ip++);
1192
1193 return (diff == 0);
1194 }
1195
1196 static void
1197 ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
1198 uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
1199 {
1200 bt->bt_magic = BT_MAGIC;
1201 bt->bt_objset = dmu_objset_id(os);
1202 bt->bt_object = object;
1203 bt->bt_offset = offset;
1204 bt->bt_gen = gen;
1205 bt->bt_txg = txg;
1206 bt->bt_crtxg = crtxg;
1207 }
1208
1209 static void
1210 ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
1211 uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
1212 {
1213 ASSERT(bt->bt_magic == BT_MAGIC);
1214 ASSERT(bt->bt_objset == dmu_objset_id(os));
1215 ASSERT(bt->bt_object == object);
1216 ASSERT(bt->bt_offset == offset);
1217 ASSERT(bt->bt_gen <= gen);
1218 ASSERT(bt->bt_txg <= txg);
1219 ASSERT(bt->bt_crtxg == crtxg);
1220 }
1221
1222 static ztest_block_tag_t *
1223 ztest_bt_bonus(dmu_buf_t *db)
1224 {
1225 dmu_object_info_t doi;
1226 ztest_block_tag_t *bt;
1227
1228 dmu_object_info_from_db(db, &doi);
1229 ASSERT3U(doi.doi_bonus_size, <=, db->db_size);
1230 ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt));
1231 bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt));
1232
1233 return (bt);
1234 }
1235
1236 /*
1237 * ZIL logging ops
1238 */
1239
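/*
 * ztest reuses otherwise-unused lr_create_t fields to pass object creation
 * parameters (type, blocksize, indirect block shift, bonus type/len)
 * through the log record.
 */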
1240 #define lrz_type lr_mode
1241 #define lrz_blocksize lr_uid
1242 #define lrz_ibshift lr_gid
1243 #define lrz_bonustype lr_rdev
1244 #define lrz_bonuslen lr_crtime[1]
1245
1246 static void
1247 ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr)
1248 {
1249 char *name = (void *)(lr + 1); /* name follows lr */
1250 size_t namesize = strlen(name) + 1;
1251 itx_t *itx;
1252
1253 if (zil_replaying(zd->zd_zilog, tx))
1254 return;
1255
1256 itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize);
1257 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1258 sizeof (*lr) + namesize - sizeof (lr_t));
1259
1260 zil_itx_assign(zd->zd_zilog, itx, tx);
1261 }
1262
1263 static void
1264 ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object)
1265 {
1266 char *name = (void *)(lr + 1); /* name follows lr */
1267 size_t namesize = strlen(name) + 1;
1268 itx_t *itx;
1269
1270 if (zil_replaying(zd->zd_zilog, tx))
1271 return;
1272
1273 itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize);
1274 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1275 sizeof (*lr) + namesize - sizeof (lr_t));
1276
1277 itx->itx_oid = object;
1278 zil_itx_assign(zd->zd_zilog, itx, tx);
1279 }
1280
1281 static void
1282 ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr)
1283 {
1284 itx_t *itx;
1285 itx_wr_state_t write_state = ztest_random(WR_NUM_STATES);
1286
1287 if (zil_replaying(zd->zd_zilog, tx))
1288 return;
1289
1290 if (lr->lr_length > ZIL_MAX_LOG_DATA)
1291 write_state = WR_INDIRECT;
1292
1293 itx = zil_itx_create(TX_WRITE,
1294 sizeof (*lr) + (write_state == WR_COPIED ? lr->lr_length : 0));
1295
1296 if (write_state == WR_COPIED &&
1297 dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length,
1298 ((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) {
1299 zil_itx_destroy(itx);
1300 itx = zil_itx_create(TX_WRITE, sizeof (*lr));
1301 write_state = WR_NEED_COPY;
1302 }
1303 itx->itx_private = zd;
1304 itx->itx_wr_state = write_state;
1305 itx->itx_sync = (ztest_random(8) == 0);
1306 itx->itx_sod += (write_state == WR_NEED_COPY ? lr->lr_length : 0);
1307
1308 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1309 sizeof (*lr) - sizeof (lr_t));
1310
1311 zil_itx_assign(zd->zd_zilog, itx, tx);
1312 }
1313
1314 static void
1315 ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr)
1316 {
1317 itx_t *itx;
1318
1319 if (zil_replaying(zd->zd_zilog, tx))
1320 return;
1321
1322 itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
1323 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1324 sizeof (*lr) - sizeof (lr_t));
1325
1326 itx->itx_sync = B_FALSE;
1327 zil_itx_assign(zd->zd_zilog, itx, tx);
1328 }
1329
1330 static void
1331 ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr)
1332 {
1333 itx_t *itx;
1334
1335 if (zil_replaying(zd->zd_zilog, tx))
1336 return;
1337
1338 itx = zil_itx_create(TX_SETATTR, sizeof (*lr));
1339 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1340 sizeof (*lr) - sizeof (lr_t));
1341
1342 itx->itx_sync = B_FALSE;
1343 zil_itx_assign(zd->zd_zilog, itx, tx);
1344 }
1345
1346 /*
1347 * ZIL replay ops
1348 */
1349 static int
1350 ztest_replay_create(ztest_ds_t *zd, lr_create_t *lr, boolean_t byteswap)
1351 {
1352 char *name = (void *)(lr + 1); /* name follows lr */
1353 objset_t *os = zd->zd_os;
1354 ztest_block_tag_t *bbt;
1355 dmu_buf_t *db;
1356 dmu_tx_t *tx;
1357 uint64_t txg;
1358 int error = 0;
1359
1360 if (byteswap)
1361 byteswap_uint64_array(lr, sizeof (*lr));
1362
1363 ASSERT(lr->lr_doid == ZTEST_DIROBJ);
1364 ASSERT(name[0] != '\0');
1365
1366 tx = dmu_tx_create(os);
1367
1368 dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name);
1369
1370 if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
1371 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1372 } else {
1373 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1374 }
1375
1376 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1377 if (txg == 0)
1378 return (ENOSPC);
1379
1380 ASSERT(dmu_objset_zil(os)->zl_replay == !!lr->lr_foid);
1381
1382 if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
1383 if (lr->lr_foid == 0) {
1384 lr->lr_foid = zap_create(os,
1385 lr->lrz_type, lr->lrz_bonustype,
1386 lr->lrz_bonuslen, tx);
1387 } else {
1388 error = zap_create_claim(os, lr->lr_foid,
1389 lr->lrz_type, lr->lrz_bonustype,
1390 lr->lrz_bonuslen, tx);
1391 }
1392 } else {
1393 if (lr->lr_foid == 0) {
1394 lr->lr_foid = dmu_object_alloc(os,
1395 lr->lrz_type, 0, lr->lrz_bonustype,
1396 lr->lrz_bonuslen, tx);
1397 } else {
1398 error = dmu_object_claim(os, lr->lr_foid,
1399 lr->lrz_type, 0, lr->lrz_bonustype,
1400 lr->lrz_bonuslen, tx);
1401 }
1402 }
1403
1404 if (error) {
1405 ASSERT3U(error, ==, EEXIST);
1406 ASSERT(zd->zd_zilog->zl_replay);
1407 dmu_tx_commit(tx);
1408 return (error);
1409 }
1410
1411 ASSERT(lr->lr_foid != 0);
1412
1413 if (lr->lrz_type != DMU_OT_ZAP_OTHER)
1414 VERIFY3U(0, ==, dmu_object_set_blocksize(os, lr->lr_foid,
1415 lr->lrz_blocksize, lr->lrz_ibshift, tx));
1416
1417 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1418 bbt = ztest_bt_bonus(db);
1419 dmu_buf_will_dirty(db, tx);
1420 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_gen, txg, txg);
1421 dmu_buf_rele(db, FTAG);
1422
1423 VERIFY3U(0, ==, zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1,
1424 &lr->lr_foid, tx));
1425
1426 (void) ztest_log_create(zd, tx, lr);
1427
1428 dmu_tx_commit(tx);
1429
1430 return (0);
1431 }
1432
1433 static int
1434 ztest_replay_remove(ztest_ds_t *zd, lr_remove_t *lr, boolean_t byteswap)
1435 {
1436 char *name = (void *)(lr + 1); /* name follows lr */
1437 objset_t *os = zd->zd_os;
1438 dmu_object_info_t doi;
1439 dmu_tx_t *tx;
1440 uint64_t object, txg;
1441
1442 if (byteswap)
1443 byteswap_uint64_array(lr, sizeof (*lr));
1444
1445 ASSERT(lr->lr_doid == ZTEST_DIROBJ);
1446 ASSERT(name[0] != '\0');
1447
1448 VERIFY3U(0, ==,
1449 zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object));
1450 ASSERT(object != 0);
1451
1452 ztest_object_lock(zd, object, RL_WRITER);
1453
1454 VERIFY3U(0, ==, dmu_object_info(os, object, &doi));
1455
1456 tx = dmu_tx_create(os);
1457
1458 dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
1459 dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
1460
1461 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1462 if (txg == 0) {
1463 ztest_object_unlock(zd, object);
1464 return (ENOSPC);
1465 }
1466
1467 if (doi.doi_type == DMU_OT_ZAP_OTHER) {
1468 VERIFY3U(0, ==, zap_destroy(os, object, tx));
1469 } else {
1470 VERIFY3U(0, ==, dmu_object_free(os, object, tx));
1471 }
1472
1473 VERIFY3U(0, ==, zap_remove(os, lr->lr_doid, name, tx));
1474
1475 (void) ztest_log_remove(zd, tx, lr, object);
1476
1477 dmu_tx_commit(tx);
1478
1479 ztest_object_unlock(zd, object);
1480
1481 return (0);
1482 }
1483
1484 static int
1485 ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
1486 {
1487 objset_t *os = zd->zd_os;
1488 void *data = lr + 1; /* data follows lr */
1489 uint64_t offset, length;
1490 ztest_block_tag_t *bt = data;
1491 ztest_block_tag_t *bbt;
1492 uint64_t gen, txg, lrtxg, crtxg;
1493 dmu_object_info_t doi;
1494 dmu_tx_t *tx;
1495 dmu_buf_t *db;
1496 arc_buf_t *abuf = NULL;
1497 rl_t *rl;
1498
1499 if (byteswap)
1500 byteswap_uint64_array(lr, sizeof (*lr));
1501
1502 offset = lr->lr_offset;
1503 length = lr->lr_length;
1504
1505 /* If it's a dmu_sync() block, write the whole block */
1506 if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
1507 uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
1508 if (length < blocksize) {
1509 offset -= offset % blocksize;
1510 length = blocksize;
1511 }
1512 }
1513
1514 if (bt->bt_magic == BSWAP_64(BT_MAGIC))
1515 byteswap_uint64_array(bt, sizeof (*bt));
1516
1517 if (bt->bt_magic != BT_MAGIC)
1518 bt = NULL;
1519
1520 ztest_object_lock(zd, lr->lr_foid, RL_READER);
1521 rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER);
1522
1523 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1524
1525 dmu_object_info_from_db(db, &doi);
1526
1527 bbt = ztest_bt_bonus(db);
1528 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1529 gen = bbt->bt_gen;
1530 crtxg = bbt->bt_crtxg;
1531 lrtxg = lr->lr_common.lrc_txg;
1532
1533 tx = dmu_tx_create(os);
1534
1535 dmu_tx_hold_write(tx, lr->lr_foid, offset, length);
1536
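/*
 * Occasionally take the zero-copy path: borrow an ARC buffer when the
 * write covers exactly one aligned block.
 */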
1537 if (ztest_random(8) == 0 && length == doi.doi_data_block_size &&
1538 P2PHASE(offset, length) == 0)
1539 abuf = dmu_request_arcbuf(db, length);
1540
1541 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1542 if (txg == 0) {
1543 if (abuf != NULL)
1544 dmu_return_arcbuf(abuf);
1545 dmu_buf_rele(db, FTAG);
1546 ztest_range_unlock(rl);
1547 ztest_object_unlock(zd, lr->lr_foid);
1548 return (ENOSPC);
1549 }
1550
1551 if (bt != NULL) {
1552 /*
1553 * Usually, verify the old data before writing new data --
1554 * but not always, because we also want to verify correct
1555 * behavior when the data was not recently read into cache.
1556 */
1557 ASSERT(offset % doi.doi_data_block_size == 0);
1558 if (ztest_random(4) != 0) {
1559 int prefetch = ztest_random(2) ?
1560 DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH;
1561 ztest_block_tag_t rbt;
1562
1563 VERIFY(dmu_read(os, lr->lr_foid, offset,
1564 sizeof (rbt), &rbt, prefetch) == 0);
1565 if (rbt.bt_magic == BT_MAGIC) {
1566 ztest_bt_verify(&rbt, os, lr->lr_foid,
1567 offset, gen, txg, crtxg);
1568 }
1569 }
1570
1571 /*
1572 * Writes can appear to be newer than the bonus buffer because
1573 * the ztest_get_data() callback does a dmu_read() of the
1574 * open-context data, which may be different than the data
1575 * as it was when the write was generated.
1576 */
1577 if (zd->zd_zilog->zl_replay) {
1578 ztest_bt_verify(bt, os, lr->lr_foid, offset,
1579 MAX(gen, bt->bt_gen), MAX(txg, lrtxg),
1580 bt->bt_crtxg);
1581 }
1582
1583 /*
1584 * Set the bt's gen/txg to the bonus buffer's gen/txg
1585 * so that all of the usual ASSERTs will work.
1586 */
1587 ztest_bt_generate(bt, os, lr->lr_foid, offset, gen, txg, crtxg);
1588 }
1589
1590 if (abuf == NULL) {
1591 dmu_write(os, lr->lr_foid, offset, length, data, tx);
1592 } else {
1593 bcopy(data, abuf->b_data, length);
1594 dmu_assign_arcbuf(db, offset, abuf, tx);
1595 }
1596
1597 (void) ztest_log_write(zd, tx, lr);
1598
1599 dmu_buf_rele(db, FTAG);
1600
1601 dmu_tx_commit(tx);
1602
1603 ztest_range_unlock(rl);
1604 ztest_object_unlock(zd, lr->lr_foid);
1605
1606 return (0);
1607 }
1608
1609 static int
1610 ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap)
1611 {
1612 objset_t *os = zd->zd_os;
1613 dmu_tx_t *tx;
1614 uint64_t txg;
1615 rl_t *rl;
1616
1617 if (byteswap)
1618 byteswap_uint64_array(lr, sizeof (*lr));
1619
1620 ztest_object_lock(zd, lr->lr_foid, RL_READER);
1621 rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length,
1622 RL_WRITER);
1623
1624 tx = dmu_tx_create(os);
1625
1626 dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length);
1627
1628 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1629 if (txg == 0) {
1630 ztest_range_unlock(rl);
1631 ztest_object_unlock(zd, lr->lr_foid);
1632 return (ENOSPC);
1633 }
1634
1635 VERIFY(dmu_free_range(os, lr->lr_foid, lr->lr_offset,
1636 lr->lr_length, tx) == 0);
1637
1638 (void) ztest_log_truncate(zd, tx, lr);
1639
1640 dmu_tx_commit(tx);
1641
1642 ztest_range_unlock(rl);
1643 ztest_object_unlock(zd, lr->lr_foid);
1644
1645 return (0);
1646 }
1647
1648 static int
1649 ztest_replay_setattr(ztest_ds_t *zd, lr_setattr_t *lr, boolean_t byteswap)
1650 {
1651 objset_t *os = zd->zd_os;
1652 dmu_tx_t *tx;
1653 dmu_buf_t *db;
1654 ztest_block_tag_t *bbt;
1655 uint64_t txg, lrtxg, crtxg;
1656
1657 if (byteswap)
1658 byteswap_uint64_array(lr, sizeof (*lr));
1659
1660 ztest_object_lock(zd, lr->lr_foid, RL_WRITER);
1661
1662 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1663
1664 tx = dmu_tx_create(os);
1665 dmu_tx_hold_bonus(tx, lr->lr_foid);
1666
1667 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1668 if (txg == 0) {
1669 dmu_buf_rele(db, FTAG);
1670 ztest_object_unlock(zd, lr->lr_foid);
1671 return (ENOSPC);
1672 }
1673
1674 bbt = ztest_bt_bonus(db);
1675 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1676 crtxg = bbt->bt_crtxg;
1677 lrtxg = lr->lr_common.lrc_txg;
1678
1679 if (zd->zd_zilog->zl_replay) {
1680 ASSERT(lr->lr_size != 0);
1681 ASSERT(lr->lr_mode != 0);
1682 ASSERT(lrtxg != 0);
1683 } else {
1684 /*
1685 * Randomly change the size and increment the generation.
1686 */
1687 lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) *
1688 sizeof (*bbt);
1689 lr->lr_mode = bbt->bt_gen + 1;
1690 ASSERT(lrtxg == 0);
1691 }
1692
1693 /*
1694 * Verify that the current bonus buffer is not newer than our txg.
1695 */
1696 ztest_bt_verify(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode,
1697 MAX(txg, lrtxg), crtxg);
1698
1699 dmu_buf_will_dirty(db, tx);
1700
1701 ASSERT3U(lr->lr_size, >=, sizeof (*bbt));
1702 ASSERT3U(lr->lr_size, <=, db->db_size);
1703 VERIFY3U(dmu_set_bonus(db, lr->lr_size, tx), ==, 0);
1704 bbt = ztest_bt_bonus(db);
1705
1706 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, txg, crtxg);
1707
1708 dmu_buf_rele(db, FTAG);
1709
1710 (void) ztest_log_setattr(zd, tx, lr);
1711
1712 dmu_tx_commit(tx);
1713
1714 ztest_object_unlock(zd, lr->lr_foid);
1715
1716 return (0);
1717 }
1718
1719 zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = {
1720 NULL, /* 0 no such transaction type */
1721 ztest_replay_create, /* TX_CREATE */
1722 NULL, /* TX_MKDIR */
1723 NULL, /* TX_MKXATTR */
1724 NULL, /* TX_SYMLINK */
1725 ztest_replay_remove, /* TX_REMOVE */
1726 NULL, /* TX_RMDIR */
1727 NULL, /* TX_LINK */
1728 NULL, /* TX_RENAME */
1729 ztest_replay_write, /* TX_WRITE */
1730 ztest_replay_truncate, /* TX_TRUNCATE */
1731 ztest_replay_setattr, /* TX_SETATTR */
1732 NULL, /* TX_ACL */
1733 NULL, /* TX_CREATE_ACL */
1734 NULL, /* TX_CREATE_ATTR */
1735 NULL, /* TX_CREATE_ACL_ATTR */
1736 NULL, /* TX_MKDIR_ACL */
1737 NULL, /* TX_MKDIR_ATTR */
1738 NULL, /* TX_MKDIR_ACL_ATTR */
1739 NULL, /* TX_WRITE2 */
1740 };
1741
1742 /*
1743 * ZIL get_data callbacks
1744 */
1745
1746 static void
1747 ztest_get_done(zgd_t *zgd, int error)
1748 {
1749 ztest_ds_t *zd = zgd->zgd_private;
1750 uint64_t object = zgd->zgd_rl->rl_object;
1751
1752 if (zgd->zgd_db)
1753 dmu_buf_rele(zgd->zgd_db, zgd);
1754
1755 ztest_range_unlock(zgd->zgd_rl);
1756 ztest_object_unlock(zd, object);
1757
1758 if (error == 0 && zgd->zgd_bp)
1759 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
1760
1761 umem_free(zgd, sizeof (*zgd));
1762 }
1763
1764 static int
1765 ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
1766 {
1767 ztest_ds_t *zd = arg;
1768 objset_t *os = zd->zd_os;
1769 uint64_t object = lr->lr_foid;
1770 uint64_t offset = lr->lr_offset;
1771 uint64_t size = lr->lr_length;
1772 blkptr_t *bp = &lr->lr_blkptr;
1773 uint64_t txg = lr->lr_common.lrc_txg;
1774 uint64_t crtxg;
1775 dmu_object_info_t doi;
1776 dmu_buf_t *db;
1777 zgd_t *zgd;
1778 int error;
1779
1780 ztest_object_lock(zd, object, RL_READER);
1781 error = dmu_bonus_hold(os, object, FTAG, &db);
1782 if (error) {
1783 ztest_object_unlock(zd, object);
1784 return (error);
1785 }
1786
1787 crtxg = ztest_bt_bonus(db)->bt_crtxg;
1788
1789 if (crtxg == 0 || crtxg > txg) {
1790 dmu_buf_rele(db, FTAG);
1791 ztest_object_unlock(zd, object);
1792 return (ENOENT);
1793 }
1794
1795 dmu_object_info_from_db(db, &doi);
1796 dmu_buf_rele(db, FTAG);
1797 db = NULL;
1798
1799 zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL);
1800 zgd->zgd_zilog = zd->zd_zilog;
1801 zgd->zgd_private = zd;
1802
1803 if (buf != NULL) { /* immediate write */
1804 zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
1805 RL_READER);
1806
1807 error = dmu_read(os, object, offset, size, buf,
1808 DMU_READ_NO_PREFETCH);
1809 ASSERT(error == 0);
1810 } else {
1811 size = doi.doi_data_block_size;
1812 if (ISP2(size)) {
1813 offset = P2ALIGN(offset, size);
1814 } else {
1815 ASSERT(offset < size);
1816 offset = 0;
1817 }
1818
1819 zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
1820 RL_READER);
1821
1822 error = dmu_buf_hold(os, object, offset, zgd, &db,
1823 DMU_READ_NO_PREFETCH);
1824
1825 if (error == 0) {
1826 zgd->zgd_db = db;
1827 zgd->zgd_bp = bp;
1828
1829 ASSERT(db->db_offset == offset);
1830 ASSERT(db->db_size == size);
1831
1832 error = dmu_sync(zio, lr->lr_common.lrc_txg,
1833 ztest_get_done, zgd);
1834
1835 if (error == 0)
1836 return (0);
1837 }
1838 }
1839
1840 ztest_get_done(zgd, error);
1841
1842 return (error);
1843 }
1844
1845 static void *
1846 ztest_lr_alloc(size_t lrsize, char *name)
1847 {
1848 char *lr;
1849 size_t namesize = name ? strlen(name) + 1 : 0;
1850
1851 lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL);
1852
1853 if (name)
1854 bcopy(name, lr + lrsize, namesize);
1855
1856 return (lr);
1857 }
1858
1859 void
1860 ztest_lr_free(void *lr, size_t lrsize, char *name)
1861 {
1862 size_t namesize = name ? strlen(name) + 1 : 0;
1863
1864 umem_free(lr, lrsize + namesize);
1865 }
1866
1867 /*
1868 * Look up a bunch of objects. Returns the number of objects not found.
1869 */
1870 static int
1871 ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count)
1872 {
1873 int missing = 0;
1874 int error;
1875
1876 ASSERT(_mutex_held(&zd->zd_dirobj_lock));
1877
1878 for (int i = 0; i < count; i++, od++) {
1879 od->od_object = 0;
1880 error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
1881 sizeof (uint64_t), 1, &od->od_object);
1882 if (error) {
1883 ASSERT(error == ENOENT);
1884 ASSERT(od->od_object == 0);
1885 missing++;
1886 } else {
1887 dmu_buf_t *db;
1888 ztest_block_tag_t *bbt;
1889 dmu_object_info_t doi;
1890
1891 ASSERT(od->od_object != 0);
1892 ASSERT(missing == 0); /* there should be no gaps */
1893
1894 ztest_object_lock(zd, od->od_object, RL_READER);
1895 VERIFY3U(0, ==, dmu_bonus_hold(zd->zd_os,
1896 od->od_object, FTAG, &db));
1897 dmu_object_info_from_db(db, &doi);
1898 bbt = ztest_bt_bonus(db);
1899 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1900 od->od_type = doi.doi_type;
1901 od->od_blocksize = doi.doi_data_block_size;
1902 od->od_gen = bbt->bt_gen;
1903 dmu_buf_rele(db, FTAG);
1904 ztest_object_unlock(zd, od->od_object);
1905 }
1906 }
1907
1908 return (missing);
1909 }
1910
1911 static int
1912 ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
1913 {
1914 int missing = 0;
1915
1916 ASSERT(_mutex_held(&zd->zd_dirobj_lock));
1917
1918 for (int i = 0; i < count; i++, od++) {
1919 if (missing) {
1920 od->od_object = 0;
1921 missing++;
1922 continue;
1923 }
1924
1925 lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
1926
1927 lr->lr_doid = od->od_dir;
1928 lr->lr_foid = 0; /* 0 to allocate, > 0 to claim */
1929 lr->lrz_type = od->od_crtype;
1930 lr->lrz_blocksize = od->od_crblocksize;
1931 lr->lrz_ibshift = ztest_random_ibshift();
1932 lr->lrz_bonustype = DMU_OT_UINT64_OTHER;
1933 lr->lrz_bonuslen = dmu_bonus_max();
1934 lr->lr_gen = od->od_crgen;
1935 lr->lr_crtime[0] = time(NULL);
1936
1937 if (ztest_replay_create(zd, lr, B_FALSE) != 0) {
1938 ASSERT(missing == 0);
1939 od->od_object = 0;
1940 missing++;
1941 } else {
1942 od->od_object = lr->lr_foid;
1943 od->od_type = od->od_crtype;
1944 od->od_blocksize = od->od_crblocksize;
1945 od->od_gen = od->od_crgen;
1946 ASSERT(od->od_object != 0);
1947 }
1948
1949 ztest_lr_free(lr, sizeof (*lr), od->od_name);
1950 }
1951
1952 return (missing);
1953 }
1954
1955 static int
1956 ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count)
1957 {
1958 int missing = 0;
1959 int error;
1960
1961 ASSERT(_mutex_held(&zd->zd_dirobj_lock));
1962
1963 od += count - 1;
1964
1965 for (int i = count - 1; i >= 0; i--, od--) {
1966 if (missing) {
1967 missing++;
1968 continue;
1969 }
1970
1971 if (od->od_object == 0)
1972 continue;
1973
1974 lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
1975
1976 lr->lr_doid = od->od_dir;
1977
1978 if ((error = ztest_replay_remove(zd, lr, B_FALSE)) != 0) {
1979 ASSERT3U(error, ==, ENOSPC);
1980 missing++;
1981 } else {
1982 od->od_object = 0;
1983 }
1984 ztest_lr_free(lr, sizeof (*lr), od->od_name);
1985 }
1986
1987 return (missing);
1988 }
1989
1990 static int
1991 ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size,
1992 void *data)
1993 {
1994 lr_write_t *lr;
1995 int error;
1996
1997 lr = ztest_lr_alloc(sizeof (*lr) + size, NULL);
1998
1999 lr->lr_foid = object;
2000 lr->lr_offset = offset;
2001 lr->lr_length = size;
2002 lr->lr_blkoff = 0;
2003 BP_ZERO(&lr->lr_blkptr);
2004
2005 bcopy(data, lr + 1, size);
2006
2007 error = ztest_replay_write(zd, lr, B_FALSE);
2008
2009 ztest_lr_free(lr, sizeof (*lr) + size, NULL);
2010
2011 return (error);
2012 }
2013
2014 static int
2015 ztest_truncate(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
2016 {
2017 lr_truncate_t *lr;
2018 int error;
2019
2020 lr = ztest_lr_alloc(sizeof (*lr), NULL);
2021
2022 lr->lr_foid = object;
2023 lr->lr_offset = offset;
2024 lr->lr_length = size;
2025
2026 error = ztest_replay_truncate(zd, lr, B_FALSE);
2027
2028 ztest_lr_free(lr, sizeof (*lr), NULL);
2029
2030 return (error);
2031 }
2032
2033 static int
2034 ztest_setattr(ztest_ds_t *zd, uint64_t object)
2035 {
2036 lr_setattr_t *lr;
2037 int error;
2038
2039 lr = ztest_lr_alloc(sizeof (*lr), NULL);
2040
2041 lr->lr_foid = object;
2042 lr->lr_size = 0;
2043 lr->lr_mode = 0;
2044
2045 error = ztest_replay_setattr(zd, lr, B_FALSE);
2046
2047 ztest_lr_free(lr, sizeof (*lr), NULL);
2048
2049 return (error);
2050 }
2051
2052 static void
2053 ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
2054 {
2055 objset_t *os = zd->zd_os;
2056 dmu_tx_t *tx;
2057 uint64_t txg;
2058 rl_t *rl;
2059
2060 txg_wait_synced(dmu_objset_pool(os), 0);
2061
2062 ztest_object_lock(zd, object, RL_READER);
2063 rl = ztest_range_lock(zd, object, offset, size, RL_WRITER);
2064
2065 tx = dmu_tx_create(os);
2066
2067 dmu_tx_hold_write(tx, object, offset, size);
2068
2069 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
2070
2071 if (txg != 0) {
2072 dmu_prealloc(os, object, offset, size, tx);
2073 dmu_tx_commit(tx);
2074 txg_wait_synced(dmu_objset_pool(os), txg);
2075 } else {
2076 (void) dmu_free_long_range(os, object, offset, size);
2077 }
2078
2079 ztest_range_unlock(rl);
2080 ztest_object_unlock(zd, object);
2081 }
2082
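/*
 * Perform a randomly chosen i/o operation (tag write, pattern write,
 * zero write, truncate, or setattr) on the given object and offset.
 */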
2083 static void
2084 ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset)
2085 {
2086 ztest_block_tag_t wbt;
2087 dmu_object_info_t doi;
2088 enum ztest_io_type io_type;
2089 uint64_t blocksize;
2090 void *data;
2091
2092 VERIFY(dmu_object_info(zd->zd_os, object, &doi) == 0);
2093 blocksize = doi.doi_data_block_size;
2094 data = umem_alloc(blocksize, UMEM_NOFAIL);
2095
2096 /*
2097 * Pick an i/o type at random, biased toward writing block tags.
2098 */
2099 io_type = ztest_random(ZTEST_IO_TYPES);
2100 if (ztest_random(2) == 0)
2101 io_type = ZTEST_IO_WRITE_TAG;
2102
2103 (void) rw_rdlock(&zd->zd_zilog_lock);
2104
2105 switch (io_type) {
2106
2107 case ZTEST_IO_WRITE_TAG:
2108 ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0);
2109 (void) ztest_write(zd, object, offset, sizeof (wbt), &wbt);
2110 break;
2111
2112 case ZTEST_IO_WRITE_PATTERN:
2113 (void) memset(data, 'a' + (object + offset) % 5, blocksize);
2114 if (ztest_random(2) == 0) {
2115 /*
2116 * Induce fletcher2 collisions to ensure that
2117 * zio_ddt_collision() detects and resolves them
2118 * when using fletcher2-verify for deduplication.
2119 */
2120 ((uint64_t *)data)[0] ^= 1ULL << 63;
2121 ((uint64_t *)data)[4] ^= 1ULL << 63;
2122 }
2123 (void) ztest_write(zd, object, offset, blocksize, data);
2124 break;
2125
2126 case ZTEST_IO_WRITE_ZEROES:
2127 bzero(data, blocksize);
2128 (void) ztest_write(zd, object, offset, blocksize, data);
2129 break;
2130
2131 case ZTEST_IO_TRUNCATE:
2132 (void) ztest_truncate(zd, object, offset, blocksize);
2133 break;
2134
2135 case ZTEST_IO_SETATTR:
2136 (void) ztest_setattr(zd, object);
2137 break;
2138 }
2139
2140 (void) rw_unlock(&zd->zd_zilog_lock);
2141
2142 umem_free(data, blocksize);
2143 }
2144
2145 /*
2146 * Initialize an object description template.
2147 */
2148 static void
2149 ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index,
2150 dmu_object_type_t type, uint64_t blocksize, uint64_t gen)
2151 {
2152 od->od_dir = ZTEST_DIROBJ;
2153 od->od_object = 0;
2154
2155 od->od_crtype = type;
2156 od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize();
2157 od->od_crgen = gen;
2158
2159 od->od_type = DMU_OT_NONE;
2160 od->od_blocksize = 0;
2161 od->od_gen = 0;
2162
2163 (void) snprintf(od->od_name, sizeof (od->od_name), "%s(%lld)[%llu]",
2164 tag, (int64_t)id, index);
2165 }
2166
2167 /*
2168 * Look up or create the objects for a test using the od template.
2169 * If the objects do not all exist, or if 'remove' is specified,
2170 * remove any existing objects and create new ones. Otherwise,
2171 * use the existing objects.
2172 */
2173 static int
2174 ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove)
2175 {
2176 int count = size / sizeof (*od);
2177 int rv = 0;
2178
2179 VERIFY(mutex_lock(&zd->zd_dirobj_lock) == 0);
2180 if ((ztest_lookup(zd, od, count) != 0 || remove) &&
2181 (ztest_remove(zd, od, count) != 0 ||
2182 ztest_create(zd, od, count) != 0))
2183 rv = -1;
2184 zd->zd_od = od;
2185 VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0);
2186
2187 return (rv);
2188 }
2189
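/*
 * Commit the ZIL for a random object and remember the committed log
 * record sequence number for verification after a crash.
 */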
2190 /* ARGSUSED */
2191 void
2192 ztest_zil_commit(ztest_ds_t *zd, uint64_t id)
2193 {
2194 zilog_t *zilog = zd->zd_zilog;
2195
2196 (void) rw_rdlock(&zd->zd_zilog_lock);
2197
2198 zil_commit(zilog, ztest_random(ZTEST_OBJECTS));
2199
2200 /*
2201 * Remember the committed values in zd, which is in parent/child
2202 * shared memory. If we die, the next iteration of ztest_run()
2203 * will verify that the log really does contain this record.
2204 */
2205 mutex_enter(&zilog->zl_lock);
2206 ASSERT(zd->zd_shared != NULL);
2207 ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq);
2208 zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq;
2209 mutex_exit(&zilog->zl_lock);
2210
2211 (void) rw_unlock(&zd->zd_zilog_lock);
2212 }
2213
2214 /*
2215 * This function is designed to simulate the operations that occur during a
2216 * mount/unmount operation. We hold the dataset across these operations in an
2217 * attempt to expose any implicit assumptions about ZIL management.
2218 */
2219 /* ARGSUSED */
2220 void
2221 ztest_zil_remount(ztest_ds_t *zd, uint64_t id)
2222 {
2223 objset_t *os = zd->zd_os;
2224
2225 (void) rw_wrlock(&zd->zd_zilog_lock);
2226
2227 /* zfsvfs_teardown() */
2228 zil_close(zd->zd_zilog);
2229
2230 /* zfsvfs_setup() */
2231 VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog);
2232 zil_replay(os, zd, ztest_replay_vector);
2233
2234 (void) rw_unlock(&zd->zd_zilog_lock);
2235 }
2236
2237 /*
2238 * Verify that we can't destroy an active pool, create an existing pool,
2239 * or create a pool with a bad vdev spec.
2240 */
2241 /* ARGSUSED */
2242 void
2243 ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id)
2244 {
2245 ztest_shared_opts_t *zo = &ztest_opts;
2246 spa_t *spa;
2247 nvlist_t *nvroot;
2248
2249 /*
2250 * Attempt to create using a bad file.
2251 */
2252 nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1);
2253 VERIFY3U(ENOENT, ==,
2254 spa_create("ztest_bad_file", nvroot, NULL, NULL, NULL));
2255 nvlist_free(nvroot);
2256
2257 /*
2258 * Attempt to create using a bad mirror.
2259 */
2260 nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 2, 1);
2261 VERIFY3U(ENOENT, ==,
2262 spa_create("ztest_bad_mirror", nvroot, NULL, NULL, NULL));
2263 nvlist_free(nvroot);
2264
2265 /*
2266 * Attempt to create an existing pool. It shouldn't matter
2267 * what's in the nvroot; we should fail with EEXIST.
2268 */
2269 (void) rw_rdlock(&ztest_name_lock);
2270 nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1);
2271 VERIFY3U(EEXIST, ==, spa_create(zo->zo_pool, nvroot, NULL, NULL, NULL));
2272 nvlist_free(nvroot);
2273 VERIFY3U(0, ==, spa_open(zo->zo_pool, &spa, FTAG));
2274 VERIFY3U(EBUSY, ==, spa_destroy(zo->zo_pool));
2275 spa_close(spa, FTAG);
2276
2277 (void) rw_unlock(&ztest_name_lock);
2278 }
2279
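/*
 * Recursively search the vdev tree rooted at 'vd' for a vdev whose
 * vdev_path matches 'path'; return NULL if none is found.
 */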
2280 static vdev_t *
2281 vdev_lookup_by_path(vdev_t *vd, const char *path)
2282 {
2283 vdev_t *mvd;
2284
2285 if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0)
2286 return (vd);
2287
2288 for (int c = 0; c < vd->vdev_children; c++)
2289 if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) !=
2290 NULL)
2291 return (mvd);
2292
2293 return (NULL);
2294 }
2295
2296 /*
2297 * Find the first available hole which can be used as a top-level vdev.
2298 */
2299 int
2300 find_vdev_hole(spa_t *spa)
2301 {
2302 vdev_t *rvd = spa->spa_root_vdev;
2303 int c;
2304
2305 ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) == SCL_VDEV);
2306
2307 for (c = 0; c < rvd->vdev_children; c++) {
2308 vdev_t *cvd = rvd->vdev_child[c];
2309
2310 if (cvd->vdev_ishole)
2311 break;
2312 }
2313 return (c);
2314 }
2315
2316 /*
2317 * Verify that vdev_add() works as expected.
2318 */
2319 /* ARGSUSED */
2320 void
2321 ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id)
2322 {
2323 ztest_shared_t *zs = ztest_shared;
2324 spa_t *spa = ztest_spa;
2325 uint64_t leaves;
2326 uint64_t guid;
2327 nvlist_t *nvroot;
2328 int error;
2329
2330 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2331 leaves =
2332 MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz;
2333
2334 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2335
2336 ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves;
2337
2338 /*
2339 * If we have slogs then remove them 1/4 of the time.
2340 */
2341 if (spa_has_slogs(spa) && ztest_random(4) == 0) {
2342 /*
2343 * Grab the guid from the head of the log class rotor.
2344 */
2345 guid = spa_log_class(spa)->mc_rotor->mg_vd->vdev_guid;
2346
2347 spa_config_exit(spa, SCL_VDEV, FTAG);
2348
2349 /*
2350 * We have to grab the ztest_name_lock as writer to
2351 * prevent a race between removing a slog (dmu_objset_find)
2352 * and destroying a dataset. Removing the slog will
2353 * grab a reference on the dataset, which may cause
2354 * dmu_objset_destroy() to fail with EBUSY, thus
2355 * leaving the dataset in an inconsistent state.
2356 */
2357 VERIFY(rw_wrlock(&ztest_name_lock) == 0);
2358 error = spa_vdev_remove(spa, guid, B_FALSE);
2359 VERIFY(rw_unlock(&ztest_name_lock) == 0);
2360
2361 if (error && error != EEXIST)
2362 fatal(0, "spa_vdev_remove() = %d", error);
2363 } else {
2364 spa_config_exit(spa, SCL_VDEV, FTAG);
2365
2366 /*
2367 * Make 1/4 of the devices be log devices.
2368 */
2369 nvroot = make_vdev_root(NULL, NULL,
2370 ztest_opts.zo_vdev_size, 0,
2371 ztest_random(4) == 0, ztest_opts.zo_raidz,
2372 zs->zs_mirrors, 1);
2373
2374 error = spa_vdev_add(spa, nvroot);
2375 nvlist_free(nvroot);
2376
2377 if (error == ENOSPC)
2378 ztest_record_enospc("spa_vdev_add");
2379 else if (error != 0)
2380 fatal(0, "spa_vdev_add() = %d", error);
2381 }
2382
2383 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2384 }
2385
2386 /*
2387 * Verify that adding/removing aux devices (l2arc, hot spare) works as expected.
2388 */
2389 /* ARGSUSED */
2390 void
2391 ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id)
2392 {
2393 ztest_shared_t *zs = ztest_shared;
2394 spa_t *spa = ztest_spa;
2395 vdev_t *rvd = spa->spa_root_vdev;
2396 spa_aux_vdev_t *sav;
2397 char *aux;
2398 uint64_t guid = 0;
2399 int error;
2400
2401 if (ztest_random(2) == 0) {
2402 sav = &spa->spa_spares;
2403 aux = ZPOOL_CONFIG_SPARES;
2404 } else {
2405 sav = &spa->spa_l2cache;
2406 aux = ZPOOL_CONFIG_L2CACHE;
2407 }
2408
2409 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2410
2411 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2412
2413 if (sav->sav_count != 0 && ztest_random(4) == 0) {
2414 /*
2415 * Pick a random device to remove.
2416 */
2417 guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid;
2418 } else {
2419 /*
2420 * Find an unused device we can add.
2421 */
2422 zs->zs_vdev_aux = 0;
2423 for (;;) {
2424 char path[MAXPATHLEN];
2425 int c;
2426 (void) snprintf(path, sizeof (path), ztest_aux_template,
2427 ztest_opts.zo_dir, ztest_opts.zo_pool, aux,
2428 zs->zs_vdev_aux);
2429 for (c = 0; c < sav->sav_count; c++)
2430 if (strcmp(sav->sav_vdevs[c]->vdev_path,
2431 path) == 0)
2432 break;
2433 if (c == sav->sav_count &&
2434 vdev_lookup_by_path(rvd, path) == NULL)
2435 break;
2436 zs->zs_vdev_aux++;
2437 }
2438 }
2439
2440 spa_config_exit(spa, SCL_VDEV, FTAG);
2441
2442 if (guid == 0) {
2443 /*
2444 * Add a new device.
2445 */
2446 nvlist_t *nvroot = make_vdev_root(NULL, aux,
2447 (ztest_opts.zo_vdev_size * 5) / 4, 0, 0, 0, 0, 1);
2448 error = spa_vdev_add(spa, nvroot);
2449 if (error != 0)
2450 fatal(0, "spa_vdev_add(%p) = %d", nvroot, error);
2451 nvlist_free(nvroot);
2452 } else {
2453 /*
2454 * Remove an existing device. Sometimes, dirty its
2455 * vdev state first to make sure we handle removal
2456 * of devices that have pending state changes.
2457 */
2458 if (ztest_random(2) == 0)
2459 (void) vdev_online(spa, guid, 0, NULL);
2460
2461 error = spa_vdev_remove(spa, guid, B_FALSE);
2462 if (error != 0 && error != EBUSY)
2463 fatal(0, "spa_vdev_remove(%llu) = %d", guid, error);
2464 }
2465
2466 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2467 }
2468
2469 /*
2470 * Split a pool if it has mirror top-level vdevs.
2471 */
2472 /* ARGSUSED */
2473 void
2474 ztest_split_pool(ztest_ds_t *zd, uint64_t id)
2475 {
2476 ztest_shared_t *zs = ztest_shared;
2477 spa_t *spa = ztest_spa;
2478 vdev_t *rvd = spa->spa_root_vdev;
2479 nvlist_t *tree, **child, *config, *split, **schild;
2480 uint_t c, children, schildren = 0, lastlogid = 0;
2481 int error = 0;
2482
2483 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2484
2485 /* ensure we have a usable config; mirrors of raidz aren't supported */
2486 if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) {
2487 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2488 return;
2489 }
2490
2491 /* clean up the old pool, if any */
2492 (void) spa_destroy("splitp");
2493
2494 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2495
2496 /* generate a config from the existing config */
2497 mutex_enter(&spa->spa_props_lock);
2498 VERIFY(nvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE,
2499 &tree) == 0);
2500 mutex_exit(&spa->spa_props_lock);
2501
2502 VERIFY(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2503 &children) == 0);
2504
2505 schild = malloc(rvd->vdev_children * sizeof (nvlist_t *));
2506 for (c = 0; c < children; c++) {
2507 vdev_t *tvd = rvd->vdev_child[c];
2508 nvlist_t **mchild;
2509 uint_t mchildren;
2510
2511 if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) {
2512 VERIFY(nvlist_alloc(&schild[schildren], NV_UNIQUE_NAME,
2513 0) == 0);
2514 VERIFY(nvlist_add_string(schild[schildren],
2515 ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE) == 0);
2516 VERIFY(nvlist_add_uint64(schild[schildren],
2517 ZPOOL_CONFIG_IS_HOLE, 1) == 0);
2518 if (lastlogid == 0)
2519 lastlogid = schildren;
2520 ++schildren;
2521 continue;
2522 }
2523 lastlogid = 0;
2524 VERIFY(nvlist_lookup_nvlist_array(child[c],
2525 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2526 VERIFY(nvlist_dup(mchild[0], &schild[schildren++], 0) == 0);
2527 }
2528
2529 /* OK, create a config that can be used to split */
2530 VERIFY(nvlist_alloc(&split, NV_UNIQUE_NAME, 0) == 0);
2531 VERIFY(nvlist_add_string(split, ZPOOL_CONFIG_TYPE,
2532 VDEV_TYPE_ROOT) == 0);
2533 VERIFY(nvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN, schild,
2534 lastlogid != 0 ? lastlogid : schildren) == 0);
2535
2536 VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0);
2537 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split) == 0);
2538
2539 for (c = 0; c < schildren; c++)
2540 nvlist_free(schild[c]);
2541 free(schild);
2542 nvlist_free(split);
2543
2544 spa_config_exit(spa, SCL_VDEV, FTAG);
2545
2546 (void) rw_wrlock(&ztest_name_lock);
2547 error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE);
2548 (void) rw_unlock(&ztest_name_lock);
2549
2550 nvlist_free(config);
2551
2552 if (error == 0) {
2553 (void) printf("successful split - results:\n");
2554 mutex_enter(&spa_namespace_lock);
2555 show_pool_stats(spa);
2556 show_pool_stats(spa_lookup("splitp"));
2557 mutex_exit(&spa_namespace_lock);
2558 ++zs->zs_splits;
2559 --zs->zs_mirrors;
2560 }
2561 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2563 }
2564
2565 /*
2566 * Verify that we can attach and detach devices.
2567 */
2568 /* ARGSUSED */
2569 void
2570 ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
2571 {
2572 ztest_shared_t *zs = ztest_shared;
2573 spa_t *spa = ztest_spa;
2574 spa_aux_vdev_t *sav = &spa->spa_spares;
2575 vdev_t *rvd = spa->spa_root_vdev;
2576 vdev_t *oldvd, *newvd, *pvd;
2577 nvlist_t *root;
2578 uint64_t leaves;
2579 uint64_t leaf, top;
2580 uint64_t ashift = ztest_get_ashift();
2581 uint64_t oldguid, pguid;
2582 size_t oldsize, newsize;
2583 char oldpath[MAXPATHLEN], newpath[MAXPATHLEN];
2584 int replacing;
2585 int oldvd_has_siblings = B_FALSE;
2586 int newvd_is_spare = B_FALSE;
2587 int oldvd_is_log;
2588 int error, expected_error;
2589
2590 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2591 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
2592
2593 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2594
2595 /*
2596 * Decide whether to do an attach or a replace.
2597 */
2598 replacing = ztest_random(2);
2599
2600 /*
2601 * Pick a random top-level vdev.
2602 */
2603 top = ztest_random_vdev_top(spa, B_TRUE);
2604
2605 /*
2606 * Pick a random leaf within it.
2607 */
2608 leaf = ztest_random(leaves);
2609
2610 /*
2611 * Locate this vdev.
2612 */
2613 oldvd = rvd->vdev_child[top];
2614 if (zs->zs_mirrors >= 1) {
2615 ASSERT(oldvd->vdev_ops == &vdev_mirror_ops);
2616 ASSERT(oldvd->vdev_children >= zs->zs_mirrors);
2617 oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raidz];
2618 }
2619 if (ztest_opts.zo_raidz > 1) {
2620 ASSERT(oldvd->vdev_ops == &vdev_raidz_ops);
2621 ASSERT(oldvd->vdev_children == ztest_opts.zo_raidz);
2622 oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raidz];
2623 }
2624
2625 /*
2626 * If we're already doing an attach or replace, oldvd may be a
2627 * mirror vdev -- in which case, pick a random child.
2628 */
2629 while (oldvd->vdev_children != 0) {
2630 oldvd_has_siblings = B_TRUE;
2631 ASSERT(oldvd->vdev_children >= 2);
2632 oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)];
2633 }
2634
2635 oldguid = oldvd->vdev_guid;
2636 oldsize = vdev_get_min_asize(oldvd);
2637 oldvd_is_log = oldvd->vdev_top->vdev_islog;
2638 (void) strcpy(oldpath, oldvd->vdev_path);
2639 pvd = oldvd->vdev_parent;
2640 pguid = pvd->vdev_guid;
2641
2642 /*
2643 * If oldvd has siblings, then half of the time, detach it.
2644 */
2645 if (oldvd_has_siblings && ztest_random(2) == 0) {
2646 spa_config_exit(spa, SCL_VDEV, FTAG);
2647 error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE);
2648 if (error != 0 && error != ENODEV && error != EBUSY &&
2649 error != ENOTSUP)
2650 fatal(0, "detach (%s) returned %d", oldpath, error);
2651 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2652 return;
2653 }
2654
2655 /*
2656 * For the new vdev, choose with equal probability between the two
2657 * standard paths (ending in either 'a' or 'b') or a random hot spare.
2658 */
2659 if (sav->sav_count != 0 && ztest_random(3) == 0) {
2660 newvd = sav->sav_vdevs[ztest_random(sav->sav_count)];
2661 newvd_is_spare = B_TRUE;
2662 (void) strcpy(newpath, newvd->vdev_path);
2663 } else {
2664 (void) snprintf(newpath, sizeof (newpath), ztest_dev_template,
2665 ztest_opts.zo_dir, ztest_opts.zo_pool,
2666 top * leaves + leaf);
2667 if (ztest_random(2) == 0)
2668 newpath[strlen(newpath) - 1] = 'b';
2669 newvd = vdev_lookup_by_path(rvd, newpath);
2670 }
2671
2672 if (newvd) {
2673 newsize = vdev_get_min_asize(newvd);
2674 } else {
2675 /*
2676 * Make newsize a little bigger or smaller than oldsize.
2677 * If it's smaller, the attach should fail.
2678 * If it's larger, and we're doing a replace,
2679 * we should get dynamic LUN growth when we're done.
2680 */
2681 newsize = 10 * oldsize / (9 + ztest_random(3));
2682 }
2683
2684 /*
2685 * If pvd is not a mirror or root, the attach should fail with ENOTSUP,
2686 * unless it's a replace; in that case any non-replacing parent is OK.
2687 *
2688 * If newvd is already part of the pool, it should fail with EBUSY.
2689 *
2690 * If newvd is too small, it should fail with EOVERFLOW.
2691 */
2692 if (pvd->vdev_ops != &vdev_mirror_ops &&
2693 pvd->vdev_ops != &vdev_root_ops && (!replacing ||
2694 pvd->vdev_ops == &vdev_replacing_ops ||
2695 pvd->vdev_ops == &vdev_spare_ops))
2696 expected_error = ENOTSUP;
2697 else if (newvd_is_spare && (!replacing || oldvd_is_log))
2698 expected_error = ENOTSUP;
2699 else if (newvd == oldvd)
2700 expected_error = replacing ? 0 : EBUSY;
2701 else if (vdev_lookup_by_path(rvd, newpath) != NULL)
2702 expected_error = EBUSY;
2703 else if (newsize < oldsize)
2704 expected_error = EOVERFLOW;
2705 else if (ashift > oldvd->vdev_top->vdev_ashift)
2706 expected_error = EDOM;
2707 else
2708 expected_error = 0;
2709
2710 spa_config_exit(spa, SCL_VDEV, FTAG);
2711
2712 /*
2713 * Build the nvlist describing newpath.
2714 */
2715 root = make_vdev_root(newpath, NULL, newvd == NULL ? newsize : 0,
2716 ashift, 0, 0, 0, 1);
2717
2718 error = spa_vdev_attach(spa, oldguid, root, replacing);
2719
2720 nvlist_free(root);
2721
2722 /*
2723 * If our parent was the replacing vdev, but the replace completed,
2724 * then instead of failing with ENOTSUP we may either succeed,
2725 * fail with ENODEV, or fail with EOVERFLOW.
2726 */
2727 if (expected_error == ENOTSUP &&
2728 (error == 0 || error == ENODEV || error == EOVERFLOW))
2729 expected_error = error;
2730
2731 /*
2732 * If someone grew the LUN, the replacement may be too small.
2733 */
2734 if (error == EOVERFLOW || error == EBUSY)
2735 expected_error = error;
2736
2737 /* XXX workaround 6690467 */
2738 if (error != expected_error && expected_error != EBUSY) {
2739 fatal(0, "attach (%s %llu, %s %llu, %d) "
2740 "returned %d, expected %d",
2741 oldpath, (longlong_t)oldsize, newpath,
2742 (longlong_t)newsize, replacing, error, expected_error);
2743 }
2744
2745 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2746 }
2747
2748 /*
2749 * Callback function which expands the physical size of the vdev.
2750 */
2751 vdev_t *
2752 grow_vdev(vdev_t *vd, void *arg)
2753 {
2754 spa_t *spa = vd->vdev_spa;
2755 size_t *newsize = arg;
2756 size_t fsize;
2757 int fd;
2758
2759 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
2760 ASSERT(vd->vdev_ops->vdev_op_leaf);
2761
2762 if ((fd = open(vd->vdev_path, O_RDWR)) == -1)
2763 return (vd);
2764
2765 fsize = lseek(fd, 0, SEEK_END);
2766 (void) ftruncate(fd, *newsize);
2767
2768 if (ztest_opts.zo_verbose >= 6) {
2769 (void) printf("%s grew from %lu to %lu bytes\n",
2770 vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize);
2771 }
2772 (void) close(fd);
2773 return (NULL);
2774 }
2775
2776 /*
2777 * Callback function which expands a given vdev by calling vdev_online().
2778 */
2779 /* ARGSUSED */
2780 vdev_t *
2781 online_vdev(vdev_t *vd, void *arg)
2782 {
2783 spa_t *spa = vd->vdev_spa;
2784 vdev_t *tvd = vd->vdev_top;
2785 uint64_t guid = vd->vdev_guid;
2786 uint64_t generation = spa->spa_config_generation + 1;
2787 vdev_state_t newstate = VDEV_STATE_UNKNOWN;
2788 int error;
2789
2790 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
2791 ASSERT(vd->vdev_ops->vdev_op_leaf);
2792
2793 /* Calling vdev_online will initialize the new metaslabs */
2794 spa_config_exit(spa, SCL_STATE, spa);
2795 error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate);
2796 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
2797
2798 /*
2799 * If vdev_online returned an error or the underlying vdev_open
2800 * failed, then we abort the expand. The only way to know that
2801 * vdev_open failed is by checking the returned newstate.
2802 */
2803 if (error || newstate != VDEV_STATE_HEALTHY) {
2804 if (ztest_opts.zo_verbose >= 5) {
2805 (void) printf("Unable to expand vdev, state %llu, "
2806 "error %d\n", (u_longlong_t)newstate, error);
2807 }
2808 return (vd);
2809 }
2810 ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY);
2811
2812 /*
2813 * Since we dropped the lock we need to ensure that we're
2814 * still talking to the original vdev. It's possible this
2815 * vdev may have been detached/replaced while we were
2816 * trying to online it.
2817 */
2818 if (generation != spa->spa_config_generation) {
2819 if (ztest_opts.zo_verbose >= 5) {
2820 (void) printf("vdev configuration has changed, "
2821 "guid %llu, state %llu, expected gen %llu, "
2822 "got gen %llu\n",
2823 (u_longlong_t)guid,
2824 (u_longlong_t)tvd->vdev_state,
2825 (u_longlong_t)generation,
2826 (u_longlong_t)spa->spa_config_generation);
2827 }
2828 return (vd);
2829 }
2830 return (NULL);
2831 }
2832
2833 /*
2834 * Traverse the vdev tree calling the supplied function.
2835 * We continue to walk the tree until we either have walked all
2836 * children or we receive a non-NULL return from the callback.
2837 * If a NULL callback is passed, then we simply return the first
2838 * leaf vdev we encounter.
2839 */
2840 vdev_t *
2841 vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg)
2842 {
2843 if (vd->vdev_ops->vdev_op_leaf) {
2844 if (func == NULL)
2845 return (vd);
2846 else
2847 return (func(vd, arg));
2848 }
2849
2850 for (uint_t c = 0; c < vd->vdev_children; c++) {
2851 vdev_t *cvd = vd->vdev_child[c];
2852 if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL)
2853 return (cvd);
2854 }
2855 return (NULL);
2856 }
2857
2858 /*
2859 * Verify that dynamic LUN growth works as expected.
2860 */
2861 /* ARGSUSED */
2862 void
2863 ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id)
2864 {
2865 spa_t *spa = ztest_spa;
2866 vdev_t *vd, *tvd;
2867 metaslab_class_t *mc;
2868 metaslab_group_t *mg;
2869 size_t psize, newsize;
2870 uint64_t top;
2871 uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count;
2872
2873 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2874 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
2875
2876 top = ztest_random_vdev_top(spa, B_TRUE);
2877
2878 tvd = spa->spa_root_vdev->vdev_child[top];
2879 mg = tvd->vdev_mg;
2880 mc = mg->mg_class;
2881 old_ms_count = tvd->vdev_ms_count;
2882 old_class_space = metaslab_class_get_space(mc);
2883
2884 /*
2885 * Determine the size of the first leaf vdev associated with
2886 * our top-level device.
2887 */
2888 vd = vdev_walk_tree(tvd, NULL, NULL);
2889 ASSERT3P(vd, !=, NULL);
2890 ASSERT(vd->vdev_ops->vdev_op_leaf);
2891
2892 psize = vd->vdev_psize;
2893
2894 /*
2895 * We only try to expand the vdev if it's healthy, less than 4x its
2896 * original size, and it has a valid psize.
2897 */
2898 if (tvd->vdev_state != VDEV_STATE_HEALTHY ||
2899 psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) {
2900 spa_config_exit(spa, SCL_STATE, spa);
2901 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2902 return;
2903 }
2904 ASSERT(psize > 0);
2905 newsize = psize + psize / 8;
2906 ASSERT3U(newsize, >, psize);
2907
2908 if (ztest_opts.zo_verbose >= 6) {
2909 (void) printf("Expanding LUN %s from %lu to %lu\n",
2910 vd->vdev_path, (ulong_t)psize, (ulong_t)newsize);
2911 }
2912
2913 /*
2914 * Growing the vdev is a two-step process:
2915 * 1) expand the physical size (i.e. relabel)
2916 * 2) online the vdev to create the new metaslabs
2917 */
2918 if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL ||
2919 vdev_walk_tree(tvd, online_vdev, NULL) != NULL ||
2920 tvd->vdev_state != VDEV_STATE_HEALTHY) {
2921 if (ztest_opts.zo_verbose >= 5) {
2922 (void) printf("Could not expand LUN because "
2923 "the vdev configuration changed.\n");
2924 }
2925 spa_config_exit(spa, SCL_STATE, spa);
2926 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2927 return;
2928 }
2929
2930 spa_config_exit(spa, SCL_STATE, spa);
2931
2932 /*
2933 * Expanding the LUN will update the config asynchronously,
2934 * thus we must wait for the async thread to complete any
2935 * pending tasks before proceeding.
2936 */
2937 for (;;) {
2938 boolean_t done;
2939 mutex_enter(&spa->spa_async_lock);
2940 done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks);
2941 mutex_exit(&spa->spa_async_lock);
2942 if (done)
2943 break;
2944 txg_wait_synced(spa_get_dsl(spa), 0);
2945 (void) poll(NULL, 0, 100);
2946 }
2947
2948 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
2949
2950 tvd = spa->spa_root_vdev->vdev_child[top];
2951 new_ms_count = tvd->vdev_ms_count;
2952 new_class_space = metaslab_class_get_space(mc);
2953
2954 if (tvd->vdev_mg != mg || mg->mg_class != mc) {
2955 if (ztest_opts.zo_verbose >= 5) {
2956 (void) printf("Could not verify LUN expansion due to "
2957 "intervening vdev offline or remove.\n");
2958 }
2959 spa_config_exit(spa, SCL_STATE, spa);
2960 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2961 return;
2962 }
2963
2964 /*
2965 * Make sure we were able to grow the vdev.
2966 */
2967 if (new_ms_count <= old_ms_count)
2968 fatal(0, "LUN expansion failed: ms_count %llu <= %llu\n",
2969 new_ms_count, old_ms_count);
2970
2971 /*
2972 * Make sure we were able to grow the pool.
2973 */
2974 if (new_class_space <= old_class_space)
2975 fatal(0, "LUN expansion failed: class_space %llu <= %llu\n",
2976 new_class_space, old_class_space);
2977
2978 if (ztest_opts.zo_verbose >= 5) {
2979 char oldnumbuf[6], newnumbuf[6];
2980
2981 nicenum(old_class_space, oldnumbuf);
2982 nicenum(new_class_space, newnumbuf);
2983 (void) printf("%s grew from %s to %s\n",
2984 spa->spa_name, oldnumbuf, newnumbuf);
2985 }
2986
2987 spa_config_exit(spa, SCL_STATE, spa);
2988 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2989 }
2990
2991 /*
2992 * Verify that dmu_objset_{create,destroy,open,close} work as expected.
2993 */
2994 /* ARGSUSED */
2995 static void
2996 ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
2997 {
2998 /*
2999 * Create the objects common to all ztest datasets.
3000 */
3001 VERIFY(zap_create_claim(os, ZTEST_DIROBJ,
3002 DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0);
3003 }
3004
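/*
 * Create a new dataset and, about 20% of the time, set its 'sync'
 * property to 'always' to exercise synchronous semantics.
 */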
3005 static int
3006 ztest_dataset_create(char *dsname)
3007 {
3008 uint64_t zilset = ztest_random(100);
3009 int err = dmu_objset_create(dsname, DMU_OST_OTHER, 0,
3010 ztest_objset_create_cb, NULL);
3011
3012 if (err || zilset < 80)
3013 return (err);
3014
3015 if (ztest_opts.zo_verbose >= 6)
3016 (void) printf("Setting dataset %s to sync always\n", dsname);
3017 return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC,
3018 ZFS_SYNC_ALWAYS, B_FALSE));
3019 }
3020
3021 /* ARGSUSED */
3022 static int
3023 ztest_objset_destroy_cb(const char *name, void *arg)
3024 {
3025 objset_t *os;
3026 dmu_object_info_t doi;
3027 int error;
3028
3029 /*
3030 * Verify that the dataset contains a directory object.
3031 */
3032 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os));
3033 error = dmu_object_info(os, ZTEST_DIROBJ, &doi);
3034 if (error != ENOENT) {
3035 /* We could have crashed in the middle of destroying it */
3036 ASSERT3U(error, ==, 0);
3037 ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER);
3038 ASSERT3S(doi.doi_physical_blocks_512, >=, 0);
3039 }
3040 dmu_objset_rele(os, FTAG);
3041
3042 /*
3043 * Destroy the dataset.
3044 */
3045 VERIFY3U(0, ==, dmu_objset_destroy(name, B_FALSE));
3046 return (0);
3047 }
3048
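/*
 * Take a snapshot named <osname>@<id>.  Returns B_FALSE only if the
 * pool is out of space; any other unexpected error is fatal.
 */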
3049 static boolean_t
3050 ztest_snapshot_create(char *osname, uint64_t id)
3051 {
3052 char snapname[MAXNAMELEN];
3053 int error;
3054
3055 (void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname,
3056 (u_longlong_t)id);
3057
3058 error = dmu_objset_snapshot(osname, strchr(snapname, '@') + 1,
3059 NULL, NULL, B_FALSE, B_FALSE, -1);
3060 if (error == ENOSPC) {
3061 ztest_record_enospc(FTAG);
3062 return (B_FALSE);
3063 }
3064 if (error != 0 && error != EEXIST)
3065 fatal(0, "ztest_snapshot_create(%s) = %d", snapname, error);
3066 return (B_TRUE);
3067 }
3068
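/*
 * Destroy the snapshot named <osname>@<id>, tolerating ENOENT.
 */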
3069 static boolean_t
3070 ztest_snapshot_destroy(char *osname, uint64_t id)
3071 {
3072 char snapname[MAXNAMELEN];
3073 int error;
3074
3075 (void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname,
3076 (u_longlong_t)id);
3077
3078 error = dmu_objset_destroy(snapname, B_FALSE);
3079 if (error != 0 && error != ENOENT)
3080 fatal(0, "ztest_snapshot_destroy(%s) = %d", snapname, error);
3081 return (B_TRUE);
3082 }
3083
3084 /* ARGSUSED */
3085 void
3086 ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id)
3087 {
3088 ztest_ds_t zdtmp;
3089 int iters;
3090 int error;
3091 objset_t *os, *os2;
3092 char name[MAXNAMELEN];
3093 zilog_t *zilog;
3094
3095 (void) rw_rdlock(&ztest_name_lock);
3096
3097 (void) snprintf(name, MAXNAMELEN, "%s/temp_%llu",
3098 ztest_opts.zo_pool, (u_longlong_t)id);
3099
3100 /*
3101 * If this dataset exists from a previous run, process its replay log
3102 * half of the time. If we don't replay it, then dmu_objset_destroy()
3103 * (invoked from ztest_objset_destroy_cb()) should just throw it away.
3104 */
3105 if (ztest_random(2) == 0 &&
3106 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os) == 0) {
3107 ztest_zd_init(&zdtmp, NULL, os);
3108 zil_replay(os, &zdtmp, ztest_replay_vector);
3109 ztest_zd_fini(&zdtmp);
3110 dmu_objset_disown(os, FTAG);
3111 }
3112
3113 /*
3114 * There may be an old instance of the dataset we're about to
3115 * create lying around from a previous run. If so, destroy it
3116 * and all of its snapshots.
3117 */
3118 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
3119 DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
3120
3121 /*
3122 * Verify that the destroyed dataset is no longer in the namespace.
3123 */
3124 VERIFY3U(ENOENT, ==, dmu_objset_hold(name, FTAG, &os));
3125
3126 /*
3127 * Verify that we can create a new dataset.
3128 */
3129 error = ztest_dataset_create(name);
3130 if (error) {
3131 if (error == ENOSPC) {
3132 ztest_record_enospc(FTAG);
3133 (void) rw_unlock(&ztest_name_lock);
3134 return;
3135 }
3136 fatal(0, "dmu_objset_create(%s) = %d", name, error);
3137 }
3138
3139 VERIFY3U(0, ==,
3140 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os));
3141
3142 ztest_zd_init(&zdtmp, NULL, os);
3143
3144 /*
3145 * Open the intent log for it.
3146 */
3147 zilog = zil_open(os, ztest_get_data);
3148
3149 /*
3150 * Put some objects in there, do a little I/O to them,
3151 * and randomly take a couple of snapshots along the way.
3152 */
3153 iters = ztest_random(5);
3154 for (int i = 0; i < iters; i++) {
3155 ztest_dmu_object_alloc_free(&zdtmp, id);
3156 if (ztest_random(iters) == 0)
3157 (void) ztest_snapshot_create(name, i);
3158 }
3159
3160 /*
3161 * Verify that we cannot create an existing dataset.
3162 */
3163 VERIFY3U(EEXIST, ==,
3164 dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL));
3165
3166 /*
3167 * Verify that we can hold an objset that is also owned.
3168 */
3169 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os2));
3170 dmu_objset_rele(os2, FTAG);
3171
3172 /*
3173 * Verify that we cannot own an objset that is already owned.
3174 */
3175 VERIFY3U(EBUSY, ==,
3176 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os2));
3177
3178 zil_close(zilog);
3179 dmu_objset_disown(os, FTAG);
3180 ztest_zd_fini(&zdtmp);
3181
3182 (void) rw_unlock(&ztest_name_lock);
3183 }
3184
3185 /*
3186 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected.
3187 */
3188 void
3189 ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id)
3190 {
3191 (void) rw_rdlock(&ztest_name_lock);
3192 (void) ztest_snapshot_destroy(zd->zd_name, id);
3193 (void) ztest_snapshot_create(zd->zd_name, id);
3194 (void) rw_unlock(&ztest_name_lock);
3195 }
3196
3197 /*
3198 * Cleanup non-standard snapshots and clones.
3199 */
3200 void
3201 ztest_dsl_dataset_cleanup(char *osname, uint64_t id)
3202 {
3203 char snap1name[MAXNAMELEN];
3204 char clone1name[MAXNAMELEN];
3205 char snap2name[MAXNAMELEN];
3206 char clone2name[MAXNAMELEN];
3207 char snap3name[MAXNAMELEN];
3208 int error;
3209
3210 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id);
3211 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id);
3212 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id);
3213 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id);
3214 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id);
3215
3216 error = dmu_objset_destroy(clone2name, B_FALSE);
3217 if (error && error != ENOENT)
3218 fatal(0, "dmu_objset_destroy(%s) = %d", clone2name, error);
3219 error = dmu_objset_destroy(snap3name, B_FALSE);
3220 if (error && error != ENOENT)
3221 fatal(0, "dmu_objset_destroy(%s) = %d", snap3name, error);
3222 error = dmu_objset_destroy(snap2name, B_FALSE);
3223 if (error && error != ENOENT)
3224 fatal(0, "dmu_objset_destroy(%s) = %d", snap2name, error);
3225 error = dmu_objset_destroy(clone1name, B_FALSE);
3226 if (error && error != ENOENT)
3227 fatal(0, "dmu_objset_destroy(%s) = %d", clone1name, error);
3228 error = dmu_objset_destroy(snap1name, B_FALSE);
3229 if (error && error != ENOENT)
3230 fatal(0, "dmu_objset_destroy(%s) = %d", snap1name, error);
3231 }
3232
3233 /*
3234 * Verify that dsl_dataset_promote() handles EBUSY.
3235 */
3236 void
3237 ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id)
3238 {
3239 objset_t *clone;
3240 dsl_dataset_t *ds;
3241 char snap1name[MAXNAMELEN];
3242 char clone1name[MAXNAMELEN];
3243 char snap2name[MAXNAMELEN];
3244 char clone2name[MAXNAMELEN];
3245 char snap3name[MAXNAMELEN];
3246 char *osname = zd->zd_name;
3247 int error;
3248
3249 (void) rw_rdlock(&ztest_name_lock);
3250
3251 ztest_dsl_dataset_cleanup(osname, id);
3252
3253 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id);
3254 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id);
3255 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id);
3256 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id);
3257 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id);
3258
3259 error = dmu_objset_snapshot(osname, strchr(snap1name, '@')+1,
3260 NULL, NULL, B_FALSE, B_FALSE, -1);
3261 if (error && error != EEXIST) {
3262 if (error == ENOSPC) {
3263 ztest_record_enospc(FTAG);
3264 goto out;
3265 }
3266 fatal(0, "dmu_take_snapshot(%s) = %d", snap1name, error);
3267 }
3268
3269 error = dmu_objset_hold(snap1name, FTAG, &clone);
3270 if (error)
3271 fatal(0, "dmu_open_snapshot(%s) = %d", snap1name, error);
3272
3273 error = dmu_objset_clone(clone1name, dmu_objset_ds(clone), 0);
3274 dmu_objset_rele(clone, FTAG);
3275 if (error) {
3276 if (error == ENOSPC) {
3277 ztest_record_enospc(FTAG);
3278 goto out;
3279 }
3280 fatal(0, "dmu_objset_create(%s) = %d", clone1name, error);
3281 }
3282
3283 error = dmu_objset_snapshot(clone1name, strchr(snap2name, '@')+1,
3284 NULL, NULL, B_FALSE, B_FALSE, -1);
3285 if (error && error != EEXIST) {
3286 if (error == ENOSPC) {
3287 ztest_record_enospc(FTAG);
3288 goto out;
3289 }
3290 fatal(0, "dmu_open_snapshot(%s) = %d", snap2name, error);
3291 }
3292
3293 error = dmu_objset_snapshot(clone1name, strchr(snap3name, '@')+1,
3294 NULL, NULL, B_FALSE, B_FALSE, -1);
3295 if (error && error != EEXIST) {
3296 if (error == ENOSPC) {
3297 ztest_record_enospc(FTAG);
3298 goto out;
3299 }
3300 fatal(0, "dmu_open_snapshot(%s) = %d", snap3name, error);
3301 }
3302
3303 error = dmu_objset_hold(snap3name, FTAG, &clone);
3304 if (error)
3305 fatal(0, "dmu_open_snapshot(%s) = %d", snap3name, error);
3306
3307 error = dmu_objset_clone(clone2name, dmu_objset_ds(clone), 0);
3308 dmu_objset_rele(clone, FTAG);
3309 if (error) {
3310 if (error == ENOSPC) {
3311 ztest_record_enospc(FTAG);
3312 goto out;
3313 }
3314 fatal(0, "dmu_objset_create(%s) = %d", clone2name, error);
3315 }
3316
3317 error = dsl_dataset_own(snap2name, B_FALSE, FTAG, &ds);
3318 if (error)
3319 fatal(0, "dsl_dataset_own(%s) = %d", snap2name, error);
3320 error = dsl_dataset_promote(clone2name, NULL);
3321 if (error != EBUSY)
3322 fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name,
3323 error);
3324 dsl_dataset_disown(ds, FTAG);
3325
3326 out:
3327 ztest_dsl_dataset_cleanup(osname, id);
3328
3329 (void) rw_unlock(&ztest_name_lock);
3330 }
3331
3332 /*
3333 * Verify that dmu_object_{alloc,free} work as expected.
3334 */
3335 void
3336 ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id)
3337 {
3338 ztest_od_t od[4];
3339 int batchsize = sizeof (od) / sizeof (od[0]);
3340
3341 for (int b = 0; b < batchsize; b++)
3342 ztest_od_init(&od[b], id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0);
3343
3344 /*
3345 * Destroy the previous batch of objects, create a new batch,
3346 * and do some I/O on the new objects.
3347 */
3348 if (ztest_object_init(zd, od, sizeof (od), B_TRUE) != 0)
3349 return;
3350
3351 while (ztest_random(4 * batchsize) != 0)
3352 ztest_io(zd, od[ztest_random(batchsize)].od_object,
3353 ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
3354 }
3355
3356 /*
3357 * Verify that dmu_{read,write} work as expected.
3358 */
3359 void
3360 ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
3361 {
3362 objset_t *os = zd->zd_os;
3363 ztest_od_t od[2];
3364 dmu_tx_t *tx;
3365 int i, freeit, error;
3366 uint64_t n, s, txg;
3367 bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT;
3368 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
3369 uint64_t chunksize = (1000 + ztest_random(1000)) * sizeof (uint64_t);
3370 uint64_t regions = 997;
3371 uint64_t stride = 123456789ULL;
3372 uint64_t width = 40;
3373 int free_percent = 5;
3374
3375 /*
3376 * This test uses two objects, packobj and bigobj, that are always
3377 * updated together (i.e. in the same tx) so that their contents are
3378 * in sync and can be compared. Their contents relate to each other
3379 * in a simple way: packobj is a dense array of 'bufwad' structures,
3380 * while bigobj is a sparse array of the same bufwads. Specifically,
3381 * for any index n, there are three bufwads that should be identical:
3382 *
3383 * packobj, at offset n * sizeof (bufwad_t)
3384 * bigobj, at the head of the nth chunk
3385 * bigobj, at the tail of the nth chunk
3386 *
3387 * The chunk size is arbitrary. It doesn't have to be a power of two,
3388 * and it doesn't have any relation to the object blocksize.
3389 * The only requirement is that it can hold at least two bufwads.
3390 *
3391 * Normally, we write the bufwad to each of these locations.
3392 * However, free_percent of the time we instead write zeroes to
3393 * packobj and perform a dmu_free_range() on bigobj. By comparing
3394 * bigobj to packobj, we can verify that the DMU is correctly
3395 * tracking which parts of an object are allocated and free,
3396 * and that the contents of the allocated blocks are correct.
3397 */
3398
3399 /*
3400 * Read the directory info. If it's the first time, set things up.
3401 */
3402 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize);
3403 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
3404
3405 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
3406 return;
3407
3408 bigobj = od[0].od_object;
3409 packobj = od[1].od_object;
3410 chunksize = od[0].od_gen;
3411 ASSERT(chunksize == od[1].od_gen);
3412
3413 /*
3414 * Prefetch a random chunk of the big object.
3415 * Our aim here is to get some async reads in flight
3416 * for blocks that we may free below; the DMU should
3417 * handle this race correctly.
3418 */
3419 n = ztest_random(regions) * stride + ztest_random(width);
3420 s = 1 + ztest_random(2 * width - 1);
3421 dmu_prefetch(os, bigobj, n * chunksize, s * chunksize);
3422
3423 /*
3424 * Pick a random index and compute the offsets into packobj and bigobj.
3425 */
3426 n = ztest_random(regions) * stride + ztest_random(width);
3427 s = 1 + ztest_random(width - 1);
3428
3429 packoff = n * sizeof (bufwad_t);
3430 packsize = s * sizeof (bufwad_t);
3431
3432 bigoff = n * chunksize;
3433 bigsize = s * chunksize;
3434
3435 packbuf = umem_alloc(packsize, UMEM_NOFAIL);
3436 bigbuf = umem_alloc(bigsize, UMEM_NOFAIL);
3437
3438 /*
3439 * free_percent of the time, free a range of bigobj rather than
3440 * overwriting it.
3441 */
3442 freeit = (ztest_random(100) < free_percent);
3443
3444 /*
3445 * Read the current contents of our objects.
3446 */
3447 error = dmu_read(os, packobj, packoff, packsize, packbuf,
3448 DMU_READ_PREFETCH);
3449 ASSERT3U(error, ==, 0);
3450 error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf,
3451 DMU_READ_PREFETCH);
3452 ASSERT3U(error, ==, 0);
3453
3454 /*
3455 * Get a tx for the mods to both packobj and bigobj.
3456 */
3457 tx = dmu_tx_create(os);
3458
3459 dmu_tx_hold_write(tx, packobj, packoff, packsize);
3460
3461 if (freeit)
3462 dmu_tx_hold_free(tx, bigobj, bigoff, bigsize);
3463 else
3464 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
3465
3466 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
3467 if (txg == 0) {
3468 umem_free(packbuf, packsize);
3469 umem_free(bigbuf, bigsize);
3470 return;
3471 }
3472
3473 dmu_object_set_checksum(os, bigobj,
3474 (enum zio_checksum)ztest_random_dsl_prop(ZFS_PROP_CHECKSUM), tx);
3475
3476 dmu_object_set_compress(os, bigobj,
3477 (enum zio_compress)ztest_random_dsl_prop(ZFS_PROP_COMPRESSION), tx);
3478
3479 /*
3480 * For each index from n to n + s, verify that the existing bufwad
3481 * in packobj matches the bufwads at the head and tail of the
3482 * corresponding chunk in bigobj. Then update all three bufwads
3483 * with the new values we want to write out.
3484 */
3485 for (i = 0; i < s; i++) {
3486 /* LINTED */
3487 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
3488 /* LINTED */
3489 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
3490 /* LINTED */
3491 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
3492
3493 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize);
3494 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize);
3495
3496 if (pack->bw_txg > txg)
3497 fatal(0, "future leak: got %llx, open txg is %llx",
3498 pack->bw_txg, txg);
3499
3500 if (pack->bw_data != 0 && pack->bw_index != n + i)
3501 fatal(0, "wrong index: got %llx, wanted %llx+%llx",
3502 pack->bw_index, n, i);
3503
3504 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
3505 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH);
3506
3507 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
3508 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT);
3509
3510 if (freeit) {
3511 bzero(pack, sizeof (bufwad_t));
3512 } else {
3513 pack->bw_index = n + i;
3514 pack->bw_txg = txg;
3515 pack->bw_data = 1 + ztest_random(-2ULL);
3516 }
3517 *bigH = *pack;
3518 *bigT = *pack;
3519 }
3520
3521 /*
3522 * We've verified all the old bufwads, and made new ones.
3523 * Now write them out.
3524 */
3525 dmu_write(os, packobj, packoff, packsize, packbuf, tx);
3526
3527 if (freeit) {
3528 if (ztest_opts.zo_verbose >= 7) {
3529 (void) printf("freeing offset %llx size %llx"
3530 " txg %llx\n",
3531 (u_longlong_t)bigoff,
3532 (u_longlong_t)bigsize,
3533 (u_longlong_t)txg);
3534 }
3535 VERIFY(0 == dmu_free_range(os, bigobj, bigoff, bigsize, tx));
3536 } else {
3537 if (ztest_opts.zo_verbose >= 7) {
3538 (void) printf("writing offset %llx size %llx"
3539 " txg %llx\n",
3540 (u_longlong_t)bigoff,
3541 (u_longlong_t)bigsize,
3542 (u_longlong_t)txg);
3543 }
3544 dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx);
3545 }
3546
3547 dmu_tx_commit(tx);
3548
3549 /*
3550 * Sanity check the stuff we just wrote.
3551 */
3552 {
3553 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
3554 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
3555
3556 VERIFY(0 == dmu_read(os, packobj, packoff,
3557 packsize, packcheck, DMU_READ_PREFETCH));
3558 VERIFY(0 == dmu_read(os, bigobj, bigoff,
3559 bigsize, bigcheck, DMU_READ_PREFETCH));
3560
3561 ASSERT(bcmp(packbuf, packcheck, packsize) == 0);
3562 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0);
3563
3564 umem_free(packcheck, packsize);
3565 umem_free(bigcheck, bigsize);
3566 }
3567
3568 umem_free(packbuf, packsize);
3569 umem_free(bigbuf, bigsize);
3570 }
3571
3572 void
3573 compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf,
3574 uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg)
3575 {
3576 uint64_t i;
3577 bufwad_t *pack;
3578 bufwad_t *bigH;
3579 bufwad_t *bigT;
3580
3581 /*
3582 * For each index from n to n + s, verify that the existing bufwad
3583 * in packobj matches the bufwads at the head and tail of the
3584 * corresponding chunk in bigobj. Then update all three bufwads
3585 * with the new values we want to write out.
3586 */
3587 for (i = 0; i < s; i++) {
3588 /* LINTED */
3589 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
3590 /* LINTED */
3591 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
3592 /* LINTED */
3593 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
3594
3595 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize);
3596 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize);
3597
3598 if (pack->bw_txg > txg)
3599 fatal(0, "future leak: got %llx, open txg is %llx",
3600 pack->bw_txg, txg);
3601
3602 if (pack->bw_data != 0 && pack->bw_index != n + i)
3603 fatal(0, "wrong index: got %llx, wanted %llx+%llx",
3604 pack->bw_index, n, i);
3605
3606 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
3607 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH);
3608
3609 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
3610 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT);
3611
3612 pack->bw_index = n + i;
3613 pack->bw_txg = txg;
3614 pack->bw_data = 1 + ztest_random(-2ULL);
3615
3616 *bigH = *pack;
3617 *bigT = *pack;
3618 }
3619 }
3620
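/*
 * Verify that zero-copy writes via dmu_request_arcbuf() and
 * dmu_assign_arcbuf() work as expected.
 */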
3621 void
3622 ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
3623 {
3624 objset_t *os = zd->zd_os;
3625 ztest_od_t od[2];
3626 dmu_tx_t *tx;
3627 uint64_t i;
3628 int error;
3629 uint64_t n, s, txg;
3630 bufwad_t *packbuf, *bigbuf;
3631 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
3632 uint64_t blocksize = ztest_random_blocksize();
3633 uint64_t chunksize = blocksize;
3634 uint64_t regions = 997;
3635 uint64_t stride = 123456789ULL;
3636 uint64_t width = 9;
3637 dmu_buf_t *bonus_db;
3638 arc_buf_t **bigbuf_arcbufs;
3639 dmu_object_info_t doi;
3640
3641 /*
3642 * This test uses two objects, packobj and bigobj, that are always
3643 * updated together (i.e. in the same tx) so that their contents are
3644 * in sync and can be compared. Their contents relate to each other
3645 * in a simple way: packobj is a dense array of 'bufwad' structures,
3646 * while bigobj is a sparse array of the same bufwads. Specifically,
3647 * for any index n, there are three bufwads that should be identical:
3648 *
3649 * packobj, at offset n * sizeof (bufwad_t)
3650 * bigobj, at the head of the nth chunk
3651 * bigobj, at the tail of the nth chunk
3652 *
3653 * The chunk size is set equal to bigobj block size so that
3654 * dmu_assign_arcbuf() can be tested for object updates.
3655 */
3656
3657 /*
3658 * Read the directory info. If it's the first time, set things up.
3659 */
3660 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
3661 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
3662
3663 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
3664 return;
3665
3666 bigobj = od[0].od_object;
3667 packobj = od[1].od_object;
3668 blocksize = od[0].od_blocksize;
3669 chunksize = blocksize;
3670 ASSERT(chunksize == od[1].od_gen);
3671
3672 VERIFY(dmu_object_info(os, bigobj, &doi) == 0);
3673 VERIFY(ISP2(doi.doi_data_block_size));
3674 VERIFY(chunksize == doi.doi_data_block_size);
3675 VERIFY(chunksize >= 2 * sizeof (bufwad_t));
3676
3677 /*
3678 * Pick a random index and compute the offsets into packobj and bigobj.
3679 */
3680 n = ztest_random(regions) * stride + ztest_random(width);
3681 s = 1 + ztest_random(width - 1);
3682
3683 packoff = n * sizeof (bufwad_t);
3684 packsize = s * sizeof (bufwad_t);
3685
3686 bigoff = n * chunksize;
3687 bigsize = s * chunksize;
3688
3689 packbuf = umem_zalloc(packsize, UMEM_NOFAIL);
3690 bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL);
3691
3692 VERIFY3U(0, ==, dmu_bonus_hold(os, bigobj, FTAG, &bonus_db));
3693
3694 bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL);
3695
3696 /*
3697 * Iteration 0 tests zcopy for DB_UNCACHED dbufs.
3698 * Iteration 1 tests zcopy to already-referenced dbufs.
3699 * Iteration 2 tests zcopy to a dbuf dirtied in the same txg.
3700 * Iteration 3 tests zcopy to a dbuf dirtied in a previous txg.
3701 * Iteration 4 tests zcopy when the dbuf is no longer dirty.
3702 * Iteration 5 tests zcopy when it can't be done.
3703 * Iteration 6 performs one more zcopy write.
3704 */
3705 for (i = 0; i < 7; i++) {
3706 uint64_t j;
3707 uint64_t off;
3708
3709 /*
3710 * In iteration 5 (i == 5) use arcbufs
3711 * that don't match bigobj blksz to test
3712 * dmu_assign_arcbuf() when it can't directly
3713 * assign an arcbuf to a dbuf.
3714 */
3715 for (j = 0; j < s; j++) {
3716 if (i != 5) {
3717 bigbuf_arcbufs[j] =
3718 dmu_request_arcbuf(bonus_db, chunksize);
3719 } else {
3720 bigbuf_arcbufs[2 * j] =
3721 dmu_request_arcbuf(bonus_db, chunksize / 2);
3722 bigbuf_arcbufs[2 * j + 1] =
3723 dmu_request_arcbuf(bonus_db, chunksize / 2);
3724 }
3725 }
3726
3727 /*
3728 * Get a tx for the mods to both packobj and bigobj.
3729 */
3730 tx = dmu_tx_create(os);
3731
3732 dmu_tx_hold_write(tx, packobj, packoff, packsize);
3733 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
3734
3735 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
3736 if (txg == 0) {
3737 umem_free(packbuf, packsize);
3738 umem_free(bigbuf, bigsize);
3739 for (j = 0; j < s; j++) {
3740 if (i != 5) {
3741 dmu_return_arcbuf(bigbuf_arcbufs[j]);
3742 } else {
3743 dmu_return_arcbuf(
3744 bigbuf_arcbufs[2 * j]);
3745 dmu_return_arcbuf(
3746 bigbuf_arcbufs[2 * j + 1]);
3747 }
3748 }
3749 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
3750 dmu_buf_rele(bonus_db, FTAG);
3751 return;
3752 }
3753
3754 /*
3755 * 50% of the time, don't read the objects in the first iteration to
3756 * test dmu_assign_arcbuf() for the case when there are no
3757 * existing dbufs for the specified offsets.
3758 */
3759 if (i != 0 || ztest_random(2) != 0) {
3760 error = dmu_read(os, packobj, packoff,
3761 packsize, packbuf, DMU_READ_PREFETCH);
3762 ASSERT3U(error, ==, 0);
3763 error = dmu_read(os, bigobj, bigoff, bigsize,
3764 bigbuf, DMU_READ_PREFETCH);
3765 ASSERT3U(error, ==, 0);
3766 }
3767 compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize,
3768 n, chunksize, txg);
3769
3770 /*
3771 * We've verified all the old bufwads, and made new ones.
3772 * Now write them out.
3773 */
3774 dmu_write(os, packobj, packoff, packsize, packbuf, tx);
3775 if (ztest_opts.zo_verbose >= 7) {
3776 (void) printf("writing offset %llx size %llx"
3777 " txg %llx\n",
3778 (u_longlong_t)bigoff,
3779 (u_longlong_t)bigsize,
3780 (u_longlong_t)txg);
3781 }
3782 for (off = bigoff, j = 0; j < s; j++, off += chunksize) {
3783 dmu_buf_t *dbt;
3784 if (i != 5) {
3785 bcopy((caddr_t)bigbuf + (off - bigoff),
3786 bigbuf_arcbufs[j]->b_data, chunksize);
3787 } else {
3788 bcopy((caddr_t)bigbuf + (off - bigoff),
3789 bigbuf_arcbufs[2 * j]->b_data,
3790 chunksize / 2);
3791 bcopy((caddr_t)bigbuf + (off - bigoff) +
3792 chunksize / 2,
3793 bigbuf_arcbufs[2 * j + 1]->b_data,
3794 chunksize / 2);
3795 }
3796
3797 if (i == 1) {
3798 VERIFY(dmu_buf_hold(os, bigobj, off,
3799 FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0);
3800 }
3801 if (i != 5) {
3802 dmu_assign_arcbuf(bonus_db, off,
3803 bigbuf_arcbufs[j], tx);
3804 } else {
3805 dmu_assign_arcbuf(bonus_db, off,
3806 bigbuf_arcbufs[2 * j], tx);
3807 dmu_assign_arcbuf(bonus_db,
3808 off + chunksize / 2,
3809 bigbuf_arcbufs[2 * j + 1], tx);
3810 }
3811 if (i == 1) {
3812 dmu_buf_rele(dbt, FTAG);
3813 }
3814 }
3815 dmu_tx_commit(tx);
3816
3817 /*
3818 * Sanity check the stuff we just wrote.
3819 */
3820 {
3821 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
3822 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
3823
3824 VERIFY(0 == dmu_read(os, packobj, packoff,
3825 packsize, packcheck, DMU_READ_PREFETCH));
3826 VERIFY(0 == dmu_read(os, bigobj, bigoff,
3827 bigsize, bigcheck, DMU_READ_PREFETCH));
3828
3829 ASSERT(bcmp(packbuf, packcheck, packsize) == 0);
3830 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0);
3831
3832 umem_free(packcheck, packsize);
3833 umem_free(bigcheck, bigsize);
3834 }
3835 if (i == 2) {
3836 txg_wait_open(dmu_objset_pool(os), 0);
3837 } else if (i == 3) {
3838 txg_wait_synced(dmu_objset_pool(os), 0);
3839 }
3840 }
3841
3842 dmu_buf_rele(bonus_db, FTAG);
3843 umem_free(packbuf, packsize);
3844 umem_free(bigbuf, bigsize);
3845 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
3846 }
3847
3848 /* ARGSUSED */
3849 void
3850 ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id)
3851 {
3852 ztest_od_t od[1];
3853 uint64_t offset = (1ULL << (ztest_random(20) + 43)) +
3854 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
3855
3856 /*
3857 * Have multiple threads write to large offsets in an object
3858 * to verify that parallel writes to an object -- even to the
3859 * same blocks within the object -- don't cause any trouble.
3860 */
3861 ztest_od_init(&od[0], ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
3862
3863 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
3864 return;
3865
3866 while (ztest_random(10) != 0)
3867 ztest_io(zd, od[0].od_object, offset);
3868 }
3869
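/*
 * Exercise dmu_prealloc(): truncate a region of an object, preallocate
 * it, then do random writes and i/o within the preallocated range.
 */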
3870 void
3871 ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id)
3872 {
3873 ztest_od_t od[1];
3874 uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) +
3875 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
3876 uint64_t count = ztest_random(20) + 1;
3877 uint64_t blocksize = ztest_random_blocksize();
3878 void *data;
3879
3880 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
3881
3882 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
3883 return;
3884
3885 if (ztest_truncate(zd, od[0].od_object, offset, count * blocksize) != 0)
3886 return;
3887
3888 ztest_prealloc(zd, od[0].od_object, offset, count * blocksize);
3889
3890 data = umem_zalloc(blocksize, UMEM_NOFAIL);
3891
3892 while (ztest_random(count) != 0) {
3893 uint64_t randoff = offset + (ztest_random(count) * blocksize);
3894 if (ztest_write(zd, od[0].od_object, randoff, blocksize,
3895 data) != 0)
3896 break;
3897 while (ztest_random(4) != 0)
3898 ztest_io(zd, od[0].od_object, randoff);
3899 }
3900
3901 umem_free(data, blocksize);
3902 }
3903
3904 /*
3905 * Verify that zap_{create,destroy,add,remove,update} work as expected.
3906 */
3907 #define ZTEST_ZAP_MIN_INTS 1
3908 #define ZTEST_ZAP_MAX_INTS 4
3909 #define ZTEST_ZAP_MAX_PROPS 1000
3910
3911 void
3912 ztest_zap(ztest_ds_t *zd, uint64_t id)
3913 {
3914 objset_t *os = zd->zd_os;
3915 ztest_od_t od[1];
3916 uint64_t object;
3917 uint64_t txg, last_txg;
3918 uint64_t value[ZTEST_ZAP_MAX_INTS];
3919 uint64_t zl_ints, zl_intsize, prop;
3920 int i, ints;
3921 dmu_tx_t *tx;
3922 char propname[100], txgname[100];
3923 int error;
3924 char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" };
3925
3926 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
3927
3928 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
3929 return;
3930
3931 object = od[0].od_object;
3932
3933 /*
3934 * Generate a known hash collision, and verify that
3935 * we can lookup and remove both entries.
3936 */
3937 tx = dmu_tx_create(os);
3938 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
3939 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
3940 if (txg == 0)
3941 return;
3942 for (i = 0; i < 2; i++) {
3943 value[i] = i;
3944 VERIFY3U(0, ==, zap_add(os, object, hc[i], sizeof (uint64_t),
3945 1, &value[i], tx));
3946 }
3947 for (i = 0; i < 2; i++) {
3948 VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i],
3949 sizeof (uint64_t), 1, &value[i], tx));
3950 VERIFY3U(0, ==,
3951 zap_length(os, object, hc[i], &zl_intsize, &zl_ints));
3952 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
3953 ASSERT3U(zl_ints, ==, 1);
3954 }
3955 for (i = 0; i < 2; i++) {
3956 VERIFY3U(0, ==, zap_remove(os, object, hc[i], tx));
3957 }
3958 dmu_tx_commit(tx);
3959
3960 /*
3961 * Generate a bunch of random entries.
3962 */
3963 ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS);
3964
3965 prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
3966 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
3967 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
3968 bzero(value, sizeof (value));
3969 last_txg = 0;
3970
3971 /*
3972 * If these zap entries already exist, validate their contents.
3973 */
3974 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
3975 if (error == 0) {
3976 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
3977 ASSERT3U(zl_ints, ==, 1);
3978
3979 VERIFY(zap_lookup(os, object, txgname, zl_intsize,
3980 zl_ints, &last_txg) == 0);
3981
3982 VERIFY(zap_length(os, object, propname, &zl_intsize,
3983 &zl_ints) == 0);
3984
3985 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
3986 ASSERT3U(zl_ints, ==, ints);
3987
3988 VERIFY(zap_lookup(os, object, propname, zl_intsize,
3989 zl_ints, value) == 0);
3990
3991 for (i = 0; i < ints; i++) {
3992 ASSERT3U(value[i], ==, last_txg + object + i);
3993 }
3994 } else {
3995 ASSERT3U(error, ==, ENOENT);
3996 }
3997
3998 /*
3999 * Atomically update two entries in our zap object.
4000 * The first is named txg_%llu, and contains the txg
4001 * in which the property was last updated. The second
4002 * is named prop_%llu, and the nth element of its value
4003 * should be txg + object + n.
4004 */
4005 tx = dmu_tx_create(os);
4006 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4007 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4008 if (txg == 0)
4009 return;
4010
4011 if (last_txg > txg)
4012 fatal(0, "zap future leak: old %llu new %llu", last_txg, txg);
4013
4014 for (i = 0; i < ints; i++)
4015 value[i] = txg + object + i;
4016
4017 VERIFY3U(0, ==, zap_update(os, object, txgname, sizeof (uint64_t),
4018 1, &txg, tx));
4019 VERIFY3U(0, ==, zap_update(os, object, propname, sizeof (uint64_t),
4020 ints, value, tx));
4021
4022 dmu_tx_commit(tx);
4023
4024 /*
4025 * Remove a random pair of entries.
4026 */
4027 prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
4028 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
4029 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
4030
4031 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
4032
4033 if (error == ENOENT)
4034 return;
4035
4036 ASSERT3U(error, ==, 0);
4037
4038 tx = dmu_tx_create(os);
4039 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4040 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4041 if (txg == 0)
4042 return;
4043 VERIFY3U(0, ==, zap_remove(os, object, txgname, tx));
4044 VERIFY3U(0, ==, zap_remove(os, object, propname, tx));
4045 dmu_tx_commit(tx);
4046 }
4047
4048 /*
4049 * Test the upgrade of a microzap to a fatzap.
4050 */
4051 void
4052 ztest_fzap(ztest_ds_t *zd, uint64_t id)
4053 {
4054 objset_t *os = zd->zd_os;
4055 ztest_od_t od[1];
4056 uint64_t object, txg;
4057
4058 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
4059
4060 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
4061 return;
4062
4063 object = od[0].od_object;
4064
4065 /*
4066 * Add entries to this ZAP and make sure it spills over
4067 * and gets upgraded to a fatzap. Also, since we are adding
4068 * 2050 entries we should see ptrtbl growth and leaf-block split.
4069 */
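/*
 * For scale: a microzap lives in a single block and each entry takes
 * one 64-byte chunk (per the on-disk ZAP format, as I recall), so a
 * 128K block tops out at roughly two thousand entries; 2050 adds are
 * just enough to force the conversion.
 */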
4070 for (int i = 0; i < 2050; i++) {
4071 char name[MAXNAMELEN];
4072 uint64_t value = i;
4073 dmu_tx_t *tx;
4074 int error;
4075
4076 (void) snprintf(name, sizeof (name), "fzap-%llu-%llu",
4077 id, value);
4078
4079 tx = dmu_tx_create(os);
4080 dmu_tx_hold_zap(tx, object, B_TRUE, name);
4081 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4082 if (txg == 0)
4083 return;
4084 error = zap_add(os, object, name, sizeof (uint64_t), 1,
4085 &value, tx);
4086 ASSERT(error == 0 || error == EEXIST);
4087 dmu_tx_commit(tx);
4088 }
4089 }
4090
4091 /* ARGSUSED */
4092 void
4093 ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
4094 {
4095 objset_t *os = zd->zd_os;
4096 ztest_od_t od[1];
4097 uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc;
4098 dmu_tx_t *tx;
4099 int i, namelen, error;
4100 int micro = ztest_random(2);
4101 char name[20], string_value[20];
4102 void *data;
4103
4104 ztest_od_init(&od[0], ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0);
4105
4106 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
4107 return;
4108
4109 object = od[0].od_object;
4110
4111 /*
4112 * Generate a random name of the form 'xxx.....' where each
4113 * x is a random printable character and the dots are dots.
4114 * There are 94 such characters, and the name length goes from
4115 * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names.
4116 */
4117 namelen = ztest_random(sizeof (name) - 5) + 5 + 1;
4118
4119 for (i = 0; i < 3; i++)
4120 name[i] = '!' + ztest_random('~' - '!' + 1);
4121 for (; i < namelen - 1; i++)
4122 name[i] = '.';
4123 name[i] = '\0';
4124
4125 if ((namelen & 1) || micro) {
4126 wsize = sizeof (txg);
4127 wc = 1;
4128 data = &txg;
4129 } else {
4130 wsize = 1;
4131 wc = namelen;
4132 data = string_value;
4133 }
4134
4135 count = -1ULL;
4136 VERIFY(zap_count(os, object, &count) == 0);
4137 ASSERT(count != -1ULL);
4138
4139 /*
4140 * Select an operation: length, lookup, add, update, remove.
4141 */
4142 i = ztest_random(5);
4143
4144 if (i >= 2) {
4145 tx = dmu_tx_create(os);
4146 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4147 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4148 if (txg == 0)
4149 return;
4150 bcopy(name, string_value, namelen);
4151 } else {
4152 tx = NULL;
4153 txg = 0;
4154 bzero(string_value, namelen);
4155 }
4156
4157 switch (i) {
4158
4159 case 0:
4160 error = zap_length(os, object, name, &zl_wsize, &zl_wc);
4161 if (error == 0) {
4162 ASSERT3U(wsize, ==, zl_wsize);
4163 ASSERT3U(wc, ==, zl_wc);
4164 } else {
4165 ASSERT3U(error, ==, ENOENT);
4166 }
4167 break;
4168
4169 case 1:
4170 error = zap_lookup(os, object, name, wsize, wc, data);
4171 if (error == 0) {
4172 if (data == string_value &&
4173 bcmp(name, data, namelen) != 0)
4174 fatal(0, "name '%s' != val '%s' len %d",
4175 name, data, namelen);
4176 } else {
4177 ASSERT3U(error, ==, ENOENT);
4178 }
4179 break;
4180
4181 case 2:
4182 error = zap_add(os, object, name, wsize, wc, data, tx);
4183 ASSERT(error == 0 || error == EEXIST);
4184 break;
4185
4186 case 3:
4187 VERIFY(zap_update(os, object, name, wsize, wc, data, tx) == 0);
4188 break;
4189
4190 case 4:
4191 error = zap_remove(os, object, name, tx);
4192 ASSERT(error == 0 || error == ENOENT);
4193 break;
4194 }
4195
4196 if (tx != NULL)
4197 dmu_tx_commit(tx);
4198 }
4199
4200 /*
4201 * Commit callback data.
4202 */
4203 typedef struct ztest_cb_data {
4204 list_node_t zcd_node;
4205 uint64_t zcd_txg;
4206 int zcd_expected_err;
4207 boolean_t zcd_added;
4208 boolean_t zcd_called;
4209 spa_t *zcd_spa;
4210 } ztest_cb_data_t;
4211
4212 /* This is the actual commit callback function */
4213 static void
4214 ztest_commit_callback(void *arg, int error)
4215 {
4216 ztest_cb_data_t *data = arg;
4217 uint64_t synced_txg;
4218
4219 VERIFY(data != NULL);
4220 VERIFY3S(data->zcd_expected_err, ==, error);
4221 VERIFY(!data->zcd_called);
4222
4223 synced_txg = spa_last_synced_txg(data->zcd_spa);
4224 if (data->zcd_txg > synced_txg)
4225 fatal(0, "commit callback of txg %" PRIu64 " called prematurely"
4226 ", last synced txg = %" PRIu64 "\n", data->zcd_txg,
4227 synced_txg);
4228
4229 data->zcd_called = B_TRUE;
4230
4231 if (error == ECANCELED) {
4232 ASSERT3U(data->zcd_txg, ==, 0);
4233 ASSERT(!data->zcd_added);
4234
4235 /*
4236 * The private callback data should be destroyed here, but
4237 * since we are going to check the zcd_called field after
4238 * dmu_tx_abort(), we will destroy it there.
4239 */
4240 return;
4241 }
4242
4243 /* Was this callback added to the global callback list? */
4244 if (!data->zcd_added)
4245 goto out;
4246
4247 ASSERT3U(data->zcd_txg, !=, 0);
4248
4249 /* Remove our callback from the list */
4250 (void) mutex_lock(&zcl.zcl_callbacks_lock);
4251 list_remove(&zcl.zcl_callbacks, data);
4252 (void) mutex_unlock(&zcl.zcl_callbacks_lock);
4253
4254 out:
4255 umem_free(data, sizeof (ztest_cb_data_t));
4256 }
4257
4258 /* Allocate and initialize callback data structure */
4259 static ztest_cb_data_t *
4260 ztest_create_cb_data(objset_t *os, uint64_t txg)
4261 {
4262 ztest_cb_data_t *cb_data;
4263
4264 cb_data = umem_zalloc(sizeof (ztest_cb_data_t), UMEM_NOFAIL);
4265
4266 cb_data->zcd_txg = txg;
4267 cb_data->zcd_spa = dmu_objset_spa(os);
4268
4269 return (cb_data);
4270 }
4271
4272 /*
4273 * If a number of txgs equal to this threshold have been created after a commit
4274 * callback has been registered but not called, then we assume there is an
4275 * implementation bug.
4276 */
4277 #define ZTEST_COMMIT_CALLBACK_THRESH (TXG_CONCURRENT_STATES + 2)
4278
4279 /*
4280 * Commit callback test.
4281 */
4282 void
4283 ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
4284 {
4285 objset_t *os = zd->zd_os;
4286 ztest_od_t od[1];
4287 dmu_tx_t *tx;
4288 ztest_cb_data_t *cb_data[3], *tmp_cb;
4289 uint64_t old_txg, txg;
4290 int i, error = 0;
4291
4292 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
4293
4294 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
4295 return;
4296
4297 tx = dmu_tx_create(os);
4298
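/*
 * Register callbacks at three different points: cb_data[0] before the
 * tx is assigned (its txg is filled in later), cb_data[1] right after
 * the assignment attempt, and cb_data[2] only when the assignment
 * succeeded. This exercises the ECANCELED path on abort as well as
 * the normal commit path.
 */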
4299 cb_data[0] = ztest_create_cb_data(os, 0);
4300 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]);
4301
4302 dmu_tx_hold_write(tx, od[0].od_object, 0, sizeof (uint64_t));
4303
4304 /* Every once in a while, abort the transaction on purpose */
4305 if (ztest_random(100) == 0)
4306 error = -1;
4307
4308 if (!error)
4309 error = dmu_tx_assign(tx, TXG_NOWAIT);
4310
4311 txg = error ? 0 : dmu_tx_get_txg(tx);
4312
4313 cb_data[0]->zcd_txg = txg;
4314 cb_data[1] = ztest_create_cb_data(os, txg);
4315 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[1]);
4316
4317 if (error) {
4318 /*
4319 * It's not a strict requirement to call the registered
4320 * callbacks from inside dmu_tx_abort(), but that's what
4321 * happens in the current implementation, so we will check
4322 * for it.
4323 */
4324 for (i = 0; i < 2; i++) {
4325 cb_data[i]->zcd_expected_err = ECANCELED;
4326 VERIFY(!cb_data[i]->zcd_called);
4327 }
4328
4329 dmu_tx_abort(tx);
4330
4331 for (i = 0; i < 2; i++) {
4332 VERIFY(cb_data[i]->zcd_called);
4333 umem_free(cb_data[i], sizeof (ztest_cb_data_t));
4334 }
4335
4336 return;
4337 }
4338
4339 cb_data[2] = ztest_create_cb_data(os, txg);
4340 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[2]);
4341
4342 /*
4343 * Read existing data to make sure there isn't a future leak.
4344 */
4345 VERIFY(0 == dmu_read(os, od[0].od_object, 0, sizeof (uint64_t),
4346 &old_txg, DMU_READ_PREFETCH));
4347
4348 if (old_txg > txg)
4349 fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64,
4350 old_txg, txg);
4351
4352 dmu_write(os, od[0].od_object, 0, sizeof (uint64_t), &txg, tx);
4353
4354 (void) mutex_lock(&zcl.zcl_callbacks_lock);
4355
4356 /*
4357 * Since commit callbacks don't have any ordering requirement and since
4358 * it is theoretically possible for a commit callback to be called
4359 * after an arbitrary amount of time has elapsed since its txg has been
4360 * synced, it is difficult to reliably determine whether a commit
4361 * callback hasn't been called due to high load or due to a flawed
4362 * implementation.
4363 *
4364 * In practice, we will assume that if after a certain number of txgs a
4365 * commit callback hasn't been called, then most likely there's an
4366 * implementation bug.
4367 */
4368 tmp_cb = list_head(&zcl.zcl_callbacks);
4369 if (tmp_cb != NULL &&
4370 tmp_cb->zcd_txg > txg - ZTEST_COMMIT_CALLBACK_THRESH) {
4371 fatal(0, "Commit callback threshold exceeded, oldest txg: %"
4372 PRIu64 ", open txg: %" PRIu64 "\n", tmp_cb->zcd_txg, txg);
4373 }
4374
4375 /*
4376 * Let's find the place to insert our callbacks.
4377 *
4378 * Even though the list is ordered by txg, it is possible for the
4379 * insertion point to not be the end because our txg may already be
4380 * quiescing at this point and other callbacks in the open txg
4381 * (from other objsets) may have sneaked in.
4382 */
4383 tmp_cb = list_tail(&zcl.zcl_callbacks);
4384 while (tmp_cb != NULL && tmp_cb->zcd_txg > txg)
4385 tmp_cb = list_prev(&zcl.zcl_callbacks, tmp_cb);
4386
4387 /* Add the 3 callbacks to the list */
4388 for (i = 0; i < 3; i++) {
4389 if (tmp_cb == NULL)
4390 list_insert_head(&zcl.zcl_callbacks, cb_data[i]);
4391 else
4392 list_insert_after(&zcl.zcl_callbacks, tmp_cb,
4393 cb_data[i]);
4394
4395 cb_data[i]->zcd_added = B_TRUE;
4396 VERIFY(!cb_data[i]->zcd_called);
4397
4398 tmp_cb = cb_data[i];
4399 }
4400
4401 (void) mutex_unlock(&zcl.zcl_callbacks_lock);
4402
4403 dmu_tx_commit(tx);
4404 }
4405
4406 /* ARGSUSED */
4407 void
4408 ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id)
4409 {
4410 zfs_prop_t proplist[] = {
4411 ZFS_PROP_CHECKSUM,
4412 ZFS_PROP_COMPRESSION,
4413 ZFS_PROP_COPIES,
4414 ZFS_PROP_DEDUP
4415 };
4416
4417 (void) rw_rdlock(&ztest_name_lock);
4418
4419 for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++)
4420 (void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p],
4421 ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2));
4422
4423 (void) rw_unlock(&ztest_name_lock);
4424 }
4425
4426 /* ARGSUSED */
4427 void
4428 ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id)
4429 {
4430 nvlist_t *props = NULL;
4431
4432 (void) rw_rdlock(&ztest_name_lock);
4433
4434 (void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO,
4435 ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN));
4436
4437 VERIFY3U(spa_prop_get(ztest_spa, &props), ==, 0);
4438
4439 if (ztest_opts.zo_verbose >= 6)
4440 dump_nvlist(props, 4);
4441
4442 nvlist_free(props);
4443
4444 (void) rw_unlock(&ztest_name_lock);
4445 }
4446
4447 /*
4448 * Test snapshot hold/release and deferred destroy.
4449 */
4450 void
4451 ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id)
4452 {
4453 int error;
4454 objset_t *os = zd->zd_os;
4455 objset_t *origin;
4456 char snapname[100];
4457 char fullname[100];
4458 char clonename[100];
4459 char tag[100];
4460 char osname[MAXNAMELEN];
4461
4462 (void) rw_rdlock(&ztest_name_lock);
4463
4464 dmu_objset_name(os, osname);
4465
4466 (void) snprintf(snapname, 100, "sh1_%llu", id);
4467 (void) snprintf(fullname, 100, "%s@%s", osname, snapname);
4468 (void) snprintf(clonename, 100, "%s/ch1_%llu", osname, id);
4469 (void) snprintf(tag, 100, "tag_%llu", id);
4470
4471 /*
4472 * Clean up from any previous run.
4473 */
4474 (void) dmu_objset_destroy(clonename, B_FALSE);
4475 (void) dsl_dataset_user_release(osname, snapname, tag, B_FALSE);
4476 (void) dmu_objset_destroy(fullname, B_FALSE);
4477
4478 /*
4479 * Create snapshot, clone it, mark snap for deferred destroy,
4480 * destroy clone, verify snap was also destroyed.
4481 */
4482 error = dmu_objset_snapshot(osname, snapname, NULL, NULL, FALSE,
4483 FALSE, -1);
4484 if (error) {
4485 if (error == ENOSPC) {
4486 ztest_record_enospc("dmu_objset_snapshot");
4487 goto out;
4488 }
4489 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
4490 }
4491
4492 error = dmu_objset_hold(fullname, FTAG, &origin);
4493 if (error)
4494 fatal(0, "dmu_objset_hold(%s) = %d", fullname, error);
4495
4496 error = dmu_objset_clone(clonename, dmu_objset_ds(origin), 0);
4497 dmu_objset_rele(origin, FTAG);
4498 if (error) {
4499 if (error == ENOSPC) {
4500 ztest_record_enospc("dmu_objset_clone");
4501 goto out;
4502 }
4503 fatal(0, "dmu_objset_clone(%s) = %d", clonename, error);
4504 }
4505
4506 error = dmu_objset_destroy(fullname, B_TRUE);
4507 if (error) {
4508 fatal(0, "dmu_objset_destroy(%s, B_TRUE) = %d",
4509 fullname, error);
4510 }
4511
4512 error = dmu_objset_destroy(clonename, B_FALSE);
4513 if (error)
4514 fatal(0, "dmu_objset_destroy(%s) = %d", clonename, error);
4515
4516 error = dmu_objset_hold(fullname, FTAG, &origin);
4517 if (error != ENOENT)
4518 fatal(0, "dmu_objset_hold(%s) = %d", fullname, error);
4519
4520 /*
4521 * Create snapshot, add temporary hold, verify that we can't
4522 * destroy a held snapshot, mark for deferred destroy,
4523 * release hold, verify snapshot was destroyed.
4524 */
4525 error = dmu_objset_snapshot(osname, snapname, NULL, NULL, FALSE,
4526 FALSE, -1);
4527 if (error) {
4528 if (error == ENOSPC) {
4529 ztest_record_enospc("dmu_objset_snapshot");
4530 goto out;
4531 }
4532 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
4533 }
4534
4535 error = dsl_dataset_user_hold(osname, snapname, tag, B_FALSE,
4536 B_TRUE, -1);
4537 if (error)
4538 fatal(0, "dsl_dataset_user_hold(%s)", fullname, tag);
4539
4540 error = dmu_objset_destroy(fullname, B_FALSE);
4541 if (error != EBUSY) {
4542 fatal(0, "dmu_objset_destroy(%s, B_FALSE) = %d",
4543 fullname, error);
4544 }
4545
4546 error = dmu_objset_destroy(fullname, B_TRUE);
4547 if (error) {
4548 fatal(0, "dmu_objset_destroy(%s, B_TRUE) = %d",
4549 fullname, error);
4550 }
4551
4552 error = dsl_dataset_user_release(osname, snapname, tag, B_FALSE);
4553 if (error)
4554 fatal(0, "dsl_dataset_user_release(%s)", fullname, tag);
4555
4556 VERIFY(dmu_objset_hold(fullname, FTAG, &origin) == ENOENT);
4557
4558 out:
4559 (void) rw_unlock(&ztest_name_lock);
4560 }
4561
4562 /*
4563 * Inject random faults into the on-disk data.
4564 */
4565 /* ARGSUSED */
4566 void
4567 ztest_fault_inject(ztest_ds_t *zd, uint64_t id)
4568 {
4569 ztest_shared_t *zs = ztest_shared;
4570 spa_t *spa = ztest_spa;
4571 int fd;
4572 uint64_t offset;
4573 uint64_t leaves;
4574 uint64_t bad = 0x1990c0ffeedecade;
4575 uint64_t top, leaf;
4576 char path0[MAXPATHLEN];
4577 char pathrand[MAXPATHLEN];
4578 size_t fsize;
4579 int bshift = SPA_MAXBLOCKSHIFT + 2; /* don't scrog all labels */
4580 int iters = 1000;
4581 int maxfaults;
4582 int mirror_save;
4583 vdev_t *vd0 = NULL;
4584 uint64_t guid0 = 0;
4585 boolean_t islog = B_FALSE;
4586
4587 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
4588 maxfaults = MAXFAULTS();
4589 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
4590 mirror_save = zs->zs_mirrors;
4591 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
4592
4593 ASSERT(leaves >= 1);
4594
4595 /*
4596 * We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
4597 */
4598 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
4599
4600 if (ztest_random(2) == 0) {
4601 /*
4602 * Inject errors on a normal data device or slog device.
4603 */
4604 top = ztest_random_vdev_top(spa, B_TRUE);
4605 leaf = ztest_random(leaves) + zs->zs_splits;
4606
4607 /*
4608 * Generate paths to the first leaf in this top-level vdev,
4609 * and to the random leaf we selected. We'll induce transient
4610 * write failures and random online/offline activity on leaf 0,
4611 * and we'll write random garbage to the randomly chosen leaf.
4612 */
4613 (void) snprintf(path0, sizeof (path0), ztest_dev_template,
4614 ztest_opts.zo_dir, ztest_opts.zo_pool,
4615 top * leaves + zs->zs_splits);
4616 (void) snprintf(pathrand, sizeof (pathrand), ztest_dev_template,
4617 ztest_opts.zo_dir, ztest_opts.zo_pool,
4618 top * leaves + leaf);
4619
4620 vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0);
4621 if (vd0 != NULL && vd0->vdev_top->vdev_islog)
4622 islog = B_TRUE;
4623
4624 if (vd0 != NULL && maxfaults != 1) {
4625 /*
4626 * Make vd0 explicitly claim to be unreadable,
4627 * or unwriteable, or reach behind its back
4628 * and close the underlying fd. We can do this if
4629 * maxfaults == 0 because we'll fail and reexecute,
4630 * and we can do it if maxfaults >= 2 because we'll
4631 * have enough redundancy. If maxfaults == 1, the
4632 * combination of this with injection of random data
4633 * corruption below exceeds the pool's fault tolerance.
4634 */
4635 vdev_file_t *vf = vd0->vdev_tsd;
4636
4637 if (vf != NULL && ztest_random(3) == 0) {
4638 (void) close(vf->vf_vnode->v_fd);
4639 vf->vf_vnode->v_fd = -1;
4640 } else if (ztest_random(2) == 0) {
4641 vd0->vdev_cant_read = B_TRUE;
4642 } else {
4643 vd0->vdev_cant_write = B_TRUE;
4644 }
4645 guid0 = vd0->vdev_guid;
4646 }
4647 } else {
4648 /*
4649 * Inject errors on an l2cache device.
4650 */
4651 spa_aux_vdev_t *sav = &spa->spa_l2cache;
4652
4653 if (sav->sav_count == 0) {
4654 spa_config_exit(spa, SCL_STATE, FTAG);
4655 return;
4656 }
4657 vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)];
4658 guid0 = vd0->vdev_guid;
4659 (void) strcpy(path0, vd0->vdev_path);
4660 (void) strcpy(pathrand, vd0->vdev_path);
4661
4662 leaf = 0;
4663 leaves = 1;
4664 maxfaults = INT_MAX; /* no limit on cache devices */
4665 }
4666
4667 spa_config_exit(spa, SCL_STATE, FTAG);
4668
4669 /*
4670 * If we can tolerate two or more faults, or we're dealing
4671 * with a slog, randomly online/offline vd0.
4672 */
4673 if ((maxfaults >= 2 || islog) && guid0 != 0) {
4674 if (ztest_random(10) < 6) {
4675 int flags = (ztest_random(2) == 0 ?
4676 ZFS_OFFLINE_TEMPORARY : 0);
4677
4678 /*
4679 * We have to grab the zs_name_lock as writer to
4680 * prevent a race between offlining a slog and
4681 * destroying a dataset. Offlining the slog will
4682 * grab a reference on the dataset which may cause
4683 * dmu_objset_destroy() to fail with EBUSY thus
4684 * leaving the dataset in an inconsistent state.
4685 */
4686 if (islog)
4687 (void) rw_wrlock(&ztest_name_lock);
4688
4689 VERIFY(vdev_offline(spa, guid0, flags) != EBUSY);
4690
4691 if (islog)
4692 (void) rw_unlock(&ztest_name_lock);
4693 } else {
4694 (void) vdev_online(spa, guid0, 0, NULL);
4695 }
4696 }
4697
4698 if (maxfaults == 0)
4699 return;
4700
4701 /*
4702 * We have at least single-fault tolerance, so inject data corruption.
4703 */
4704 fd = open(pathrand, O_RDWR);
4705
4706 if (fd == -1) /* we hit a gap in the device namespace */
4707 return;
4708
4709 fsize = lseek(fd, 0, SEEK_END);
4710
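/*
 * Pick an 8-byte-aligned offset belonging to the chosen leaf: treat
 * the file as a sequence of (leaves << bshift)-sized stripes, pick a
 * random stripe, step to this leaf's slot within it, then add a random
 * offset within the first half of the slot. The large bshift keeps
 * the corruption coarse-grained so that, per the note above, we don't
 * scribble over all of the vdev labels.
 */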
4711 while (--iters != 0) {
4712 offset = ztest_random(fsize / (leaves << bshift)) *
4713 (leaves << bshift) + (leaf << bshift) +
4714 (ztest_random(1ULL << (bshift - 1)) & -8ULL);
4715
4716 if (offset >= fsize)
4717 continue;
4718
4719 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
4720 if (mirror_save != zs->zs_mirrors) {
4721 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
4722 (void) close(fd);
4723 return;
4724 }
4725
4726 if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad))
4727 fatal(1, "can't inject bad word at 0x%llx in %s",
4728 offset, pathrand);
4729
4730 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
4731
4732 if (ztest_opts.zo_verbose >= 7)
4733 (void) printf("injected bad word into %s,"
4734 " offset 0x%llx\n", pathrand, (u_longlong_t)offset);
4735 }
4736
4737 (void) close(fd);
4738 }
4739
4740 /*
4741 * Verify that DDT repair works as expected.
4742 */
4743 void
4744 ztest_ddt_repair(ztest_ds_t *zd, uint64_t id)
4745 {
4746 ztest_shared_t *zs = ztest_shared;
4747 spa_t *spa = ztest_spa;
4748 objset_t *os = zd->zd_os;
4749 ztest_od_t od[1];
4750 uint64_t object, blocksize, txg, pattern, psize;
4751 enum zio_checksum checksum = spa_dedup_checksum(spa);
4752 dmu_buf_t *db;
4753 dmu_tx_t *tx;
4754 void *buf;
4755 blkptr_t blk;
4756 int copies = 2 * ZIO_DEDUPDITTO_MIN;
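/*
 * We write this many identical copies so the refcount reaches the
 * pool's dedup ditto threshold (ztest_run() sets spa_dedup_ditto to
 * 2 * ZIO_DEDUPDITTO_MIN), which should make the DDT keep an extra
 * physical copy to repair from after we damage the block below.
 */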
4757
4758 blocksize = ztest_random_blocksize();
4759 blocksize = MIN(blocksize, 2048); /* because we write so many */
4760
4761 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
4762
4763 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
4764 return;
4765
4766 /*
4767 * Take the name lock as writer to prevent anyone else from changing
4768 * the pool and dataset properties we need to maintain during this test.
4769 */
4770 (void) rw_wrlock(&ztest_name_lock);
4771
4772 if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum,
4773 B_FALSE) != 0 ||
4774 ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1,
4775 B_FALSE) != 0) {
4776 (void) rw_unlock(&ztest_name_lock);
4777 return;
4778 }
4779
4780 object = od[0].od_object;
4781 blocksize = od[0].od_blocksize;
4782 pattern = zs->zs_guid ^ dmu_objset_fsid_guid(os);
4783
4784 ASSERT(object != 0);
4785
4786 tx = dmu_tx_create(os);
4787 dmu_tx_hold_write(tx, object, 0, copies * blocksize);
4788 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
4789 if (txg == 0) {
4790 (void) rw_unlock(&ztest_name_lock);
4791 return;
4792 }
4793
4794 /*
4795 * Write all the copies of our block.
4796 */
4797 for (int i = 0; i < copies; i++) {
4798 uint64_t offset = i * blocksize;
4799 VERIFY(dmu_buf_hold(os, object, offset, FTAG, &db,
4800 DMU_READ_NO_PREFETCH) == 0);
4801 ASSERT(db->db_offset == offset);
4802 ASSERT(db->db_size == blocksize);
4803 ASSERT(ztest_pattern_match(db->db_data, db->db_size, pattern) ||
4804 ztest_pattern_match(db->db_data, db->db_size, 0ULL));
4805 dmu_buf_will_fill(db, tx);
4806 ztest_pattern_set(db->db_data, db->db_size, pattern);
4807 dmu_buf_rele(db, FTAG);
4808 }
4809
4810 dmu_tx_commit(tx);
4811 txg_wait_synced(spa_get_dsl(spa), txg);
4812
4813 /*
4814 * Find out what block we got.
4815 */
4816 VERIFY(dmu_buf_hold(os, object, 0, FTAG, &db,
4817 DMU_READ_NO_PREFETCH) == 0);
4818 blk = *((dmu_buf_impl_t *)db)->db_blkptr;
4819 dmu_buf_rele(db, FTAG);
4820
4821 /*
4822 * Damage the block. Dedup-ditto will save us when we read it later.
4823 */
4824 psize = BP_GET_PSIZE(&blk);
4825 buf = zio_buf_alloc(psize);
4826 ztest_pattern_set(buf, psize, ~pattern);
4827
4828 (void) zio_wait(zio_rewrite(NULL, spa, 0, &blk,
4829 buf, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE,
4830 ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL));
4831
4832 zio_buf_free(buf, psize);
4833
4834 (void) rw_unlock(&ztest_name_lock);
4835 }
4836
4837 /*
4838 * Scrub the pool.
4839 */
4840 /* ARGSUSED */
4841 void
4842 ztest_scrub(ztest_ds_t *zd, uint64_t id)
4843 {
4844 spa_t *spa = ztest_spa;
4845
4846 (void) spa_scan(spa, POOL_SCAN_SCRUB);
4847 (void) poll(NULL, 0, 100); /* wait a moment, then force a restart */
4848 (void) spa_scan(spa, POOL_SCAN_SCRUB);
4849 }
4850
4851 /*
4852 * Change the guid for the pool.
4853 */
4854 /* ARGSUSED */
4855 void
4856 ztest_reguid(ztest_ds_t *zd, uint64_t id)
4857 {
4858 spa_t *spa = ztest_spa;
4859 uint64_t orig, load;
4860
4861 orig = spa_guid(spa);
4862 load = spa_load_guid(spa);
4863 if (spa_change_guid(spa) != 0)
4864 return;
4865
4866 if (ztest_opts.zo_verbose >= 3) {
4867 (void) printf("Changed guid old %llu -> %llu\n",
4868 (u_longlong_t)orig, (u_longlong_t)spa_guid(spa));
4869 }
4870
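/*
 * The pool guid must have changed, while the in-core load guid must
 * remain the same across the reguid.
 */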
4871 VERIFY3U(orig, !=, spa_guid(spa));
4872 VERIFY3U(load, ==, spa_load_guid(spa));
4873 }
4874
4875 /*
4876 * Rename the pool to a different name and then rename it back.
4877 */
4878 /* ARGSUSED */
4879 void
4880 ztest_spa_rename(ztest_ds_t *zd, uint64_t id)
4881 {
4882 char *oldname, *newname;
4883 spa_t *spa;
4884
4885 (void) rw_wrlock(&ztest_name_lock);
4886
4887 oldname = ztest_opts.zo_pool;
4888 newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
4889 (void) strcpy(newname, oldname);
4890 (void) strcat(newname, "_tmp");
4891
4892 /*
4893 * Do the rename
4894 */
4895 VERIFY3U(0, ==, spa_rename(oldname, newname));
4896
4897 /*
4898 * Try to open it under the old name, which shouldn't exist
4899 */
4900 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
4901
4902 /*
4903 * Open it under the new name and make sure it's still the same spa_t.
4904 */
4905 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
4906
4907 ASSERT(spa == ztest_spa);
4908 spa_close(spa, FTAG);
4909
4910 /*
4911 * Rename it back to the original
4912 */
4913 VERIFY3U(0, ==, spa_rename(newname, oldname));
4914
4915 /*
4916 * Make sure it can still be opened
4917 */
4918 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));
4919
4920 ASSERT(spa == ztest_spa);
4921 spa_close(spa, FTAG);
4922
4923 umem_free(newname, strlen(newname) + 1);
4924
4925 (void) rw_unlock(&ztest_name_lock);
4926 }
4927
4928 /*
4929 * Verify pool integrity by running zdb.
4930 */
4931 static void
4932 ztest_run_zdb(char *pool)
4933 {
4934 int status;
4935 char zdb[MAXPATHLEN + MAXNAMELEN + 20];
4936 char zbuf[1024];
4937 char *bin;
4938 char *ztest;
4939 char *isa;
4940 int isalen;
4941 FILE *fp;
4942
4943 (void) realpath(getexecname(), zdb);
4944
4945 /* zdb lives in /usr/sbin, while ztest lives in /usr/bin */
4946 bin = strstr(zdb, "/usr/bin/");
4947 ztest = strstr(bin, "/ztest");
4948 isa = bin + 8;
4949 isalen = ztest - isa;
4950 isa = strdup(isa);
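/*
 * Assemble the zdb command line in place, overwriting the tail of the
 * zdb[] path buffer: "/usr/bin/<isa>/ztest" becomes
 * "/usr/sbin/<isa>/zdb <options> -U <cachefile> <pool>", preserving
 * the ISA subdirectory (e.g. "amd64") so we invoke the zdb that
 * matches our own word size.
 */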
4951 /* LINTED */
4952 (void) sprintf(bin,
4953 "/usr/sbin%.*s/zdb -bcc%s%s -U %s %s",
4954 isalen,
4955 isa,
4956 ztest_opts.zo_verbose >= 3 ? "s" : "",
4957 ztest_opts.zo_verbose >= 4 ? "v" : "",
4958 spa_config_path,
4959 pool);
4960 free(isa);
4961
4962 if (ztest_opts.zo_verbose >= 5)
4963 (void) printf("Executing %s\n", strstr(zdb, "zdb "));
4964
4965 fp = popen(zdb, "r");
4966
4967 while (fgets(zbuf, sizeof (zbuf), fp) != NULL)
4968 if (ztest_opts.zo_verbose >= 3)
4969 (void) printf("%s", zbuf);
4970
4971 status = pclose(fp);
4972
4973 if (status == 0)
4974 return;
4975
4976 ztest_dump_core = 0;
4977 if (WIFEXITED(status))
4978 fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status));
4979 else
4980 fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status));
4981 }
4982
4983 static void
4984 ztest_walk_pool_directory(char *header)
4985 {
4986 spa_t *spa = NULL;
4987
4988 if (ztest_opts.zo_verbose >= 6)
4989 (void) printf("%s\n", header);
4990
4991 mutex_enter(&spa_namespace_lock);
4992 while ((spa = spa_next(spa)) != NULL)
4993 if (ztest_opts.zo_verbose >= 6)
4994 (void) printf("\t%s\n", spa_name(spa));
4995 mutex_exit(&spa_namespace_lock);
4996 }
4997
4998 static void
4999 ztest_spa_import_export(char *oldname, char *newname)
5000 {
5001 nvlist_t *config, *newconfig;
5002 uint64_t pool_guid;
5003 spa_t *spa;
5004
5005 if (ztest_opts.zo_verbose >= 4) {
5006 (void) printf("import/export: old = %s, new = %s\n",
5007 oldname, newname);
5008 }
5009
5010 /*
5011 * Clean up from previous runs.
5012 */
5013 (void) spa_destroy(newname);
5014
5015 /*
5016 * Get the pool's configuration and guid.
5017 */
5018 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));
5019
5020 /*
5021 * Kick off a scrub to tickle scrub/export races.
5022 */
5023 if (ztest_random(2) == 0)
5024 (void) spa_scan(spa, POOL_SCAN_SCRUB);
5025
5026 pool_guid = spa_guid(spa);
5027 spa_close(spa, FTAG);
5028
5029 ztest_walk_pool_directory("pools before export");
5030
5031 /*
5032 * Export it.
5033 */
5034 VERIFY3U(0, ==, spa_export(oldname, &config, B_FALSE, B_FALSE));
5035
5036 ztest_walk_pool_directory("pools after export");
5037
5038 /*
5039 * Try to import it.
5040 */
5041 newconfig = spa_tryimport(config);
5042 ASSERT(newconfig != NULL);
5043 nvlist_free(newconfig);
5044
5045 /*
5046 * Import it under the new name.
5047 */
5048 VERIFY3U(0, ==, spa_import(newname, config, NULL, 0));
5049
5050 ztest_walk_pool_directory("pools after import");
5051
5052 /*
5053 * Try to import it again -- should fail with EEXIST.
5054 */
5055 VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0));
5056
5057 /*
5058 * Try to import it under a different name -- should fail with EEXIST.
5059 */
5060 VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0));
5061
5062 /*
5063 * Verify that the pool is no longer visible under the old name.
5064 */
5065 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
5066
5067 /*
5068 * Verify that we can open and close the pool using the new name.
5069 */
5070 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
5071 ASSERT(pool_guid == spa_guid(spa));
5072 spa_close(spa, FTAG);
5073
5074 nvlist_free(config);
5075 }
5076
5077 static void
5078 ztest_resume(spa_t *spa)
5079 {
5080 if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6)
5081 (void) printf("resuming from suspended state\n");
5082 spa_vdev_state_enter(spa, SCL_NONE);
5083 vdev_clear(spa, NULL);
5084 (void) spa_vdev_state_exit(spa, NULL, 0);
5085 (void) zio_resume(spa);
5086 }
5087
5088 static void *
5089 ztest_resume_thread(void *arg)
5090 {
5091 spa_t *spa = arg;
5092
5093 while (!ztest_exiting) {
5094 if (spa_suspended(spa))
5095 ztest_resume(spa);
5096 (void) poll(NULL, 0, 100);
5097 }
5098 return (NULL);
5099 }
5100
5101 static void *
5102 ztest_deadman_thread(void *arg)
5103 {
5104 ztest_shared_t *zs = arg;
5105 int grace = 300;
5106 hrtime_t delta;
5107
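/*
 * Sleep for the full scheduled run time plus a grace period; if this
 * thread ever wakes up, the main threads have failed to finish on
 * time and we abort the run.
 */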
5108 delta = (zs->zs_thread_stop - zs->zs_thread_start) / NANOSEC + grace;
5109
5110 (void) poll(NULL, 0, (int)(1000 * delta));
5111
5112 fatal(0, "failed to complete within %d seconds of deadline", grace);
5113
5114 return (NULL);
5115 }
5116
5117 static void
5118 ztest_execute(int test, ztest_info_t *zi, uint64_t id)
5119 {
5120 ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets];
5121 ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test);
5122 hrtime_t functime = gethrtime();
5123
5124 for (int i = 0; i < zi->zi_iters; i++)
5125 zi->zi_func(zd, id);
5126
5127 functime = gethrtime() - functime;
5128
5129 atomic_add_64(&zc->zc_count, 1);
5130 atomic_add_64(&zc->zc_time, functime);
5131
5132 if (ztest_opts.zo_verbose >= 4) {
5133 Dl_info dli;
5134 (void) dladdr((void *)zi->zi_func, &dli);
5135 (void) printf("%6.2f sec in %s\n",
5136 (double)functime / NANOSEC, dli.dli_sname);
5137 }
5138 }
5139
5140 static void *
5141 ztest_thread(void *arg)
5142 {
5143 int rand;
5144 uint64_t id = (uintptr_t)arg;
5145 ztest_shared_t *zs = ztest_shared;
5146 uint64_t call_next;
5147 hrtime_t now;
5148 ztest_info_t *zi;
5149 ztest_shared_callstate_t *zc;
5150
5151 while ((now = gethrtime()) < zs->zs_thread_stop) {
5152 /*
5153 * See if it's time to force a crash.
5154 */
5155 if (now > zs->zs_thread_kill)
5156 ztest_kill(zs);
5157
5158 /*
5159 * If we're getting ENOSPC with some regularity, stop.
5160 */
5161 if (zs->zs_enospc_count > 10)
5162 break;
5163
5164 /*
5165 * Pick a random function to execute.
5166 */
5167 rand = ztest_random(ZTEST_FUNCS);
5168 zi = &ztest_info[rand];
5169 zc = ZTEST_GET_SHARED_CALLSTATE(rand);
5170 call_next = zc->zc_next;
5171
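/*
 * Each function has a shared next-call time. If it has arrived, try
 * to claim this invocation by atomically advancing zc_next by a
 * random amount that averages zi_interval[0]; the thread that wins
 * the compare-and-swap runs the function, so on average it executes
 * once per interval no matter how many threads are running.
 */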
5172 if (now >= call_next &&
5173 atomic_cas_64(&zc->zc_next, call_next, call_next +
5174 ztest_random(2 * zi->zi_interval[0] + 1)) == call_next) {
5175 ztest_execute(rand, zi, id);
5176 }
5177 }
5178
5179 return (NULL);
5180 }
5181
5182 static void
5183 ztest_dataset_name(char *dsname, char *pool, int d)
5184 {
5185 (void) snprintf(dsname, MAXNAMELEN, "%s/ds_%d", pool, d);
5186 }
5187
5188 static void
5189 ztest_dataset_destroy(int d)
5190 {
5191 char name[MAXNAMELEN];
5192
5193 ztest_dataset_name(name, ztest_opts.zo_pool, d);
5194
5195 if (ztest_opts.zo_verbose >= 3)
5196 (void) printf("Destroying %s to free up space\n", name);
5197
5198 /*
5199 * Cleanup any non-standard clones and snapshots. In general,
5200 * ztest thread t operates on dataset (t % zo_datasets),
5201 * so there may be more than one thing to clean up.
5202 */
5203 for (int t = d; t < ztest_opts.zo_threads;
5204 t += ztest_opts.zo_datasets) {
5205 ztest_dsl_dataset_cleanup(name, t);
5206 }
5207
5208 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
5209 DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
5210 }
5211
5212 static void
5213 ztest_dataset_dirobj_verify(ztest_ds_t *zd)
5214 {
5215 uint64_t usedobjs, dirobjs, scratch;
5216
5217 /*
5218 * ZTEST_DIROBJ is the object directory for the entire dataset.
5219 * Therefore, the number of objects in use should equal the
5220 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself.
5221 * If not, we have an object leak.
5222 *
5223 * Note that we can only check this in ztest_dataset_open(),
5224 * when the open-context and syncing-context values agree.
5225 * That's because zap_count() returns the open-context value,
5226 * while dmu_objset_space() returns the rootbp fill count.
5227 */
5228 VERIFY3U(0, ==, zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs));
5229 dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch);
5230 ASSERT3U(dirobjs + 1, ==, usedobjs);
5231 }
5232
5233 static int
5234 ztest_dataset_open(int d)
5235 {
5236 ztest_ds_t *zd = &ztest_ds[d];
5237 uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq;
5238 objset_t *os;
5239 zilog_t *zilog;
5240 char name[MAXNAMELEN];
5241 int error;
5242
5243 ztest_dataset_name(name, ztest_opts.zo_pool, d);
5244
5245 (void) rw_rdlock(&ztest_name_lock);
5246
5247 error = ztest_dataset_create(name);
5248 if (error == ENOSPC) {
5249 (void) rw_unlock(&ztest_name_lock);
5250 ztest_record_enospc(FTAG);
5251 return (error);
5252 }
5253 ASSERT(error == 0 || error == EEXIST);
5254
5255 VERIFY3U(dmu_objset_hold(name, zd, &os), ==, 0);
5256 (void) rw_unlock(&ztest_name_lock);
5257
5258 ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os);
5259
5260 zilog = zd->zd_zilog;
5261
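/*
 * If this dataset's ZIL was claimed on import after a crash, the
 * claimed sequence number must cover everything ztest had already
 * committed (zd_seq); otherwise log records were lost. The same
 * check is repeated against zl_replaying_seq after replay below.
 */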
5262 if (zilog->zl_header->zh_claim_lr_seq != 0 &&
5263 zilog->zl_header->zh_claim_lr_seq < committed_seq)
5264 fatal(0, "missing log records: claimed %llu < committed %llu",
5265 zilog->zl_header->zh_claim_lr_seq, committed_seq);
5266
5267 ztest_dataset_dirobj_verify(zd);
5268
5269 zil_replay(os, zd, ztest_replay_vector);
5270
5271 ztest_dataset_dirobj_verify(zd);
5272
5273 if (ztest_opts.zo_verbose >= 6)
5274 (void) printf("%s replay %llu blocks, %llu records, seq %llu\n",
5275 zd->zd_name,
5276 (u_longlong_t)zilog->zl_parse_blk_count,
5277 (u_longlong_t)zilog->zl_parse_lr_count,
5278 (u_longlong_t)zilog->zl_replaying_seq);
5279
5280 zilog = zil_open(os, ztest_get_data);
5281
5282 if (zilog->zl_replaying_seq != 0 &&
5283 zilog->zl_replaying_seq < committed_seq)
5284 fatal(0, "missing log records: replayed %llu < committed %llu",
5285 zilog->zl_replaying_seq, committed_seq);
5286
5287 return (0);
5288 }
5289
5290 static void
5291 ztest_dataset_close(int d)
5292 {
5293 ztest_ds_t *zd = &ztest_ds[d];
5294
5295 zil_close(zd->zd_zilog);
5296 dmu_objset_rele(zd->zd_os, zd);
5297
5298 ztest_zd_fini(zd);
5299 }
5300
5301 /*
5302 * Kick off threads to run tests on all datasets in parallel.
5303 */
5304 static void
5305 ztest_run(ztest_shared_t *zs)
5306 {
5307 thread_t *tid;
5308 spa_t *spa;
5309 objset_t *os;
5310 thread_t resume_tid;
5311 int error;
5312
5313 ztest_exiting = B_FALSE;
5314
5315 /*
5316 * Initialize parent/child shared state.
5317 */
5318 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0);
5319 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
5320
5321 zs->zs_thread_start = gethrtime();
5322 zs->zs_thread_stop =
5323 zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC;
5324 zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop);
5325 zs->zs_thread_kill = zs->zs_thread_stop;
5326 if (ztest_random(100) < ztest_opts.zo_killrate) {
5327 zs->zs_thread_kill -=
5328 ztest_random(ztest_opts.zo_passtime * NANOSEC);
5329 }
5330
5331 (void) _mutex_init(&zcl.zcl_callbacks_lock, USYNC_THREAD, NULL);
5332
5333 list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t),
5334 offsetof(ztest_cb_data_t, zcd_node));
5335
5336 /*
5337 * Open our pool.
5338 */
5339 kernel_init(FREAD | FWRITE);
5340 VERIFY(spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0);
5341 spa->spa_debug = B_TRUE;
5342 ztest_spa = spa;
5343
5344 VERIFY3U(0, ==, dmu_objset_hold(ztest_opts.zo_pool, FTAG, &os));
5345 zs->zs_guid = dmu_objset_fsid_guid(os);
5346 dmu_objset_rele(os, FTAG);
5347
5348 spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN;
5349
5350 /*
5351 * We don't expect the pool to suspend unless maxfaults == 0,
5352 * in which case ztest_fault_inject() temporarily takes away
5353 * the only valid replica.
5354 */
5355 if (MAXFAULTS() == 0)
5356 spa->spa_failmode = ZIO_FAILURE_MODE_WAIT;
5357 else
5358 spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
5359
5360 /*
5361 * Create a thread to periodically resume suspended I/O.
5362 */
5363 VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND,
5364 &resume_tid) == 0);
5365
5366 /*
5367 * Create a deadman thread to abort() if we hang.
5368 */
5369 VERIFY(thr_create(0, 0, ztest_deadman_thread, zs, THR_BOUND,
5370 NULL) == 0);
5371
5372 /*
5373 * Verify that we can safely inquire about any object,
5374 * whether it's allocated or not. To make it interesting,
5375 * we probe a 5-wide window around each power of two.
5376 * This hits all edge cases, including zero and the max.
5377 */
5378 for (int t = 0; t < 64; t++) {
5379 for (int d = -5; d <= 5; d++) {
5380 error = dmu_object_info(spa->spa_meta_objset,
5381 (1ULL << t) + d, NULL);
5382 ASSERT(error == 0 || error == ENOENT ||
5383 error == EINVAL);
5384 }
5385 }
5386
5387 /*
5388 * If we got any ENOSPC errors on the previous run, destroy something.
5389 */
5390 if (zs->zs_enospc_count != 0) {
5391 int d = ztest_random(ztest_opts.zo_datasets);
5392 ztest_dataset_destroy(d);
5393 }
5394 zs->zs_enospc_count = 0;
5395
5396 tid = umem_zalloc(ztest_opts.zo_threads * sizeof (thread_t),
5397 UMEM_NOFAIL);
5398
5399 if (ztest_opts.zo_verbose >= 4)
5400 (void) printf("starting main threads...\n");
5401
5402 /*
5403 * Kick off all the tests that run in parallel.
5404 */
5405 for (int t = 0; t < ztest_opts.zo_threads; t++) {
5406 if (t < ztest_opts.zo_datasets &&
5407 ztest_dataset_open(t) != 0)
5408 return;
5409 VERIFY(thr_create(0, 0, ztest_thread, (void *)(uintptr_t)t,
5410 THR_BOUND, &tid[t]) == 0);
5411 }
5412
5413 /*
5414 * Wait for all of the tests to complete. We go in reverse order
5415 * so we don't close datasets while threads are still using them.
5416 */
5417 for (int t = ztest_opts.zo_threads - 1; t >= 0; t--) {
5418 VERIFY(thr_join(tid[t], NULL, NULL) == 0);
5419 if (t < ztest_opts.zo_datasets)
5420 ztest_dataset_close(t);
5421 }
5422
5423 txg_wait_synced(spa_get_dsl(spa), 0);
5424
5425 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
5426 zs->zs_space = metaslab_class_get_space(spa_normal_class(spa));
5427
5428 umem_free(tid, ztest_opts.zo_threads * sizeof (thread_t));
5429
5430 /* Kill the resume thread */
5431 ztest_exiting = B_TRUE;
5432 VERIFY(thr_join(resume_tid, NULL, NULL) == 0);
5433 ztest_resume(spa);
5434
5435 /*
5436 * Right before closing the pool, kick off a bunch of async I/O;
5437 * spa_close() should wait for it to complete.
5438 */
5439 for (uint64_t object = 1; object < 50; object++)
5440 dmu_prefetch(spa->spa_meta_objset, object, 0, 1ULL << 20);
5441
5442 spa_close(spa, FTAG);
5443
5444 /*
5445 * Verify that we can loop over all pools.
5446 */
5447 mutex_enter(&spa_namespace_lock);
5448 for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa))
5449 if (ztest_opts.zo_verbose > 3)
5450 (void) printf("spa_next: found %s\n", spa_name(spa));
5451 mutex_exit(&spa_namespace_lock);
5452
5453 /*
5454 * Verify that we can export the pool and reimport it under a
5455 * different name.
5456 */
5457 if (ztest_random(2) == 0) {
5458 char name[MAXNAMELEN];
5459 (void) snprintf(name, MAXNAMELEN, "%s_import",
5460 ztest_opts.zo_pool);
5461 ztest_spa_import_export(ztest_opts.zo_pool, name);
5462 ztest_spa_import_export(name, ztest_opts.zo_pool);
5463 }
5464
5465 kernel_fini();
5466
5467 list_destroy(&zcl.zcl_callbacks);
5468
5469 (void) _mutex_destroy(&zcl.zcl_callbacks_lock);
5470
5471 (void) rwlock_destroy(&ztest_name_lock);
5472 (void) _mutex_destroy(&ztest_vdev_lock);
5473 }
5474
5475 static void
5476 ztest_freeze(void)
5477 {
5478 ztest_ds_t *zd = &ztest_ds[0];
5479 spa_t *spa;
5480 int numloops = 0;
5481
5482 if (ztest_opts.zo_verbose >= 3)
5483 (void) printf("testing spa_freeze()...\n");
5484
5485 kernel_init(FREAD | FWRITE);
5486 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
5487 VERIFY3U(0, ==, ztest_dataset_open(0));
5488
5489 /*
5490 * Force the first log block to be transactionally allocated.
5491 * We have to do this before we freeze the pool -- otherwise
5492 * the log chain won't be anchored.
5493 */
5494 while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) {
5495 ztest_dmu_object_alloc_free(zd, 0);
5496 zil_commit(zd->zd_zilog, 0);
5497 }
5498
5499 txg_wait_synced(spa_get_dsl(spa), 0);
5500
5501 /*
5502 * Freeze the pool. This stops spa_sync() from doing anything,
5503 * so that the only way to record changes from now on is the ZIL.
5504 */
5505 spa_freeze(spa);
5506
5507 /*
5508 * Run tests that generate log records but don't alter the pool config
5509 * or depend on DSL sync tasks (snapshots, objset create/destroy, etc).
5510 * We do a txg_wait_synced() after each iteration to force the txg
5511 * to increase well beyond the last synced value in the uberblock.
5512 * The ZIL should be OK with that.
5513 */
5514 while (ztest_random(10) != 0 &&
5515 numloops++ < ztest_opts.zo_maxloops) {
5516 ztest_dmu_write_parallel(zd, 0);
5517 ztest_dmu_object_alloc_free(zd, 0);
5518 txg_wait_synced(spa_get_dsl(spa), 0);
5519 }
5520
5521 /*
5522 * Commit all of the changes we just generated.
5523 */
5524 zil_commit(zd->zd_zilog, 0);
5525 txg_wait_synced(spa_get_dsl(spa), 0);
5526
5527 /*
5528 * Close our dataset and close the pool.
5529 */
5530 ztest_dataset_close(0);
5531 spa_close(spa, FTAG);
5532 kernel_fini();
5533
5534 /*
5535 * Open and close the pool and dataset to induce log replay.
5536 */
5537 kernel_init(FREAD | FWRITE);
5538 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
5539 VERIFY3U(0, ==, ztest_dataset_open(0));
5540 ztest_dataset_close(0);
5541 spa_close(spa, FTAG);
5542 kernel_fini();
5543 }
5544
5545 void
5546 print_time(hrtime_t t, char *timebuf)
5547 {
5548 hrtime_t s = t / NANOSEC;
5549 hrtime_t m = s / 60;
5550 hrtime_t h = m / 60;
5551 hrtime_t d = h / 24;
5552
5553 s -= m * 60;
5554 m -= h * 60;
5555 h -= d * 24;
5556
5557 timebuf[0] = '\0';
5558
5559 if (d)
5560 (void) sprintf(timebuf,
5561 "%llud%02lluh%02llum%02llus", d, h, m, s);
5562 else if (h)
5563 (void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s);
5564 else if (m)
5565 (void) sprintf(timebuf, "%llum%02llus", m, s);
5566 else
5567 (void) sprintf(timebuf, "%llus", s);
5568 }
5569
5570 static nvlist_t *
5571 make_random_props()
5572 {
5573 nvlist_t *props;
5574
5575 if (ztest_random(2) == 0)
5576 return (NULL);
5577
5578 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
5579 VERIFY(nvlist_add_uint64(props, "autoreplace", 1) == 0);
5580
5581 return (props);
5582 }
5583
5584 /*
5585 * Create a storage pool with the given name and initial vdev size.
5586 * Then test spa_freeze() functionality.
5587 */
5588 static void
5589 ztest_init(ztest_shared_t *zs)
5590 {
5591 spa_t *spa;
5592 nvlist_t *nvroot, *props;
5593
5594 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0);
5595 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
5596
5597 kernel_init(FREAD | FWRITE);
5598
5599 /*
5600 * Create the storage pool.
5601 */
5602 (void) spa_destroy(ztest_opts.zo_pool);
5603 ztest_shared->zs_vdev_next_leaf = 0;
5604 zs->zs_splits = 0;
5605 zs->zs_mirrors = ztest_opts.zo_mirrors;
5606 nvroot = make_vdev_root(NULL, NULL, ztest_opts.zo_vdev_size, 0,
5607 0, ztest_opts.zo_raidz, zs->zs_mirrors, 1);
5608 props = make_random_props();
5609 VERIFY3U(0, ==, spa_create(ztest_opts.zo_pool, nvroot, props,
5610 NULL, NULL));
5611 nvlist_free(nvroot);
5612
5613 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
5614 zs->zs_metaslab_sz =
5615 1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
5616 spa_close(spa, FTAG);
5617
5618 kernel_fini();
5619
5620 ztest_run_zdb(ztest_opts.zo_pool);
5621
5622 ztest_freeze();
5623
5624 ztest_run_zdb(ztest_opts.zo_pool);
5625
5626 (void) rwlock_destroy(&ztest_name_lock);
5627 (void) _mutex_destroy(&ztest_vdev_lock);
5628 }
5629
5630 static void
5631 setup_fds(void)
5632 {
5633 int fd;
5634
5635 char *tmp = tempnam(NULL, NULL);
5636 fd = open(tmp, O_RDWR | O_CREAT, 0700);
5637 ASSERT3U(fd, ==, ZTEST_FD_DATA);
5638 (void) unlink(tmp);
5639 free(tmp);
5640
5641 fd = open("/dev/urandom", O_RDONLY);
5642 ASSERT3U(fd, ==, ZTEST_FD_RAND);
5643 }
5644
5645 static void
5646 setup_hdr(void)
5647 {
5648 ztest_shared_hdr_t *hdr;
5649
5650 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
5651 PROT_READ | PROT_WRITE, MAP_SHARED, ZTEST_FD_DATA, 0);
5652 ASSERT(hdr != MAP_FAILED);
5653
5654 hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t);
5655 hdr->zh_opts_size = sizeof (ztest_shared_opts_t);
5656 hdr->zh_size = sizeof (ztest_shared_t);
5657 hdr->zh_stats_size = sizeof (ztest_shared_callstate_t);
5658 hdr->zh_stats_count = ZTEST_FUNCS;
5659 hdr->zh_ds_size = sizeof (ztest_shared_ds_t);
5660 hdr->zh_ds_count = ztest_opts.zo_datasets;
5661
5662 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
5663 }
5664
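/*
 * Map the shared state backing ZTEST_FD_DATA that setup_hdr() sized.
 * The layout is the header, then the shared options, the main
 * ztest_shared_t, one callstate entry per test function, and one
 * ztest_shared_ds_t per dataset. Since every process maps the same
 * fd MAP_SHARED, parent and child see identical state.
 */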
5665 static void
5666 setup_data(void)
5667 {
5668 int size, offset;
5669 ztest_shared_hdr_t *hdr;
5670 uint8_t *buf;
5671
5672 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
5673 PROT_READ, MAP_SHARED, ZTEST_FD_DATA, 0);
5674 ASSERT(hdr != MAP_FAILED);
5675
5676 size = hdr->zh_hdr_size;
5677 size += hdr->zh_opts_size;
5678 size += hdr->zh_size;
5679 size += hdr->zh_stats_size * hdr->zh_stats_count;
5680 size += hdr->zh_ds_size * hdr->zh_ds_count;
5681
5682 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
5683 hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()),
5684 PROT_READ | PROT_WRITE, MAP_SHARED, ZTEST_FD_DATA, 0);
5685 ASSERT(hdr != MAP_FAILED);
5686 buf = (uint8_t *)hdr;
5687
5688 offset = hdr->zh_hdr_size;
5689 ztest_shared_opts = (void *)&buf[offset];
5690 offset += hdr->zh_opts_size;
5691 ztest_shared = (void *)&buf[offset];
5692 offset += hdr->zh_size;
5693 ztest_shared_callstate = (void *)&buf[offset];
5694 offset += hdr->zh_stats_size * hdr->zh_stats_count;
5695 ztest_shared_ds = (void *)&buf[offset];
5696 }
5697
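/*
 * Fork and exec a child ztest (or an alternate binary with its own
 * library path). Returns B_TRUE if the child was killed by SIGKILL
 * and that is tolerated (ignorekill), B_FALSE if it exited cleanly;
 * any other exit status or signal aborts the run.
 */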
5698 static boolean_t
5699 exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp)
5700 {
5701 pid_t pid;
5702 int status;
5703 char cmdbuf[MAXPATHLEN];
5704
5705 pid = fork();
5706
5707 if (cmd == NULL) {
5708 (void) strlcpy(cmdbuf, getexecname(), sizeof (cmdbuf));
5709 cmd = cmdbuf;
5710 }
5711
5712 if (pid == -1)
5713 fatal(1, "fork failed");
5714
5715 if (pid == 0) { /* child */
5716 char *emptyargv[2] = { cmd, NULL };
5717
5718 struct rlimit rl = { 1024, 1024 };
5719 (void) setrlimit(RLIMIT_NOFILE, &rl);
5720 (void) enable_extended_FILE_stdio(-1, -1);
5721 if (libpath != NULL)
5722 VERIFY(0 == setenv("LD_LIBRARY_PATH", libpath, 1));
5723 (void) execv(cmd, emptyargv);
5724 ztest_dump_core = B_FALSE;
5725 fatal(B_TRUE, "exec failed: %s", cmd);
5726 }
5727
5728 while (waitpid(pid, &status, 0) != pid)
5729 continue;
5730 if (statusp != NULL)
5731 *statusp = status;
5732
5733 if (WIFEXITED(status)) {
5734 if (WEXITSTATUS(status) != 0) {
5735 (void) fprintf(stderr, "child exited with code %d\n",
5736 WEXITSTATUS(status));
5737 exit(2);
5738 }
5739 return (B_FALSE);
5740 } else if (WIFSIGNALED(status)) {
5741 if (!ignorekill || WTERMSIG(status) != SIGKILL) {
5742 (void) fprintf(stderr, "child died with signal %d\n",
5743 WTERMSIG(status));
5744 exit(3);
5745 }
5746 return (B_TRUE);
5747 } else {
5748 (void) fprintf(stderr, "something strange happened to child\n");
5749 exit(4);
5750 /* NOTREACHED */
5751 }
5752 }
5753
5754 static void
5755 ztest_run_init(void)
5756 {
5757 ztest_shared_t *zs = ztest_shared;
5758
5759 ASSERT(ztest_opts.zo_init != 0);
5760
5761 /*
5762 * Blow away any existing copy of zpool.cache
5763 */
5764 (void) remove(spa_config_path);
5765
5766 /*
5767 * Create and initialize our storage pool.
5768 */
5769 for (int i = 1; i <= ztest_opts.zo_init; i++) {
5770 bzero(zs, sizeof (ztest_shared_t));
5771 if (ztest_opts.zo_verbose >= 3 &&
5772 ztest_opts.zo_init != 1) {
5773 (void) printf("ztest_init(), pass %d\n", i);
5774 }
5775 ztest_init(zs);
5776 }
5777 }
5778
5779 int
5780 main(int argc, char **argv)
5781 {
5782 int kills = 0;
5783 int iters = 0;
5784 int older = 0;
5785 int newer = 0;
5786 ztest_shared_t *zs;
5787 ztest_info_t *zi;
5788 ztest_shared_callstate_t *zc;
5789 char timebuf[100];
5790 char numbuf[6];
5791 spa_t *spa;
5792 char cmd[MAXNAMELEN];
5793 boolean_t hasalt;
5794
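/*
 * A child invocation inherits ZTEST_FD_DATA already open (see
 * setup_fds()), so a successful lseek() on that fd is how we tell a
 * re-exec'd child apart from a fresh top-level run.
 */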
5795 boolean_t ischild = (0 == lseek(ZTEST_FD_DATA, 0, SEEK_CUR));
5796 ASSERT(ischild || errno == EBADF);
5797
5798 (void) setvbuf(stdout, NULL, _IOLBF, 0);
5799
5800 if (!ischild) {
5801 process_options(argc, argv);
5802
5803 setup_fds();
5804 setup_hdr();
5805 setup_data();
5806 bcopy(&ztest_opts, ztest_shared_opts,
5807 sizeof (*ztest_shared_opts));
5808 } else {
5809 setup_data();
5810 bcopy(ztest_shared_opts, &ztest_opts, sizeof (ztest_opts));
5811 }
5812 ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count);
5813
5814 /* Override location of zpool.cache */
5815 (void) asprintf((char **)&spa_config_path, "%s/zpool.cache",
5816 ztest_opts.zo_dir);
5817
5818 ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t),
5819 UMEM_NOFAIL);
5820 zs = ztest_shared;
5821
5822 if (ischild) {
5823 metaslab_gang_bang = ztest_opts.zo_metaslab_gang_bang;
5824 metaslab_df_alloc_threshold =
5825 zs->zs_metaslab_df_alloc_threshold;
5826
5827 if (zs->zs_do_init)
5828 ztest_run_init();
5829 else
5830 ztest_run(zs);
5831 exit(0);
5832 }
5833
5834 hasalt = (strlen(ztest_opts.zo_alt_ztest) != 0);
5835
5836 if (ztest_opts.zo_verbose >= 1) {
5837 (void) printf("%llu vdevs, %d datasets, %d threads,"
5838 " %llu seconds...\n",
5839 (u_longlong_t)ztest_opts.zo_vdevs,
5840 ztest_opts.zo_datasets,
5841 ztest_opts.zo_threads,
5842 (u_longlong_t)ztest_opts.zo_time);
5843 }
5844
5845 (void) strlcpy(cmd, getexecname(), sizeof (cmd));
5846
5847 zs->zs_do_init = B_TRUE;
5848 if (strlen(ztest_opts.zo_alt_ztest) != 0) {
5849 if (ztest_opts.zo_verbose >= 1) {
5850 (void) printf("Executing older ztest for "
5851 "initialization: %s\n", ztest_opts.zo_alt_ztest);
5852 }
5853 VERIFY(!exec_child(ztest_opts.zo_alt_ztest,
5854 ztest_opts.zo_alt_libpath, B_FALSE, NULL));
5855 } else {
5856 VERIFY(!exec_child(NULL, NULL, B_FALSE, NULL));
5857 }
5858 zs->zs_do_init = B_FALSE;
5859
5860 zs->zs_proc_start = gethrtime();
5861 zs->zs_proc_stop = zs->zs_proc_start + ztest_opts.zo_time * NANOSEC;
5862
5863 for (int f = 0; f < ZTEST_FUNCS; f++) {
5864 zi = &ztest_info[f];
5865 zc = ZTEST_GET_SHARED_CALLSTATE(f);
5866 if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop)
5867 zc->zc_next = UINT64_MAX;
5868 else
5869 zc->zc_next = zs->zs_proc_start +
5870 ztest_random(2 * zi->zi_interval[0] + 1);
5871 }
5872
5873 /*
5874 * Run the tests in a loop. These tests include fault injection
5875 * to verify that self-healing data works, and forced crashes
5876 * to verify that we never lose on-disk consistency.
5877 */
5878 while (gethrtime() < zs->zs_proc_stop) {
5879 int status;
5880 boolean_t killed;
5881
5882 /*
5883 * Initialize the workload counters for each function.
5884 */
5885 for (int f = 0; f < ZTEST_FUNCS; f++) {
5886 zc = ZTEST_GET_SHARED_CALLSTATE(f);
5887 zc->zc_count = 0;
5888 zc->zc_time = 0;
5889 }
5890
5891 /* Set the allocation switch size */
5892 zs->zs_metaslab_df_alloc_threshold =
5893 ztest_random(zs->zs_metaslab_sz / 4) + 1;
5894
5895 if (!hasalt || ztest_random(2) == 0) {
5896 if (hasalt && ztest_opts.zo_verbose >= 1) {
5897 (void) printf("Executing newer ztest: %s\n",
5898 cmd);
5899 }
5900 newer++;
5901 killed = exec_child(cmd, NULL, B_TRUE, &status);
5902 } else {
5903 if (hasalt && ztest_opts.zo_verbose >= 1) {
5904 (void) printf("Executing older ztest: %s\n",
5905 ztest_opts.zo_alt_ztest);
5906 }
5907 older++;
5908 killed = exec_child(ztest_opts.zo_alt_ztest,
5909 ztest_opts.zo_alt_libpath, B_TRUE, &status);
5910 }
5911
5912 if (killed)
5913 kills++;
5914 iters++;
5915
5916 if (ztest_opts.zo_verbose >= 1) {
5917 hrtime_t now = gethrtime();
5918
5919 now = MIN(now, zs->zs_proc_stop);
5920 print_time(zs->zs_proc_stop - now, timebuf);
5921 nicenum(zs->zs_space, numbuf);
5922
5923 (void) printf("Pass %3d, %8s, %3llu ENOSPC, "
5924 "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n",
5925 iters,
5926 WIFEXITED(status) ? "Complete" : "SIGKILL",
5927 (u_longlong_t)zs->zs_enospc_count,
5928 100.0 * zs->zs_alloc / zs->zs_space,
5929 numbuf,
5930 100.0 * (now - zs->zs_proc_start) /
5931 (ztest_opts.zo_time * NANOSEC), timebuf);
5932 }
5933
5934 if (ztest_opts.zo_verbose >= 2) {
5935 (void) printf("\nWorkload summary:\n\n");
5936 (void) printf("%7s %9s %s\n",
5937 "Calls", "Time", "Function");
5938 (void) printf("%7s %9s %s\n",
5939 "-----", "----", "--------");
5940 for (int f = 0; f < ZTEST_FUNCS; f++) {
5941 Dl_info dli;
5942
5943 zi = &ztest_info[f];
5944 zc = ZTEST_GET_SHARED_CALLSTATE(f);
5945 print_time(zc->zc_time, timebuf);
5946 (void) dladdr((void *)zi->zi_func, &dli);
5947 (void) printf("%7llu %9s %s\n",
5948 (u_longlong_t)zc->zc_count, timebuf,
5949 dli.dli_sname);
5950 }
5951 (void) printf("\n");
5952 }
5953
5954 /*
5955 * It's possible that we killed a child during a rename test,
5956 * in which case we'll have a 'ztest_tmp' pool lying around
5957 * instead of 'ztest'. Do a blind rename in case this happened.
5958 */
5959 kernel_init(FREAD);
5960 if (spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0) {
5961 spa_close(spa, FTAG);
5962 } else {
5963 char tmpname[MAXNAMELEN];
5964 kernel_fini();
5965 kernel_init(FREAD | FWRITE);
5966 (void) snprintf(tmpname, sizeof (tmpname), "%s_tmp",
5967 ztest_opts.zo_pool);
5968 (void) spa_rename(tmpname, ztest_opts.zo_pool);
5969 }
5970 kernel_fini();
5971
5972 ztest_run_zdb(ztest_opts.zo_pool);
5973 }
5974
5975 if (ztest_opts.zo_verbose >= 1) {
5976 if (hasalt) {
5977 (void) printf("%d runs of older ztest: %s\n", older,
5978 ztest_opts.zo_alt_ztest);
5979 (void) printf("%d runs of newer ztest: %s\n", newer,
5980 cmd);
5981 }
5982 (void) printf("%d killed, %d completed, %.0f%% kill rate\n",
5983 kills, iters - kills, (100.0 * kills) / MAX(1, iters));
5984 }
5985
5986 return (0);
5987 }