/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 */

/*
 * ZFS fault injection
 *
 * To handle fault injection, we keep track of a series of zinject_record_t
 * structures which describe which logical block(s) should be injected with a
 * fault.  These are kept in a global list.  Each record corresponds to a given
 * spa_t and maintains a special hold on the spa_t so that it cannot be deleted
 * or exported while the injection record exists.
 *
 * Device level injection is done using the 'zi_guid' field.  If this is set, it
 * means that the error is destined for a particular device, not a piece of
 * data.
 *
 * This is a rather poor data structure and algorithm, but we don't expect more
 * than a few faults at any one time, so it should be sufficient for our needs.
 */
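
/*
 * For example (illustrative only; the values are assumptions), a
 * record arranging for reads of blocks 0-10 of object 5 in objset 21
 * to fail with EIO half of the time would look roughly like:
 *
 *      zinject_record_t zr = { 0 };
 *      zr.zi_cmd = ZINJECT_DATA_FAULT;
 *      zr.zi_objset = 21;
 *      zr.zi_object = 5;
 *      zr.zi_level = 0;
 *      zr.zi_start = 0;
 *      zr.zi_end = 10;
 *      zr.zi_error = EIO;
 *      zr.zi_freq = 50;
 *
 * Records like this are normally built by zinject(1M) and passed to
 * zio_inject_fault() below via the ZFS_IOC_INJECT_FAULT ioctl.
 */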

#include <sys/arc.h>
#include <sys/zio_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_objset.h>
#include <sys/fs/zfs.h>

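/*
 * Count of registered injection handlers; it is incremented in
 * zio_inject_fault() and decremented in zio_clear_fault().  A nonzero
 * value arms the fault injection checks in the I/O path.
 */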
uint32_t zio_injection_enabled;

/*
 * Data describing each zinject handler registered on the system; this
 * includes the list node that links the handler into the global
 * zinject handler list.
 */
typedef struct inject_handler {
        int                     zi_id;
        spa_t                   *zi_spa;
        zinject_record_t        zi_record;
        uint64_t                *zi_lanes;
        int                     zi_next_lane;
        list_node_t             zi_link;
} inject_handler_t;

/*
 * List of all zinject handlers registered on the system, protected by
 * the inject_lock defined below.
 */
static list_t inject_handlers;

/*
 * This lock protects insertion into and traversal of the inject
 * handler list defined above, as well as inject_delay_count. Any time
 * a handler is inserted into or removed from the list, the lock must
 * be held as RW_WRITER; any time the list is traversed without being
 * modified, it must be held as (at least) RW_READER.
 */
static krwlock_t inject_lock;

/*
 * This holds the number of zinject delay handlers that have been
 * registered on the system. It is protected by the inject_lock defined
 * above: modifications to this count require inject_lock held as
 * RW_WRITER, and reads require it held as (at least) RW_READER.
 */
static int inject_delay_count = 0;

/*
 * This lock is used only in zio_handle_io_delay(), refer to the comment
 * in that function for more details.
 */
static kmutex_t inject_delay_mtx;

/*
 * Used to assign unique identifying numbers to each new zinject handler.
 */
static int inject_next_id = 1;

/*
 * Returns true if the given record matches the I/O in progress.  A
 * zi_freq of zero means the fault always fires on a match; otherwise
 * it fires zi_freq percent of the time.
 */
static boolean_t
zio_match_handler(zbookmark_phys_t *zb, uint64_t type,
    zinject_record_t *record, int error)
{
        /*
         * Check for a match against the MOS, which is based on type
         */
        if (zb->zb_objset == DMU_META_OBJSET &&
            record->zi_objset == DMU_META_OBJSET &&
            record->zi_object == DMU_META_DNODE_OBJECT) {
                if (record->zi_type == DMU_OT_NONE ||
                    type == record->zi_type)
                        return (record->zi_freq == 0 ||
                            spa_get_random(100) < record->zi_freq);
                else
                        return (B_FALSE);
        }

        /*
         * Check for an exact match.
         */
        if (zb->zb_objset == record->zi_objset &&
            zb->zb_object == record->zi_object &&
            zb->zb_level == record->zi_level &&
            zb->zb_blkid >= record->zi_start &&
            zb->zb_blkid <= record->zi_end &&
            error == record->zi_error)
                return (record->zi_freq == 0 ||
                    spa_get_random(100) < record->zi_freq);

        return (B_FALSE);
}

/*
 * Panic the system when a config change happens in the function
 * specified by tag.
 */
void
zio_handle_panic_injection(spa_t *spa, char *tag, uint64_t type)
{
        inject_handler_t *handler;

        rw_enter(&inject_lock, RW_READER);

        for (handler = list_head(&inject_handlers); handler != NULL;
            handler = list_next(&inject_handlers, handler)) {

                if (spa != handler->zi_spa)
                        continue;

                if (handler->zi_record.zi_type == type &&
                    strcmp(tag, handler->zi_record.zi_func) == 0)
                        panic("Panic requested in function %s\n", tag);
        }

        rw_exit(&inject_lock);
}

/*
 * Determine if the I/O in question should return failure.  Returns the errno
 * to be returned to the caller.
 */
int
zio_handle_fault_injection(zio_t *zio, int error)
{
        int ret = 0;
        inject_handler_t *handler;

        /*
         * Ignore I/O not associated with any logical data.
         */
        if (zio->io_logical == NULL)
                return (0);

        /*
         * Currently, we only support fault injection on reads.
         */
        if (zio->io_type != ZIO_TYPE_READ)
                return (0);

        rw_enter(&inject_lock, RW_READER);

        for (handler = list_head(&inject_handlers); handler != NULL;
            handler = list_next(&inject_handlers, handler)) {

                if (zio->io_spa != handler->zi_spa ||
                    handler->zi_record.zi_cmd != ZINJECT_DATA_FAULT)
                        continue;

                /* If this handler matches, return the requested error */
                if (zio_match_handler(&zio->io_logical->io_bookmark,
                    zio->io_bp ? BP_GET_TYPE(zio->io_bp) : DMU_OT_NONE,
                    &handler->zi_record, error)) {
                        ret = error;
                        break;
                }
        }

        rw_exit(&inject_lock);

        return (ret);
}

/*
 * Determine if the zio is part of a label update and has an injection
 * handler associated with that portion of the label. Currently, we
 * allow error injection in either the nvlist or the uberblock region
 * of the vdev label.
 */
int
zio_handle_label_injection(zio_t *zio, int error)
{
        inject_handler_t *handler;
        vdev_t *vd = zio->io_vd;
        uint64_t offset = zio->io_offset;
        int label;
        int ret = 0;

        if (offset >= VDEV_LABEL_START_SIZE &&
            offset < vd->vdev_psize - VDEV_LABEL_END_SIZE)
                return (0);

        rw_enter(&inject_lock, RW_READER);

        for (handler = list_head(&inject_handlers); handler != NULL;
            handler = list_next(&inject_handlers, handler)) {
                uint64_t start = handler->zi_record.zi_start;
                uint64_t end = handler->zi_record.zi_end;

                if (handler->zi_record.zi_cmd != ZINJECT_LABEL_FAULT)
                        continue;

                /*
                 * The injection region is specified as relative
                 * offsets within a vdev label. We must determine which
                 * label is being updated and adjust our region
                 * accordingly.
                 */
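                /*
                 * Illustrative example (values assumed): a handler
                 * with zi_start = 0 and zi_end = 4095 covers the
                 * first 4KB of whichever label is being written.
                 * Labels 0 and 1 sit at the front of the device and
                 * labels 2 and 3 at the end, so for label 2 that
                 * relative range is translated to absolute offsets
                 * near vdev_psize - VDEV_LABEL_END_SIZE.
                 */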
                label = vdev_label_number(vd->vdev_psize, offset);
                start = vdev_label_offset(vd->vdev_psize, label, start);
                end = vdev_label_offset(vd->vdev_psize, label, end);

                if (zio->io_vd->vdev_guid == handler->zi_record.zi_guid &&
                    (offset >= start && offset <= end)) {
                        ret = error;
                        break;
                }
        }
        rw_exit(&inject_lock);
        return (ret);
}


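/*
 * Determine if the given zio (or the device open in progress, when
 * zio is NULL) should fail with an injected device-level error.
 * Returns the errno to inject, or 0 for no fault.
 */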
int
zio_handle_device_injection(vdev_t *vd, zio_t *zio, int error)
{
        inject_handler_t *handler;
        int ret = 0;

        /*
         * We skip over faults in the labels unless it's during
         * device open (i.e. zio == NULL).
         */
        if (zio != NULL) {
                uint64_t offset = zio->io_offset;

                if (offset < VDEV_LABEL_START_SIZE ||
                    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE)
                        return (0);
        }

        rw_enter(&inject_lock, RW_READER);

        for (handler = list_head(&inject_handlers); handler != NULL;
            handler = list_next(&inject_handlers, handler)) {

                if (handler->zi_record.zi_cmd != ZINJECT_DEVICE_FAULT)
                        continue;

                if (vd->vdev_guid == handler->zi_record.zi_guid) {
                        if (handler->zi_record.zi_failfast &&
                            (zio == NULL || (zio->io_flags &
                            (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))) {
                                continue;
                        }

                        /* Handle type specific I/O failures */
                        if (zio != NULL &&
                            handler->zi_record.zi_iotype != ZIO_TYPES &&
                            handler->zi_record.zi_iotype != zio->io_type)
                                continue;

                        if (handler->zi_record.zi_error == error) {
                                /*
                                 * For a failed open, pretend like the device
                                 * has gone away.
                                 */
                                if (error == ENXIO)
                                        vd->vdev_stat.vs_aux =
                                            VDEV_AUX_OPEN_FAILED;

                                /*
                                 * Treat these errors as if they had been
                                 * retried so that all the appropriate stats
                                 * and FMA events are generated.
                                 */
                                if (!handler->zi_record.zi_failfast &&
                                    zio != NULL)
                                        zio->io_flags |= ZIO_FLAG_IO_RETRY;

                                ret = error;
                                break;
                        }
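                        /*
                         * A handler registered with ENXIO means the
                         * device has "gone away"; fail any other
                         * error check against this vdev with EIO.
                         */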
                        if (handler->zi_record.zi_error == ENXIO) {
                                ret = SET_ERROR(EIO);
                                break;
                        }
                }
        }

        rw_exit(&inject_lock);

        return (ret);
}

/*
 * Simulate hardware that ignores cache flushes.  For the requested
 * duration (a number of seconds, or of txgs), suppress the actual
 * writes to disk.
 */
void
zio_handle_ignored_writes(zio_t *zio)
{
        inject_handler_t *handler;

        rw_enter(&inject_lock, RW_READER);

        for (handler = list_head(&inject_handlers); handler != NULL;
            handler = list_next(&inject_handlers, handler)) {

                /* Ignore errors not destined for this pool */
                if (zio->io_spa != handler->zi_spa ||
                    handler->zi_record.zi_cmd != ZINJECT_IGNORED_WRITES)
                        continue;

                /*
                 * A positive duration is a number of seconds; a
                 * negative duration is a number of txgs.
                 */
                if (handler->zi_record.zi_timer == 0) {
                        if (handler->zi_record.zi_duration > 0)
                                handler->zi_record.zi_timer = ddi_get_lbolt64();
                        else
                                handler->zi_record.zi_timer = zio->io_txg;
                }

                /* Have a "problem" writing 60% of the time */
                if (spa_get_random(100) < 60)
                        zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
                break;
        }

        rw_exit(&inject_lock);
}

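/*
 * Verify that a pool with an active ignored-writes handler has not
 * outlived the handler's window (a duration in seconds, or in txgs
 * when negative); the VERIFYs below fire if it has.
 */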
void
spa_handle_ignored_writes(spa_t *spa)
{
        inject_handler_t *handler;

        if (zio_injection_enabled == 0)
                return;

        rw_enter(&inject_lock, RW_READER);

        for (handler = list_head(&inject_handlers); handler != NULL;
            handler = list_next(&inject_handlers, handler)) {

                if (spa != handler->zi_spa ||
                    handler->zi_record.zi_cmd != ZINJECT_IGNORED_WRITES)
                        continue;

                if (handler->zi_record.zi_duration > 0) {
                        VERIFY(handler->zi_record.zi_timer == 0 ||
                            handler->zi_record.zi_timer +
                            handler->zi_record.zi_duration * hz >
                            ddi_get_lbolt64());
                } else {
                        /* duration is negative so the subtraction here adds */
                        VERIFY(handler->zi_record.zi_timer == 0 ||
                            handler->zi_record.zi_timer -
                            handler->zi_record.zi_duration >=
                            spa_syncing_txg(spa));
                }
        }

        rw_exit(&inject_lock);
}

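/*
 * Compute the delay target for this zio: the absolute gethrtime()
 * value at which it should complete.  A return value of zero means
 * the zio is not subject to any delay handler.
 */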
hrtime_t
zio_handle_io_delay(zio_t *zio)
{
        vdev_t *vd = zio->io_vd;
        inject_handler_t *min_handler = NULL;
        hrtime_t min_target = 0;

        rw_enter(&inject_lock, RW_READER);

        /*
         * inject_delay_count is a subset of zio_injection_enabled that
         * is only incremented for delay handlers. These checks are
         * mainly added to remind the reader why we're not explicitly
         * checking zio_injection_enabled like the other functions.
         */
        IMPLY(inject_delay_count > 0, zio_injection_enabled > 0);
        IMPLY(zio_injection_enabled == 0, inject_delay_count == 0);

        /*
         * If there aren't any inject delay handlers registered, then we
         * can short circuit and simply return 0 here. A value of zero
         * informs zio_delay_interrupt() that this request should not be
         * delayed. This short circuit keeps us from acquiring the
         * inject_delay_mtx unnecessarily.
         */
        if (inject_delay_count == 0) {
                rw_exit(&inject_lock);
                return (0);
        }

        /*
         * Each inject handler has a number of "lanes" associated with
         * it. Each lane is able to handle requests independently of one
         * another, and at a latency defined by the inject handler
         * record's zi_timer field. Thus if a handler is configured with
         * a single lane with a 10ms latency, it will delay requests
         * such that only a single request is completed every 10ms. So,
         * if more than one request is attempted per each 10ms interval,
         * the average latency of the requests will be greater than
         * 10ms; but if only a single request is submitted each 10ms
         * interval the average latency will be 10ms.
         *
         * We need to acquire this mutex to prevent multiple concurrent
         * threads being assigned to the same lane of a given inject
         * handler. The mutex allows us to perform the following two
         * operations atomically:
         *
         *      1. determine the minimum handler and minimum target
         *         value of all the possible handlers
         *      2. update that minimum handler's lane array
         *
         * Without atomicity, two (or more) threads could pick the same
         * lane in step (1), and then conflict with each other in step
         * (2). This could allow a single lane handler to process
         * multiple requests simultaneously, which shouldn't be possible.
         */
        mutex_enter(&inject_delay_mtx);

        for (inject_handler_t *handler = list_head(&inject_handlers);
            handler != NULL; handler = list_next(&inject_handlers, handler)) {
                if (handler->zi_record.zi_cmd != ZINJECT_DELAY_IO)
                        continue;

                if (vd->vdev_guid != handler->zi_record.zi_guid)
                        continue;

                /*
                 * Defensive; should never happen as the array allocation
                 * occurs prior to inserting this handler on the list.
                 */
                ASSERT3P(handler->zi_lanes, !=, NULL);

                /*
                 * This should never happen, the zinject command should
                 * prevent a user from setting an IO delay with zero lanes.
                 */
                ASSERT3U(handler->zi_record.zi_nlanes, !=, 0);

                ASSERT3U(handler->zi_record.zi_nlanes, >,
                    handler->zi_next_lane);

                /*
                 * We want to issue this IO to the lane that will become
                 * idle the soonest, so we compare the soonest this
                 * specific handler can complete the IO with all other
                 * handlers, to find the lowest value of all possible
                 * lanes. We then use this lane to submit the request.
                 *
                 * Since each handler has a constant value for its
                 * delay, we can just use the "next" lane for that
                 * handler; as it will always be the lane with the
                 * lowest value for that particular handler (i.e. the
                 * lane that will become idle the soonest). This saves a
                 * scan of each handler's lanes array.
                 *
                 * There are two cases to consider when determining when
                 * this specific IO request should complete. If this
                 * lane is idle, we want to "submit" the request now so
                 * it will complete after zi_timer nanoseconds. Thus,
                 * we set the target to now + zi_timer.
                 *
                 * If the lane is busy, we want this request to complete
                 * zi_timer nanoseconds after the lane becomes idle.
                 * Since the 'zi_lanes' array holds the time at which
                 * each lane will become idle, we use that value to
                 * determine when this request should complete.
                 */
                hrtime_t idle = handler->zi_record.zi_timer + gethrtime();
                hrtime_t busy = handler->zi_record.zi_timer +
                    handler->zi_lanes[handler->zi_next_lane];
                hrtime_t target = MAX(idle, busy);
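
                /*
                 * Illustrative example (numbers assumed): a single
                 * handler with zi_timer = 10ms and zi_nlanes = 2, and
                 * three requests arriving at time T.  Request 1 finds
                 * lane 0 idle and targets T+10ms; request 2 finds
                 * lane 1 idle and also targets T+10ms; request 3
                 * wraps back to lane 0, which is now busy until
                 * T+10ms, so its target is (T+10ms) + 10ms = T+20ms.
                 * Average latency thus exceeds 10ms whenever more
                 * than two requests arrive per 10ms interval.
                 */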

                if (min_handler == NULL) {
                        min_handler = handler;
                        min_target = target;
                        continue;
                }

                ASSERT3P(min_handler, !=, NULL);
                ASSERT3U(min_target, !=, 0);

                /*
                 * We don't yet increment the "next lane" variable since
                 * we still might find a lower value lane in another
                 * handler during any remaining iterations. Once we're
                 * sure we've selected the absolute minimum, we'll claim
                 * the lane and increment the handler's "next lane"
                 * field below.
                 */

                if (target < min_target) {
                        min_handler = handler;
                        min_target = target;
                }
        }

        /*
         * 'min_handler' will be NULL if no IO delays are registered for
         * this vdev, otherwise it will point to the handler containing
         * the lane that will become idle the soonest.
         */
        if (min_handler != NULL) {
                ASSERT3U(min_target, !=, 0);
                min_handler->zi_lanes[min_handler->zi_next_lane] = min_target;

                /*
                 * If we've used all possible lanes for this handler,
                 * loop back and start using the first lane again;
                 * otherwise, just increment the lane index.
                 */
                min_handler->zi_next_lane = (min_handler->zi_next_lane + 1) %
                    min_handler->zi_record.zi_nlanes;
        }

        mutex_exit(&inject_delay_mtx);
        rw_exit(&inject_lock);

        return (min_target);
}

/*
 * Create a new handler for the given record.  We add it to the list,
 * adding a reference to the spa_t in the process.  We also increment
 * zio_injection_enabled, the global switch that arms fault injection.
 */
int
zio_inject_fault(char *name, int flags, int *id, zinject_record_t *record)
{
        inject_handler_t *handler;
        int error;
        spa_t *spa;

        /*
         * If this is pool-wide metadata, make sure we unload the corresponding
         * spa_t, so that the next attempt to load it will trigger the fault.
         * We call spa_reset() to unload the pool appropriately.
         */
        if (flags & ZINJECT_UNLOAD_SPA)
                if ((error = spa_reset(name)) != 0)
                        return (error);

        if (record->zi_cmd == ZINJECT_DELAY_IO) {
                /*
                 * A value of zero for the number of lanes or for the
                 * delay time doesn't make sense.
                 */
                if (record->zi_timer == 0 || record->zi_nlanes == 0)
                        return (SET_ERROR(EINVAL));

                /*
                 * The number of lanes is directly mapped to the size of
                 * an array used by the handler. Thus, to ensure the
                 * user doesn't trigger an allocation that's "too large"
                 * we cap the number of lanes here.
                 */
                if (record->zi_nlanes >= UINT16_MAX)
                        return (SET_ERROR(EINVAL));
        }

        if (!(flags & ZINJECT_NULL)) {
                /*
                 * spa_inject_addref() will add an injection reference,
                 * which will prevent the pool from being removed from
                 * the namespace while still allowing it to be
                 * unloaded.
                 */
                if ((spa = spa_inject_addref(name)) == NULL)
                        return (SET_ERROR(ENOENT));

                handler = kmem_alloc(sizeof (inject_handler_t), KM_SLEEP);

                handler->zi_spa = spa;
                handler->zi_record = *record;

                if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
                        handler->zi_lanes = kmem_zalloc(
                            sizeof (*handler->zi_lanes) *
                            handler->zi_record.zi_nlanes, KM_SLEEP);
                        handler->zi_next_lane = 0;
                } else {
                        handler->zi_lanes = NULL;
                        handler->zi_next_lane = 0;
                }

                rw_enter(&inject_lock, RW_WRITER);

                /*
                 * We can't move this increment into the conditional
                 * above because we need to hold the RW_WRITER lock of
                 * inject_lock, and we don't want to hold that while
                 * allocating the handler's zi_lanes array.
                 */
                if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
                        ASSERT3S(inject_delay_count, >=, 0);
                        inject_delay_count++;
                        ASSERT3S(inject_delay_count, >, 0);
                }

                *id = handler->zi_id = inject_next_id++;
                list_insert_tail(&inject_handlers, handler);
                atomic_inc_32(&zio_injection_enabled);

                rw_exit(&inject_lock);
        }

        /*
         * Flush the ARC, so that any attempts to read this data will end up
         * going to the ZIO layer.  Note that this is a little overkill, but
         * we don't have the necessary ARC interfaces to do anything else, and
         * fault injection isn't a performance critical path.
         */
        if (flags & ZINJECT_FLUSH_ARC)
                /*
                 * We must use FALSE to ensure arc_flush returns, since
                 * we're not preventing concurrent ARC insertions.
                 */
                arc_flush(NULL, FALSE);

        return (0);
}
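
/*
 * Illustrative example (values assumed; records are normally built by
 * zinject(1M) and delivered via the ZFS_IOC_INJECT_FAULT ioctl):
 * registering a delay handler with two lanes and a 10ms per-lane
 * latency against the pool "tank", where 'guid' is the guid of the
 * target leaf vdev:
 *
 *      zinject_record_t rec = { 0 };
 *      int id, error;
 *
 *      rec.zi_cmd = ZINJECT_DELAY_IO;
 *      rec.zi_guid = guid;
 *      rec.zi_timer = MSEC2NSEC(10);
 *      rec.zi_nlanes = 2;
 *      error = zio_inject_fault("tank", 0, &id, &rec);
 */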

/*
 * Returns the next record with an ID greater than that supplied to the
 * function.  Used to iterate over all handlers in the system.
 */
int
zio_inject_list_next(int *id, char *name, size_t buflen,
    zinject_record_t *record)
{
        inject_handler_t *handler;
        int ret;

        mutex_enter(&spa_namespace_lock);
        rw_enter(&inject_lock, RW_READER);

        for (handler = list_head(&inject_handlers); handler != NULL;
            handler = list_next(&inject_handlers, handler))
                if (handler->zi_id > *id)
                        break;

        if (handler) {
                *record = handler->zi_record;
                *id = handler->zi_id;
                (void) strncpy(name, spa_name(handler->zi_spa), buflen);
                ret = 0;
        } else {
                ret = SET_ERROR(ENOENT);
        }

        rw_exit(&inject_lock);
        mutex_exit(&spa_namespace_lock);

        return (ret);
}

/*
 * Clear the fault handler with the given identifier, or return ENOENT if none
 * exists.
 */
int
zio_clear_fault(int id)
{
        inject_handler_t *handler;

        rw_enter(&inject_lock, RW_WRITER);

        for (handler = list_head(&inject_handlers); handler != NULL;
            handler = list_next(&inject_handlers, handler))
                if (handler->zi_id == id)
                        break;

        if (handler == NULL) {
                rw_exit(&inject_lock);
                return (SET_ERROR(ENOENT));
        }

        if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
                ASSERT3S(inject_delay_count, >, 0);
                inject_delay_count--;
                ASSERT3S(inject_delay_count, >=, 0);
        }

        list_remove(&inject_handlers, handler);
        rw_exit(&inject_lock);

        if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
                ASSERT3P(handler->zi_lanes, !=, NULL);
                kmem_free(handler->zi_lanes, sizeof (*handler->zi_lanes) *
                    handler->zi_record.zi_nlanes);
        } else {
                ASSERT3P(handler->zi_lanes, ==, NULL);
        }

        spa_inject_delref(handler->zi_spa);
        kmem_free(handler, sizeof (inject_handler_t));
        atomic_dec_32(&zio_injection_enabled);

        return (0);
}

void
zio_inject_init(void)
{
        rw_init(&inject_lock, NULL, RW_DEFAULT, NULL);
        mutex_init(&inject_delay_mtx, NULL, MUTEX_DEFAULT, NULL);
        list_create(&inject_handlers, sizeof (inject_handler_t),
            offsetof(inject_handler_t, zi_link));
}

void
zio_inject_fini(void)
{
        list_destroy(&inject_handlers);
        mutex_destroy(&inject_delay_mtx);
        rw_destroy(&inject_lock);
}