NEX-18069 Unable to get/set VDEV_PROP_RESILVER_MAXACTIVE/VDEV_PROP_RESILVER_MINACTIVE props
Reviewed by: Joyce McIntosh <joyce.mcintosh@nexenta.com>
Reviewed by: Saso Kiselkov <saso.kiselkov@nexenta.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
NEX-9552 zfs_scan_idle throttling harms performance and needs to be removed
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
Reviewed by: Roman Strashkin <roman.strashkin@nexenta.com>
NEX-13937 Improve kstat performance
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Evan Layton <evan.layton@nexenta.com>
NEX-3558 KRRP Integration
OS-103 handle CoS descriptor persistent references across vdev operations
OS-80 support for vdev and CoS properties for the new I/O scheduler
OS-95 lint warning introduced by OS-61
re #12643 rb4064 ZFS meta refactoring - vdev utilization tracking, auto-dedup
re #12585 rb4049 ZFS++ work port - refactoring to improve separation of open/closed code, bug fixes, performance improvements - open code
Bug 11205: add missing libzfs_closed_stubs.c to fix opensource-only build.
ZFS plus work: special vdevs, cos, cos/vdev properties


   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  23  * Use is subject to license terms.
  24  */
  25 
  26 /*
  27  * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
  28  * Copyright (c) 2014 Integros [integros.com]
  29  */
  30 
  31 #include <sys/zfs_context.h>
  32 #include <sys/vdev_impl.h>
  33 #include <sys/spa_impl.h>
  34 #include <sys/zio.h>
  35 #include <sys/avl.h>
  36 #include <sys/dsl_pool.h>
  37 #include <sys/metaslab_impl.h>
  38 #include <sys/abd.h>
  39 
  40 /*
  41  * ZFS I/O Scheduler
  42  * ---------------
  43  *
  44  * ZFS issues I/O operations to leaf vdevs to satisfy and complete zios.  The
  45  * I/O scheduler determines when and in what order those operations are
  46  * issued.  The I/O scheduler divides operations into six I/O classes
  47  * prioritized in the following order: sync read, sync write, async read,
  48  * async write, scrub/resilver, and removal.  Each queue defines the minimum and
  49  * maximum number of concurrent operations that may be issued to the device.
  50  * In addition, the device has an aggregate maximum. Note that the sum of the
  51  * per-queue minimums must not exceed the aggregate maximum, and if the
  52  * aggregate maximum is equal to or greater than the sum of the per-queue

 129  * and then select from queues in the order defined by zio_priority_t.
 130  *
 131  * In general, smaller max_active's will lead to lower latency of synchronous
 132  * operations.  Larger max_active's may lead to higher overall throughput,
 133  * depending on underlying storage.
 134  *
 135  * The ratio of the queues' max_actives determines the balance of performance
 136  * between reads, writes, and scrubs.  E.g., increasing
 137  * zfs_vdev_scrub_max_active will cause the scrub or resilver to complete
 138  * more quickly, but reads and writes to have higher latency and lower
 139  * throughput.
 140  */
 141 uint32_t zfs_vdev_sync_read_min_active = 10;
 142 uint32_t zfs_vdev_sync_read_max_active = 10;
 143 uint32_t zfs_vdev_sync_write_min_active = 10;
 144 uint32_t zfs_vdev_sync_write_max_active = 10;
 145 uint32_t zfs_vdev_async_read_min_active = 1;
 146 uint32_t zfs_vdev_async_read_max_active = 3;
 147 uint32_t zfs_vdev_async_write_min_active = 1;
 148 uint32_t zfs_vdev_async_write_max_active = 10;
 149 uint32_t zfs_vdev_scrub_min_active = 1;
 150 uint32_t zfs_vdev_scrub_max_active = 2;
 151 uint32_t zfs_vdev_removal_min_active = 1;
 152 uint32_t zfs_vdev_removal_max_active = 2;
 153 
 154 /*
 155  * When the pool has less than zfs_vdev_async_write_active_min_dirty_percent
 156  * dirty data, use zfs_vdev_async_write_min_active.  When it has more than
 157  * zfs_vdev_async_write_active_max_dirty_percent, use
 158  * zfs_vdev_async_write_max_active. The value is linearly interpolated
 159  * between min and max.
 160  */
 161 int zfs_vdev_async_write_active_min_dirty_percent = 30;
 162 int zfs_vdev_async_write_active_max_dirty_percent = 60;
 163 
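To make the interpolation concrete, here is a minimal user-space sketch of
the same calculation; the 4 GiB dirty-data cap below is an assumed sample
figure, not live pool state:

#include <stdio.h>
#include <stdint.h>

/* Assumed sample values mirroring the tunables above. */
static uint64_t dirty_data_max = 4ULL << 30;    /* zfs_dirty_data_max, 4 GiB */
static int min_dirty_percent = 30;
static int max_dirty_percent = 60;
static int min_active = 1;                      /* async write min */
static int max_active = 10;                     /* async write max */

/* Same linear interpolation as vdev_queue_max_async_writes() below. */
static int
async_writes_for(uint64_t dirty)
{
        uint64_t min_bytes = dirty_data_max * min_dirty_percent / 100;
        uint64_t max_bytes = dirty_data_max * max_dirty_percent / 100;

        if (dirty < min_bytes)
                return (min_active);
        if (dirty > max_bytes)
                return (max_active);
        return ((int)((dirty - min_bytes) * (max_active - min_active) /
            (max_bytes - min_bytes)) + min_active);
}

int
main(void)
{
        /* 45% dirty sits halfway between the 30% and 60% cutoffs. */
        printf("%d\n", async_writes_for(dirty_data_max * 45 / 100)); /* 5 */
        return (0);
}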
 164 /*
 165  * To reduce IOPs, we aggregate small adjacent I/Os into one large I/O.
 166  * For read I/Os, we also aggregate across small adjacency gaps; for writes
 167  * we include spans of optional I/Os to aid aggregation at the disk even when
 168  * they aren't able to help us aggregate at this level.
 169  */
 170 int zfs_vdev_aggregation_limit = SPA_OLD_MAXBLOCKSIZE;
 171 int zfs_vdev_read_gap_limit = 32 << 10;
 172 int zfs_vdev_write_gap_limit = 4 << 10;

 278 }
 279 
 280 void
 281 vdev_queue_fini(vdev_t *vd)
 282 {
 283         vdev_queue_t *vq = &vd->vdev_queue;
 284 
 285         for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++)
 286                 avl_destroy(vdev_queue_class_tree(vq, p));
 287         avl_destroy(&vq->vq_active_tree);
 288         avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_READ));
 289         avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE));
 290 
 291         mutex_destroy(&vq->vq_lock);
 292 }
 293 
 294 static void
 295 vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
 296 {
 297         spa_t *spa = zio->io_spa;
 298 
 299         ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
 300         avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
 301         avl_add(vdev_queue_type_tree(vq, zio->io_type), zio);
 302 
 303         mutex_enter(&spa->spa_iokstat_lock);
 304         spa->spa_queue_stats[zio->io_priority].spa_queued++;
 305         if (spa->spa_iokstat != NULL)
 306                 kstat_waitq_enter(spa->spa_iokstat->ks_data);
 307         mutex_exit(&spa->spa_iokstat_lock);
 308 }
 309 
 310 static void
 311 vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
 312 {
 313         spa_t *spa = zio->io_spa;
 314 
 315         ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
 316         avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
 317         avl_remove(vdev_queue_type_tree(vq, zio->io_type), zio);
 318 
 319         mutex_enter(&spa->spa_iokstat_lock);
 320         ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_queued, >, 0);
 321         spa->spa_queue_stats[zio->io_priority].spa_queued--;
 322         if (spa->spa_iokstat != NULL)
 323                 kstat_waitq_exit(spa->spa_iokstat->ks_data);
 324         mutex_exit(&spa->spa_iokstat_lock);
 325 }
 326 
 327 static void
 328 vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
 329 {
 330         spa_t *spa = zio->io_spa;
 331         ASSERT(MUTEX_HELD(&vq->vq_lock));
 332         ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
 333         vq->vq_class[zio->io_priority].vqc_active++;
 334         avl_add(&vq->vq_active_tree, zio);
 335 
 336         mutex_enter(&spa->spa_iokstat_lock);
 337         spa->spa_queue_stats[zio->io_priority].spa_active++;
 338         if (spa->spa_iokstat != NULL)
 339                 kstat_runq_enter(spa->spa_iokstat->ks_data);
 340         mutex_exit(&spa->spa_iokstat_lock);
 341 }
 342 
 343 static void
 344 vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
 345 {
 346         spa_t *spa = zio->io_spa;
 347         ASSERT(MUTEX_HELD(&vq->vq_lock));
 348         ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
 349         vq->vq_class[zio->io_priority].vqc_active--;
 350         avl_remove(&vq->vq_active_tree, zio);
 351 
 352         mutex_enter(&spa->spa_iokstat_lock);
 353         ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_active, >, 0);
 354         spa->spa_queue_stats[zio->io_priority].spa_active--;
 355         if (spa->spa_iokstat != NULL) {
 356                 kstat_io_t *ksio = spa->spa_iokstat->ks_data;
 357 
 358                 kstat_runq_exit(spa->spa_iokstat->ks_data);
 359                 if (zio->io_type == ZIO_TYPE_READ) {
 360                         ksio->reads++;
 361                         ksio->nread += zio->io_size;
 362                 } else if (zio->io_type == ZIO_TYPE_WRITE) {
 363                         ksio->writes++;
 364                         ksio->nwritten += zio->io_size;
 365                 }
 366         }
 367         mutex_exit(&spa->spa_iokstat_lock);
 368 }
 369 
 370 static void
 371 vdev_queue_agg_io_done(zio_t *aio)
 372 {
 373         if (aio->io_type == ZIO_TYPE_READ) {
 374                 zio_t *pio;
 375                 zio_link_t *zl = NULL;
 376                 while ((pio = zio_walk_parents(aio, &zl)) != NULL) {
 377                         abd_copy_off(pio->io_abd, aio->io_abd,
 378                             0, pio->io_offset - aio->io_offset, pio->io_size);
 379                 }
 380         }
 381 
 382         abd_free(aio->io_abd);
 383 }
 384 
 385 static int
 386 vdev_queue_class_min_active(zio_priority_t p)
 387 {
 388         switch (p) {
 389         case ZIO_PRIORITY_SYNC_READ:
 390                 return (zfs_vdev_sync_read_min_active);
 391         case ZIO_PRIORITY_SYNC_WRITE:
 392                 return (zfs_vdev_sync_write_min_active);
 393         case ZIO_PRIORITY_ASYNC_READ:
 394                 return (zfs_vdev_async_read_min_active);
 395         case ZIO_PRIORITY_ASYNC_WRITE:
 396                 return (zfs_vdev_async_write_min_active);
 397         case ZIO_PRIORITY_SCRUB:
 398                 return (zfs_vdev_scrub_min_active);
 399         case ZIO_PRIORITY_REMOVAL:
 400                 return (zfs_vdev_removal_min_active);
 401         default:
 402                 panic("invalid priority %u", p);
 403                 return (0);
 404         }
 405 }
 406 
 407 static int
 408 vdev_queue_max_async_writes(spa_t *spa)
 409 {
 410         int writes;
 411         uint64_t dirty = spa->spa_dsl_pool->dp_dirty_total;
 412         uint64_t min_bytes = zfs_dirty_data_max *
 413             zfs_vdev_async_write_active_min_dirty_percent / 100;
 414         uint64_t max_bytes = zfs_dirty_data_max *
 415             zfs_vdev_async_write_active_max_dirty_percent / 100;
 416 
 417         /*
 418          * Sync tasks correspond to interactive user actions. To reduce the
 419          * execution time of those actions we push data out as fast as possible.
 420          */
 421         if (spa_has_pending_synctask(spa)) {
 422                 return (zfs_vdev_async_write_max_active);
 423         }
 424 
 425         if (dirty < min_bytes)
 426                 return (zfs_vdev_async_write_min_active);
 427         if (dirty > max_bytes)
 428                 return (zfs_vdev_async_write_max_active);
 429 
 430         /*
 431          * linear interpolation:
 432          * slope = (max_writes - min_writes) / (max_bytes - min_bytes)
 433          * move right by min_bytes
 434          * move up by min_writes
 435          */
 436         writes = (dirty - min_bytes) *
 437             (zfs_vdev_async_write_max_active -
 438             zfs_vdev_async_write_min_active) /
 439             (max_bytes - min_bytes) +
 440             zfs_vdev_async_write_min_active;
 441         ASSERT3U(writes, >=, zfs_vdev_async_write_min_active);
 442         ASSERT3U(writes, <=, zfs_vdev_async_write_max_active);
 443         return (writes);
 444 }
 445 
 446 static int
 447 vdev_queue_class_max_active(spa_t *spa, zio_priority_t p)
 448 {
 449         switch (p) {
 450         case ZIO_PRIORITY_SYNC_READ:
 451                 return (zfs_vdev_sync_read_max_active);
 452         case ZIO_PRIORITY_SYNC_WRITE:
 453                 return (zfs_vdev_sync_write_max_active);
 454         case ZIO_PRIORITY_ASYNC_READ:
 455                 return (zfs_vdev_async_read_max_active);
 456         case ZIO_PRIORITY_ASYNC_WRITE:
 457                 return (vdev_queue_max_async_writes(spa));
 458         case ZIO_PRIORITY_SCRUB:
 459                 return (zfs_vdev_scrub_max_active);
 460         case ZIO_PRIORITY_REMOVAL:
 461                 return (zfs_vdev_removal_max_active);
 462         default:
 463                 panic("invalid priority %u", p);
 464                 return (0);
 465         }
 466 }
 467 
 468 /*
 469  * Return the i/o class to issue from, or ZIO_PRIORITY_NUM_QUEUEABLE if
 470  * there is no eligible class.
 471  */
 472 static zio_priority_t
 473 vdev_queue_class_to_issue(vdev_queue_t *vq)
 474 {
 475         spa_t *spa = vq->vq_vdev->vdev_spa;
 476         zio_priority_t p;
 477 
 478         if (avl_numnodes(&vq->vq_active_tree) >= zfs_vdev_max_active)
 479                 return (ZIO_PRIORITY_NUM_QUEUEABLE);
 480 
 481         /* find a queue that has not reached its minimum # outstanding i/os */
 482         for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
 483                 if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
 484                     vq->vq_class[p].vqc_active <
 485                     vdev_queue_class_min_active(p))
 486                         return (p);
 487         }
 488 
 489         /*
 490          * If we haven't found a queue, look for one that hasn't reached its
 491          * maximum # outstanding i/os.
 492          */
 493         for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
 494                 if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
 495                     vq->vq_class[p].vqc_active <
 496                     vdev_queue_class_max_active(spa, p))
 497                         return (p);
 498         }
 499 
 500         /* No eligible queued i/os */
 501         return (ZIO_PRIORITY_NUM_QUEUEABLE);
 502 }
 503 
 504 /*
 505  * Compute the range spanned by two i/os, which is the endpoint of the last
 506  * (lio->io_offset + lio->io_size) minus start of the first (fio->io_offset).
 507  * Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
 508  * thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
 509  */
 510 #define IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
 511 #define IO_GAP(fio, lio) (-IO_SPAN(lio, fio))
 512 
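A worked example of these macros, with offsets chosen purely for
illustration: a first I/O at offset 0 of size 8 KiB and a second at offset
12 KiB of size 4 KiB give IO_SPAN = 16 KiB and IO_GAP = 4 KiB; for reads,
that 4 KiB hole is within zfs_vdev_read_gap_limit (32 KiB above), so the
pair would still be an aggregation candidate. A self-contained sketch:

#include <stdio.h>
#include <stdint.h>

/* Minimal stand-in for zio_t carrying only the fields the macros use. */
typedef struct { uint64_t io_offset, io_size; } io_t;

#define IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
#define IO_GAP(fio, lio) (-IO_SPAN(lio, fio))

int
main(void)
{
        io_t fio = { .io_offset = 0, .io_size = 8 << 10 };
        io_t lio = { .io_offset = 12 << 10, .io_size = 4 << 10 };

        /* span = 12K + 4K - 0 = 16K; gap = 12K - (0 + 8K) = 4K */
        printf("span=%llu gap=%llu\n",
            (unsigned long long)IO_SPAN(&fio, &lio),
            (unsigned long long)IO_GAP(&fio, &lio));
        return (0);
}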
 513 static zio_t *
 514 vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
 515 {
 516         zio_t *first, *last, *aio, *dio, *mandatory, *nio;

 533          * the same flavor, as expressed by the AGG_INHERIT flags.
 534          * The latter requirement is necessary so that certain
 535          * attributes of the I/O, such as whether it's a normal I/O
 536          * or a scrub/resilver, can be preserved in the aggregate.
 537          * We can include optional I/Os, but don't allow them
 538          * to begin a range as they add no benefit in that situation.
 539          */
 540 
 541         /*
 542          * We keep track of the last non-optional I/O.
 543          */
 544         mandatory = (first->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : first;
 545 
 546         /*
 547          * Walk backwards through sufficiently contiguous I/Os
 548          * recording the last non-optional I/O.
 549          */
 550         while ((dio = AVL_PREV(t, first)) != NULL &&
 551             (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
 552             IO_SPAN(dio, last) <= zfs_vdev_aggregation_limit &&
 553             IO_GAP(dio, first) <= maxgap &&
 554             dio->io_type == zio->io_type) {
 555                 first = dio;
 556                 if (mandatory == NULL && !(first->io_flags & ZIO_FLAG_OPTIONAL))
 557                         mandatory = first;
 558         }
 559 
 560         /*
 561          * Skip any initial optional I/Os.
 562          */
 563         while ((first->io_flags & ZIO_FLAG_OPTIONAL) && first != last) {
 564                 first = AVL_NEXT(t, first);
 565                 ASSERT(first != NULL);
 566         }
 567 
 568         /*
 569          * Walk forward through sufficiently contiguous I/Os.
 570          * The aggregation limit does not apply to optional i/os, so that
 571          * we can issue contiguous writes even if they are larger than the
 572          * aggregation limit.
 573          */
 574         while ((dio = AVL_NEXT(t, last)) != NULL &&
 575             (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
 576             (IO_SPAN(first, dio) <= zfs_vdev_aggregation_limit ||
 577             (dio->io_flags & ZIO_FLAG_OPTIONAL)) &&
 578             IO_GAP(last, dio) <= maxgap &&
 579             dio->io_type == zio->io_type) {
 580                 last = dio;
 581                 if (!(last->io_flags & ZIO_FLAG_OPTIONAL))
 582                         mandatory = last;
 583         }
 584 
 585         /*
 586          * Now that we've established the range of the I/O aggregation
 587          * we must decide what to do with trailing optional I/Os.
 588          * For reads, there's nothing to do. While we are unable to
 589          * aggregate further, it's possible that a trailing optional
 590          * I/O would allow the underlying device to aggregate with
 591          * subsequent I/Os. We must therefore determine if the next
 592          * non-optional I/O is close enough to make aggregation
 593          * worthwhile.
 594          */
 595         if (zio->io_type == ZIO_TYPE_WRITE && mandatory != NULL) {
 596                 zio_t *nio = last;
 597                 while ((dio = AVL_NEXT(t, nio)) != NULL &&
 598                     IO_GAP(nio, dio) == 0 &&
 599                     IO_GAP(mandatory, dio) <= zfs_vdev_write_gap_limit) {

 718 
 719         return (zio);
 720 }
 721 
 722 zio_t *
 723 vdev_queue_io(zio_t *zio)
 724 {
 725         vdev_queue_t *vq = &zio->io_vd->vdev_queue;
 726         zio_t *nio;
 727 
 728         if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
 729                 return (zio);
 730 
 731         /*
 732          * Children i/os inherit their parent's priority, which might
 733          * not match the child's i/o type.  Fix it up here.
 734          */
 735         if (zio->io_type == ZIO_TYPE_READ) {
 736                 if (zio->io_priority != ZIO_PRIORITY_SYNC_READ &&
 737                     zio->io_priority != ZIO_PRIORITY_ASYNC_READ &&
 738                     zio->io_priority != ZIO_PRIORITY_SCRUB &&
 739                     zio->io_priority != ZIO_PRIORITY_REMOVAL)
 740                         zio->io_priority = ZIO_PRIORITY_ASYNC_READ;
 741         } else {
 742                 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
 743                 if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
 744                     zio->io_priority != ZIO_PRIORITY_ASYNC_WRITE &&
 745                     zio->io_priority != ZIO_PRIORITY_REMOVAL)
 746                         zio->io_priority = ZIO_PRIORITY_ASYNC_WRITE;
 747         }
 748 
 749         zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;
 750 
 751         mutex_enter(&vq->vq_lock);
 752         zio->io_timestamp = gethrtime();
 753         vdev_queue_io_add(vq, zio);
 754         nio = vdev_queue_io_to_issue(vq);
 755         mutex_exit(&vq->vq_lock);
 756 
 757         if (nio == NULL)
 758                 return (NULL);
 759 
 760         if (nio->io_done == vdev_queue_agg_io_done) {
 761                 zio_nowait(nio);
 762                 return (NULL);
 763         }
 764 
 765         return (nio);

 772         zio_t *nio;
 773 
 774         mutex_enter(&vq->vq_lock);
 775 
 776         vdev_queue_pending_remove(vq, zio);
 777 
 778         vq->vq_io_complete_ts = gethrtime();
 779 
 780         while ((nio = vdev_queue_io_to_issue(vq)) != NULL) {
 781                 mutex_exit(&vq->vq_lock);
 782                 if (nio->io_done == vdev_queue_agg_io_done) {
 783                         zio_nowait(nio);
 784                 } else {
 785                         zio_vdev_io_reissue(nio);
 786                         zio_execute(nio);
 787                 }
 788                 mutex_enter(&vq->vq_lock);
 789         }
 790 
 791         mutex_exit(&vq->vq_lock);
 792 }


   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  23  * Use is subject to license terms.
  24  * Copyright 2018 Nexenta Systems, Inc. All rights reserved.
  25  */
  26 
  27 /*
  28  * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
  29  * Copyright (c) 2014 Integros [integros.com]
  30  */
  31 
  32 #include <sys/zfs_context.h>
  33 #include <sys/vdev_impl.h>
  34 #include <sys/cos.h>
  35 #include <sys/spa_impl.h>
  36 #include <sys/zio.h>
  37 #include <sys/avl.h>
  38 #include <sys/dsl_pool.h>
  39 #include <sys/metaslab_impl.h>
  40 #include <sys/abd.h>
  41 
  42 /*
  43  * ZFS I/O Scheduler
  44  * ---------------
  45  *
  46  * ZFS issues I/O operations to leaf vdevs to satisfy and complete zios.  The
  47  * I/O scheduler determines when and in what order those operations are
  48  * issued.  The I/O scheduler divides operations into six I/O classes
  49  * prioritized in the following order: sync read, sync write, async read,
  50  * async write, resilver, and scrub.  Each queue defines the minimum and
  51  * maximum number of concurrent operations that may be issued to the device.
  52  * In addition, the device has an aggregate maximum. Note that the sum of the
  53  * per-queue minimums must not exceed the aggregate maximum, and if the
  54  * aggregate maximum is equal to or greater than the sum of the per-queue

 131  * and then select from queues in the order defined by zio_priority_t.
 132  *
 133  * In general, smaller max_active's will lead to lower latency of synchronous
 134  * operations.  Larger max_active's may lead to higher overall throughput,
 135  * depending on underlying storage.
 136  *
 137  * The ratio of the queues' max_actives determines the balance of performance
 138  * between reads, writes, and scrubs.  E.g., increasing
 139  * zfs_vdev_scrub_max_active will cause the scrub or resilver to complete
 140  * more quickly, but reads and writes to have higher latency and lower
 141  * throughput.
 142  */
 143 uint32_t zfs_vdev_sync_read_min_active = 10;
 144 uint32_t zfs_vdev_sync_read_max_active = 10;
 145 uint32_t zfs_vdev_sync_write_min_active = 10;
 146 uint32_t zfs_vdev_sync_write_max_active = 10;
 147 uint32_t zfs_vdev_async_read_min_active = 1;
 148 uint32_t zfs_vdev_async_read_max_active = 3;
 149 uint32_t zfs_vdev_async_write_min_active = 1;
 150 uint32_t zfs_vdev_async_write_max_active = 10;
 151 uint32_t zfs_vdev_resilver_min_active = 1;
 152 uint32_t zfs_vdev_resilver_max_active = 3;
 153 uint32_t zfs_vdev_scrub_min_active = 1;
 154 uint32_t zfs_vdev_scrub_max_active = 2;
 155 
 156 /*
 157  * When the pool has less than zfs_vdev_async_write_active_min_dirty_percent
 158  * dirty data, use zfs_vdev_async_write_min_active.  When it has more than
 159  * zfs_vdev_async_write_active_max_dirty_percent, use
 160  * zfs_vdev_async_write_max_active. The value is linearly interpolated
 161  * between min and max.
 162  */
 163 int zfs_vdev_async_write_active_min_dirty_percent = 30;
 164 int zfs_vdev_async_write_active_max_dirty_percent = 60;
 165 
 166 /*
 167  * To reduce IOPs, we aggregate small adjacent I/Os into one large I/O.
 168  * For read I/Os, we also aggregate across small adjacency gaps; for writes
 169  * we include spans of optional I/Os to aid aggregation at the disk even when
 170  * they aren't able to help us aggregate at this level.
 171  */
 172 int zfs_vdev_aggregation_limit = SPA_OLD_MAXBLOCKSIZE;
 173 int zfs_vdev_read_gap_limit = 32 << 10;
 174 int zfs_vdev_write_gap_limit = 4 << 10;

 280 }
 281 
 282 void
 283 vdev_queue_fini(vdev_t *vd)
 284 {
 285         vdev_queue_t *vq = &vd->vdev_queue;
 286 
 287         for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++)
 288                 avl_destroy(vdev_queue_class_tree(vq, p));
 289         avl_destroy(&vq->vq_active_tree);
 290         avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_READ));
 291         avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE));
 292 
 293         mutex_destroy(&vq->vq_lock);
 294 }
 295 
 296 static void
 297 vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
 298 {
 299         spa_t *spa = zio->io_spa;
 300         hrtime_t t = gethrtime_unscaled();
 301 
 302         ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
 303         avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
 304         avl_add(vdev_queue_type_tree(vq, zio->io_type), zio);
 305 
 306         atomic_inc_64(&spa->spa_queue_stats[zio->io_priority].spa_queued);
 307         mutex_enter(&spa->spa_iokstat_lock);
 308         if (spa->spa_iokstat != NULL)
 309                 kstat_waitq_enter_time(spa->spa_iokstat->ks_data, t);
 310         if (vq->vq_vdev->vdev_iokstat != NULL)
 311                 kstat_waitq_enter_time(vq->vq_vdev->vdev_iokstat->ks_data, t);
 312         mutex_exit(&spa->spa_iokstat_lock);
 313 }
 314 
 315 static void
 316 vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
 317 {
 318         spa_t *spa = zio->io_spa;
 319         hrtime_t t = gethrtime_unscaled();
 320 
 321         ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
 322         avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
 323         avl_remove(vdev_queue_type_tree(vq, zio->io_type), zio);
 324 
 325         ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_queued, >, 0);
 326         atomic_dec_64(&spa->spa_queue_stats[zio->io_priority].spa_queued);
 327 
 328         mutex_enter(&spa->spa_iokstat_lock);
 329         if (spa->spa_iokstat != NULL)
 330                 kstat_waitq_exit_time(spa->spa_iokstat->ks_data, t);
 331         if (vq->vq_vdev->vdev_iokstat != NULL)
 332                 kstat_waitq_exit_time(vq->vq_vdev->vdev_iokstat->ks_data, t);
 333         mutex_exit(&spa->spa_iokstat_lock);
 334 }
 335 
 336 static void
 337 vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
 338 {
 339         spa_t *spa = zio->io_spa;
 340         hrtime_t t = gethrtime_unscaled();
 341 
 342         ASSERT(MUTEX_HELD(&vq->vq_lock));
 343         ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
 344         vq->vq_class[zio->io_priority].vqc_active++;
 345         avl_add(&vq->vq_active_tree, zio);
 346 
 347         atomic_inc_64(&spa->spa_queue_stats[zio->io_priority].spa_active);
 348         mutex_enter(&spa->spa_iokstat_lock);
 349         if (spa->spa_iokstat != NULL)
 350                 kstat_runq_enter_time(spa->spa_iokstat->ks_data, t);
 351         if (vq->vq_vdev->vdev_iokstat != NULL)
 352                 kstat_runq_enter_time(vq->vq_vdev->vdev_iokstat->ks_data, t);
 353         mutex_exit(&spa->spa_iokstat_lock);
 354 }
 355 
 356 static void
 357 vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
 358 {
 359         spa_t *spa = zio->io_spa;
 360         hrtime_t t = gethrtime_unscaled();
 361 
 362         ASSERT(MUTEX_HELD(&vq->vq_lock));
 363         ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
 364         vq->vq_class[zio->io_priority].vqc_active--;
 365         avl_remove(&vq->vq_active_tree, zio);
 366 
 367         ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_active, >, 0);
 368         atomic_dec_64(&spa->spa_queue_stats[zio->io_priority].spa_active);
 369 
 370         mutex_enter(&spa->spa_iokstat_lock);
 371         if (spa->spa_iokstat != NULL) {
 372                 kstat_io_t *ksio = spa->spa_iokstat->ks_data;
 373 
 374                 kstat_runq_exit_time(spa->spa_iokstat->ks_data, t);
 375                 if (zio->io_type == ZIO_TYPE_READ) {
 376                         ksio->reads++;
 377                         ksio->nread += zio->io_size;
 378                 } else if (zio->io_type == ZIO_TYPE_WRITE) {
 379                         ksio->writes++;
 380                         ksio->nwritten += zio->io_size;
 381                 }
 382         }
 383 
 384         if (vq->vq_vdev->vdev_iokstat != NULL) {
 385                 kstat_io_t *ksio = vq->vq_vdev->vdev_iokstat->ks_data;
 386 
 387                 kstat_runq_exit_time(ksio, t);
 388                 if (zio->io_type == ZIO_TYPE_READ) {
 389                         ksio->reads++;
 390                         ksio->nread += zio->io_size;
 391                 } else if (zio->io_type == ZIO_TYPE_WRITE) {
 392                         ksio->writes++;
 393                         ksio->nwritten += zio->io_size;
 394                 }
 395         }
 396         mutex_exit(&spa->spa_iokstat_lock);
 397 }
 398 
 399 static void
 400 vdev_queue_agg_io_done(zio_t *aio)
 401 {
 402         if (aio->io_type == ZIO_TYPE_READ) {
 403                 zio_t *pio;
 404                 zio_link_t *zl = NULL;
 405                 while ((pio = zio_walk_parents(aio, &zl)) != NULL) {
 406                         abd_copy_off(pio->io_abd, aio->io_abd,
 407                             0, pio->io_offset - aio->io_offset, pio->io_size);
 408                 }
 409         }
 410 
 411         abd_free(aio->io_abd);
 412 }
 413 
 414 static uint64_t
 415 scan_prio2active(uint64_t prio, boolean_t max_active)
 416 {
 417         uint64_t act, act_max;
 418 
 419         if (max_active) {
 420                 act_max = MAX(MAX(zfs_vdev_sync_read_max_active,
 421                     zfs_vdev_sync_write_max_active),
 422                     MAX(zfs_vdev_async_read_max_active,
 423                     zfs_vdev_async_write_max_active));
 424                 act = ((prio * (zfs_vdev_sync_read_max_active +
 425                     zfs_vdev_sync_write_max_active +
 426                     zfs_vdev_async_read_max_active +
 427                     zfs_vdev_async_write_max_active)) / 100);
 428         } else {
 429                 act_max = MAX(MAX(zfs_vdev_sync_read_min_active,
 430                     zfs_vdev_sync_write_min_active),
 431                     MAX(zfs_vdev_async_read_min_active,
 432                     zfs_vdev_async_write_min_active));
 433                 act = ((prio * (zfs_vdev_sync_read_min_active +
 434                     zfs_vdev_sync_write_min_active +
 435                     zfs_vdev_async_read_min_active +
 436                     zfs_vdev_async_write_min_active)) / 100);
 437         }
 438         act = MAX(MIN(act, act_max), 1);
 439 
 440         return (act);
 441 }
 442 
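A worked example of scan_prio2active() with the default tunables from this
file: for prio = 50 and max_active = B_TRUE, act = 50 * (10 + 10 + 3 + 10)
/ 100 = 16, which is then clamped to act_max = MAX(10, 10, 3, 10) = 10. The
standalone sketch below reproduces just that clamp (defaults copied from
above; illustration only):

#include <stdio.h>
#include <stdint.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Default *_max_active tunables from this file. */
static uint64_t sr = 10, sw = 10, ar = 3, aw = 10;

/* Same shape as the max_active branch of scan_prio2active(). */
static uint64_t
prio2active(uint64_t prio)
{
        uint64_t act_max = MAX(MAX(sr, sw), MAX(ar, aw));
        uint64_t act = prio * (sr + sw + ar + aw) / 100;

        return (MAX(MIN(act, act_max), 1));
}

int
main(void)
{
        printf("%llu\n", (unsigned long long)prio2active(50));  /* 10 */
        printf("%llu\n", (unsigned long long)prio2active(10));  /* 3 */
        return (0);
}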
 443 static int
 444 vdev_queue_class_min_active(zio_priority_t p, vdev_queue_t *vq)
 445 {
 446         int zfs_min_active = 0;
 447         int vqc_min_active;
 448         vdev_prop_t prop = VDEV_ZIO_PRIO_TO_PROP_MIN(p);
 449 
 450         ASSERT(VDEV_PROP_MIN_VALID(prop));
 451         vqc_min_active = vdev_queue_get_prop_uint64(vq, prop);
 452 
 453         switch (p) {
 454         case ZIO_PRIORITY_SYNC_READ:
 455                 zfs_min_active = zfs_vdev_sync_read_min_active;
 456                 break;
 457         case ZIO_PRIORITY_SYNC_WRITE:
 458                 zfs_min_active = zfs_vdev_sync_write_min_active;
 459                 break;
 460         case ZIO_PRIORITY_ASYNC_READ:
 461                 zfs_min_active = zfs_vdev_async_read_min_active;
 462                 break;
 463         case ZIO_PRIORITY_ASYNC_WRITE:
 464                 zfs_min_active = zfs_vdev_async_write_min_active;
 465                 break;
 466         case ZIO_PRIORITY_RESILVER: {
 467                 uint64_t prio = vq->vq_vdev->vdev_spa->spa_resilver_prio;
 468                 if (prio > 0)
 469                         zfs_min_active = scan_prio2active(prio, B_FALSE);
 470                 else
 471                         zfs_min_active = zfs_vdev_resilver_min_active;
 472                 break;
 473         }
 474         case ZIO_PRIORITY_SCRUB: {
 475                 uint64_t prio = vq->vq_vdev->vdev_spa->spa_scrub_prio;
 476                 if (prio > 0)
 477                         zfs_min_active = scan_prio2active(prio, B_FALSE);
 478                 else
 479                         zfs_min_active = zfs_vdev_scrub_min_active;
 480                 break;
 481         }
 482         default:
 483                 panic("invalid priority %u", p);
 484                 return (0);
 485         }
 486 
 487         /* zero vdev-specific setting means "use zfs global setting" */
 488         return ((vqc_min_active) ? vqc_min_active : zfs_min_active);
 489 }
 490 
 491 static int
 492 vdev_queue_max_async_writes(spa_t *spa, vdev_queue_t *vq)
 493 {
 494         int writes;
 495         uint64_t dirty = spa->spa_dsl_pool->dp_dirty_total;
 496         uint64_t min_bytes = zfs_dirty_data_max *
 497             zfs_vdev_async_write_active_min_dirty_percent / 100;
 498         uint64_t max_bytes = zfs_dirty_data_max *
 499             zfs_vdev_async_write_active_max_dirty_percent / 100;
 500 
 501         /*
 502          * vdev-specific properties override global tunables
 503          * zero vdev-specific settings indicate fallback on the globals
 504          */
 505         int vqc_min_active =
 506             vdev_queue_get_prop_uint64(vq, VDEV_PROP_AWRITE_MINACTIVE);
 507         int min_active =
 508             (vqc_min_active) ? vqc_min_active : zfs_vdev_async_write_min_active;
 509         int vqc_max_active =
 510             vdev_queue_get_prop_uint64(vq, VDEV_PROP_AWRITE_MAXACTIVE);
 511         int max_active =
 512             (vqc_max_active) ? vqc_max_active : zfs_vdev_async_write_max_active;
 513 
 514         /*
 515          * Sync tasks correspond to interactive user actions. To reduce the
 516          * execution time of those actions we push data out as fast as possible.
 517          */
 518         if (spa_has_pending_synctask(spa)) {
 519                 return (zfs_vdev_async_write_max_active);
 520         }
 521 
 522         if (dirty < min_bytes)
 523                 return (min_active);
 524         if (dirty > max_bytes)
 525                 return (max_active);
 526 
 527         /*
 528          * linear interpolation:
 529          * slope = (max_writes - min_writes) / (max_bytes - min_bytes)
 530          * move right by min_bytes
 531          * move up by min_writes
 532          */
 533         writes = (dirty - min_bytes) * (max_active - min_active) /
 534             (max_bytes - min_bytes) + min_active;
 535         ASSERT3U(writes, >=, min_active);
 536         ASSERT3U(writes, <=, max_active);
 537         return (writes);
 538 }
 539 
 540 static int
 541 vdev_queue_class_max_active(spa_t *spa, zio_priority_t p, vdev_queue_t *vq)
 542 {
 543         int zfs_max_active = 0;
 544         int vqc_max_active;
 545         vdev_prop_t prop = VDEV_ZIO_PRIO_TO_PROP_MAX(p);
 546 
 547         ASSERT(VDEV_PROP_MAX_VALID(prop));
 548         vqc_max_active = vdev_queue_get_prop_uint64(vq, prop);
 549 
 550         switch (p) {
 551         case ZIO_PRIORITY_SYNC_READ:
 552                 zfs_max_active = zfs_vdev_sync_read_max_active;
 553                 break;
 554         case ZIO_PRIORITY_SYNC_WRITE:
 555                 zfs_max_active = zfs_vdev_sync_write_max_active;
 556                 break;
 557         case ZIO_PRIORITY_ASYNC_READ:
 558                 zfs_max_active = zfs_vdev_async_read_max_active;
 559                 break;
 560         case ZIO_PRIORITY_ASYNC_WRITE:
 561                 /* takes into account vdev-specific props internally */
 562                 vqc_max_active = vdev_queue_max_async_writes(spa, vq);
 563                 ASSERT(vqc_max_active);
 564                 break;
 565         case ZIO_PRIORITY_RESILVER: {
 566                 uint64_t prio = vq->vq_vdev->vdev_spa->spa_resilver_prio;
 567                 if (prio > 0)
 568                         zfs_max_active = scan_prio2active(prio, B_TRUE);
 569                 else
 570                         zfs_max_active = zfs_vdev_resilver_max_active;
 571                 break;
 572         }
 573         case ZIO_PRIORITY_SCRUB: {
 574                 uint64_t prio = vq->vq_vdev->vdev_spa->spa_scrub_prio;
 575                 if (prio > 0)
 576                         zfs_max_active = scan_prio2active(prio, B_TRUE);
 577                 else
 578                         zfs_max_active = zfs_vdev_scrub_max_active;
 579                 break;
 580         }
 581         default:
 582                 panic("invalid priority %u", p);
 583                 return (0);
 584         }
 585 
 586         /* zero vdev-specific setting means "use zfs global setting" */
 587         return ((vqc_max_active) ? vqc_max_active : zfs_max_active);
 588 }
 589 
 590 /*
 591  * Return the i/o class to issue from, or ZIO_PRIORITY_NUM_QUEUEABLE if
 592  * there is no eligible class.
 593  */
 594 static zio_priority_t
 595 vdev_queue_class_to_issue(vdev_queue_t *vq)
 596 {
 597         spa_t *spa = vq->vq_vdev->vdev_spa;
 598         zio_priority_t p;
 599 
 600         if (avl_numnodes(&vq->vq_active_tree) >= zfs_vdev_max_active)
 601                 return (ZIO_PRIORITY_NUM_QUEUEABLE);
 602 
 603         /* find a queue that has not reached its minimum # outstanding i/os */
 604         for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
 605                 if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
 606                     vq->vq_class[p].vqc_active <
 607                     vdev_queue_class_min_active(p, vq))
 608                         return (p);
 609         }
 610 
 611         /*
 612          * If we haven't found a queue, look for one that hasn't reached its
 613          * maximum # outstanding i/os.
 614          */
 615         for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
 616                 if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
 617                     vq->vq_class[p].vqc_active <
 618                     vdev_queue_class_max_active(spa, p, vq))
 619                         return (p);
 620         }
 621 
 622         /* No eligible queued i/os */
 623         return (ZIO_PRIORITY_NUM_QUEUEABLE);
 624 }
 625 
 626 /*
 627  * Compute the range spanned by two i/os, which is the endpoint of the last
 628  * (lio->io_offset + lio->io_size) minus start of the first (fio->io_offset).
 629  * Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
 630  * thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
 631  */
 632 #define IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
 633 #define IO_GAP(fio, lio) (-IO_SPAN(lio, fio))
 634 
 635 static zio_t *
 636 vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
 637 {
 638         zio_t *first, *last, *aio, *dio, *mandatory, *nio;

 655          * the same flavor, as expressed by the AGG_INHERIT flags.
 656          * The latter requirement is necessary so that certain
 657          * attributes of the I/O, such as whether it's a normal I/O
 658          * or a scrub/resilver, can be preserved in the aggregate.
 659          * We can include optional I/Os, but don't allow them
 660          * to begin a range as they add no benefit in that situation.
 661          */
 662 
 663         /*
 664          * We keep track of the last non-optional I/O.
 665          */
 666         mandatory = (first->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : first;
 667 
 668         /*
 669          * Walk backwards through sufficiently contiguous I/Os
 670          * recording the last non-optional I/O.
 671          */
 672         while ((dio = AVL_PREV(t, first)) != NULL &&
 673             (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
 674             IO_SPAN(dio, last) <= zfs_vdev_aggregation_limit &&
 675             IO_GAP(dio, first) <= maxgap) {
 676                 first = dio;
 677                 if (mandatory == NULL && !(first->io_flags & ZIO_FLAG_OPTIONAL))
 678                         mandatory = first;
 679         }
 680 
 681         /*
 682          * Skip any initial optional I/Os.
 683          */
 684         while ((first->io_flags & ZIO_FLAG_OPTIONAL) && first != last) {
 685                 first = AVL_NEXT(t, first);
 686                 ASSERT(first != NULL);
 687         }
 688 
 689         /*
 690          * Walk forward through sufficiently contiguous I/Os.
 691          * The aggregation limit does not apply to optional i/os, so that
 692          * we can issue contiguous writes even if they are larger than the
 693          * aggregation limit.
 694          */
 695         while ((dio = AVL_NEXT(t, last)) != NULL &&
 696             (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
 697             (IO_SPAN(first, dio) <= zfs_vdev_aggregation_limit ||
 698             (dio->io_flags & ZIO_FLAG_OPTIONAL)) &&
 699             IO_GAP(last, dio) <= maxgap) {
 700                 last = dio;
 701                 if (!(last->io_flags & ZIO_FLAG_OPTIONAL))
 702                         mandatory = last;
 703         }
 704 
 705         /*
 706          * Now that we've established the range of the I/O aggregation
 707          * we must decide what to do with trailing optional I/Os.
 708          * For reads, there's nothing to do. While we are unable to
 709          * aggregate further, it's possible that a trailing optional
 710          * I/O would allow the underlying device to aggregate with
 711          * subsequent I/Os. We must therefore determine if the next
 712          * non-optional I/O is close enough to make aggregation
 713          * worthwhile.
 714          */
 715         if (zio->io_type == ZIO_TYPE_WRITE && mandatory != NULL) {
 716                 zio_t *nio = last;
 717                 while ((dio = AVL_NEXT(t, nio)) != NULL &&
 718                     IO_GAP(nio, dio) == 0 &&
 719                     IO_GAP(mandatory, dio) <= zfs_vdev_write_gap_limit) {

 838 
 839         return (zio);
 840 }
 841 
 842 zio_t *
 843 vdev_queue_io(zio_t *zio)
 844 {
 845         vdev_queue_t *vq = &zio->io_vd->vdev_queue;
 846         zio_t *nio;
 847 
 848         if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
 849                 return (zio);
 850 
 851         /*
 852          * Children i/os inherit their parent's priority, which might
 853          * not match the child's i/o type.  Fix it up here.
 854          */
 855         if (zio->io_type == ZIO_TYPE_READ) {
 856                 if (zio->io_priority != ZIO_PRIORITY_SYNC_READ &&
 857                     zio->io_priority != ZIO_PRIORITY_ASYNC_READ &&
 858                     zio->io_priority != ZIO_PRIORITY_SCRUB)
 859                         zio->io_priority = ZIO_PRIORITY_ASYNC_READ;
 860         } else {
 861                 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
 862                 if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
 863                     zio->io_priority != ZIO_PRIORITY_ASYNC_WRITE)
 864                         zio->io_priority = ZIO_PRIORITY_ASYNC_WRITE;
 865         }
 866 
 867         zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;
 868 
 869         mutex_enter(&vq->vq_lock);
 870         zio->io_timestamp = gethrtime();
 871         vdev_queue_io_add(vq, zio);
 872         nio = vdev_queue_io_to_issue(vq);
 873         mutex_exit(&vq->vq_lock);
 874 
 875         if (nio == NULL)
 876                 return (NULL);
 877 
 878         if (nio->io_done == vdev_queue_agg_io_done) {
 879                 zio_nowait(nio);
 880                 return (NULL);
 881         }
 882 
 883         return (nio);

 890         zio_t *nio;
 891 
 892         mutex_enter(&vq->vq_lock);
 893 
 894         vdev_queue_pending_remove(vq, zio);
 895 
 896         vq->vq_io_complete_ts = gethrtime();
 897 
 898         while ((nio = vdev_queue_io_to_issue(vq)) != NULL) {
 899                 mutex_exit(&vq->vq_lock);
 900                 if (nio->io_done == vdev_queue_agg_io_done) {
 901                         zio_nowait(nio);
 902                 } else {
 903                         zio_vdev_io_reissue(nio);
 904                         zio_execute(nio);
 905                 }
 906                 mutex_enter(&vq->vq_lock);
 907         }
 908 
 909         mutex_exit(&vq->vq_lock);
 910 }
 911 
 912 uint64_t
 913 vdev_queue_get_prop_uint64(vdev_queue_t *vq, vdev_prop_t p)
 914 {
 915         uint64_t val = 0;
 916         int zprio = 0;
 917         cos_t *cos = vq->vq_cos;
 918 
 919         switch (p) {
 920         case VDEV_PROP_READ_MINACTIVE:
 921         case VDEV_PROP_AREAD_MINACTIVE:
 922         case VDEV_PROP_WRITE_MINACTIVE:
 923         case VDEV_PROP_AWRITE_MINACTIVE:
 924         case VDEV_PROP_SCRUB_MINACTIVE:
 925         case VDEV_PROP_RESILVER_MINACTIVE:
 926                 zprio = VDEV_PROP_TO_ZIO_PRIO_MIN(p);
 927                 ASSERT(ZIO_PRIORITY_QUEUEABLE_VALID(zprio));
 928                 if (vq->vq_cos != NULL) {
 929                         cos_prop_t p = COS_ZIO_PRIO_TO_PROP_MIN(zprio);
 930                         ASSERT(COS_PROP_MIN_VALID(p));
 931                         val = cos_get_prop_uint64(vq->vq_cos, p);
 932                 }
 933                 if (val == 0)
 934                         val = vq->vq_class[zprio].vqc_min_active;
 935                 break;
 936         case VDEV_PROP_READ_MAXACTIVE:
 937         case VDEV_PROP_AREAD_MAXACTIVE:
 938         case VDEV_PROP_WRITE_MAXACTIVE:
 939         case VDEV_PROP_AWRITE_MAXACTIVE:
 940         case VDEV_PROP_SCRUB_MAXACTIVE:
 941         case VDEV_PROP_RESILVER_MAXACTIVE:
 942                 zprio = VDEV_PROP_TO_ZIO_PRIO_MAX(p);
 943                 ASSERT(ZIO_PRIORITY_QUEUEABLE_VALID(zprio));
 944                 if (vq->vq_cos != NULL) {
 945                         cos_prop_t p = COS_ZIO_PRIO_TO_PROP_MAX(zprio);
 946                         ASSERT(COS_PROP_MAX_VALID(p));
 947                         val = cos_get_prop_uint64(vq->vq_cos, p);
 948                 }
 949                 if (val == 0)
 950                         val = vq->vq_class[zprio].vqc_max_active;
 951                 break;
 952         case VDEV_PROP_PREFERRED_READ:
 953                 if (vq->vq_cos != NULL)
 954                         val = cos_get_prop_uint64(vq->vq_cos,
 955                             COS_PROP_PREFERRED_READ);
 956                 if (val == 0)
 957                         val = vq->vq_preferred_read;
 958                 break;
 959         default:
 960                 panic("Non-numeric property requested\n");
 961                 return (0);
 962         }
 963 
 964         VERIFY(cos == vq->vq_cos);
 965 
 966         return (val);
 967 }
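Throughout vdev_queue_get_prop_uint64() a value of zero means "fall through
to the next level": a nonzero CoS-level property wins, otherwise the
per-queue value applies. A reduced sketch of that convention;
resolve_uint64() is an illustrative name, not a function from this source:

#include <stdint.h>

/*
 * Illustration only: the zero-means-fallback convention used by
 * vdev_queue_get_prop_uint64() above, reduced to its essentials.
 */
static uint64_t
resolve_uint64(uint64_t cos_val, uint64_t queue_val)
{
        /* A CoS-level setting of 0 defers to the per-queue value. */
        return (cos_val != 0 ? cos_val : queue_val);
}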