/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/zfs_context.h>
#include <sys/vdev_impl.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/avl.h>
#include <sys/dsl_pool.h>
#include <sys/metaslab_impl.h>
#include <sys/abd.h>

/*
 * ZFS I/O Scheduler
 * -----------------
 *
 * ZFS issues I/O operations to leaf vdevs to satisfy and complete zios.  The
 * I/O scheduler determines when and in what order those operations are
 * issued.  The I/O scheduler divides operations into six I/O classes
 * prioritized in the following order: sync read, sync write, async read,
 * async write, scrub/resilver and removal.  Each queue defines the minimum
 * and maximum number of concurrent operations that may be issued to the
 * device.  In addition, the device has an aggregate maximum.  Note that the
 * sum of the per-queue minimums must not exceed the aggregate maximum, and
 * if the aggregate maximum is equal to or greater than the sum of the
 * per-queue maximums, the per-queue minimum has no effect.
 *
 * For many physical devices, throughput increases with the number of
 * concurrent operations, but latency typically suffers. Further, physical
 * devices typically have a limit at which more concurrent operations have no
 * effect on throughput or can actually cause it to decrease.
 *
 * The scheduler selects the next operation to issue by first looking for an
 * I/O class whose minimum has not been satisfied. Once all are satisfied and
 * the aggregate maximum has not been hit, the scheduler looks for classes
 * whose maximum has not been satisfied. Iteration through the I/O classes is
 * done in the order specified above. No further operations are issued if the
 * aggregate maximum number of concurrent operations has been hit or if there
 * are no operations queued for an I/O class that has not hit its maximum.
 * Every time an i/o is queued or an operation completes, the I/O scheduler
 * looks for new operations to issue.
 *
 * All I/O classes have a fixed maximum number of outstanding operations
 * except for the async write class. Asynchronous writes represent the data
 * that is committed to stable storage during the syncing stage for
 * transaction groups (see txg.c). Transaction groups enter the syncing state
 * periodically so the number of queued async writes will quickly burst up and
 * then bleed down to zero. Rather than servicing them as quickly as possible,
 * the I/O scheduler changes the maximum number of active async write i/os
 * according to the amount of dirty data in the pool (see dsl_pool.c). Since
 * both throughput and latency typically increase with the number of
 * concurrent operations issued to physical devices, reducing the burstiness
 * in the number of concurrent operations also stabilizes the response time of
 * operations from other -- and in particular synchronous -- queues. In broad
 * strokes, the I/O scheduler will issue more concurrent operations from the
 * async write queue as there's more dirty data in the pool.
 *
 * Async Writes
 *
 * The number of concurrent operations issued for the async write I/O class
 * follows a piece-wise linear function defined by a few adjustable points.
 *
 *        |                   o---------| <-- zfs_vdev_async_write_max_active
 *   ^    |                  /^         |
 *   |    |                 / |         |
 * active |                /  |         |
 *  I/O   |               /   |         |
 * count  |              /    |         |
 *        |             /     |         |
 *        |------------o      |         | <-- zfs_vdev_async_write_min_active
 *       0|____________^______|_________|
 *        0%           |      |       100% of zfs_dirty_data_max
 *                     |      |
 *                     |      `-- zfs_vdev_async_write_active_max_dirty_percent
 *                     `--------- zfs_vdev_async_write_active_min_dirty_percent
 *
 * Until the amount of dirty data exceeds a minimum percentage of the dirty
 * data allowed in the pool, the I/O scheduler will limit the number of
 * concurrent operations to the minimum. As that threshold is crossed, the
 * number of concurrent operations issued increases linearly to the maximum at
 * the specified maximum percentage of the dirty data allowed in the pool.
 *
 * Ideally, the amount of dirty data on a busy pool will stay in the sloped
 * part of the function between zfs_vdev_async_write_active_min_dirty_percent
 * and zfs_vdev_async_write_active_max_dirty_percent. If it exceeds the
 * maximum percentage, this indicates that the rate of incoming data is
 * greater than the rate that the backend storage can handle. In this case, we
 * must further throttle incoming writes (see dmu_tx_delay() for details).
 */
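
/*
 * As a worked example (using the default tunables defined below:
 * min_active = 1, max_active = 10, and the 30%/60% dirty-data thresholds):
 * when 45% of zfs_dirty_data_max is dirty, the async write class is allowed
 *
 *   1 + (45 - 30) * (10 - 1) / (60 - 30) = 5
 *
 * concurrent operations (integer arithmetic).  The byte-based form of this
 * computation is implemented in vdev_queue_max_async_writes() below.
 */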

/*
 * The maximum number of i/os active to each device.  Ideally, this will be >=
 * the sum of each queue's max_active.  It must be at least the sum of each
 * queue's min_active.
 */
uint32_t zfs_vdev_max_active = 1000;

/*
 * Per-queue limits on the number of i/os active to each device.  If the
 * number of i/os active to the device is < zfs_vdev_max_active, then the
 * min_active comes into play.  We will send min_active from each queue,
 * and then select from queues in the order defined by zio_priority_t.
 *
 * In general, smaller max_active's will lead to lower latency of synchronous
 * operations.  Larger max_active's may lead to higher overall throughput,
 * depending on underlying storage.
 *
 * The ratio of the queues' max_actives determines the balance of performance
 * between reads, writes, and scrubs.  E.g., increasing
 * zfs_vdev_scrub_max_active will cause a scrub or resilver to complete
 * more quickly, but will also cause reads and writes to have higher latency
 * and lower throughput.
 */
uint32_t zfs_vdev_sync_read_min_active = 10;
uint32_t zfs_vdev_sync_read_max_active = 10;
uint32_t zfs_vdev_sync_write_min_active = 10;
uint32_t zfs_vdev_sync_write_max_active = 10;
uint32_t zfs_vdev_async_read_min_active = 1;
uint32_t zfs_vdev_async_read_max_active = 3;
uint32_t zfs_vdev_async_write_min_active = 1;
uint32_t zfs_vdev_async_write_max_active = 10;
uint32_t zfs_vdev_scrub_min_active = 1;
uint32_t zfs_vdev_scrub_max_active = 2;
uint32_t zfs_vdev_removal_min_active = 1;
uint32_t zfs_vdev_removal_max_active = 2;
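
/*
 * Note that with the defaults above, the per-queue max_actives sum to
 * 10 + 10 + 3 + 10 + 2 + 2 = 37, far below the default zfs_vdev_max_active
 * of 1000, so the aggregate limit normally never constrains issuance and
 * each class is effectively bounded by its own max_active.
 */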

/*
 * When the pool has less than zfs_vdev_async_write_active_min_dirty_percent
 * dirty data, use zfs_vdev_async_write_min_active.  When it has more than
 * zfs_vdev_async_write_active_max_dirty_percent, use
 * zfs_vdev_async_write_max_active. The value is linearly interpolated
 * between min and max.
 */
int zfs_vdev_async_write_active_min_dirty_percent = 30;
int zfs_vdev_async_write_active_max_dirty_percent = 60;

/*
 * To reduce IOPs, we aggregate small adjacent I/Os into one large I/O.
 * For read I/Os, we also aggregate across small adjacency gaps; for writes
 * we include spans of optional I/Os to aid aggregation at the disk even when
 * they aren't able to help us aggregate at this level.
 */
int zfs_vdev_aggregation_limit = SPA_OLD_MAXBLOCKSIZE;
int zfs_vdev_read_gap_limit = 32 << 10;
int zfs_vdev_write_gap_limit = 4 << 10;

/*
 * Define the queue depth percentage for each top-level vdev. This percentage
 * is used in conjunction with zfs_vdev_async_write_max_active to determine
 * how many allocations a specific top-level vdev should handle. Once the
 * queue depth reaches
 * zfs_vdev_queue_depth_pct * zfs_vdev_async_write_max_active / 100, the
 * allocator will stop allocating blocks on that top-level device.
 * The default kernel setting is 1000%, which will yield 100 allocations per
 * device. For userland testing, the default setting is 300%, which equates
 * to 30 allocations per device.
 */
#ifdef _KERNEL
int zfs_vdev_queue_depth_pct = 1000;
#else
int zfs_vdev_queue_depth_pct = 300;
#endif
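
/*
 * With the default kernel values, for example, that limit works out to
 * 1000 * zfs_vdev_async_write_max_active / 100 = 1000 * 10 / 100 = 100
 * outstanding allocations per top-level vdev.
 */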


int
vdev_queue_offset_compare(const void *x1, const void *x2)
{
        const zio_t *z1 = x1;
        const zio_t *z2 = x2;

        if (z1->io_offset < z2->io_offset)
                return (-1);
        if (z1->io_offset > z2->io_offset)
                return (1);

        if (z1 < z2)
                return (-1);
        if (z1 > z2)
                return (1);

        return (0);
}

static inline avl_tree_t *
vdev_queue_class_tree(vdev_queue_t *vq, zio_priority_t p)
{
        return (&vq->vq_class[p].vqc_queued_tree);
}

static inline avl_tree_t *
vdev_queue_type_tree(vdev_queue_t *vq, zio_type_t t)
{
        ASSERT(t == ZIO_TYPE_READ || t == ZIO_TYPE_WRITE);
        if (t == ZIO_TYPE_READ)
                return (&vq->vq_read_offset_tree);
        else
                return (&vq->vq_write_offset_tree);
}

int
vdev_queue_timestamp_compare(const void *x1, const void *x2)
{
        const zio_t *z1 = x1;
        const zio_t *z2 = x2;

        if (z1->io_timestamp < z2->io_timestamp)
                return (-1);
        if (z1->io_timestamp > z2->io_timestamp)
                return (1);

        if (z1 < z2)
                return (-1);
        if (z1 > z2)
                return (1);

        return (0);
}

void
vdev_queue_init(vdev_t *vd)
{
        vdev_queue_t *vq = &vd->vdev_queue;

        mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);
        vq->vq_vdev = vd;

        avl_create(&vq->vq_active_tree, vdev_queue_offset_compare,
            sizeof (zio_t), offsetof(struct zio, io_queue_node));
        avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_READ),
            vdev_queue_offset_compare, sizeof (zio_t),
            offsetof(struct zio, io_offset_node));
        avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE),
            vdev_queue_offset_compare, sizeof (zio_t),
            offsetof(struct zio, io_offset_node));

        for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
                int (*compfn) (const void *, const void *);

                /*
                 * The synchronous i/o queues are dispatched in FIFO rather
                 * than LBA order.  This provides more consistent latency for
                 * these i/os.
                 */
                if (p == ZIO_PRIORITY_SYNC_READ || p == ZIO_PRIORITY_SYNC_WRITE)
                        compfn = vdev_queue_timestamp_compare;
                else
                        compfn = vdev_queue_offset_compare;

                avl_create(vdev_queue_class_tree(vq, p), compfn,
                    sizeof (zio_t), offsetof(struct zio, io_queue_node));
        }
}

void
vdev_queue_fini(vdev_t *vd)
{
        vdev_queue_t *vq = &vd->vdev_queue;

        for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++)
                avl_destroy(vdev_queue_class_tree(vq, p));
        avl_destroy(&vq->vq_active_tree);
        avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_READ));
        avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE));

        mutex_destroy(&vq->vq_lock);
}

static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
        spa_t *spa = zio->io_spa;

        ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
        avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
        avl_add(vdev_queue_type_tree(vq, zio->io_type), zio);

        mutex_enter(&spa->spa_iokstat_lock);
        spa->spa_queue_stats[zio->io_priority].spa_queued++;
        if (spa->spa_iokstat != NULL)
                kstat_waitq_enter(spa->spa_iokstat->ks_data);
        mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
{
        spa_t *spa = zio->io_spa;

        ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
        avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
        avl_remove(vdev_queue_type_tree(vq, zio->io_type), zio);

        mutex_enter(&spa->spa_iokstat_lock);
        ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_queued, >, 0);
        spa->spa_queue_stats[zio->io_priority].spa_queued--;
        if (spa->spa_iokstat != NULL)
                kstat_waitq_exit(spa->spa_iokstat->ks_data);
        mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
{
        spa_t *spa = zio->io_spa;
        ASSERT(MUTEX_HELD(&vq->vq_lock));
        ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
        vq->vq_class[zio->io_priority].vqc_active++;
        avl_add(&vq->vq_active_tree, zio);

        mutex_enter(&spa->spa_iokstat_lock);
        spa->spa_queue_stats[zio->io_priority].spa_active++;
        if (spa->spa_iokstat != NULL)
                kstat_runq_enter(spa->spa_iokstat->ks_data);
        mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
{
        spa_t *spa = zio->io_spa;
        ASSERT(MUTEX_HELD(&vq->vq_lock));
        ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
        vq->vq_class[zio->io_priority].vqc_active--;
        avl_remove(&vq->vq_active_tree, zio);

        mutex_enter(&spa->spa_iokstat_lock);
        ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_active, >, 0);
        spa->spa_queue_stats[zio->io_priority].spa_active--;
        if (spa->spa_iokstat != NULL) {
                kstat_io_t *ksio = spa->spa_iokstat->ks_data;

                kstat_runq_exit(spa->spa_iokstat->ks_data);
                if (zio->io_type == ZIO_TYPE_READ) {
                        ksio->reads++;
                        ksio->nread += zio->io_size;
                } else if (zio->io_type == ZIO_TYPE_WRITE) {
                        ksio->writes++;
                        ksio->nwritten += zio->io_size;
                }
        }
        mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_agg_io_done(zio_t *aio)
{
        if (aio->io_type == ZIO_TYPE_READ) {
                zio_t *pio;
                zio_link_t *zl = NULL;
                while ((pio = zio_walk_parents(aio, &zl)) != NULL) {
                        abd_copy_off(pio->io_abd, aio->io_abd,
                            0, pio->io_offset - aio->io_offset, pio->io_size);
                }
        }

        abd_free(aio->io_abd);
}

static int
vdev_queue_class_min_active(zio_priority_t p)
{
        switch (p) {
        case ZIO_PRIORITY_SYNC_READ:
                return (zfs_vdev_sync_read_min_active);
        case ZIO_PRIORITY_SYNC_WRITE:
                return (zfs_vdev_sync_write_min_active);
        case ZIO_PRIORITY_ASYNC_READ:
                return (zfs_vdev_async_read_min_active);
        case ZIO_PRIORITY_ASYNC_WRITE:
                return (zfs_vdev_async_write_min_active);
        case ZIO_PRIORITY_SCRUB:
                return (zfs_vdev_scrub_min_active);
        case ZIO_PRIORITY_REMOVAL:
                return (zfs_vdev_removal_min_active);
        default:
                panic("invalid priority %u", p);
                return (0);
        }
}

static int
vdev_queue_max_async_writes(spa_t *spa)
{
        int writes;
        uint64_t dirty = spa->spa_dsl_pool->dp_dirty_total;
        uint64_t min_bytes = zfs_dirty_data_max *
            zfs_vdev_async_write_active_min_dirty_percent / 100;
        uint64_t max_bytes = zfs_dirty_data_max *
            zfs_vdev_async_write_active_max_dirty_percent / 100;

        /*
         * Sync tasks correspond to interactive user actions. To reduce the
         * execution time of those actions we push data out as fast as possible.
         */
        if (spa_has_pending_synctask(spa)) {
                return (zfs_vdev_async_write_max_active);
        }

        if (dirty < min_bytes)
                return (zfs_vdev_async_write_min_active);
        if (dirty > max_bytes)
                return (zfs_vdev_async_write_max_active);

        /*
         * linear interpolation:
         * slope = (max_writes - min_writes) / (max_bytes - min_bytes)
         * move right by min_bytes
         * move up by min_writes
         */
        writes = (dirty - min_bytes) *
            (zfs_vdev_async_write_max_active -
            zfs_vdev_async_write_min_active) /
            (max_bytes - min_bytes) +
            zfs_vdev_async_write_min_active;
        ASSERT3U(writes, >=, zfs_vdev_async_write_min_active);
        ASSERT3U(writes, <=, zfs_vdev_async_write_max_active);
        return (writes);
}

static int
vdev_queue_class_max_active(spa_t *spa, zio_priority_t p)
{
        switch (p) {
        case ZIO_PRIORITY_SYNC_READ:
                return (zfs_vdev_sync_read_max_active);
        case ZIO_PRIORITY_SYNC_WRITE:
                return (zfs_vdev_sync_write_max_active);
        case ZIO_PRIORITY_ASYNC_READ:
                return (zfs_vdev_async_read_max_active);
        case ZIO_PRIORITY_ASYNC_WRITE:
                return (vdev_queue_max_async_writes(spa));
        case ZIO_PRIORITY_SCRUB:
                return (zfs_vdev_scrub_max_active);
        case ZIO_PRIORITY_REMOVAL:
                return (zfs_vdev_removal_max_active);
        default:
                panic("invalid priority %u", p);
                return (0);
        }
}

/*
 * Return the i/o class to issue from, or ZIO_PRIORITY_NUM_QUEUEABLE if
 * there is no eligible class.
 */
static zio_priority_t
vdev_queue_class_to_issue(vdev_queue_t *vq)
{
        spa_t *spa = vq->vq_vdev->vdev_spa;
        zio_priority_t p;

        if (avl_numnodes(&vq->vq_active_tree) >= zfs_vdev_max_active)
                return (ZIO_PRIORITY_NUM_QUEUEABLE);

        /* find a queue that has not reached its minimum # outstanding i/os */
        for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
                if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
                    vq->vq_class[p].vqc_active <
                    vdev_queue_class_min_active(p))
                        return (p);
        }

        /*
         * If we haven't found a queue, look for one that hasn't reached its
         * maximum # outstanding i/os.
         */
        for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
                if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
                    vq->vq_class[p].vqc_active <
                    vdev_queue_class_max_active(spa, p))
                        return (p);
        }

        /* No eligible queued i/os */
        return (ZIO_PRIORITY_NUM_QUEUEABLE);
}

/*
 * Compute the range spanned by two i/os, which is the endpoint of the last
 * (lio->io_offset + lio->io_size) minus start of the first (fio->io_offset).
 * Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
 * thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
 */
#define IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
#define IO_GAP(fio, lio) (-IO_SPAN(lio, fio))
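
/*
 * For example, given fio at offset 0 with size 8K and lio at offset 12K
 * with size 4K, IO_SPAN(fio, lio) = 12K + 4K - 0 = 16K (the range an
 * aggregate i/o would have to cover), while IO_GAP(fio, lio) =
 * -(0 + 8K - 12K) = 4K (the hole between them); adjacent i/os have
 * IO_GAP() == 0.
 */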

static zio_t *
vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
{
        zio_t *first, *last, *aio, *dio, *mandatory, *nio;
        uint64_t maxgap = 0;
        uint64_t size;
        boolean_t stretch = B_FALSE;
        avl_tree_t *t = vdev_queue_type_tree(vq, zio->io_type);
        enum zio_flag flags = zio->io_flags & ZIO_FLAG_AGG_INHERIT;

        if (zio->io_flags & ZIO_FLAG_DONT_AGGREGATE)
                return (NULL);

        first = last = zio;

        if (zio->io_type == ZIO_TYPE_READ)
                maxgap = zfs_vdev_read_gap_limit;

        /*
         * We can aggregate I/Os that are sufficiently adjacent and of
         * the same flavor, as expressed by the AGG_INHERIT flags.
         * The latter requirement is necessary so that certain
         * attributes of the I/O, such as whether it's a normal I/O
         * or a scrub/resilver, can be preserved in the aggregate.
         * We can include optional I/Os, but don't allow them
         * to begin a range as they add no benefit in that situation.
         */

        /*
         * We keep track of the last non-optional I/O.
         */
        mandatory = (first->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : first;

        /*
         * Walk backwards through sufficiently contiguous I/Os
         * recording the last non-optional I/O.
         */
        while ((dio = AVL_PREV(t, first)) != NULL &&
            (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
            IO_SPAN(dio, last) <= zfs_vdev_aggregation_limit &&
            IO_GAP(dio, first) <= maxgap &&
            dio->io_type == zio->io_type) {
                first = dio;
                if (mandatory == NULL && !(first->io_flags & ZIO_FLAG_OPTIONAL))
                        mandatory = first;
        }

        /*
         * Skip any initial optional I/Os.
         */
        while ((first->io_flags & ZIO_FLAG_OPTIONAL) && first != last) {
                first = AVL_NEXT(t, first);
                ASSERT(first != NULL);
        }

        /*
         * Walk forward through sufficiently contiguous I/Os.
         * The aggregation limit does not apply to optional i/os, so that
         * we can issue contiguous writes even if they are larger than the
         * aggregation limit.
         */
        while ((dio = AVL_NEXT(t, last)) != NULL &&
            (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
            (IO_SPAN(first, dio) <= zfs_vdev_aggregation_limit ||
            (dio->io_flags & ZIO_FLAG_OPTIONAL)) &&
            IO_GAP(last, dio) <= maxgap &&
            dio->io_type == zio->io_type) {
                last = dio;
                if (!(last->io_flags & ZIO_FLAG_OPTIONAL))
                        mandatory = last;
        }

        /*
         * Now that we've established the range of the I/O aggregation
         * we must decide what to do with trailing optional I/Os.
         * For reads, there's nothing to do. While we are unable to
         * aggregate further, it's possible that a trailing optional
         * I/O would allow the underlying device to aggregate with
         * subsequent I/Os. We must therefore determine if the next
         * non-optional I/O is close enough to make aggregation
         * worthwhile.
         */
        if (zio->io_type == ZIO_TYPE_WRITE && mandatory != NULL) {
                zio_t *nio = last;
                while ((dio = AVL_NEXT(t, nio)) != NULL &&
                    IO_GAP(nio, dio) == 0 &&
                    IO_GAP(mandatory, dio) <= zfs_vdev_write_gap_limit) {
                        nio = dio;
                        if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
                                stretch = B_TRUE;
                                break;
                        }
                }
        }

        if (stretch) {
                /*
                 * We are going to include an optional io in our aggregated
                 * span, thus closing the write gap.  Only mandatory i/os can
                 * start aggregated spans, so make sure that the next i/o
                 * after our span is mandatory.
                 */
                dio = AVL_NEXT(t, last);
                dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
        } else {
                /* do not include the optional i/o */
                while (last != mandatory && last != first) {
                        ASSERT(last->io_flags & ZIO_FLAG_OPTIONAL);
                        last = AVL_PREV(t, last);
                        ASSERT(last != NULL);
                }
        }

        if (first == last)
                return (NULL);

        size = IO_SPAN(first, last);
        ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);

        aio = zio_vdev_delegated_io(first->io_vd, first->io_offset,
            abd_alloc_for_io(size, B_TRUE), size, first->io_type,
            zio->io_priority, flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
            vdev_queue_agg_io_done, NULL);
        aio->io_timestamp = first->io_timestamp;

        nio = first;
        do {
                dio = nio;
                nio = AVL_NEXT(t, dio);
                ASSERT3U(dio->io_type, ==, aio->io_type);

                if (dio->io_flags & ZIO_FLAG_NODATA) {
                        ASSERT3U(dio->io_type, ==, ZIO_TYPE_WRITE);
                        abd_zero_off(aio->io_abd,
                            dio->io_offset - aio->io_offset, dio->io_size);
                } else if (dio->io_type == ZIO_TYPE_WRITE) {
                        abd_copy_off(aio->io_abd, dio->io_abd,
                            dio->io_offset - aio->io_offset, 0, dio->io_size);
                }

                zio_add_child(dio, aio);
                vdev_queue_io_remove(vq, dio);
                zio_vdev_io_bypass(dio);
                zio_execute(dio);
        } while (dio != last);

        return (aio);
}
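
/*
 * For example, with three 8K reads queued at offsets 0, 8K and 20K that
 * share the same AGG_INHERIT flags, the walk above treats them as
 * sufficiently contiguous (the 4K hole at 16K is within
 * zfs_vdev_read_gap_limit), so a single 28K aggregate read is issued and
 * each original zio later copies its slice back out of the aggregate
 * buffer in vdev_queue_agg_io_done().
 */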

static zio_t *
vdev_queue_io_to_issue(vdev_queue_t *vq)
{
        zio_t *zio, *aio;
        zio_priority_t p;
        avl_index_t idx;
        avl_tree_t *tree;
        zio_t search;

again:
        ASSERT(MUTEX_HELD(&vq->vq_lock));

        p = vdev_queue_class_to_issue(vq);

        if (p == ZIO_PRIORITY_NUM_QUEUEABLE) {
                /* No eligible queued i/os */
                return (NULL);
        }

        /*
         * For LBA-ordered queues (async / scrub), issue the i/o which follows
         * the most recently issued i/o in LBA (offset) order.
         *
         * For FIFO queues (sync), issue the i/o with the lowest timestamp.
         */
        tree = vdev_queue_class_tree(vq, p);
        search.io_timestamp = 0;
        search.io_offset = vq->vq_last_offset + 1;
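        /*
         * The search key works for both orderings: in a timestamp-ordered
         * (FIFO) tree, io_timestamp == 0 sorts before every queued zio, so
         * AVL_AFTER below yields the oldest i/o; in an offset-ordered tree
         * it yields the next i/o past the last offset issued (falling back
         * to the lowest offset if nothing follows).
         */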
        VERIFY3P(avl_find(tree, &search, &idx), ==, NULL);
        zio = avl_nearest(tree, idx, AVL_AFTER);
        if (zio == NULL)
                zio = avl_first(tree);
        ASSERT3U(zio->io_priority, ==, p);

        aio = vdev_queue_aggregate(vq, zio);
        if (aio != NULL)
                zio = aio;
        else
                vdev_queue_io_remove(vq, zio);

        /*
         * If the I/O is or was optional and therefore has no data, we need to
         * simply discard it. We need to drop the vdev queue's lock to avoid a
         * deadlock that we could encounter since this I/O will complete
         * immediately.
         */
        if (zio->io_flags & ZIO_FLAG_NODATA) {
                mutex_exit(&vq->vq_lock);
                zio_vdev_io_bypass(zio);
                zio_execute(zio);
                mutex_enter(&vq->vq_lock);
                goto again;
        }

        vdev_queue_pending_add(vq, zio);
        vq->vq_last_offset = zio->io_offset;

        return (zio);
}

zio_t *
vdev_queue_io(zio_t *zio)
{
        vdev_queue_t *vq = &zio->io_vd->vdev_queue;
        zio_t *nio;

        if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
                return (zio);

        /*
         * Children i/os inherit their parent's priority, which might
         * not match the child's i/o type.  Fix it up here.
         */
        if (zio->io_type == ZIO_TYPE_READ) {
                if (zio->io_priority != ZIO_PRIORITY_SYNC_READ &&
                    zio->io_priority != ZIO_PRIORITY_ASYNC_READ &&
                    zio->io_priority != ZIO_PRIORITY_SCRUB &&
                    zio->io_priority != ZIO_PRIORITY_REMOVAL)
                        zio->io_priority = ZIO_PRIORITY_ASYNC_READ;
        } else {
                ASSERT(zio->io_type == ZIO_TYPE_WRITE);
                if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
                    zio->io_priority != ZIO_PRIORITY_ASYNC_WRITE &&
                    zio->io_priority != ZIO_PRIORITY_REMOVAL)
                        zio->io_priority = ZIO_PRIORITY_ASYNC_WRITE;
        }

        zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;

        mutex_enter(&vq->vq_lock);
        zio->io_timestamp = gethrtime();
        vdev_queue_io_add(vq, zio);
        nio = vdev_queue_io_to_issue(vq);
        mutex_exit(&vq->vq_lock);

        if (nio == NULL)
                return (NULL);

        if (nio->io_done == vdev_queue_agg_io_done) {
                zio_nowait(nio);
                return (NULL);
        }

        return (nio);
}

void
vdev_queue_io_done(zio_t *zio)
{
        vdev_queue_t *vq = &zio->io_vd->vdev_queue;
        zio_t *nio;

        mutex_enter(&vq->vq_lock);

        vdev_queue_pending_remove(vq, zio);

        vq->vq_io_complete_ts = gethrtime();

        while ((nio = vdev_queue_io_to_issue(vq)) != NULL) {
                mutex_exit(&vq->vq_lock);
                if (nio->io_done == vdev_queue_agg_io_done) {
                        zio_nowait(nio);
                } else {
                        zio_vdev_io_reissue(nio);
                        zio_execute(nio);
                }
                mutex_enter(&vq->vq_lock);
        }

        mutex_exit(&vq->vq_lock);
}