1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 * Copyright 2018 Nexenta Systems, Inc. All rights reserved.
25 */
26
27 /*
28 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
29 * Copyright (c) 2014 Integros [integros.com]
30 */
31
32 #include <sys/zfs_context.h>
33 #include <sys/vdev_impl.h>
34 #include <sys/cos.h>
35 #include <sys/spa_impl.h>
36 #include <sys/zio.h>
37 #include <sys/avl.h>
38 #include <sys/dsl_pool.h>
39 #include <sys/metaslab_impl.h>
40 #include <sys/abd.h>
41
42 /*
43 * ZFS I/O Scheduler
 * -----------------
45 *
46 * ZFS issues I/O operations to leaf vdevs to satisfy and complete zios. The
47 * I/O scheduler determines when and in what order those operations are
 * issued. The I/O scheduler divides operations into six I/O classes
 * prioritized in the following order: sync read, sync write, async read,
 * async write, resilver, and scrub. Each queue defines the minimum and
51 * maximum number of concurrent operations that may be issued to the device.
52 * In addition, the device has an aggregate maximum. Note that the sum of the
53 * per-queue minimums must not exceed the aggregate maximum, and if the
54 * aggregate maximum is equal to or greater than the sum of the per-queue
55 * maximums, the per-queue minimum has no effect.
56 *
57 * For many physical devices, throughput increases with the number of
58 * concurrent operations, but latency typically suffers. Further, physical
59 * devices typically have a limit at which more concurrent operations have no
60 * effect on throughput or can actually cause it to decrease.
61 *
62 * The scheduler selects the next operation to issue by first looking for an
63 * I/O class whose minimum has not been satisfied. Once all are satisfied and
64 * the aggregate maximum has not been hit, the scheduler looks for classes
65 * whose maximum has not been satisfied. Iteration through the I/O classes is
66 * done in the order specified above. No further operations are issued if the
67 * aggregate maximum number of concurrent operations has been hit or if there
68 * are no operations queued for an I/O class that has not hit its maximum.
69 * Every time an i/o is queued or an operation completes, the I/O scheduler
70 * looks for new operations to issue.
71 *
72 * All I/O classes have a fixed maximum number of outstanding operations
73 * except for the async write class. Asynchronous writes represent the data
74 * that is committed to stable storage during the syncing stage for
75 * transaction groups (see txg.c). Transaction groups enter the syncing state
76 * periodically so the number of queued async writes will quickly burst up and
77 * then bleed down to zero. Rather than servicing them as quickly as possible,
78 * the I/O scheduler changes the maximum number of active async write i/os
79 * according to the amount of dirty data in the pool (see dsl_pool.c). Since
80 * both throughput and latency typically increase with the number of
81 * concurrent operations issued to physical devices, reducing the burstiness
82 * in the number of concurrent operations also stabilizes the response time of
83 * operations from other -- and in particular synchronous -- queues. In broad
84 * strokes, the I/O scheduler will issue more concurrent operations from the
85 * async write queue as there's more dirty data in the pool.
86 *
87 * Async Writes
88 *
89 * The number of concurrent operations issued for the async write I/O class
90 * follows a piece-wise linear function defined by a few adjustable points.
91 *
 *        |                   o---------| <-- zfs_vdev_async_write_max_active
 *   ^    |                  /^         |
 *   |    |                 / |         |
 * active |                /  |         |
 *  I/O   |               /   |         |
 * count  |              /    |         |
 *        |             /     |         |
 *        |------------o      |         | <-- zfs_vdev_async_write_min_active
 *       0|____________^______|_________|
 *        0%           |      |       100% of zfs_dirty_data_max
 *                     |      |
 *                     |      `-- zfs_vdev_async_write_active_max_dirty_percent
 *                     `--------- zfs_vdev_async_write_active_min_dirty_percent
105 *
106 * Until the amount of dirty data exceeds a minimum percentage of the dirty
107 * data allowed in the pool, the I/O scheduler will limit the number of
108 * concurrent operations to the minimum. As that threshold is crossed, the
109 * number of concurrent operations issued increases linearly to the maximum at
110 * the specified maximum percentage of the dirty data allowed in the pool.
111 *
112 * Ideally, the amount of dirty data on a busy pool will stay in the sloped
113 * part of the function between zfs_vdev_async_write_active_min_dirty_percent
114 * and zfs_vdev_async_write_active_max_dirty_percent. If it exceeds the
115 * maximum percentage, this indicates that the rate of incoming data is
116 * greater than the rate that the backend storage can handle. In this case, we
117 * must further throttle incoming writes (see dmu_tx_delay() for details).
118 */
119
120 /*
121 * The maximum number of i/os active to each device. Ideally, this will be >=
122 * the sum of each queue's max_active. It must be at least the sum of each
123 * queue's min_active.
124 */
125 uint32_t zfs_vdev_max_active = 1000;
126
127 /*
128 * Per-queue limits on the number of i/os active to each device. If the
 * sum of the queues' max_actives exceeds zfs_vdev_max_active, then the
 * min_active comes into play: we will send min_active from each queue,
131 * and then select from queues in the order defined by zio_priority_t.
132 *
133 * In general, smaller max_active's will lead to lower latency of synchronous
134 * operations. Larger max_active's may lead to higher overall throughput,
135 * depending on underlying storage.
136 *
137 * The ratio of the queues' max_actives determines the balance of performance
138 * between reads, writes, and scrubs. E.g., increasing
139 * zfs_vdev_scrub_max_active will cause the scrub or resilver to complete
140 * more quickly, but reads and writes to have higher latency and lower
141 * throughput.
142 */
143 uint32_t zfs_vdev_sync_read_min_active = 10;
144 uint32_t zfs_vdev_sync_read_max_active = 10;
145 uint32_t zfs_vdev_sync_write_min_active = 10;
146 uint32_t zfs_vdev_sync_write_max_active = 10;
147 uint32_t zfs_vdev_async_read_min_active = 1;
148 uint32_t zfs_vdev_async_read_max_active = 3;
149 uint32_t zfs_vdev_async_write_min_active = 1;
150 uint32_t zfs_vdev_async_write_max_active = 10;
151 uint32_t zfs_vdev_resilver_min_active = 1;
152 uint32_t zfs_vdev_resilver_max_active = 3;
153 uint32_t zfs_vdev_scrub_min_active = 1;
154 uint32_t zfs_vdev_scrub_max_active = 2;
155
156 /*
157 * When the pool has less than zfs_vdev_async_write_active_min_dirty_percent
158 * dirty data, use zfs_vdev_async_write_min_active. When it has more than
159 * zfs_vdev_async_write_active_max_dirty_percent, use
160 * zfs_vdev_async_write_max_active. The value is linearly interpolated
161 * between min and max.
162 */
163 int zfs_vdev_async_write_active_min_dirty_percent = 30;
164 int zfs_vdev_async_write_active_max_dirty_percent = 60;
165
166 /*
167 * To reduce IOPs, we aggregate small adjacent I/Os into one large I/O.
168 * For read I/Os, we also aggregate across small adjacency gaps; for writes
169 * we include spans of optional I/Os to aid aggregation at the disk even when
170 * they aren't able to help us aggregate at this level.
171 */
172 int zfs_vdev_aggregation_limit = SPA_OLD_MAXBLOCKSIZE;
173 int zfs_vdev_read_gap_limit = 32 << 10;
174 int zfs_vdev_write_gap_limit = 4 << 10;
175
176 /*
 * Define the queue depth percentage for each top-level vdev. This percentage
 * is used in conjunction with zfs_vdev_async_write_max_active to determine
 * how many allocations a specific top-level vdev should handle. Once the
 * queue depth reaches
 * zfs_vdev_queue_depth_pct * zfs_vdev_async_write_max_active / 100, the
 * allocator will stop allocating blocks on that top-level vdev. The default
 * kernel setting is 1000%, which will yield 100 allocations per device. For
 * userland testing, the default setting is 300%, which equates to 30
 * allocations per device.
185 */
186 #ifdef _KERNEL
187 int zfs_vdev_queue_depth_pct = 1000;
188 #else
189 int zfs_vdev_queue_depth_pct = 300;
190 #endif
191
192
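/*
 * AVL comparator ordering queued i/os by device offset (LBA). Ties are
 * broken by the zio pointer so the ordering is total and two distinct
 * i/os never compare equal.
 */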
193 int
194 vdev_queue_offset_compare(const void *x1, const void *x2)
195 {
196 const zio_t *z1 = x1;
197 const zio_t *z2 = x2;
198
199 if (z1->io_offset < z2->io_offset)
200 return (-1);
201 if (z1->io_offset > z2->io_offset)
202 return (1);
203
204 if (z1 < z2)
205 return (-1);
206 if (z1 > z2)
207 return (1);
208
209 return (0);
210 }
211
212 static inline avl_tree_t *
213 vdev_queue_class_tree(vdev_queue_t *vq, zio_priority_t p)
214 {
215 return (&vq->vq_class[p].vqc_queued_tree);
216 }
217
218 static inline avl_tree_t *
219 vdev_queue_type_tree(vdev_queue_t *vq, zio_type_t t)
220 {
221 ASSERT(t == ZIO_TYPE_READ || t == ZIO_TYPE_WRITE);
222 if (t == ZIO_TYPE_READ)
223 return (&vq->vq_read_offset_tree);
224 else
225 return (&vq->vq_write_offset_tree);
226 }
227
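/*
 * AVL comparator ordering queued i/os by submission timestamp (FIFO),
 * again falling back to the zio pointer as the tiebreaker.
 */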
228 int
229 vdev_queue_timestamp_compare(const void *x1, const void *x2)
230 {
231 const zio_t *z1 = x1;
232 const zio_t *z2 = x2;
233
234 if (z1->io_timestamp < z2->io_timestamp)
235 return (-1);
236 if (z1->io_timestamp > z2->io_timestamp)
237 return (1);
238
239 if (z1 < z2)
240 return (-1);
241 if (z1 > z2)
242 return (1);
243
244 return (0);
245 }
246
247 void
248 vdev_queue_init(vdev_t *vd)
249 {
250 vdev_queue_t *vq = &vd->vdev_queue;
251
252 mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);
253 vq->vq_vdev = vd;
254
255 avl_create(&vq->vq_active_tree, vdev_queue_offset_compare,
256 sizeof (zio_t), offsetof(struct zio, io_queue_node));
257 avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_READ),
258 vdev_queue_offset_compare, sizeof (zio_t),
259 offsetof(struct zio, io_offset_node));
260 avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE),
261 vdev_queue_offset_compare, sizeof (zio_t),
262 offsetof(struct zio, io_offset_node));
263
264 for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
265 int (*compfn) (const void *, const void *);
266
267 /*
268 * The synchronous i/o queues are dispatched in FIFO rather
269 * than LBA order. This provides more consistent latency for
270 * these i/os.
271 */
272 if (p == ZIO_PRIORITY_SYNC_READ || p == ZIO_PRIORITY_SYNC_WRITE)
273 compfn = vdev_queue_timestamp_compare;
274 else
275 compfn = vdev_queue_offset_compare;
276
277 avl_create(vdev_queue_class_tree(vq, p), compfn,
278 sizeof (zio_t), offsetof(struct zio, io_queue_node));
279 }
280 }
281
282 void
283 vdev_queue_fini(vdev_t *vd)
284 {
285 vdev_queue_t *vq = &vd->vdev_queue;
286
287 for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++)
288 avl_destroy(vdev_queue_class_tree(vq, p));
289 avl_destroy(&vq->vq_active_tree);
290 avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_READ));
291 avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE));
292
293 mutex_destroy(&vq->vq_lock);
294 }
295
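/*
 * Insert a zio into its class tree and its type (offset) tree, and account
 * for it in the pool- and vdev-level wait-queue kstats.
 */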
296 static void
297 vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
298 {
299 spa_t *spa = zio->io_spa;
300 hrtime_t t = gethrtime_unscaled();
301
302 ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
303 avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
304 avl_add(vdev_queue_type_tree(vq, zio->io_type), zio);
305
306 atomic_inc_64(&spa->spa_queue_stats[zio->io_priority].spa_queued);
307 mutex_enter(&spa->spa_iokstat_lock);
308 if (spa->spa_iokstat != NULL)
309 kstat_waitq_enter_time(spa->spa_iokstat->ks_data, t);
310 if (vq->vq_vdev->vdev_iokstat != NULL)
311 kstat_waitq_enter_time(vq->vq_vdev->vdev_iokstat->ks_data, t);
312 mutex_exit(&spa->spa_iokstat_lock);
313 }
314
315 static void
316 vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
317 {
318 spa_t *spa = zio->io_spa;
319 hrtime_t t = gethrtime_unscaled();
320
321 ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
322 avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
323 avl_remove(vdev_queue_type_tree(vq, zio->io_type), zio);
324
325 ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_queued, >, 0);
326 atomic_dec_64(&spa->spa_queue_stats[zio->io_priority].spa_queued);
327
328 mutex_enter(&spa->spa_iokstat_lock);
329 if (spa->spa_iokstat != NULL)
330 kstat_waitq_exit_time(spa->spa_iokstat->ks_data, t);
331 if (vq->vq_vdev->vdev_iokstat != NULL)
332 kstat_waitq_exit_time(vq->vq_vdev->vdev_iokstat->ks_data, t);
333 mutex_exit(&spa->spa_iokstat_lock);
334 }
335
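/*
 * Account for a zio being issued to the device: bump the class's active
 * count, add the zio to the active tree, and enter it into the run-queue
 * kstats for the pool and the vdev.
 */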
336 static void
337 vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
338 {
339 spa_t *spa = zio->io_spa;
340 hrtime_t t = gethrtime_unscaled();
341
342 ASSERT(MUTEX_HELD(&vq->vq_lock));
343 ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
344 vq->vq_class[zio->io_priority].vqc_active++;
345 avl_add(&vq->vq_active_tree, zio);
346
347 atomic_inc_64(&spa->spa_queue_stats[zio->io_priority].spa_active);
348 mutex_enter(&spa->spa_iokstat_lock);
349 if (spa->spa_iokstat != NULL)
350 kstat_runq_enter_time(spa->spa_iokstat->ks_data, t);
351 if (vq->vq_vdev->vdev_iokstat != NULL)
352 kstat_runq_enter_time(vq->vq_vdev->vdev_iokstat->ks_data, t);
353 mutex_exit(&spa->spa_iokstat_lock);
354 }
355
356 static void
357 vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
358 {
359 spa_t *spa = zio->io_spa;
360 hrtime_t t = gethrtime_unscaled();
361
362 ASSERT(MUTEX_HELD(&vq->vq_lock));
363 ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
364 vq->vq_class[zio->io_priority].vqc_active--;
365 avl_remove(&vq->vq_active_tree, zio);
366
367 ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_active, >, 0);
368 atomic_dec_64(&spa->spa_queue_stats[zio->io_priority].spa_active);
369
370 mutex_enter(&spa->spa_iokstat_lock);
371 if (spa->spa_iokstat != NULL) {
372 kstat_io_t *ksio = spa->spa_iokstat->ks_data;
373
374 kstat_runq_exit_time(spa->spa_iokstat->ks_data, t);
375 if (zio->io_type == ZIO_TYPE_READ) {
376 ksio->reads++;
377 ksio->nread += zio->io_size;
378 } else if (zio->io_type == ZIO_TYPE_WRITE) {
379 ksio->writes++;
380 ksio->nwritten += zio->io_size;
381 }
382 }
383
384 if (vq->vq_vdev->vdev_iokstat != NULL) {
385 kstat_io_t *ksio = vq->vq_vdev->vdev_iokstat->ks_data;
386
387 kstat_runq_exit_time(ksio, t);
388 if (zio->io_type == ZIO_TYPE_READ) {
389 ksio->reads++;
390 ksio->nread += zio->io_size;
391 } else if (zio->io_type == ZIO_TYPE_WRITE) {
392 ksio->writes++;
393 ksio->nwritten += zio->io_size;
394 }
395 }
396 mutex_exit(&spa->spa_iokstat_lock);
397 }
398
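/*
 * Completion callback for an aggregated i/o. For reads, copy each
 * constituent (parent) i/o's portion of the data out of the shared
 * aggregation buffer; in all cases, free that buffer.
 */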
399 static void
400 vdev_queue_agg_io_done(zio_t *aio)
401 {
402 if (aio->io_type == ZIO_TYPE_READ) {
403 zio_t *pio;
404 zio_link_t *zl = NULL;
405 while ((pio = zio_walk_parents(aio, &zl)) != NULL) {
406 abd_copy_off(pio->io_abd, aio->io_abd,
407 0, pio->io_offset - aio->io_offset, pio->io_size);
408 }
409 }
410
411 abd_free(aio->io_abd);
412 }
413
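/*
 * Translate a pool-wide scan (resilver/scrub) priority, expressed as a
 * percentage, into a concurrent i/o limit. The percentage is applied to the
 * combined min_active (or max_active) settings of the four regular
 * read/write classes, and the result is clamped to the range
 * [1, largest individual class setting].
 */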
414 static uint64_t
415 scan_prio2active(uint64_t prio, boolean_t max_active)
416 {
417 uint64_t act, act_max;
418
419 if (max_active) {
420 act_max = MAX(MAX(zfs_vdev_sync_read_max_active,
421 zfs_vdev_sync_write_max_active),
422 MAX(zfs_vdev_async_read_max_active,
423 zfs_vdev_async_write_max_active));
424 act = ((prio * (zfs_vdev_sync_read_max_active +
425 zfs_vdev_sync_write_max_active +
426 zfs_vdev_async_read_max_active +
427 zfs_vdev_async_write_max_active)) / 100);
428 } else {
429 act_max = MAX(MAX(zfs_vdev_sync_read_min_active,
430 zfs_vdev_sync_write_min_active),
431 MAX(zfs_vdev_async_read_min_active,
432 zfs_vdev_async_write_min_active));
433 act = ((prio * (zfs_vdev_sync_read_min_active +
434 zfs_vdev_sync_write_min_active +
435 zfs_vdev_async_read_min_active +
436 zfs_vdev_async_write_min_active)) / 100);
437 }
438 act = MAX(MIN(act, act_max), 1);
439
440 return (act);
441 }
442
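/*
 * Return the minimum number of concurrent i/os to keep issued for class p
 * on this queue. A non-zero vdev-specific property takes precedence;
 * otherwise the global tunable is used, except that for resilver and scrub
 * a non-zero pool-wide scan priority is converted via scan_prio2active().
 */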
443 static int
444 vdev_queue_class_min_active(zio_priority_t p, vdev_queue_t *vq)
445 {
446 int zfs_min_active = 0;
447 int vqc_min_active;
448 vdev_prop_t prop = VDEV_ZIO_PRIO_TO_PROP_MIN(p);
449
450 ASSERT(VDEV_PROP_MIN_VALID(prop));
451 vqc_min_active = vdev_queue_get_prop_uint64(vq, prop);
452
453 switch (p) {
454 case ZIO_PRIORITY_SYNC_READ:
455 zfs_min_active = zfs_vdev_sync_read_min_active;
456 break;
457 case ZIO_PRIORITY_SYNC_WRITE:
458 zfs_min_active = zfs_vdev_sync_write_min_active;
459 break;
460 case ZIO_PRIORITY_ASYNC_READ:
461 zfs_min_active = zfs_vdev_async_read_min_active;
462 break;
463 case ZIO_PRIORITY_ASYNC_WRITE:
464 zfs_min_active = zfs_vdev_async_write_min_active;
465 break;
466 case ZIO_PRIORITY_RESILVER: {
467 uint64_t prio = vq->vq_vdev->vdev_spa->spa_resilver_prio;
468 if (prio > 0)
469 zfs_min_active = scan_prio2active(prio, B_FALSE);
470 else
471 zfs_min_active = zfs_vdev_resilver_min_active;
472 break;
473 }
474 case ZIO_PRIORITY_SCRUB: {
475 uint64_t prio = vq->vq_vdev->vdev_spa->spa_scrub_prio;
476 if (prio > 0)
477 zfs_min_active = scan_prio2active(prio, B_FALSE);
478 else
479 zfs_min_active = zfs_vdev_scrub_min_active;
480 break;
481 }
482 default:
483 panic("invalid priority %u", p);
484 return (0);
485 }
486
487 /* zero vdev-specific setting means "use zfs global setting" */
488 return ((vqc_min_active) ? vqc_min_active : zfs_min_active);
489 }
490
491 static int
492 vdev_queue_max_async_writes(spa_t *spa, vdev_queue_t *vq)
493 {
494 int writes;
495 uint64_t dirty = spa->spa_dsl_pool->dp_dirty_total;
496 uint64_t min_bytes = zfs_dirty_data_max *
497 zfs_vdev_async_write_active_min_dirty_percent / 100;
498 uint64_t max_bytes = zfs_dirty_data_max *
499 zfs_vdev_async_write_active_max_dirty_percent / 100;
500
	/*
	 * Vdev-specific properties override the global tunables;
	 * a zero vdev-specific setting indicates fallback to the globals.
	 */
505 int vqc_min_active =
506 vdev_queue_get_prop_uint64(vq, VDEV_PROP_AWRITE_MINACTIVE);
507 int min_active =
508 (vqc_min_active) ? vqc_min_active : zfs_vdev_async_write_min_active;
509 int vqc_max_active =
510 vdev_queue_get_prop_uint64(vq, VDEV_PROP_AWRITE_MAXACTIVE);
511 int max_active =
512 (vqc_max_active) ? vqc_max_active : zfs_vdev_async_write_max_active;
513
514 /*
	 * Sync tasks correspond to interactive user actions. To reduce the
	 * execution time of those actions, we push data out as fast as
	 * possible.
517 */
518 if (spa_has_pending_synctask(spa)) {
519 return (zfs_vdev_async_write_max_active);
520 }
521
522 if (dirty < min_bytes)
523 return (min_active);
524 if (dirty > max_bytes)
525 return (max_active);
526
527 /*
528 * linear interpolation:
529 * slope = (max_writes - min_writes) / (max_bytes - min_bytes)
530 * move right by min_bytes
531 * move up by min_writes
532 */
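	/*
	 * For example (a sketch assuming the default tunables above: min 1,
	 * max 10, thresholds 30% and 60%): a pool sitting at 45% of
	 * zfs_dirty_data_max is halfway between the thresholds, so this
	 * evaluates to 1 + (45 - 30) * (10 - 1) / (60 - 30) = 5 once the
	 * integer division truncates.
	 */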
533 writes = (dirty - min_bytes) * (max_active - min_active) /
534 (max_bytes - min_bytes) + min_active;
535 ASSERT3U(writes, >=, min_active);
536 ASSERT3U(writes, <=, max_active);
537 return (writes);
538 }
539
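/*
 * Return the maximum number of concurrent i/os allowed for class p on this
 * queue. Per-vdev properties override the global tunables; the async write
 * class is additionally governed by the amount of dirty data via
 * vdev_queue_max_async_writes().
 */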
540 static int
541 vdev_queue_class_max_active(spa_t *spa, zio_priority_t p, vdev_queue_t *vq)
542 {
543 int zfs_max_active = 0;
544 int vqc_max_active;
545 vdev_prop_t prop = VDEV_ZIO_PRIO_TO_PROP_MAX(p);
546
547 ASSERT(VDEV_PROP_MAX_VALID(prop));
548 vqc_max_active = vdev_queue_get_prop_uint64(vq, prop);
549
550 switch (p) {
551 case ZIO_PRIORITY_SYNC_READ:
552 zfs_max_active = zfs_vdev_sync_read_max_active;
553 break;
554 case ZIO_PRIORITY_SYNC_WRITE:
555 zfs_max_active = zfs_vdev_sync_write_max_active;
556 break;
557 case ZIO_PRIORITY_ASYNC_READ:
558 zfs_max_active = zfs_vdev_async_read_max_active;
559 break;
560 case ZIO_PRIORITY_ASYNC_WRITE:
561 /* takes into account vdev-specific props internally */
562 vqc_max_active = vdev_queue_max_async_writes(spa, vq);
563 ASSERT(vqc_max_active);
564 break;
565 case ZIO_PRIORITY_RESILVER: {
566 uint64_t prio = vq->vq_vdev->vdev_spa->spa_resilver_prio;
567 if (prio > 0)
568 zfs_max_active = scan_prio2active(prio, B_TRUE);
569 else
570 zfs_max_active = zfs_vdev_resilver_max_active;
571 break;
572 }
573 case ZIO_PRIORITY_SCRUB: {
574 uint64_t prio = vq->vq_vdev->vdev_spa->spa_scrub_prio;
575 if (prio > 0)
576 zfs_max_active = scan_prio2active(prio, B_TRUE);
577 else
578 zfs_max_active = zfs_vdev_scrub_max_active;
579 break;
580 }
581 default:
582 panic("invalid priority %u", p);
583 return (0);
584 }
585
586 /* zero vdev-specific setting means "use zfs global setting" */
587 return ((vqc_max_active) ? vqc_max_active : zfs_max_active);
588 }
589
590 /*
 * Return the i/o class to issue from, or ZIO_PRIORITY_NUM_QUEUEABLE if
592 * there is no eligible class.
593 */
594 static zio_priority_t
595 vdev_queue_class_to_issue(vdev_queue_t *vq)
596 {
597 spa_t *spa = vq->vq_vdev->vdev_spa;
598 zio_priority_t p;
599
600 if (avl_numnodes(&vq->vq_active_tree) >= zfs_vdev_max_active)
601 return (ZIO_PRIORITY_NUM_QUEUEABLE);
602
603 /* find a queue that has not reached its minimum # outstanding i/os */
604 for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
605 if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
606 vq->vq_class[p].vqc_active <
607 vdev_queue_class_min_active(p, vq))
608 return (p);
609 }
610
611 /*
612 * If we haven't found a queue, look for one that hasn't reached its
613 * maximum # outstanding i/os.
614 */
615 for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
616 if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
617 vq->vq_class[p].vqc_active <
618 vdev_queue_class_max_active(spa, p, vq))
619 return (p);
620 }
621
622 /* No eligible queued i/os */
623 return (ZIO_PRIORITY_NUM_QUEUEABLE);
624 }
625
626 /*
627 * Compute the range spanned by two i/os, which is the endpoint of the last
628 * (lio->io_offset + lio->io_size) minus start of the first (fio->io_offset).
629 * Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
630 * thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
631 */
632 #define IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
633 #define IO_GAP(fio, lio) (-IO_SPAN(lio, fio))
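/*
 * For example (hypothetical offsets): with fio covering [0K, 8K) and lio
 * covering [24K, 32K), IO_SPAN(fio, lio) is 32K and IO_GAP(fio, lio) is 16K.
 */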
634
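/*
 * Try to build a single aggregated i/o covering zio and any neighbors queued
 * adjacently (in offset order) with compatible flags. On success, return the
 * aggregate; each constituent i/o is removed from the queue, linked to the
 * aggregate so that it completes when the aggregate does, and bypassed.
 * Return NULL if nothing could be aggregated.
 */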
635 static zio_t *
636 vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
637 {
638 zio_t *first, *last, *aio, *dio, *mandatory, *nio;
639 uint64_t maxgap = 0;
640 uint64_t size;
641 boolean_t stretch = B_FALSE;
642 avl_tree_t *t = vdev_queue_type_tree(vq, zio->io_type);
643 enum zio_flag flags = zio->io_flags & ZIO_FLAG_AGG_INHERIT;
644
645 if (zio->io_flags & ZIO_FLAG_DONT_AGGREGATE)
646 return (NULL);
647
648 first = last = zio;
649
650 if (zio->io_type == ZIO_TYPE_READ)
651 maxgap = zfs_vdev_read_gap_limit;
652
653 /*
654 * We can aggregate I/Os that are sufficiently adjacent and of
655 * the same flavor, as expressed by the AGG_INHERIT flags.
656 * The latter requirement is necessary so that certain
657 * attributes of the I/O, such as whether it's a normal I/O
658 * or a scrub/resilver, can be preserved in the aggregate.
659 * We can include optional I/Os, but don't allow them
660 * to begin a range as they add no benefit in that situation.
661 */
662
663 /*
664 * We keep track of the last non-optional I/O.
665 */
666 mandatory = (first->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : first;
667
668 /*
669 * Walk backwards through sufficiently contiguous I/Os
670 * recording the last non-optional I/O.
671 */
672 while ((dio = AVL_PREV(t, first)) != NULL &&
673 (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
674 IO_SPAN(dio, last) <= zfs_vdev_aggregation_limit &&
675 IO_GAP(dio, first) <= maxgap) {
676 first = dio;
677 if (mandatory == NULL && !(first->io_flags & ZIO_FLAG_OPTIONAL))
678 mandatory = first;
679 }
680
681 /*
682 * Skip any initial optional I/Os.
683 */
684 while ((first->io_flags & ZIO_FLAG_OPTIONAL) && first != last) {
685 first = AVL_NEXT(t, first);
686 ASSERT(first != NULL);
687 }
688
689 /*
690 * Walk forward through sufficiently contiguous I/Os.
691 * The aggregation limit does not apply to optional i/os, so that
692 * we can issue contiguous writes even if they are larger than the
693 * aggregation limit.
694 */
695 while ((dio = AVL_NEXT(t, last)) != NULL &&
696 (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
697 (IO_SPAN(first, dio) <= zfs_vdev_aggregation_limit ||
698 (dio->io_flags & ZIO_FLAG_OPTIONAL)) &&
699 IO_GAP(last, dio) <= maxgap) {
700 last = dio;
701 if (!(last->io_flags & ZIO_FLAG_OPTIONAL))
702 mandatory = last;
703 }
704
705 /*
706 * Now that we've established the range of the I/O aggregation
707 * we must decide what to do with trailing optional I/Os.
708 * For reads, there's nothing to do. While we are unable to
709 * aggregate further, it's possible that a trailing optional
710 * I/O would allow the underlying device to aggregate with
711 * subsequent I/Os. We must therefore determine if the next
712 * non-optional I/O is close enough to make aggregation
713 * worthwhile.
714 */
715 if (zio->io_type == ZIO_TYPE_WRITE && mandatory != NULL) {
716 zio_t *nio = last;
717 while ((dio = AVL_NEXT(t, nio)) != NULL &&
718 IO_GAP(nio, dio) == 0 &&
719 IO_GAP(mandatory, dio) <= zfs_vdev_write_gap_limit) {
720 nio = dio;
721 if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
722 stretch = B_TRUE;
723 break;
724 }
725 }
726 }
727
728 if (stretch) {
729 /*
730 * We are going to include an optional io in our aggregated
731 * span, thus closing the write gap. Only mandatory i/os can
732 * start aggregated spans, so make sure that the next i/o
733 * after our span is mandatory.
734 */
735 dio = AVL_NEXT(t, last);
736 dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
737 } else {
738 /* do not include the optional i/o */
739 while (last != mandatory && last != first) {
740 ASSERT(last->io_flags & ZIO_FLAG_OPTIONAL);
741 last = AVL_PREV(t, last);
742 ASSERT(last != NULL);
743 }
744 }
745
746 if (first == last)
747 return (NULL);
748
749 size = IO_SPAN(first, last);
750 ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
751
752 aio = zio_vdev_delegated_io(first->io_vd, first->io_offset,
753 abd_alloc_for_io(size, B_TRUE), size, first->io_type,
754 zio->io_priority, flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
755 vdev_queue_agg_io_done, NULL);
756 aio->io_timestamp = first->io_timestamp;
757
758 nio = first;
759 do {
760 dio = nio;
761 nio = AVL_NEXT(t, dio);
762 ASSERT3U(dio->io_type, ==, aio->io_type);
763
764 if (dio->io_flags & ZIO_FLAG_NODATA) {
765 ASSERT3U(dio->io_type, ==, ZIO_TYPE_WRITE);
766 abd_zero_off(aio->io_abd,
767 dio->io_offset - aio->io_offset, dio->io_size);
768 } else if (dio->io_type == ZIO_TYPE_WRITE) {
769 abd_copy_off(aio->io_abd, dio->io_abd,
770 dio->io_offset - aio->io_offset, 0, dio->io_size);
771 }
772
773 zio_add_child(dio, aio);
774 vdev_queue_io_remove(vq, dio);
775 zio_vdev_io_bypass(dio);
776 zio_execute(dio);
777 } while (dio != last);
778
779 return (aio);
780 }
781
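/*
 * Select the next i/o to issue, or return NULL if the class limits do not
 * allow one. This picks an eligible class, chooses the next zio within it,
 * attempts aggregation with its neighbors, and moves the result onto the
 * active tree. NODATA (optional) i/os are bypassed here rather than issued.
 */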
782 static zio_t *
783 vdev_queue_io_to_issue(vdev_queue_t *vq)
784 {
785 zio_t *zio, *aio;
786 zio_priority_t p;
787 avl_index_t idx;
788 avl_tree_t *tree;
789 zio_t search;
790
791 again:
792 ASSERT(MUTEX_HELD(&vq->vq_lock));
793
794 p = vdev_queue_class_to_issue(vq);
795
796 if (p == ZIO_PRIORITY_NUM_QUEUEABLE) {
797 /* No eligible queued i/os */
798 return (NULL);
799 }
800
801 /*
802 * For LBA-ordered queues (async / scrub), issue the i/o which follows
803 * the most recently issued i/o in LBA (offset) order.
804 *
805 * For FIFO queues (sync), issue the i/o with the lowest timestamp.
806 */
807 tree = vdev_queue_class_tree(vq, p);
808 search.io_timestamp = 0;
809 search.io_offset = vq->vq_last_offset + 1;
810 VERIFY3P(avl_find(tree, &search, &idx), ==, NULL);
811 zio = avl_nearest(tree, idx, AVL_AFTER);
812 if (zio == NULL)
813 zio = avl_first(tree);
814 ASSERT3U(zio->io_priority, ==, p);
815
816 aio = vdev_queue_aggregate(vq, zio);
817 if (aio != NULL)
818 zio = aio;
819 else
820 vdev_queue_io_remove(vq, zio);
821
822 /*
823 * If the I/O is or was optional and therefore has no data, we need to
824 * simply discard it. We need to drop the vdev queue's lock to avoid a
825 * deadlock that we could encounter since this I/O will complete
826 * immediately.
827 */
828 if (zio->io_flags & ZIO_FLAG_NODATA) {
829 mutex_exit(&vq->vq_lock);
830 zio_vdev_io_bypass(zio);
831 zio_execute(zio);
832 mutex_enter(&vq->vq_lock);
833 goto again;
834 }
835
836 vdev_queue_pending_add(vq, zio);
837 vq->vq_last_offset = zio->io_offset;
838
839 return (zio);
840 }
841
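/*
 * Queue an incoming zio and return the next i/o that should be issued to
 * the device, if any. I/Os flagged ZIO_FLAG_DONT_QUEUE bypass the queue
 * entirely; aggregated i/os are issued from here via zio_nowait() rather
 * than being handed back to the caller.
 */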
842 zio_t *
843 vdev_queue_io(zio_t *zio)
844 {
845 vdev_queue_t *vq = &zio->io_vd->vdev_queue;
846 zio_t *nio;
847
848 if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
849 return (zio);
850
851 /*
	 * Children i/os inherit their parent's priority, which might
853 * not match the child's i/o type. Fix it up here.
854 */
855 if (zio->io_type == ZIO_TYPE_READ) {
856 if (zio->io_priority != ZIO_PRIORITY_SYNC_READ &&
857 zio->io_priority != ZIO_PRIORITY_ASYNC_READ &&
858 zio->io_priority != ZIO_PRIORITY_SCRUB)
859 zio->io_priority = ZIO_PRIORITY_ASYNC_READ;
860 } else {
861 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
862 if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
863 zio->io_priority != ZIO_PRIORITY_ASYNC_WRITE)
864 zio->io_priority = ZIO_PRIORITY_ASYNC_WRITE;
865 }
866
867 zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;
868
869 mutex_enter(&vq->vq_lock);
870 zio->io_timestamp = gethrtime();
871 vdev_queue_io_add(vq, zio);
872 nio = vdev_queue_io_to_issue(vq);
873 mutex_exit(&vq->vq_lock);
874
875 if (nio == NULL)
876 return (NULL);
877
878 if (nio->io_done == vdev_queue_agg_io_done) {
879 zio_nowait(nio);
880 return (NULL);
881 }
882
883 return (nio);
884 }
885
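/*
 * Called when an i/o issued from this queue completes: remove it from the
 * active tree and accounting, then issue as many queued i/os as the limits
 * now permit.
 */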
886 void
887 vdev_queue_io_done(zio_t *zio)
888 {
889 vdev_queue_t *vq = &zio->io_vd->vdev_queue;
890 zio_t *nio;
891
892 mutex_enter(&vq->vq_lock);
893
894 vdev_queue_pending_remove(vq, zio);
895
896 vq->vq_io_complete_ts = gethrtime();
897
898 while ((nio = vdev_queue_io_to_issue(vq)) != NULL) {
899 mutex_exit(&vq->vq_lock);
900 if (nio->io_done == vdev_queue_agg_io_done) {
901 zio_nowait(nio);
902 } else {
903 zio_vdev_io_reissue(nio);
904 zio_execute(nio);
905 }
906 mutex_enter(&vq->vq_lock);
907 }
908
909 mutex_exit(&vq->vq_lock);
910 }
911
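/*
 * Resolve a numeric per-vdev queue property. If a CoS (class of service)
 * descriptor is attached to the queue, its non-zero value takes precedence;
 * otherwise fall back to the value stored in the queue itself.
 */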
912 uint64_t
913 vdev_queue_get_prop_uint64(vdev_queue_t *vq, vdev_prop_t p)
914 {
915 uint64_t val = 0;
916 int zprio = 0;
917 cos_t *cos = vq->vq_cos;
918
919 switch (p) {
920 case VDEV_PROP_READ_MINACTIVE:
921 case VDEV_PROP_AREAD_MINACTIVE:
922 case VDEV_PROP_WRITE_MINACTIVE:
923 case VDEV_PROP_AWRITE_MINACTIVE:
924 case VDEV_PROP_SCRUB_MINACTIVE:
925 case VDEV_PROP_RESILVER_MINACTIVE:
926 zprio = VDEV_PROP_TO_ZIO_PRIO_MIN(p);
927 ASSERT(ZIO_PRIORITY_QUEUEABLE_VALID(zprio));
928 if (vq->vq_cos != NULL) {
929 cos_prop_t p = COS_ZIO_PRIO_TO_PROP_MIN(zprio);
930 ASSERT(COS_PROP_MIN_VALID(p));
931 val = cos_get_prop_uint64(vq->vq_cos, p);
932 }
933 if (val == 0)
934 val = vq->vq_class[zprio].vqc_min_active;
935 break;
936 case VDEV_PROP_READ_MAXACTIVE:
937 case VDEV_PROP_AREAD_MAXACTIVE:
938 case VDEV_PROP_WRITE_MAXACTIVE:
939 case VDEV_PROP_AWRITE_MAXACTIVE:
940 case VDEV_PROP_SCRUB_MAXACTIVE:
941 case VDEV_PROP_RESILVER_MAXACTIVE:
942 zprio = VDEV_PROP_TO_ZIO_PRIO_MAX(p);
943 ASSERT(ZIO_PRIORITY_QUEUEABLE_VALID(zprio));
944 if (vq->vq_cos != NULL) {
945 cos_prop_t p = COS_ZIO_PRIO_TO_PROP_MAX(zprio);
946 ASSERT(COS_PROP_MAX_VALID(p));
947 val = cos_get_prop_uint64(vq->vq_cos, p);
948 }
949 if (val == 0)
950 val = vq->vq_class[zprio].vqc_max_active;
951 break;
952 case VDEV_PROP_PREFERRED_READ:
953 if (vq->vq_cos != NULL)
954 val = cos_get_prop_uint64(vq->vq_cos,
955 COS_PROP_PREFERRED_READ);
956 if (val == 0)
957 val = vq->vq_preferred_read;
958 break;
959 default:
960 panic("Non-numeric property requested\n");
961 return (0);
962 }
963
964 VERIFY(cos == vq->vq_cos);
965
966 return (val);
967 }