1 /*
   2  * This file and its contents are supplied under the terms of the
   3  * Common Development and Distribution License ("CDDL"), version 1.0.
   4  * You may only use this file in accordance with the terms of version
   5  * 1.0 of the CDDL.
   6  *
   7  * A full copy of the text of the CDDL should have accompanied this
   8  * source.  A copy of the CDDL is also available via the Internet at
   9  * http://www.illumos.org/license/CDDL.
  10  */
  11 
  12 /*
  13  * Copyright 2013, Joyent, Inc. All rights reserved.
  14  */
  15 
  16 /*
  17  * The ZFS/Zone I/O throttle and scheduler attempts to ensure fair access to
  18  * ZFS I/O resources for each zone.
  19  *
   20  * I/O contention can be a major pain point on a multi-tenant system. A single
  21  * zone can issue a stream of I/O operations, usually synchronous writes, which
  22  * disrupt I/O performance for all other zones. This problem is further
  23  * exacerbated by ZFS, which buffers all asynchronous writes in a single TXG,
  24  * a set of blocks which are atomically synced to disk. The process of
  25  * syncing a TXG can occupy all of a device's I/O bandwidth, thereby starving
  26  * out any pending read operations.
  27  *
   28  * There are two facets to this capability: the throttle and the scheduler.
  29  *
  30  * Throttle
  31  *
  32  * The requirements on the throttle are:
  33  *
 
1124         zfs_disk_rlastupdate = zp->io_dispatched;
1125         mutex_exit(&zfs_disk_lock);
1126 
1127         zone_rele(zonep);
1128 }
1129 
 1130 /*
 1131  * Called from vdev_disk_io_done when an IO completes.
 1132  * Increment our counter for zone ops.
 1133  * Calculate the IO latency avg. for this zone.
      *
      * NOTE(review): zone_find_by_id() takes a hold on the zone, but the
      * io_dispatched == 0 early return below exits without zone_rele(),
      * leaking that hold. Checking io_dispatched before looking up the
      * zone (as the later revision does) avoids the leak — confirm.
 1134  */
1135 void
1136 zfs_zone_zio_done(zio_t *zp)
1137 {
1138         zone_t  *zonep;
1139         hrtime_t now, unow, udelta;
1140 
1141         if (zp->io_type == ZIO_TYPE_IOCTL)
1142                 return;
1143 
1144         if ((zonep = zone_find_by_id(zp->io_zoneid)) == NULL)
1145                 return;
1146 
1147         if (zp->io_dispatched == 0)
1148                 return;
1149 
1150         now = gethrtime();
1151         unow = NANO_TO_MICRO(now);
1152         udelta = unow - NANO_TO_MICRO(zp->io_dispatched);
1153 
1154         mutex_enter(&zfs_disk_lock);
1155         zfs_disk_rcnt--;
1156         zfs_disk_rtime += (now - zfs_disk_rlastupdate);
1157         zfs_disk_rlastupdate = now;
1158 
1159         if (udelta > zfs_zone_laggard_threshold)
1160                 zfs_disk_last_laggard = unow;
1161 
1162         mutex_exit(&zfs_disk_lock);
1163 
1164         if (zfs_zone_delay_enable) {
1165                 mutex_enter(&zonep->zone_stg_io_lock);
1166                 add_iop(zonep, unow, zp->io_type == ZIO_TYPE_READ ?
1167                     ZFS_ZONE_IOP_READ : ZFS_ZONE_IOP_WRITE, udelta);
 
 | 
   1 /*
   2  * This file and its contents are supplied under the terms of the
   3  * Common Development and Distribution License ("CDDL"), version 1.0.
   4  * You may only use this file in accordance with the terms of version
   5  * 1.0 of the CDDL.
   6  *
   7  * A full copy of the text of the CDDL should have accompanied this
   8  * source.  A copy of the CDDL is also available via the Internet at
   9  * http://www.illumos.org/license/CDDL.
  10  */
  11 
  12 /*
  13  * Copyright 2014, Joyent, Inc. All rights reserved.
  14  */
  15 
  16 /*
  17  * The ZFS/Zone I/O throttle and scheduler attempts to ensure fair access to
  18  * ZFS I/O resources for each zone.
  19  *
   20  * I/O contention can be a major pain point on a multi-tenant system. A single
  21  * zone can issue a stream of I/O operations, usually synchronous writes, which
  22  * disrupt I/O performance for all other zones. This problem is further
  23  * exacerbated by ZFS, which buffers all asynchronous writes in a single TXG,
  24  * a set of blocks which are atomically synced to disk. The process of
  25  * syncing a TXG can occupy all of a device's I/O bandwidth, thereby starving
  26  * out any pending read operations.
  27  *
   28  * There are two facets to this capability: the throttle and the scheduler.
  29  *
  30  * Throttle
  31  *
  32  * The requirements on the throttle are:
  33  *
 
1124         zfs_disk_rlastupdate = zp->io_dispatched;
1125         mutex_exit(&zfs_disk_lock);
1126 
1127         zone_rele(zonep);
1128 }
1129 
1130 /*
1131  * Called from vdev_disk_io_done when an IO completes.
1132  * Increment our counter for zone ops.
1133  * Calculate the IO latency avg. for this zone.
1134  */
1135 void
1136 zfs_zone_zio_done(zio_t *zp)
1137 {
1138         zone_t  *zonep;
1139         hrtime_t now, unow, udelta;
1140 
1141         if (zp->io_type == ZIO_TYPE_IOCTL)
1142                 return;
1143 
1144         if (zp->io_dispatched == 0)
1145                 return;
1146 
1147         if ((zonep = zone_find_by_id(zp->io_zoneid)) == NULL)
1148                 return;
1149 
1150         now = gethrtime();
1151         unow = NANO_TO_MICRO(now);
1152         udelta = unow - NANO_TO_MICRO(zp->io_dispatched);
1153 
1154         mutex_enter(&zfs_disk_lock);
1155         zfs_disk_rcnt--;
1156         zfs_disk_rtime += (now - zfs_disk_rlastupdate);
1157         zfs_disk_rlastupdate = now;
1158 
1159         if (udelta > zfs_zone_laggard_threshold)
1160                 zfs_disk_last_laggard = unow;
1161 
1162         mutex_exit(&zfs_disk_lock);
1163 
1164         if (zfs_zone_delay_enable) {
1165                 mutex_enter(&zonep->zone_stg_io_lock);
1166                 add_iop(zonep, unow, zp->io_type == ZIO_TYPE_READ ?
1167                     ZFS_ZONE_IOP_READ : ZFS_ZONE_IOP_WRITE, udelta);
 
 |