/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 */

#ifndef _SYS_METASLAB_IMPL_H
#define _SYS_METASLAB_IMPL_H

#include <sys/metaslab.h>
#include <sys/space_map.h>
#include <sys/range_tree.h>
#include <sys/vdev.h>
#include <sys/txg.h>
#include <sys/avl.h>

#ifdef  __cplusplus
extern "C" {
#endif

/*
 * Metaslab allocation tracing record.
 */
typedef struct metaslab_alloc_trace {
        list_node_t                     mat_list_node;
        metaslab_group_t                *mat_mg;
        metaslab_t                      *mat_msp;
        uint64_t                        mat_size;
        uint64_t                        mat_weight;
        uint32_t                        mat_dva_id;
        uint64_t                        mat_offset;
        int                             mat_allocator;
} metaslab_alloc_trace_t;

/*
 * Used by the metaslab allocation tracing facility to indicate
 * error conditions. These errors are stored to the offset member
 * of the metaslab_alloc_trace_t record and displayed by mdb.
 */
typedef enum trace_alloc_type {
        TRACE_ALLOC_FAILURE     = -1ULL,
        TRACE_TOO_SMALL         = -2ULL,
        TRACE_FORCE_GANG        = -3ULL,
        TRACE_NOT_ALLOCATABLE   = -4ULL,
        TRACE_GROUP_FAILURE     = -5ULL,
        TRACE_ENOSPC            = -6ULL,
        TRACE_CONDENSING        = -7ULL,
        TRACE_VDEV_ERROR        = -8ULL,
        TRACE_INITIALIZING      = -9ULL
} trace_alloc_type_t;
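
/*
 * A minimal illustrative sketch (not part of the interface): on an
 * allocation failure the tracing code stores one of the
 * trace_alloc_type_t values in mat_offset in place of a real device
 * offset.  For example, for a request too large for any free segment
 * (mg, msp and psize are illustrative caller context):
 *
 *	mat->mat_mg = mg;
 *	mat->mat_msp = msp;
 *	mat->mat_size = psize;
 *	mat->mat_weight = msp->ms_weight;
 *	mat->mat_offset = TRACE_TOO_SMALL;
 *
 * mdb recognizes these sentinel offsets and displays them symbolically
 * when walking the trace list.
 */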

#define METASLAB_WEIGHT_PRIMARY         (1ULL << 63)
#define METASLAB_WEIGHT_SECONDARY       (1ULL << 62)
#define METASLAB_WEIGHT_CLAIM           (1ULL << 61)
#define METASLAB_WEIGHT_TYPE            (1ULL << 60)
#define METASLAB_ACTIVE_MASK            \
        (METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY | \
        METASLAB_WEIGHT_CLAIM)

/*
 * The metaslab weight is used to encode the amount of free space in a
 * metaslab, such that the "best" metaslab appears first when sorting the
 * metaslabs by weight. The weight (and therefore the "best" metaslab) can
 * be determined in two different ways: by computing a weighted sum of all
 * the free space in the metaslab (a space-based weight) or by counting only
 * the free segments of the largest size (a segment-based weight). We prefer
 * the segment-based weight because it reflects how the free space is
 * composed, but we cannot always use it -- legacy pools do not have the
 * space map histogram information necessary to determine the largest
 * contiguous regions. Pools that have the space map histogram determine
 * the segment weight by looking at each bucket in the histogram and
 * determining the free space whose size in bytes is in the range:
 *      [2^i, 2^(i+1))
 * We then encode the largest index, i, that contains free regions into the
 * segment-based weight.
 *
 * Space-based weight:
 *
 *      64      56      48      40      32      24      16      8       0
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 *      |PSC1|                  weighted-free space                     |
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 *
 *      PS - indicates primary and secondary activation
 *      C - indicates activation for claimed block zio
 *      space - the fragmentation-weighted space
 *
 * Segment-based weight:
 *
 *      64      56      48      40      32      24      16      8       0
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 *      |PSC0| idx|            count of segments in region              |
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 *
 *      PS - indicates primary and secondary activation
 *      C - indicates activation for claimed block zio
 *      idx - index for the highest bucket in the histogram
 *      count - number of segments in the specified bucket
 */
#define WEIGHT_GET_ACTIVE(weight)               BF64_GET((weight), 61, 3)
#define WEIGHT_SET_ACTIVE(weight, x)            BF64_SET((weight), 61, 3, x)

#define WEIGHT_IS_SPACEBASED(weight)            \
        ((weight) == 0 || BF64_GET((weight), 60, 1))
#define WEIGHT_SET_SPACEBASED(weight)           BF64_SET((weight), 60, 1, 1)

/*
 * These macros are only applicable to segment-based weighting.
 */
#define WEIGHT_GET_INDEX(weight)                BF64_GET((weight), 54, 6)
#define WEIGHT_SET_INDEX(weight, x)             BF64_SET((weight), 54, 6, x)
#define WEIGHT_GET_COUNT(weight)                BF64_GET((weight), 0, 54)
#define WEIGHT_SET_COUNT(weight, x)             BF64_SET((weight), 0, 54, x)
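
/*
 * A minimal sketch of how the weight macros compose (illustrative only,
 * not part of the interface): building a segment-based weight for a
 * metaslab whose largest free segments fall in the 2^17 (128K) histogram
 * bucket, then decoding it.
 *
 *	uint64_t weight = 0;
 *	WEIGHT_SET_INDEX(weight, 17);		-- 128K <= segment < 256K
 *	WEIGHT_SET_COUNT(weight, 123);		-- 123 such segments
 *
 *	ASSERT(!WEIGHT_IS_SPACEBASED(weight));	-- bit 60 is clear
 *	ASSERT3U(WEIGHT_GET_INDEX(weight), ==, 17);
 *	ASSERT3U(WEIGHT_GET_COUNT(weight), ==, 123);
 *
 * A space-based weight instead sets bit 60 via WEIGHT_SET_SPACEBASED()
 * and stores the fragmentation-weighted free space in the low 60 bits.
 */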

/*
 * A metaslab class encompasses a category of allocatable top-level vdevs.
 * Each top-level vdev is associated with a metaslab group which defines
 * the allocatable region for that vdev. Examples of these categories include
 * "normal" for data block allocations (i.e. main pool allocations) or "log"
 * for allocations designated for intent log devices (i.e. slog devices).
 * When a block allocation is requested from the SPA it is associated with a
 * metaslab_class_t, and only top-level vdevs (i.e. metaslab groups) belonging
 * to the class can be used to satisfy that request. Allocations are done
 * by traversing the metaslab groups that are linked off of the mc_rotor field.
 * This rotor points to the next metaslab group where allocations will be
 * attempted. Allocating a block is a three-step process -- select the metaslab
 * group, select the metaslab, and then allocate the block. The metaslab
 * class defines the low-level block allocator that will be used as the
 * final step in allocation. These allocators are pluggable, allowing each
 * class to use a block allocator that best suits that class.
 */
struct metaslab_class {
        kmutex_t                mc_lock;
        spa_t                   *mc_spa;
        metaslab_group_t        *mc_rotor;
        metaslab_ops_t          *mc_ops;
        uint64_t                mc_aliquot;

        /*
         * Track the number of metaslab groups that have been initialized
         * and can accept allocations. An initialized metaslab group is
         * one that has been completely added to the config (i.e. we have
         * updated the MOS config and the space has been added to the pool).
         */
        uint64_t                mc_groups;

        /*
         * Toggle to enable/disable the allocation throttle.
         */
        boolean_t               mc_alloc_throttle_enabled;

        /*
         * The allocation throttle works on a reservation system. Whenever
         * an asynchronous zio wants to perform an allocation it must
         * first reserve the number of blocks that it wants to allocate.
         * If there aren't sufficient slots available for the pending zio
         * then that I/O is throttled until more slots free up. The current
         * number of reserved allocations is maintained by the mc_alloc_slots
         * refcount. The mc_alloc_max_slots value determines the maximum
         * number of allocations that the system allows. Gang blocks are
         * allowed to reserve slots even if we've reached the maximum
         * number of allocations allowed.
         */
        uint64_t                *mc_alloc_max_slots;
        zfs_refcount_t          *mc_alloc_slots;

        uint64_t                mc_alloc_groups; /* # of allocatable groups */

        uint64_t                mc_alloc;       /* total allocated space */
        uint64_t                mc_deferred;    /* total deferred frees */
        uint64_t                mc_space;       /* total space (alloc + free) */
        uint64_t                mc_dspace;      /* total deflated space */
        uint64_t                mc_histogram[RANGE_TREE_HISTOGRAM_SIZE];
};
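
/*
 * A minimal sketch of the reservation check described above, assuming a
 * single allocator (index 0) and using the zfs_refcount interfaces; the
 * real logic lives in metaslab.c and additionally lets gang blocks
 * reserve past the limit.  The function name is hypothetical:
 *
 *	static boolean_t
 *	example_throttle_reserve(metaslab_class_t *mc, int slots, zio_t *zio)
 *	{
 *		uint64_t max = mc->mc_alloc_max_slots[0];
 *
 *		if (zfs_refcount_count(&mc->mc_alloc_slots[0]) + slots <= max) {
 *			-- take one reference per reserved slot
 *			for (int d = 0; d < slots; d++)
 *				zfs_refcount_add(&mc->mc_alloc_slots[0], zio);
 *			return (B_TRUE);
 *		}
 *		return (B_FALSE);	-- caller must throttle the zio
 *	}
 */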

/*
 * Metaslab groups encapsulate all the allocatable regions (i.e. metaslabs)
 * of a top-level vdev. They are linked together to form a circular linked
 * list and can belong to only one metaslab class. Metaslab groups may become
 * ineligible for allocations for a number of reasons such as limited free
 * space, fragmentation, or going offline. When this happens the allocator will
 * simply find the next metaslab group in the linked list and attempt
 * to allocate from that group instead.
 */
struct metaslab_group {
        kmutex_t                mg_lock;
        metaslab_t              **mg_primaries;
        metaslab_t              **mg_secondaries;
        avl_tree_t              mg_metaslab_tree;
        uint64_t                mg_aliquot;
        boolean_t               mg_allocatable;         /* can we allocate? */
        uint64_t                mg_ms_ready;

        /*
         * A metaslab group is considered to be initialized only after
         * we have updated the MOS config and added the space to the pool.
         * We only allow allocation attempts to a metaslab group if it
         * has been initialized.
         */
        boolean_t               mg_initialized;

        uint64_t                mg_free_capacity;       /* percentage free */
        int64_t                 mg_bias;
        int64_t                 mg_activation_count;
        metaslab_class_t        *mg_class;
        vdev_t                  *mg_vd;
        taskq_t                 *mg_taskq;
        metaslab_group_t        *mg_prev;
        metaslab_group_t        *mg_next;

        /*
         * In order for the allocation throttle to function properly, we cannot
         * have too many IOs going to each disk by default; the throttle
         * operates by allocating more work to disks that finish quickly, so
         * allocating larger chunks to each disk reduces its effectiveness.
         * However, if the number of IOs going to each allocator is too small,
         * we will not perform proper aggregation at the vdev_queue layer,
         * also resulting in decreased performance. Therefore, we will use a
         * ramp-up strategy.
         *
         * Each allocator in each metaslab group has a current queue depth
         * (mg_alloc_queue_depth[allocator]) and a current max queue depth
         * (mg_cur_max_alloc_queue_depth[allocator]), and each metaslab group
         * has an absolute max queue depth (mg_max_alloc_queue_depth).  We
         * add IOs to an allocator until the mg_alloc_queue_depth for that
         * allocator hits the cur_max. Every time an IO completes for a given
         * allocator on a given metaslab group, we increment its cur_max until
         * it reaches mg_max_alloc_queue_depth. The cur_max resets every txg to
         * help protect against disks that decrease in performance over time.
         *
         * It's possible for an allocator to handle more allocations than
         * its max. This can occur when gang blocks are required or when other
         * groups are unable to handle their share of allocations.
         */
        uint64_t                mg_max_alloc_queue_depth;
        uint64_t                *mg_cur_max_alloc_queue_depth;
        zfs_refcount_t          *mg_alloc_queue_depth;
        int                     mg_allocators;
        /*
         * A metaslab group that can no longer allocate the minimum block
         * size will set mg_no_free_space. Once a metaslab group is out
         * of space then its share of work must be distributed to other
         * groups.
         */
        boolean_t               mg_no_free_space;

        uint64_t                mg_allocations;
        uint64_t                mg_failed_allocations;
        uint64_t                mg_fragmentation;
        uint64_t                mg_histogram[RANGE_TREE_HISTOGRAM_SIZE];

        int                     mg_ms_initializing;
        boolean_t               mg_initialize_updating;
        kmutex_t                mg_ms_initialize_lock;
        kcondvar_t              mg_ms_initialize_cv;
};
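
/*
 * A minimal sketch of the ramp-up described above (illustrative only;
 * the real logic lives in metaslab.c).  When an IO completes, the
 * per-allocator soft limit is raised toward the group-wide hard limit,
 * and new IOs are only queued to an allocator while its current queue
 * depth is below that soft limit.  The function names are hypothetical:
 *
 *	static void
 *	example_io_done(metaslab_group_t *mg, int allocator)
 *	{
 *		uint64_t *cur = &mg->mg_cur_max_alloc_queue_depth[allocator];
 *
 *		if (*cur < mg->mg_max_alloc_queue_depth)
 *			(*cur)++;	-- disk kept up; allow more next time
 *	}
 *
 *	static boolean_t
 *	example_can_queue(metaslab_group_t *mg, int allocator)
 *	{
 *		return (zfs_refcount_count(
 *		    &mg->mg_alloc_queue_depth[allocator]) <
 *		    mg->mg_cur_max_alloc_queue_depth[allocator]);
 *	}
 */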

/*
 * This value defines the number of elements in the ms_lbas array. The value
 * of 64 was chosen as it covers all power-of-2 buckets up to UINT64_MAX.
 * This is the equivalent of highbit(UINT64_MAX).
 */
#define MAX_LBAS        64

/*
 * Each metaslab maintains a set of in-core trees to track metaslab
 * operations.  The in-core free tree (ms_allocatable) contains the list of
 * free segments which are eligible for allocation.  As blocks are
 * allocated, the allocated segments are removed from the ms_allocatable and
 * added to a per txg allocation tree (ms_allocating).  As blocks are
 * freed, they are added to the free tree (ms_freeing).  These trees
 * allow us to process all allocations and frees in syncing context
 * where it is safe to update the on-disk space maps.  An additional set
 * of in-core trees is maintained to track deferred frees
 * (ms_defer).  Once a block is freed it will move from the
 * ms_freed to the ms_defer tree.  A deferred free means that a block
 * has been freed but cannot be used by the pool until TXG_DEFER_SIZE
 * transaction groups later.  For example, a block that is freed in txg
 * 50 will not be available for reallocation until txg 52 (50 +
 * TXG_DEFER_SIZE).  This provides a safety net for uberblock rollback.
 * A pool could safely be rolled back TXG_DEFER_SIZE transaction
 * groups with the guarantee that no block has been reallocated.
 *
 * The simplified transition diagram looks like this:
 *
 *
 *      ALLOCATE
 *         |
 *         V
 *    free segment (ms_allocatable) -> ms_allocating[4] -> (write to space map)
 *         ^
 *         |                        ms_freeing <--- FREE
 *         |                             |
 *         |                             v
 *         |                         ms_freed
 *         |                             |
 *         +-------- ms_defer[2] <-------+-------> (write to space map)
 *
 *
 * Each metaslab's space is tracked in a single space map in the MOS,
 * which is only updated in syncing context.  Each time we sync a txg,
 * we append the allocs and frees from that txg to the space map.  The
 * pool space is only updated once all metaslabs have finished syncing.
 *
 * To load the in-core free tree we read the space map from disk.  This
 * object contains a series of alloc and free records that are combined
 * to make up the list of all free segments in this metaslab.  These
 * segments are represented in-core by the ms_allocatable and are stored
 * in an AVL tree.
 *
 * As the space map grows (as a result of the appends) it will
 * eventually become space-inefficient.  When the metaslab's in-core
 * free tree is zfs_condense_pct/100 times the size of the minimal
 * on-disk representation, we rewrite it in its minimized form.  If a
 * metaslab needs to condense then we must set the ms_condensing flag to
 * ensure that allocations are not performed on the metaslab that is
 * being written.
 */
struct metaslab {
        kmutex_t        ms_lock;
        kmutex_t        ms_sync_lock;
        kcondvar_t      ms_load_cv;
        space_map_t     *ms_sm;
        uint64_t        ms_id;
        uint64_t        ms_start;
        uint64_t        ms_size;
        uint64_t        ms_fragmentation;

        range_tree_t    *ms_allocating[TXG_SIZE];
        range_tree_t    *ms_allocatable;

        /*
         * The following range trees are accessed only from syncing context.
         * The ms_free* trees only have entries while syncing, and are empty
         * between syncs.
         */
        range_tree_t    *ms_freeing;    /* to free this syncing txg */
        range_tree_t    *ms_freed;      /* already freed this syncing txg */
        range_tree_t    *ms_defer[TXG_DEFER_SIZE];
        range_tree_t    *ms_checkpointing; /* to add to the checkpoint */

        boolean_t       ms_condensing;  /* condensing? */
        boolean_t       ms_condense_wanted;
        uint64_t        ms_condense_checked_txg;

        uint64_t        ms_initializing; /* leaves initializing this ms */

        /*
         * We must always hold the ms_lock when modifying ms_loaded
         * and ms_loading.
         */
        boolean_t       ms_loaded;
        boolean_t       ms_loading;

        int64_t         ms_deferspace;  /* sum of ms_defer[] space      */
        uint64_t        ms_weight;      /* weight vs. others in group   */
        uint64_t        ms_activation_weight;   /* activation weight    */

        /*
         * Tracks when a metaslab is selected for loading or allocation.
         * We use this value to determine how long the metaslab should
         * stay cached.
         */
        uint64_t        ms_selected_txg;

        uint64_t        ms_alloc_txg;   /* last successful alloc (debug only) */
        uint64_t        ms_max_size;    /* maximum allocatable size     */

        /*
         * -1 if it's not active in an allocator, otherwise set to the allocator
         * this metaslab is active for.
         */
        int             ms_allocator;
        boolean_t       ms_primary; /* Only valid if ms_allocator is not -1 */

        /*
         * The metaslab block allocators can optionally use a size-ordered
         * range tree and/or an array of LBAs. Not all allocators use
         * this functionality. The ms_allocatable_by_size should always
         * contain the same number of segments as the ms_allocatable. The
         * only difference is that the ms_allocatable_by_size is ordered by
         * segment sizes.
         */
        avl_tree_t      ms_allocatable_by_size;
        uint64_t        ms_lbas[MAX_LBAS];

        metaslab_group_t *ms_group;     /* metaslab group               */
        avl_node_t      ms_group_node;  /* node in metaslab group tree  */
        txg_node_t      ms_txg_node;    /* per-txg dirty metaslab links */

        boolean_t       ms_new;
};
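
/*
 * A minimal sketch of the tree transitions described in the comment above
 * struct metaslab, for a single segment (illustrative only; metaslab_sync()
 * and metaslab_sync_done() in metaslab.c implement the real flow, with
 * additional locking and space map bookkeeping):
 *
 *	-- allocation in txg T: the segment leaves the free tree
 *	range_tree_remove(msp->ms_allocatable, offset, size);
 *	range_tree_add(msp->ms_allocating[T & TXG_MASK], offset, size);
 *
 *	-- free in syncing context for txg T
 *	range_tree_add(msp->ms_freeing, offset, size);
 *	(appended to the space map, then moved to ms_freed and later to
 *	 ms_defer[T % TXG_DEFER_SIZE])
 *
 *	-- TXG_DEFER_SIZE txgs later, the deferred frees become
 *	-- allocatable again
 *	range_tree_vacate(msp->ms_defer[T % TXG_DEFER_SIZE],
 *	    range_tree_add, msp->ms_allocatable);
 */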

#ifdef  __cplusplus
}
#endif

#endif  /* _SYS_METASLAB_IMPL_H */