/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc. All rights reserved.
 */

#ifndef _SYS_METASLAB_IMPL_H
#define	_SYS_METASLAB_IMPL_H

#include <sys/metaslab.h>
#include <sys/space_map.h>
#include <sys/range_tree.h>
#include <sys/vdev.h>
#include <sys/txg.h>
#include <sys/avl.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Metaslab allocation tracing record.
 */
typedef struct metaslab_alloc_trace {
	list_node_t		mat_list_node;
	metaslab_group_t	*mat_mg;
	metaslab_t		*mat_msp;
	uint64_t		mat_size;
	uint64_t		mat_weight;
	uint32_t		mat_dva_id;
	uint64_t		mat_offset;
} metaslab_alloc_trace_t;

/*
 * Used by the metaslab allocation tracing facility to indicate
 * error conditions. These errors are stored to the offset member
 * of the metaslab_alloc_trace_t record and displayed by mdb.
 */
typedef enum trace_alloc_type {
	TRACE_ALLOC_FAILURE	= -1ULL,
	TRACE_TOO_SMALL		= -2ULL,
	TRACE_FORCE_GANG	= -3ULL,
	TRACE_NOT_ALLOCATABLE	= -4ULL,
	TRACE_GROUP_FAILURE	= -5ULL,
	TRACE_ENOSPC		= -6ULL,
	TRACE_CONDENSING	= -7ULL,
	TRACE_VDEV_ERROR	= -8ULL
} trace_alloc_type_t;
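
/*
 * Illustrative sketch (not part of the interface): the error codes above
 * occupy the top eight values of the uint64_t range, so a consumer of a
 * metaslab_alloc_trace_t can tell an error code apart from a real device
 * offset by comparing mat_offset against TRACE_VDEV_ERROR, the smallest
 * code. The helper name below is hypothetical.
 */
#if 0	/* example only; not compiled */
static boolean_t
mat_offset_is_error(const metaslab_alloc_trace_t *mat)
{
	/* -8ULL through -1ULL are trace_alloc_type_t error codes */
	return (mat->mat_offset >= (uint64_t)TRACE_VDEV_ERROR);
}
#endif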

#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
#define	METASLAB_WEIGHT_TYPE		(1ULL << 61)
#define	METASLAB_ACTIVE_MASK		\
	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)

/*
 * The metaslab weight is used to encode the amount of free space in a
 * metaslab, such that the "best" metaslab appears first when sorting the
 * metaslabs by weight. The weight (and therefore the "best" metaslab) can
 * be determined in two different ways: by computing a weighted sum of all
 * the free space in the metaslab (a space-based weight) or by counting only
 * the free segments of the largest size (a segment-based weight). We prefer
 * the segment-based weight because it reflects how the free space is
 * comprised, but we cannot always use it -- legacy pools do not have the
 * space map histogram information necessary to determine the largest
 * contiguous regions. Pools that have the space map histogram determine
 * the segment weight by looking at each bucket in the histogram and
 * determining the free space whose size in bytes is in the range:
 *	[2^i, 2^(i+1))
 * We then encode the largest index, i, that contains regions into the
 * segment-weighted value.
 *
 * Space-based weight:
 *
 *	 64      56      48      40      32      24      16      8       0
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 *	|PS1|                   weighted-free space                     |
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 *
 *	PS - indicates primary and secondary activation
 *	space - the fragmentation-weighted space
 *
 * Segment-based weight:
 *
 *	 64      56      48      40      32      24      16      8       0
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 *	|PS0| idx|             count of segments in region              |
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 *
 *	PS - indicates primary and secondary activation
 *	idx - index for the highest bucket in the histogram
 *	count - number of segments in the specified bucket
 */
#define	WEIGHT_GET_ACTIVE(weight)	BF64_GET((weight), 62, 2)
#define	WEIGHT_SET_ACTIVE(weight, x)	BF64_SET((weight), 62, 2, x)

#define	WEIGHT_IS_SPACEBASED(weight)	\
	((weight) == 0 || BF64_GET((weight), 61, 1))
#define	WEIGHT_SET_SPACEBASED(weight)	BF64_SET((weight), 61, 1, 1)

/*
 * These macros are only applicable to segment-based weighting.
 */
#define	WEIGHT_GET_INDEX(weight)	BF64_GET((weight), 55, 6)
#define	WEIGHT_SET_INDEX(weight, x)	BF64_SET((weight), 55, 6, x)
#define	WEIGHT_GET_COUNT(weight)	BF64_GET((weight), 0, 55)
#define	WEIGHT_SET_COUNT(weight, x)	BF64_SET((weight), 0, 55, x)
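
/*
 * Illustrative sketch (not part of the interface): decoding a weight with
 * the accessors above. For a segment-based weight, the encoded index is the
 * histogram bucket of the largest free segments, so (1ULL << idx) is a lower
 * bound on the size of the largest contiguous free region, and
 * WEIGHT_GET_COUNT() gives the number of segments in that bucket. The
 * function name below is hypothetical.
 */
#if 0	/* example only; not compiled */
static uint64_t
weight_largest_segment_floor(uint64_t weight)
{
	if (WEIGHT_IS_SPACEBASED(weight))
		return (0);	/* no segment-size information encoded */
	return (1ULL << WEIGHT_GET_INDEX(weight));
}
#endif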

/*
 * A metaslab class encompasses a category of allocatable top-level vdevs.
 * Each top-level vdev is associated with a metaslab group which defines
 * the allocatable region for that vdev. Examples of these categories include
 * "normal" for data block allocations (i.e. main pool allocations) or "log"
 * for allocations designated for intent log devices (i.e. slog devices).
 * When a block allocation is requested from the SPA it is associated with a
 * metaslab_class_t, and only top-level vdevs (i.e. metaslab groups) belonging
 * to the class can be used to satisfy that request. Allocations are done
 * by traversing the metaslab groups that are linked off of the mc_rotor field.
 * This rotor points to the next metaslab group where allocations will be
 * attempted. Allocating a block is a three-step process -- select the
 * metaslab group, select the metaslab, and then allocate the block. The
 * metaslab class defines the low-level block allocator that will be used as
 * the final step in allocation. These allocators are pluggable, allowing each
 * class to use a block allocator that best suits that class.
 */
struct metaslab_class {
	kmutex_t		mc_lock;
	spa_t			*mc_spa;
	metaslab_group_t	*mc_rotor;
	metaslab_ops_t		*mc_ops;
	uint64_t		mc_aliquot;

	/*
	 * Track the number of metaslab groups that have been initialized
	 * and can accept allocations. An initialized metaslab group is
	 * one that has been completely added to the config (i.e. we have
	 * updated the MOS config and the space has been added to the pool).
	 */
	uint64_t		mc_groups;

	/*
	 * Toggle to enable/disable the allocation throttle.
	 */
	boolean_t		mc_alloc_throttle_enabled;

	/*
	 * The allocation throttle works on a reservation system. Whenever
	 * an asynchronous zio wants to perform an allocation it must
	 * first reserve the number of blocks that it wants to allocate.
	 * If there aren't sufficient slots available for the pending zio
	 * then that I/O is throttled until more slots free up. The current
	 * number of reserved allocations is maintained by the mc_alloc_slots
	 * refcount. The mc_alloc_max_slots value determines the maximum
	 * number of allocations that the system allows. Gang blocks are
	 * allowed to reserve slots even if we've reached the maximum
	 * number of allocations allowed.
	 */
	uint64_t		mc_alloc_max_slots;
	refcount_t		mc_alloc_slots;

	uint64_t		mc_alloc_groups; /* # of allocatable groups */

	uint64_t		mc_alloc;	/* total allocated space */
	uint64_t		mc_deferred;	/* total deferred frees */
	uint64_t		mc_space;	/* total space (alloc + free) */
	uint64_t		mc_dspace;	/* total deflated space */
	uint64_t		mc_histogram[RANGE_TREE_HISTOGRAM_SIZE];

	kmutex_t		mc_alloc_lock;
	avl_tree_t		mc_alloc_tree;
};
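
/*
 * Illustrative sketch (not part of the interface): the reservation check
 * described above amounts to comparing the current count of reserved slots
 * against mc_alloc_max_slots before charging new ones. A minimal,
 * hypothetical version, assuming the refcount_count()/refcount_add_many()
 * interfaces from <sys/refcount.h>:
 */
#if 0	/* example only; not compiled */
static boolean_t
mc_try_reserve(metaslab_class_t *mc, uint64_t slots, void *tag,
    boolean_t is_gang)
{
	boolean_t granted = B_FALSE;

	mutex_enter(&mc->mc_lock);
	/* Gang blocks may reserve past the maximum; see comment above. */
	if (is_gang || (uint64_t)refcount_count(&mc->mc_alloc_slots) +
	    slots <= mc->mc_alloc_max_slots) {
		(void) refcount_add_many(&mc->mc_alloc_slots, slots, tag);
		granted = B_TRUE;
	}
	mutex_exit(&mc->mc_lock);
	return (granted);
}
#endif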

/*
 * Metaslab groups encapsulate all the allocatable regions (i.e. metaslabs)
 * of a top-level vdev. They are linked together to form a circular linked
 * list and can belong to only one metaslab class. Metaslab groups may become
 * ineligible for allocations for a number of reasons such as limited free
 * space, fragmentation, or going offline. When this happens the allocator will
 * simply find the next metaslab group in the linked list and attempt
 * to allocate from that group instead.
 */
struct metaslab_group {
	kmutex_t		mg_lock;
	avl_tree_t		mg_metaslab_tree;
	uint64_t		mg_aliquot;
	boolean_t		mg_allocatable;		/* can we allocate? */

	/*
	 * A metaslab group is considered to be initialized only after
	 * we have updated the MOS config and added the space to the pool.
	 * We only allow allocation attempts to a metaslab group if it
	 * has been initialized.
	 */
	boolean_t		mg_initialized;

	uint64_t		mg_free_capacity;	/* percentage free */
	int64_t			mg_bias;
	int64_t			mg_activation_count;
	metaslab_class_t	*mg_class;
	vdev_t			*mg_vd;
	taskq_t			*mg_taskq;
	metaslab_group_t	*mg_prev;
	metaslab_group_t	*mg_next;

	/*
	 * Each metaslab group can handle mg_max_alloc_queue_depth allocations
	 * which are tracked by mg_alloc_queue_depth. It's possible for a
	 * metaslab group to handle more allocations than its max. This
	 * can occur when gang blocks are required or when other groups
	 * are unable to handle their share of allocations.
	 */
	uint64_t		mg_max_alloc_queue_depth;
	refcount_t		mg_alloc_queue_depth;

	/*
	 * A metaslab group that can no longer allocate the minimum block
	 * size will set mg_no_free_space. Once a metaslab group is out
	 * of space then its share of work must be distributed to other
	 * groups.
	 */
	boolean_t		mg_no_free_space;

	uint64_t		mg_allocations;
	uint64_t		mg_failed_allocations;
	uint64_t		mg_fragmentation;
	uint64_t		mg_histogram[RANGE_TREE_HISTOGRAM_SIZE];
};
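
/*
 * Illustrative sketch (not part of the interface): rotor traversal over a
 * class's circular group list, skipping groups that are not yet initialized
 * or are currently ineligible, as described in the comments above. Locking,
 * bias, and aliquot handling are omitted; the function name is hypothetical.
 */
#if 0	/* example only; not compiled */
static metaslab_group_t *
mc_next_eligible_group(metaslab_class_t *mc)
{
	metaslab_group_t *start = mc->mc_rotor;
	metaslab_group_t *mg = start;

	do {
		if (mg->mg_initialized && mg->mg_allocatable &&
		    !mg->mg_no_free_space)
			return (mg);
		mg = mg->mg_next;	/* circular list wraps around */
	} while (mg != start);

	return (NULL);	/* no group in this class can satisfy the request */
}
#endif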

typedef struct {
	uint64_t	ts_birth;	/* TXG at which this trimset starts */
	range_tree_t	*ts_tree;	/* tree of extents in the trimset */
} metaslab_trimset_t;

/*
 * This value defines the number of elements in the ms_lbas array. The value
 * of 64 was chosen as it covers all power of 2 buckets up to UINT64_MAX.
 * This is the equivalent of highbit(UINT64_MAX).
 */
#define	MAX_LBAS	64

/*
 * Each metaslab maintains a set of in-core trees to track metaslab
 * operations. The in-core free tree (ms_tree) contains the list of
 * free segments which are eligible for allocation. As blocks are
 * allocated, the allocated segments are removed from the ms_tree and
 * added to a per txg allocation tree (ms_alloctree). This allows us to
 * process all allocations in syncing context where it is safe to update
 * the on-disk space maps. Frees are also processed in syncing context.
 * Most frees are generated from syncing context, and those that are not
 * are held in the spa_free_bplist for processing in syncing context.
 * An additional set of in-core trees is maintained to track deferred
 * frees (ms_defertree). Once a block is freed it will move from the
 * ms_freedtree to the ms_defertree. A deferred free means that a block
 * has been freed but cannot be used by the pool until TXG_DEFER_SIZE
 * transaction groups later. For example, a block that is freed in txg
 * 50 will not be available for reallocation until txg 52 (50 +
 * TXG_DEFER_SIZE). This provides a safety net for uberblock rollback.
 * A pool could safely be rolled back TXG_DEFER_SIZE transaction
 * groups with the guarantee that no block has been reallocated.
 *
 * The simplified transition diagram looks like this:
 *
 *
 *      ALLOCATE
 *         |
 *         V
 *    free segment (ms_tree) -----> ms_alloctree[4] ----> (write to space map)
 *         ^
 *         |                        ms_freeingtree <--- FREE
 *         |                              |
 *         |                              v
 *         |                        ms_freedtree
 *         |                              |
 *         +-------- ms_defertree[2] <----+---------> (write to space map)
 *
 *
 * Each metaslab's space is tracked in a single space map in the MOS,
 * which is only updated in syncing context. Each time we sync a txg,
 * we append the allocs and frees from that txg to the space map. The
 * pool space is only updated once all metaslabs have finished syncing.
 *
 * To load the in-core free tree we read the space map from disk. This
 * object contains a series of alloc and free records that are combined
 * to make up the list of all free segments in this metaslab. These
 * segments are represented in-core by the ms_tree and are stored in an
 * AVL tree.
 *
 * As the space map grows (as a result of the appends) it will
 * eventually become space-inefficient. When the metaslab's in-core
 * free tree is zfs_condense_pct/100 times the size of the minimal
 * on-disk representation, we rewrite it in its minimized form. If a
 * metaslab needs to condense then we must set the ms_condensing flag to
 * ensure that allocations are not performed on the metaslab that is
 * being written.
 */
struct metaslab {
	kmutex_t	ms_lock;
	kcondvar_t	ms_load_cv;
	space_map_t	*ms_sm;
	uint64_t	ms_id;
	uint64_t	ms_start;
	uint64_t	ms_size;
	uint64_t	ms_fragmentation;

	range_tree_t	*ms_alloctree[TXG_SIZE];
	range_tree_t	*ms_tree;

	metaslab_trimset_t *ms_cur_ts;	/* currently prepared trims */
	metaslab_trimset_t *ms_prev_ts;	/* previous (aging) trims */
	kcondvar_t	ms_trim_cv;
	metaslab_trimset_t *ms_trimming_ts;

	/*
	 * The following range trees are accessed only from syncing context.
	 * ms_free*tree only have entries while syncing, and are empty
	 * between syncs.
	 */
	range_tree_t	*ms_freeingtree; /* to free this syncing txg */
	range_tree_t	*ms_freedtree;	/* already freed this syncing txg */
	range_tree_t	*ms_defertree[TXG_DEFER_SIZE];

	boolean_t	ms_condensing;	/* condensing? */
	boolean_t	ms_condense_wanted;

	/*
	 * We must hold both ms_lock and ms_group->mg_lock in order to
	 * modify ms_loaded.
	 */
	boolean_t	ms_loaded;
	boolean_t	ms_loading;

	int64_t		ms_deferspace;	/* sum of ms_defertree[] space */
	uint64_t	ms_weight;	/* weight vs. others in group */
	uint64_t	ms_activation_weight;	/* activation weight */

	/*
	 * Tracks the txg in which this metaslab was last selected for
	 * loading or allocation. We use this value to determine how long
	 * the metaslab should stay cached.
	 */
	uint64_t	ms_selected_txg;

	uint64_t	ms_alloc_txg;	/* last successful alloc (debug only) */
	uint64_t	ms_max_size;	/* maximum allocatable size */

	/*
	 * The metaslab block allocators can optionally use a size-ordered
	 * range tree and/or an array of LBAs. Not all allocators use
	 * this functionality. The ms_size_tree should always contain the
	 * same number of segments as the ms_tree. The only difference
	 * is that the ms_size_tree is ordered by segment sizes.
	 */
	avl_tree_t	ms_size_tree;
	uint64_t	ms_lbas[MAX_LBAS];

	metaslab_group_t *ms_group;	/* metaslab group */
	avl_node_t	ms_group_node;	/* node in metaslab group tree */
	txg_node_t	ms_txg_node;	/* per-txg dirty metaslab links */
};
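
/*
 * Illustrative sketch (not part of the interface): the allocation step of
 * the transition diagram above, moving a freshly allocated segment from the
 * in-core free tree into the per-txg allocation tree so it can be appended
 * to the space map in syncing context. This assumes the range_tree_add()/
 * range_tree_remove() interfaces from <sys/range_tree.h>; weight updates
 * and error handling are omitted and the function name is hypothetical.
 */
#if 0	/* example only; not compiled */
static void
ms_record_alloc(metaslab_t *msp, uint64_t offset, uint64_t size, uint64_t txg)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(!msp->ms_condensing);	/* no allocs while condensing */

	/* free segment (ms_tree) ---> ms_alloctree[txg & TXG_MASK] */
	range_tree_remove(msp->ms_tree, offset, size);
	range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size);
}
#endif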

#ifdef __cplusplus
}
#endif

#endif /* _SYS_METASLAB_IMPL_H */