fix copyright fubar, reduce diffs
15254 %ymm registers not restored after signal handler
15367 x86 getfpregs() summons corrupting %xmm ghosts
15333 want x86 /proc xregs support (libc_db, libproc, mdb, etc.)
15336 want libc functions for extended ucontext_t
15334 want ps_lwphandle-specific reg routines
15328 FPU_CW_INIT mistreats reserved bit
15335 i86pc fpu_subr.c isn't really platform-specific
15332 setcontext(2) isn't actually noreturn
15331 need <sys/stdalign.h>
Change-Id: I7060aa86042dfb989f77fc3323c065ea2eafa9ad
Conflicts:
usr/src/uts/common/fs/proc/prcontrol.c
usr/src/uts/intel/os/archdep.c
usr/src/uts/intel/sys/ucontext.h
usr/src/uts/intel/syscall/getcontext.c
--- old/usr/src/uts/common/os/kmem.c
+++ new/usr/src/uts/common/os/kmem.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
23 - * Copyright (c) 2017, Joyent, Inc.
24 23 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
25 24 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
26 25 * Copyright 2018, Joyent, Inc.
27 - * Copyright 2020 Oxide Computer Company
26 + * Copyright 2023 Oxide Computer Company
28 27 */
29 28
30 29 /*
31 30 * Kernel memory allocator, as described in the following two papers and a
32 31 * statement about the consolidator:
33 32 *
34 33 * Jeff Bonwick,
35 34 * The Slab Allocator: An Object-Caching Kernel Memory Allocator.
36 35 * Proceedings of the Summer 1994 Usenix Conference.
37 36 * Available as /shared/sac/PSARC/1994/028/materials/kmem.pdf.
38 37 *
39 38 * Jeff Bonwick and Jonathan Adams,
40 39 * Magazines and vmem: Extending the Slab Allocator to Many CPUs and
41 40 * Arbitrary Resources.
42 41 * Proceedings of the 2001 Usenix Conference.
43 42 * Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf.
44 43 *
45 44 * kmem Slab Consolidator Big Theory Statement:
46 45 *
47 46 * 1. Motivation
48 47 *
49 48 * As stated in Bonwick94, slabs provide the following advantages over other
50 49 * allocation structures in terms of memory fragmentation:
51 50 *
52 51 * - Internal fragmentation (per-buffer wasted space) is minimal.
53 52 * - Severe external fragmentation (unused buffers on the free list) is
54 53 * unlikely.
55 54 *
56 55 * Segregating objects by size eliminates one source of external fragmentation,
57 56 * and according to Bonwick:
58 57 *
59 58 * The other reason that slabs reduce external fragmentation is that all
60 59 * objects in a slab are of the same type, so they have the same lifetime
61 60 * distribution. The resulting segregation of short-lived and long-lived
62 61 * objects at slab granularity reduces the likelihood of an entire page being
63 62 * held hostage due to a single long-lived allocation [Barrett93, Hanson90].
64 63 *
65 64 * While unlikely, severe external fragmentation remains possible. Clients that
66 65 * allocate both short- and long-lived objects from the same cache cannot
67 66 * anticipate the distribution of long-lived objects within the allocator's slab
68 67 * implementation. Even a small percentage of long-lived objects distributed
69 68 * randomly across many slabs can lead to a worst case scenario where the client
70 69 * frees the majority of its objects and the system gets back almost none of the
71 70 * slabs. Despite the client doing what it reasonably can to help the system
72 71 * reclaim memory, the allocator cannot shake free enough slabs because of
73 72 * lonely allocations stubbornly hanging on. Although the allocator is in a
74 73 * position to diagnose the fragmentation, there is nothing that the allocator
75 74 * by itself can do about it. It only takes a single allocated object to prevent
76 75 * an entire slab from being reclaimed, and any object handed out by
77 76 * kmem_cache_alloc() is by definition in the client's control. Conversely,
78 77 * although the client is in a position to move a long-lived object, it has no
79 78 * way of knowing if the object is causing fragmentation, and if so, where to
80 79 * move it. A solution necessarily requires further cooperation between the
81 80 * allocator and the client.
82 81 *
83 82 * 2. Move Callback
84 83 *
85 84 * The kmem slab consolidator therefore adds a move callback to the
86 85 * allocator/client interface, improving worst-case external fragmentation in
87 86 * kmem caches that supply a function to move objects from one memory location
88 87 * to another. In a situation of low memory kmem attempts to consolidate all of
89 88 * a cache's slabs at once; otherwise it works slowly to bring external
90 89 * fragmentation within the 1/8 limit guaranteed for internal fragmentation,
91 90 * thereby helping to avoid a low memory situation in the future.
92 91 *
93 92 * The callback has the following signature:
94 93 *
95 94 * kmem_cbrc_t move(void *old, void *new, size_t size, void *user_arg)
96 95 *
97 96 * It supplies the kmem client with two addresses: the allocated object that
98 97 * kmem wants to move and a buffer selected by kmem for the client to use as the
99 98 * copy destination. The callback is kmem's way of saying "Please get off of
100 99 * this buffer and use this one instead." kmem knows where it wants to move the
101 100 * object in order to best reduce fragmentation. All the client needs to know
102 101 * about the second argument (void *new) is that it is an allocated, constructed
103 102 * object ready to take the contents of the old object. When the move function
104 103 * is called, the system is likely to be low on memory, and the new object
105 104 * spares the client from having to worry about allocating memory for the
106 105 * requested move. The third argument supplies the size of the object, in case a
107 106 * single move function handles multiple caches whose objects differ only in
108 107 * size (such as zio_buf_512, zio_buf_1024, etc). Finally, the same optional
109 108 * user argument passed to the constructor, destructor, and reclaim functions is
110 109 * also passed to the move callback.
111 110 *
112 111 * 2.1 Setting the Move Callback
113 112 *
114 113 * The client sets the move callback after creating the cache and before
115 114 * allocating from it:
116 115 *
117 116 * object_cache = kmem_cache_create(...);
118 117 * kmem_cache_set_move(object_cache, object_move);
119 118 *
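 * As a slightly fuller sketch of the same setup (the cache name, object
 * type, constructor, and destructor below are hypothetical, not taken from
 * this file):
 *
 *	// hypothetical cache for a client-defined object_t
 *	object_cache = kmem_cache_create("object_cache", sizeof (object_t),
 *	    0, object_constructor, object_destructor, NULL, NULL, NULL, 0);
 *	kmem_cache_set_move(object_cache, object_move);
 *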
120 119 * 2.2 Move Callback Return Values
121 120 *
122 121 * Only the client knows about its own data and when is a good time to move it.
123 122 * The client is cooperating with kmem to return unused memory to the system,
124 123 * and kmem respectfully accepts this help at the client's convenience. When
125 124 * asked to move an object, the client can respond with any of the following:
126 125 *
127 126 * typedef enum kmem_cbrc {
128 127 * KMEM_CBRC_YES,
129 128 * KMEM_CBRC_NO,
130 129 * KMEM_CBRC_LATER,
131 130 * KMEM_CBRC_DONT_NEED,
132 131 * KMEM_CBRC_DONT_KNOW
133 132 * } kmem_cbrc_t;
134 133 *
135 134 * The client must not explicitly kmem_cache_free() either of the objects passed
136 135 * to the callback, since kmem wants to free them directly to the slab layer
137 136 * (bypassing the per-CPU magazine layer). The response tells kmem which of the
138 137 * objects to free:
139 138 *
140 139 * YES: (Did it) The client moved the object, so kmem frees the old one.
141 140 * NO: (Never) The client refused, so kmem frees the new object (the
142 141 * unused copy destination). kmem also marks the slab of the old
143 142 * object so as not to bother the client with further callbacks for
144 143 * that object as long as the slab remains on the partial slab list.
145 144 * (The system won't be getting the slab back as long as the
146 145 * immovable object holds it hostage, so there's no point in moving
147 146 * any of its objects.)
148 147 * LATER: The client is using the object and cannot move it now, so kmem
149 148 * frees the new object (the unused copy destination). kmem still
150 149 * attempts to move other objects off the slab, since it expects to
151 150 * succeed in clearing the slab in a later callback. The client
152 151 * should use LATER instead of NO if the object is likely to become
153 152 * movable very soon.
154 153 * DONT_NEED: The client no longer needs the object, so kmem frees the old along
155 154 * with the new object (the unused copy destination). This response
156 155 * is the client's opportunity to be a model citizen and give back as
157 156 * much as it can.
158 157 * DONT_KNOW: The client does not know about the object because
159 158 * a) the client has just allocated the object and not yet put it
160 159 * wherever it expects to find known objects
161 160 * b) the client has removed the object from wherever it expects to
162 161 * find known objects and is about to free it, or
163 162 * c) the client has freed the object.
164 163 * In all these cases (a, b, and c) kmem frees the new object (the
165 164 * unused copy destination). In the first case, the object is in
166 165 * use and the correct action is that for LATER; in the latter two
167 166 * cases, we know that the object is either freed or about to be
168 167 * freed, in which case it is either already in a magazine or about
169 168 * to be in one. In these cases, we know that the object will either
170 169 * be reallocated and reused, or it will end up in a full magazine
171 170 * that will be reaped (thereby liberating the slab). Because it
172 171 * is prohibitively expensive to differentiate these cases, and
173 172 * because the defrag code is executed when we're low on memory
174 173 * (thereby biasing the system to reclaim full magazines) we treat
175 174 * all DONT_KNOW cases as LATER and rely on cache reaping to
176 175 * generally clean up full magazines. While we take the same action
177 176 * for these cases, we maintain their semantic distinction: if
178 177 * defragmentation is not occurring, it is useful to know if this
179 178 * is due to objects in use (LATER) or objects in an unknown state
180 179 * of transition (DONT_KNOW).
181 180 *
182 181 * 2.3 Object States
183 182 *
184 183 * Neither kmem nor the client can be assumed to know the object's whereabouts
185 184 * at the time of the callback. An object belonging to a kmem cache may be in
186 185 * any of the following states:
187 186 *
188 187 * 1. Uninitialized on the slab
189 188 * 2. Allocated from the slab but not constructed (still uninitialized)
190 189 * 3. Allocated from the slab, constructed, but not yet ready for business
191 190 * (not in a valid state for the move callback)
192 191 * 4. In use (valid and known to the client)
193 192 * 5. About to be freed (no longer in a valid state for the move callback)
194 193 * 6. Freed to a magazine (still constructed)
195 194 * 7. Allocated from a magazine, not yet ready for business (not in a valid
196 195 * state for the move callback), and about to return to state #4
197 196 * 8. Deconstructed on a magazine that is about to be freed
198 197 * 9. Freed to the slab
199 198 *
200 199 * Since the move callback may be called at any time while the object is in any
201 200 * of the above states (except state #1), the client needs a safe way to
202 201 * determine whether or not it knows about the object. Specifically, the client
203 202 * needs to know whether or not the object is in state #4, the only state in
204 203 * which a move is valid. If the object is in any other state, the client should
205 204 * immediately return KMEM_CBRC_DONT_KNOW, since it is unsafe to access any of
206 205 * the object's fields.
207 206 *
208 207 * Note that although an object may be in state #4 when kmem initiates the move
209 208 * request, the object may no longer be in that state by the time kmem actually
210 209 * calls the move function. Not only does the client free objects
211 210 * asynchronously, kmem itself puts move requests on a queue where they are
212 211 * pending until kmem processes them from another context. Also, objects freed
213 212 * to a magazine appear allocated from the point of view of the slab layer, so
214 213 * kmem may even initiate requests for objects in a state other than state #4.
215 214 *
216 215 * 2.3.1 Magazine Layer
217 216 *
218 217 * An important insight revealed by the states listed above is that the magazine
219 218 * layer is populated only by kmem_cache_free(). Magazines of constructed
220 219 * objects are never populated directly from the slab layer (which contains raw,
221 220 * unconstructed objects). Whenever an allocation request cannot be satisfied
222 221 * from the magazine layer, the magazines are bypassed and the request is
223 222 * satisfied from the slab layer (creating a new slab if necessary). kmem calls
224 223 * the object constructor only when allocating from the slab layer, and only in
225 224 * response to kmem_cache_alloc() or to prepare the destination buffer passed in
226 225 * the move callback. kmem does not preconstruct objects in anticipation of
227 226 * kmem_cache_alloc().
228 227 *
229 228 * 2.3.2 Object Constructor and Destructor
230 229 *
231 230 * If the client supplies a destructor, it must be valid to call the destructor
232 231 * on a newly created object (immediately after the constructor).
233 232 *
234 233 * 2.4 Recognizing Known Objects
235 234 *
236 235 * There is a simple test to determine safely whether or not the client knows
237 236 * about a given object in the move callback. It relies on the fact that kmem
238 237 * guarantees that the object of the move callback has only been touched by the
239 238 * client itself or else by kmem. kmem does this by ensuring that none of the
240 239 * cache's slabs are freed to the virtual memory (VM) subsystem while a move
241 240 * callback is pending. When the last object on a slab is freed, if there is a
242 241 * pending move, kmem puts the slab on a per-cache dead list and defers freeing
243 242 * slabs on that list until all pending callbacks are completed. That way,
244 243 * clients can be certain that the object of a move callback is in one of the
245 244 * states listed above, making it possible to distinguish known objects (in
246 245 * state #4) using the two low order bits of any pointer member (with the
247 246 * exception of 'char *' or 'short *' which may not be 4-byte aligned on some
248 247 * platforms).
249 248 *
250 249 * The test works as long as the client always transitions objects from state #4
251 250 * (known, in use) to state #5 (about to be freed, invalid) by setting the low
252 251 * order bit of the client-designated pointer member. Since kmem only writes
253 252 * invalid memory patterns, such as 0xbaddcafe to uninitialized memory and
254 253 * 0xdeadbeef to freed memory, any scribbling on the object done by kmem is
255 254 * guaranteed to set at least one of the two low order bits. Therefore, given an
256 255 * object with a back pointer to a 'container_t *o_container', the client can
257 256 * test
258 257 *
259 258 * container_t *container = object->o_container;
260 259 * if ((uintptr_t)container & 0x3) {
261 260 * return (KMEM_CBRC_DONT_KNOW);
262 261 * }
263 262 *
264 263 * Typically, an object will have a pointer to some structure with a list or
265 264 * hash where objects from the cache are kept while in use. Assuming that the
266 265 * client has some way of knowing that the container structure is valid and will
267 266 * not go away during the move, and assuming that the structure includes a lock
268 267 * to protect whatever collection is used, then the client would continue as
269 268 * follows:
270 269 *
271 270 * // Ensure that the container structure does not go away.
272 271 * if (container_hold(container) == 0) {
273 272 * return (KMEM_CBRC_DONT_KNOW);
274 273 * }
275 274 * mutex_enter(&container->c_objects_lock);
276 275 * if (container != object->o_container) {
277 276 * mutex_exit(&container->c_objects_lock);
278 277 * container_rele(container);
279 278 * return (KMEM_CBRC_DONT_KNOW);
280 279 * }
281 280 *
282 281 * At this point the client knows that the object cannot be freed as long as
283 282 * c_objects_lock is held. Note that after acquiring the lock, the client must
284 283 * recheck the o_container pointer in case the object was removed just before
285 284 * acquiring the lock.
286 285 *
287 286 * When the client is about to free an object, it must first remove that object
288 287 * from the list, hash, or other structure where it is kept. At that time, to
289 288 * mark the object so it can be distinguished from the remaining, known objects,
290 289 * the client sets the designated low order bit:
291 290 *
292 291 * mutex_enter(&container->c_objects_lock);
293 292 * object->o_container = (void *)((uintptr_t)object->o_container | 0x1);
294 293 * list_remove(&container->c_objects, object);
295 294 * mutex_exit(&container->c_objects_lock);
296 295 *
297 296 * In the common case, the object is freed to the magazine layer, where it may
298 297 * be reused on a subsequent allocation without the overhead of calling the
299 298 * constructor. While in the magazine it appears allocated from the point of
300 299 * view of the slab layer, making it a candidate for the move callback. Most
301 300 * objects unrecognized by the client in the move callback fall into this
302 301 * category and are cheaply distinguished from known objects by the test
303 302 * described earlier. Because searching magazines is prohibitively expensive
304 303 * for kmem, clients that do not mark freed objects (and therefore return
305 304 * KMEM_CBRC_DONT_KNOW for large numbers of objects) may find defragmentation
306 305 * efficacy reduced.
307 306 *
308 307 * Invalidating the designated pointer member before freeing the object marks
309 308 * the object to be avoided in the callback, and conversely, assigning a valid
310 309 * value to the designated pointer member after allocating the object makes the
311 310 * object fair game for the callback:
312 311 *
313 312 * ... allocate object ...
314 313 * ... set any initial state not set by the constructor ...
315 314 *
316 315 * mutex_enter(&container->c_objects_lock);
317 316 * list_insert_tail(&container->c_objects, object);
318 317 * membar_producer();
319 318 * object->o_container = container;
320 319 * mutex_exit(&container->c_objects_lock);
321 320 *
322 321 * Note that everything else must be valid before setting o_container makes the
323 322 * object fair game for the move callback. The membar_producer() call ensures
324 323 * that all the object's state is written to memory before setting the pointer
325 324 * that transitions the object from state #3 or #7 (allocated, constructed, not
326 325 * yet in use) to state #4 (in use, valid). That's important because the move
327 326 * function has to check the validity of the pointer before it can safely
328 327 * acquire the lock protecting the collection where it expects to find known
329 328 * objects.
330 329 *
331 330 * This method of distinguishing known objects observes the usual symmetry:
332 331 * invalidating the designated pointer is the first thing the client does before
333 332 * freeing the object, and setting the designated pointer is the last thing the
334 333 * client does after allocating the object. Of course, the client is not
335 334 * required to use this method. Fundamentally, how the client recognizes known
336 335 * objects is completely up to the client, but this method is recommended as an
337 336 * efficient and safe way to take advantage of the guarantees made by kmem. If
338 337 * the entire object is arbitrary data without any markable bits from a suitable
339 338 * pointer member, then the client must find some other method, such as
340 339 * searching a hash table of known objects.
341 340 *
342 341 * 2.5 Preventing Objects From Moving
343 342 *
344 343 * Besides a way to distinguish known objects, the other thing that the client
345 344 * needs is a strategy to ensure that an object will not move while the client
346 345 * is actively using it. The details of satisfying this requirement tend to be
347 346 * highly cache-specific. It might seem that the same rules that let a client
348 347 * remove an object safely should also decide when an object can be moved
349 348 * safely. However, any object state that makes a removal attempt invalid is
350 349 * likely to be long-lasting for objects that the client does not expect to
351 350 * remove. kmem knows nothing about the object state and is equally likely (from
352 351 * the client's point of view) to request a move for any object in the cache,
353 352 * whether prepared for removal or not. Even a low percentage of objects stuck
354 353 * in place by unremovability will defeat the consolidator if the stuck objects
355 354 * are the same long-lived allocations likely to hold slabs hostage.
356 355 * Fundamentally, the consolidator is not aimed at common cases. Severe external
357 356 * fragmentation is a worst case scenario manifested as sparsely allocated
358 357 * slabs, by definition a low percentage of the cache's objects. When deciding
359 358 * what makes an object movable, keep in mind the goal of the consolidator: to
360 359 * bring worst-case external fragmentation within the limits guaranteed for
361 360 * internal fragmentation. Removability is a poor criterion if it is likely to
362 361 * exclude more than an insignificant percentage of objects for long periods of
363 362 * time.
364 363 *
365 364 * A tricky general solution exists, and it has the advantage of letting you
366 365 * move any object at almost any moment, practically eliminating the likelihood
367 366 * that an object can hold a slab hostage. However, if there is a cache-specific
368 367 * way to ensure that an object is not actively in use in the vast majority of
369 368 * cases, a simpler solution that leverages this cache-specific knowledge is
370 369 * preferred.
371 370 *
372 371 * 2.5.1 Cache-Specific Solution
373 372 *
374 373 * As an example of a cache-specific solution, the ZFS znode cache takes
375 374 * advantage of the fact that the vast majority of znodes are only being
376 375 * referenced from the DNLC. (A typical case might be a few hundred in active
377 376 * use and a hundred thousand in the DNLC.) In the move callback, after the ZFS
378 377 * client has established that it recognizes the znode and can access its fields
379 378 * safely (using the method described earlier), it then tests whether the znode
380 379 * is referenced by anything other than the DNLC. If so, it assumes that the
381 380 * znode may be in active use and is unsafe to move, so it drops its locks and
382 381 * returns KMEM_CBRC_LATER. The advantage of this strategy is that everywhere
383 382 * else znodes are used, no change is needed to protect against the possibility
384 383 * of the znode moving. The disadvantage is that it remains possible for an
385 384 * application to hold a znode slab hostage with an open file descriptor.
386 385 * However, this case ought to be rare and the consolidator has a way to deal
387 386 * with it: If the client responds KMEM_CBRC_LATER repeatedly for the same
388 387 * object, kmem eventually stops believing it and treats the slab as if the
389 388 * client had responded KMEM_CBRC_NO. Having marked the hostage slab, kmem can
390 389 * then focus on getting it off of the partial slab list by allocating rather
391 390 * than freeing all of its objects. (Either way of getting a slab off the
392 391 * free list reduces fragmentation.)
393 392 *
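 * The shape of such a cache-specific test can be quite small (the names
 * below are hypothetical and simplified; this is not the actual ZFS code):
 *
 *	// 'op' has already been recognized as a known object and the
 *	// client's own locks are held; 'refs_held_by_cache' is whatever
 *	// count the cache-wide index itself is known to hold.
 *	if (object_refcount(op) > refs_held_by_cache) {
 *		// possibly in active use elsewhere; do not move it now
 *		object_unlock(op);
 *		return (KMEM_CBRC_LATER);
 *	}
 *	object_move_impl(op, np);	// copy old to new
 *	object_unlock(op);
 *	return (KMEM_CBRC_YES);
 *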
394 393 * 2.5.2 General Solution
395 394 *
396 395 * The general solution, on the other hand, requires an explicit hold everywhere
397 396 * the object is used to prevent it from moving. To keep the client locking
398 397 * strategy as uncomplicated as possible, kmem guarantees the simplifying
399 398 * assumption that move callbacks are sequential, even across multiple caches.
400 399 * Internally, a global queue processed by a single thread supports all caches
401 400 * implementing the callback function. No matter how many caches supply a move
402 401 * function, the consolidator never moves more than one object at a time, so the
403 402 * client does not have to worry about tricky lock ordering involving several
404 403 * related objects from different kmem caches.
405 404 *
406 405 * The general solution implements the explicit hold as a read-write lock, which
407 406 * allows multiple readers to access an object from the cache simultaneously
408 407 * while a single writer is excluded from moving it. A single rwlock for the
409 408 * entire cache would lock out all threads from using any of the cache's objects
410 409 * even though only a single object is being moved, so to reduce contention,
411 410 * the client can fan out the single rwlock into an array of rwlocks hashed by
412 411 * the object address, making it probable that moving one object will not
413 412 * prevent other threads from using a different object. The rwlock cannot be a
414 413 * member of the object itself, because the possibility of the object moving
415 414 * makes it unsafe to access any of the object's fields until the lock is
416 415 * acquired.
417 416 *
418 417 * Assuming a small, fixed number of locks, it's possible that multiple objects
419 418 * will hash to the same lock. A thread that needs to use multiple objects in
420 419 * the same function may acquire the same lock multiple times. Since rwlocks are
421 420 * reentrant for readers, and since there is never more than a single writer at
422 421 * a time (assuming that the client acquires the lock as a writer only when
423 422 * moving an object inside the callback), there would seem to be no problem.
424 423 * However, a client locking multiple objects in the same function must handle
425 424 * one case of potential deadlock: Assume that thread A needs to prevent both
426 425 * object 1 and object 2 from moving, and thread B, the callback, meanwhile
427 426 * tries to move object 3. It's possible, if objects 1, 2, and 3 all hash to the
428 427 * same lock, that thread A will acquire the lock for object 1 as a reader
429 428 * before thread B sets the lock's write-wanted bit, preventing thread A from
430 429 * reacquiring the lock for object 2 as a reader. Unable to make forward
431 430 * progress, thread A will never release the lock for object 1, resulting in
432 431 * deadlock.
433 432 *
434 433 * There are two ways of avoiding the deadlock just described. The first is to
435 434 * use rw_tryenter() rather than rw_enter() in the callback function when
436 435 * attempting to acquire the lock as a writer. If tryenter discovers that the
437 436 * same object (or another object hashed to the same lock) is already in use, it
438 437 * aborts the callback and returns KMEM_CBRC_LATER. The second way is to use
439 438 * rprwlock_t (declared in common/fs/zfs/sys/rprwlock.h) instead of rwlock_t,
440 439 * since it allows a thread to acquire the lock as a reader in spite of a
441 440 * waiting writer. This second approach insists on moving the object now, no
442 441 * matter how many readers the move function must wait for in order to do so,
443 442 * and could delay the completion of the callback indefinitely (blocking
444 443 * callbacks to other clients). In practice, a less insistent callback using
445 444 * rw_tryenter() returns KMEM_CBRC_LATER infrequently enough that there seems
446 445 * little reason to use anything else.
447 446 *
448 447 * Avoiding deadlock is not the only problem that an implementation using an
449 448 * explicit hold needs to solve. Locking the object in the first place (to
450 449 * prevent it from moving) remains a problem, since the object could move
451 450 * between the time you obtain a pointer to the object and the time you acquire
452 451 * the rwlock hashed to that pointer value. Therefore the client needs to
453 452 * recheck the value of the pointer after acquiring the lock, drop the lock if
454 453 * the value has changed, and try again. This requires a level of indirection:
455 454 * something that points to the object rather than the object itself, that the
456 455 * client can access safely while attempting to acquire the lock. (The object
457 456 * itself cannot be referenced safely because it can move at any time.)
458 457 * The following lock-acquisition function takes whatever is safe to reference
459 458 * (arg), follows its pointer to the object (using function f), and tries as
460 459 * often as necessary to acquire the hashed lock and verify that the object
461 460 * still has not moved:
462 461 *
463 462 * object_t *
464 463 * object_hold(object_f f, void *arg)
465 464 * {
466 465 * object_t *op;
467 466 *
468 467 * op = f(arg);
469 468 * if (op == NULL) {
470 469 * return (NULL);
471 470 * }
472 471 *
473 472 * rw_enter(OBJECT_RWLOCK(op), RW_READER);
474 473 * while (op != f(arg)) {
475 474 * rw_exit(OBJECT_RWLOCK(op));
476 475 * op = f(arg);
477 476 * if (op == NULL) {
478 477 * break;
479 478 * }
480 479 * rw_enter(OBJECT_RWLOCK(op), RW_READER);
481 480 * }
482 481 *
483 482 * return (op);
484 483 * }
485 484 *
486 485 * The OBJECT_RWLOCK macro hashes the object address to obtain the rwlock. The
487 486 * lock reacquisition loop, while necessary, almost never executes. The function
488 487 * pointer f (used to obtain the object pointer from arg) has the following type
489 488 * definition:
490 489 *
491 490 * typedef object_t *(*object_f)(void *arg);
492 491 *
493 492 * An object_f implementation is likely to be as simple as accessing a structure
494 493 * member:
495 494 *
496 495 * object_t *
497 496 * s_object(void *arg)
498 497 * {
499 498 * something_t *sp = arg;
500 499 * return (sp->s_object);
501 500 * }
502 501 *
503 502 * The flexibility of a function pointer allows the path to the object to be
504 503 * arbitrarily complex and also supports the notion that depending on where you
505 504 * are using the object, you may need to get it from someplace different.
506 505 *
507 506 * The function that releases the explicit hold is simpler because it does not
508 507 * have to worry about the object moving:
509 508 *
510 509 * void
511 510 * object_rele(object_t *op)
512 511 * {
513 512 * rw_exit(OBJECT_RWLOCK(op));
514 513 * }
515 514 *
516 515 * The caller is spared these details so that obtaining and releasing an
517 516 * explicit hold feels like a simple mutex_enter()/mutex_exit() pair. The caller
518 517 * of object_hold() only needs to know that the returned object pointer is valid
519 518 * if not NULL and that the object will not move until released.
520 519 *
521 520 * Although object_hold() prevents an object from moving, it does not prevent it
522 521 * from being freed. The caller must take measures before calling object_hold()
523 522 * (afterwards is too late) to ensure that the held object cannot be freed. The
524 523 * caller must do so without accessing the unsafe object reference, so any lock
525 524 * or reference count used to ensure the continued existence of the object must
526 525 * live outside the object itself.
527 526 *
528 527 * Obtaining a new object is a special case where an explicit hold is impossible
529 528 * for the caller. Any function that returns a newly allocated object (either as
530 529 * a return value, or as an in-out parameter) must return it already held; after
531 530 * the caller gets it is too late, since the object cannot be safely accessed
532 531 * without the level of indirection described earlier. The following
533 532 * object_alloc() example uses the same code shown earlier to transition a new
534 533 * object into the state of being recognized (by the client) as a known object.
535 534 * The function must acquire the hold (rw_enter) before that state transition
536 535 * makes the object movable:
537 536 *
538 537 * static object_t *
539 538 * object_alloc(container_t *container)
540 539 * {
541 540 * object_t *object = kmem_cache_alloc(object_cache, 0);
542 541 * ... set any initial state not set by the constructor ...
543 542 * rw_enter(OBJECT_RWLOCK(object), RW_READER);
544 543 * mutex_enter(&container->c_objects_lock);
545 544 * list_insert_tail(&container->c_objects, object);
546 545 * membar_producer();
547 546 * object->o_container = container;
548 547 * mutex_exit(&container->c_objects_lock);
549 548 * return (object);
550 549 * }
551 550 *
552 551 * Functions that implicitly acquire an object hold (any function that calls
553 552 * object_alloc() to supply an object for the caller) need to be carefully noted
554 553 * so that the matching object_rele() is not neglected. Otherwise, leaked holds
555 554 * prevent all objects hashed to the affected rwlocks from ever being moved.
556 555 *
557 556 * The pointer to a held object can be hashed to the holding rwlock even after
558 557 * the object has been freed. Although it is possible to release the hold
559 558 * after freeing the object, you may decide to release the hold implicitly in
560 559 * whatever function frees the object, so as to release the hold as soon as
561 560 * possible, and for the sake of symmetry with the function that implicitly
562 561 * acquires the hold when it allocates the object. Here, object_free() releases
563 562 * the hold acquired by object_alloc(). Its implicit object_rele() forms a
564 563 * matching pair with object_hold():
565 564 *
566 565 * void
567 566 * object_free(object_t *object)
568 567 * {
569 568 * container_t *container;
570 569 *
571 570 * ASSERT(object_held(object));
572 571 * container = object->o_container;
573 572 * mutex_enter(&container->c_objects_lock);
574 573 * object->o_container =
575 574 * (void *)((uintptr_t)object->o_container | 0x1);
576 575 * list_remove(&container->c_objects, object);
577 576 * mutex_exit(&container->c_objects_lock);
578 577 * object_rele(object);
579 578 * kmem_cache_free(object_cache, object);
580 579 * }
581 580 *
582 581 * Note that object_free() cannot safely accept an object pointer as an argument
583 582 * unless the object is already held. Any function that calls object_free()
584 583 * needs to be carefully noted since it similarly forms a matching pair with
585 584 * object_hold().
586 585 *
587 586 * To complete the picture, the following callback function implements the
588 587 * general solution by moving objects only if they are currently unheld:
589 588 *
590 589 * static kmem_cbrc_t
591 590 * object_move(void *buf, void *newbuf, size_t size, void *arg)
592 591 * {
593 592 * object_t *op = buf, *np = newbuf;
594 593 * container_t *container;
595 594 *
596 595 * container = op->o_container;
597 596 * if ((uintptr_t)container & 0x3) {
598 597 * return (KMEM_CBRC_DONT_KNOW);
599 598 * }
600 599 *
601 600 * // Ensure that the container structure does not go away.
602 601 * if (container_hold(container) == 0) {
603 602 * return (KMEM_CBRC_DONT_KNOW);
604 603 * }
605 604 *
606 605 * mutex_enter(&container->c_objects_lock);
607 606 * if (container != op->o_container) {
608 607 * mutex_exit(&container->c_objects_lock);
609 608 * container_rele(container);
610 609 * return (KMEM_CBRC_DONT_KNOW);
611 610 * }
612 611 *
613 612 * if (rw_tryenter(OBJECT_RWLOCK(op), RW_WRITER) == 0) {
614 613 * mutex_exit(&container->c_objects_lock);
615 614 * container_rele(container);
616 615 * return (KMEM_CBRC_LATER);
617 616 * }
618 617 *
619 618 * object_move_impl(op, np); // critical section
620 619 * rw_exit(OBJECT_RWLOCK(op));
621 620 *
622 621 * op->o_container = (void *)((uintptr_t)op->o_container | 0x1);
623 622 * list_link_replace(&op->o_link_node, &np->o_link_node);
624 623 * mutex_exit(&container->c_objects_lock);
625 624 * container_rele(container);
626 625 * return (KMEM_CBRC_YES);
627 626 * }
628 627 *
629 628 * Note that object_move() must invalidate the designated o_container pointer of
630 629 * the old object in the same way that object_free() does, since kmem will free
631 630 * the object in response to the KMEM_CBRC_YES return value.
632 631 *
633 632 * The lock order in object_move() differs from object_alloc(), which locks
634 633 * OBJECT_RWLOCK first and &container->c_objects_lock second, but as long as the
635 634 * callback uses rw_tryenter() (preventing the deadlock described earlier), it's
636 635 * not a problem. Holding the lock on the object list in the example above
637 636 * through the entire callback not only prevents the object from going away, it
638 637 * also allows you to lock the list elsewhere and know that none of its elements
639 638 * will move during iteration.
640 639 *
641 640 * Adding an explicit hold everywhere an object from the cache is used is tricky
642 641 * and involves much more change to client code than a cache-specific solution
643 642 * that leverages existing state to decide whether or not an object is
644 643 * movable. However, this approach has the advantage that no object remains
645 644 * immovable for any significant length of time, making it extremely unlikely
646 645 * that long-lived allocations can continue holding slabs hostage; and it works
647 646 * for any cache.
648 647 *
649 648 * 3. Consolidator Implementation
650 649 *
651 650 * Once the client supplies a move function that a) recognizes known objects and
652 651 * b) avoids moving objects that are actively in use, the remaining work is up
653 652 * to the consolidator to decide which objects to move and when to issue
654 653 * callbacks.
655 654 *
656 655 * The consolidator relies on the fact that a cache's slabs are ordered by
657 656 * usage. Each slab has a fixed number of objects. Depending on the slab's
658 657 * "color" (the offset of the first object from the beginning of the slab;
659 658 * offsets are staggered to mitigate false sharing of cache lines) it is either
660 659 * the maximum number of objects per slab determined at cache creation time or
661 660 * else the number closest to the maximum that fits within the space remaining
662 661 * after the initial offset. A completely allocated slab may contribute some
663 662 * internal fragmentation (per-slab overhead) but no external fragmentation, so
664 663 * it is of no interest to the consolidator. At the other extreme, slabs whose
665 664 * objects have all been freed to the slab are released to the virtual memory
666 665 * (VM) subsystem (objects freed to magazines are still allocated as far as the
667 666 * slab is concerned). External fragmentation exists when there are slabs
668 667 * somewhere between these extremes. A partial slab has at least one but not all
669 668 * of its objects allocated. The more partial slabs, and the fewer allocated
670 669 * objects on each of them, the higher the fragmentation. Hence the
671 670 * consolidator's overall strategy is to reduce the number of partial slabs by
672 671 * moving allocated objects from the least allocated slabs to the most allocated
673 672 * slabs.
674 673 *
675 674 * Partial slabs are kept in an AVL tree ordered by usage. Completely allocated
676 675 * slabs are kept separately in an unordered list. Since the majority of slabs
677 676 * tend to be completely allocated (a typical unfragmented cache may have
678 677 * thousands of complete slabs and only a single partial slab), separating
679 678 * complete slabs improves the efficiency of partial slab ordering, since the
680 679 * complete slabs do not affect the depth or balance of the AVL tree. This
681 680 * ordered sequence of partial slabs acts as a "free list" supplying objects for
682 681 * allocation requests.
683 682 *
684 683 * Objects are always allocated from the first partial slab in the free list,
685 684 * where the allocation is most likely to eliminate a partial slab (by
686 685 * completely allocating it). Conversely, when a single object from a completely
687 686 * allocated slab is freed to the slab, that slab is added to the front of the
688 687 * free list. Since most free list activity involves highly allocated slabs
689 688 * coming and going at the front of the list, slabs tend naturally toward the
690 689 * ideal order: highly allocated at the front, sparsely allocated at the back.
691 690 * Slabs with few allocated objects are likely to become completely free if they
692 691 * keep a safe distance away from the front of the free list. Slab misorders
693 692 * interfere with the natural tendency of slabs to become completely free or
694 693 * completely allocated. For example, a slab with a single allocated object
695 694 * needs only a single free to escape the cache; its natural desire is
696 695 * frustrated when it finds itself at the front of the list where a second
697 696 * allocation happens just before the free could have released it. Another slab
698 697 * with all but one object allocated might have supplied the buffer instead, so
699 698 * that both (as opposed to neither) of the slabs would have been taken off the
700 699 * free list.
701 700 *
702 701 * Although slabs tend naturally toward the ideal order, misorders allowed by a
703 702 * simple list implementation defeat the consolidator's strategy of merging
704 703 * least- and most-allocated slabs. Without an AVL tree to guarantee order, kmem
705 704 * needs another way to fix misorders to optimize its callback strategy. One
706 705 * approach is to periodically scan a limited number of slabs, advancing a
707 706 * marker to hold the current scan position, and to move extreme misorders to
708 707 * the front or back of the free list and to the front or back of the current
709 708 * scan range. By making consecutive scan ranges overlap by one slab, the least
710 709 * allocated slab in the current range can be carried along from the end of one
711 710 * scan to the start of the next.
712 711 *
713 712 * Maintaining partial slabs in an AVL tree relieves kmem of this additional
714 713 * task, however. Since most of the cache's activity is in the magazine layer,
715 714 * and allocations from the slab layer represent only a startup cost, the
716 715 * overhead of maintaining a balanced tree is not a significant concern compared
717 716 * to the opportunity of reducing complexity by eliminating the partial slab
718 717 * scanner just described. The overhead of an AVL tree is minimized by
719 718 * maintaining only partial slabs in the tree and keeping completely allocated
720 719 * slabs separately in a list. To avoid increasing the size of the slab
721 720 * structure the AVL linkage pointers are reused for the slab's list linkage,
722 721 * since the slab will always be either partial or complete, never stored both
723 722 * ways at the same time. To further minimize the overhead of the AVL tree the
724 723 * compare function that orders partial slabs by usage divides the range of
725 724 * allocated object counts into bins such that counts within the same bin are
726 725 * considered equal. Binning partial slabs makes it less likely that allocating
727 726 * or freeing a single object will change the slab's order, requiring a tree
728 727 * reinsertion (an avl_remove() followed by an avl_add(), both potentially
729 728 * requiring some rebalancing of the tree). Allocation counts closest to
730 729 * completely free and completely allocated are left unbinned (finely sorted) to
731 730 * better support the consolidator's strategy of merging slabs at either
732 731 * extreme.
733 732 *
734 733 * 3.1 Assessing Fragmentation and Selecting Candidate Slabs
735 734 *
736 735 * The consolidator piggybacks on the kmem maintenance thread and is called on
737 736 * the same interval as kmem_cache_update(), once per cache every fifteen
738 737 * seconds. kmem maintains a running count of unallocated objects in the slab
739 738 * layer (cache_bufslab). The consolidator checks whether that number exceeds
740 739 * 12.5% (1/8) of the total objects in the cache (cache_buftotal), and whether
741 740 * there is a significant number of slabs in the cache (arbitrarily a minimum
742 741 * 101 total slabs). Unused objects that have fallen out of the magazine layer's
743 742 * working set are included in the assessment, and magazines in the depot are
744 743 * reaped if those objects would lift cache_bufslab above the fragmentation
745 744 * threshold. Once the consolidator decides that a cache is fragmented, it looks
746 745 * for a candidate slab to reclaim, starting at the end of the partial slab free
747 746 * list and scanning backwards. At first the consolidator is choosy: only a slab
748 747 * with fewer than 12.5% (1/8) of its objects allocated qualifies (or else a
749 748 * single allocated object, regardless of percentage). If there is difficulty
750 749 * finding a candidate slab, kmem raises the allocation threshold incrementally,
751 750 * up to a maximum 87.5% (7/8), so that eventually the consolidator will reduce
752 751 * external fragmentation (unused objects on the free list) below 12.5% (1/8),
753 752 * even in the worst case of every slab in the cache being almost 7/8 allocated.
754 753 * The threshold can also be lowered incrementally when candidate slabs are easy
755 754 * to find, and the threshold is reset to the minimum 1/8 as soon as the cache
756 755 * is no longer fragmented.
757 756 *
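 * Reduced to its essentials, the initial fragmentation test amounts to
 * something like the following paraphrase (this is not the actual kmem
 * code; in particular, as noted above, reclaimable magazine objects are
 * also folded into the assessment):
 *
 *	// fragmented: unallocated slab-layer buffers exceed 1/8 of all
 *	// buffers, and the cache has enough slabs to be worth the effort
 *	if (cp->cache_bufslab > (cp->cache_buftotal >> 3) &&
 *	    (cp->cache_slab_create - cp->cache_slab_destroy) >= 101) {
 *		// scan the partial slab list backwards for a candidate
 *	}
 *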
758 757 * 3.2 Generating Callbacks
759 758 *
760 759 * Once an eligible slab is chosen, a callback is generated for every allocated
761 760 * object on the slab, in the hope that the client will move everything off the
762 761 * slab and make it reclaimable. Objects selected as move destinations are
763 762 * chosen from slabs at the front of the free list. Assuming slabs in the ideal
764 763 * order (most allocated at the front, least allocated at the back) and a
765 764 * cooperative client, the consolidator will succeed in removing slabs from both
766 765 * ends of the free list, completely allocating on the one hand and completely
767 766 * freeing on the other. Objects selected as move destinations are allocated in
768 767 * the kmem maintenance thread where move requests are enqueued. A separate
769 768 * callback thread removes pending callbacks from the queue and calls the
770 769 * client. The separate thread ensures that client code (the move function) does
771 770 * not interfere with internal kmem maintenance tasks. A map of pending
772 771 * callbacks keyed by object address (the object to be moved) is checked to
773 772 * ensure that duplicate callbacks are not generated for the same object.
774 773 * Allocating the move destination (the object to move to) prevents subsequent
775 774 * callbacks from selecting the same destination as an earlier pending callback.
776 775 *
777 776 * Move requests can also be generated by kmem_cache_reap() when the system is
778 777 * desperate for memory and by kmem_cache_move_notify(), called by the client to
779 778 * notify kmem that a move refused earlier with KMEM_CBRC_LATER is now possible.
780 779 * The map of pending callbacks is protected by the same lock that protects the
781 780 * slab layer.
782 781 *
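 * For example, a client that had to answer KMEM_CBRC_LATER because an
 * object was busy can prod kmem once that object goes idle (sketch only;
 * when and where to make this call is entirely up to the client):
 *
 *	// the object is now quiescent; a previously refused move may succeed
 *	kmem_cache_move_notify(object_cache, object);
 *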
783 782 * When the system is desperate for memory, kmem does not bother to determine
784 783 * whether or not the cache exceeds the fragmentation threshold, but tries to
785 784 * consolidate as many slabs as possible. Normally, the consolidator chews
786 785 * slowly, one sparsely allocated slab at a time during each maintenance
787 786 * interval that the cache is fragmented. When desperate, the consolidator
788 787 * starts at the last partial slab and enqueues callbacks for every allocated
789 788 * object on every partial slab, working backwards until it reaches the first
790 789 * partial slab. The first partial slab, meanwhile, advances in pace with the
791 790 * consolidator as allocations to supply move destinations for the enqueued
792 791 * callbacks use up the highly allocated slabs at the front of the free list.
793 792 * Ideally, the overgrown free list collapses like an accordion, starting at
794 793 * both ends and ending at the center with a single partial slab.
795 794 *
796 795 * 3.3 Client Responses
797 796 *
798 797 * When the client returns KMEM_CBRC_NO in response to the move callback, kmem
799 798 * marks the slab that supplied the stuck object non-reclaimable and moves it to
800 799 * the front of the free list. The slab remains marked as long as it remains on the
801 800 * free list, and it appears more allocated to the partial slab compare function
802 801 * than any unmarked slab, no matter how many of its objects are allocated.
803 802 * Since even one immovable object ties up the entire slab, the goal is to
804 803 * completely allocate any slab that cannot be completely freed. kmem does not
805 804 * bother generating callbacks to move objects from a marked slab unless the
806 805 * system is desperate.
807 806 *
808 807 * When the client responds KMEM_CBRC_LATER, kmem increments a count for the
809 808 * slab. If the client responds LATER too many times, kmem disbelieves and
810 809 * treats the response as a NO. The count is cleared when the slab is taken off
811 810 * the partial slab list or when the client moves one of the slab's objects.
812 811 *
813 812 * 4. Observability
814 813 *
815 814 * A kmem cache's external fragmentation is best observed with 'mdb -k' using
816 815 * the ::kmem_slabs dcmd. For a complete description of the command, enter
817 816 * '::help kmem_slabs' at the mdb prompt.
818 817 */
819 818
820 819 #include <sys/kmem_impl.h>
821 820 #include <sys/vmem_impl.h>
822 821 #include <sys/param.h>
823 822 #include <sys/sysmacros.h>
824 823 #include <sys/vm.h>
825 824 #include <sys/proc.h>
826 825 #include <sys/tuneable.h>
827 826 #include <sys/systm.h>
828 827 #include <sys/cmn_err.h>
829 828 #include <sys/debug.h>
830 829 #include <sys/sdt.h>
831 830 #include <sys/mutex.h>
832 831 #include <sys/bitmap.h>
833 832 #include <sys/atomic.h>
834 833 #include <sys/kobj.h>
835 834 #include <sys/disp.h>
836 835 #include <vm/seg_kmem.h>
837 836 #include <sys/log.h>
838 837 #include <sys/callb.h>
839 838 #include <sys/taskq.h>
840 839 #include <sys/modctl.h>
841 840 #include <sys/reboot.h>
842 841 #include <sys/id32.h>
843 842 #include <sys/zone.h>
844 843 #include <sys/netstack.h>
845 844 #ifdef DEBUG
846 845 #include <sys/random.h>
847 846 #endif
848 847
849 848 extern void streams_msg_init(void);
850 849 extern int segkp_fromheap;
851 850 extern void segkp_cache_free(void);
852 851 extern int callout_init_done;
853 852
854 853 struct kmem_cache_kstat {
855 854 kstat_named_t kmc_buf_size;
856 855 kstat_named_t kmc_align;
857 856 kstat_named_t kmc_chunk_size;
858 857 kstat_named_t kmc_slab_size;
859 858 kstat_named_t kmc_alloc;
860 859 kstat_named_t kmc_alloc_fail;
861 860 kstat_named_t kmc_free;
862 861 kstat_named_t kmc_depot_alloc;
863 862 kstat_named_t kmc_depot_free;
864 863 kstat_named_t kmc_depot_contention;
865 864 kstat_named_t kmc_slab_alloc;
866 865 kstat_named_t kmc_slab_free;
867 866 kstat_named_t kmc_buf_constructed;
868 867 kstat_named_t kmc_buf_avail;
869 868 kstat_named_t kmc_buf_inuse;
870 869 kstat_named_t kmc_buf_total;
871 870 kstat_named_t kmc_buf_max;
872 871 kstat_named_t kmc_slab_create;
873 872 kstat_named_t kmc_slab_destroy;
874 873 kstat_named_t kmc_vmem_source;
875 874 kstat_named_t kmc_hash_size;
876 875 kstat_named_t kmc_hash_lookup_depth;
877 876 kstat_named_t kmc_hash_rescale;
878 877 kstat_named_t kmc_full_magazines;
879 878 kstat_named_t kmc_empty_magazines;
880 879 kstat_named_t kmc_magazine_size;
881 880 kstat_named_t kmc_reap; /* number of kmem_cache_reap() calls */
882 881 kstat_named_t kmc_defrag; /* attempts to defrag all partial slabs */
883 882 kstat_named_t kmc_scan; /* attempts to defrag one partial slab */
884 883 kstat_named_t kmc_move_callbacks; /* sum of yes, no, later, dn, dk */
885 884 kstat_named_t kmc_move_yes;
886 885 kstat_named_t kmc_move_no;
887 886 kstat_named_t kmc_move_later;
888 887 kstat_named_t kmc_move_dont_need;
889 888 kstat_named_t kmc_move_dont_know; /* obj unrecognized by client ... */
890 889 kstat_named_t kmc_move_hunt_found; /* ... but found in mag layer */
891 890 kstat_named_t kmc_move_slabs_freed; /* slabs freed by consolidator */
892 891 kstat_named_t kmc_move_reclaimable; /* buffers, if consolidator ran */
893 892 } kmem_cache_kstat = {
894 893 { "buf_size", KSTAT_DATA_UINT64 },
895 894 { "align", KSTAT_DATA_UINT64 },
896 895 { "chunk_size", KSTAT_DATA_UINT64 },
897 896 { "slab_size", KSTAT_DATA_UINT64 },
898 897 { "alloc", KSTAT_DATA_UINT64 },
899 898 { "alloc_fail", KSTAT_DATA_UINT64 },
900 899 { "free", KSTAT_DATA_UINT64 },
901 900 { "depot_alloc", KSTAT_DATA_UINT64 },
902 901 { "depot_free", KSTAT_DATA_UINT64 },
903 902 { "depot_contention", KSTAT_DATA_UINT64 },
904 903 { "slab_alloc", KSTAT_DATA_UINT64 },
905 904 { "slab_free", KSTAT_DATA_UINT64 },
906 905 { "buf_constructed", KSTAT_DATA_UINT64 },
907 906 { "buf_avail", KSTAT_DATA_UINT64 },
908 907 { "buf_inuse", KSTAT_DATA_UINT64 },
909 908 { "buf_total", KSTAT_DATA_UINT64 },
910 909 { "buf_max", KSTAT_DATA_UINT64 },
911 910 { "slab_create", KSTAT_DATA_UINT64 },
912 911 { "slab_destroy", KSTAT_DATA_UINT64 },
913 912 { "vmem_source", KSTAT_DATA_UINT64 },
914 913 { "hash_size", KSTAT_DATA_UINT64 },
915 914 { "hash_lookup_depth", KSTAT_DATA_UINT64 },
916 915 { "hash_rescale", KSTAT_DATA_UINT64 },
917 916 { "full_magazines", KSTAT_DATA_UINT64 },
918 917 { "empty_magazines", KSTAT_DATA_UINT64 },
919 918 { "magazine_size", KSTAT_DATA_UINT64 },
920 919 { "reap", KSTAT_DATA_UINT64 },
921 920 { "defrag", KSTAT_DATA_UINT64 },
922 921 { "scan", KSTAT_DATA_UINT64 },
923 922 { "move_callbacks", KSTAT_DATA_UINT64 },
924 923 { "move_yes", KSTAT_DATA_UINT64 },
925 924 { "move_no", KSTAT_DATA_UINT64 },
926 925 { "move_later", KSTAT_DATA_UINT64 },
927 926 { "move_dont_need", KSTAT_DATA_UINT64 },
928 927 { "move_dont_know", KSTAT_DATA_UINT64 },
929 928 { "move_hunt_found", KSTAT_DATA_UINT64 },
930 929 { "move_slabs_freed", KSTAT_DATA_UINT64 },
931 930 { "move_reclaimable", KSTAT_DATA_UINT64 },
932 931 };
933 932
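/*
 * Each cache exports these counters as a named kstat (module "unix", class
 * "kmem_cache", name equal to the cache name), so they can be inspected from
 * userland, e.g. (cache name chosen purely as an example):
 *
 *	$ kstat -m unix -c kmem_cache -n kmem_alloc_8
 */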
934 933 static kmutex_t kmem_cache_kstat_lock;
935 934
936 935 /*
937 936 * The default set of caches to back kmem_alloc().
938 937 * These sizes should be reevaluated periodically.
939 938 *
940 939 * We want allocations that are multiples of the coherency granularity
941 940 * (64 bytes) to be satisfied from a cache which is a multiple of 64
942 941 * bytes, so that it will be 64-byte aligned. For all multiples of 64,
943 942 * the next kmem_cache_size greater than or equal to it must be a
944 943 * multiple of 64.
945 944 *
946 945 * We split the table into two sections: size <= 4k and size > 4k. This
947 946 * saves a lot of space and cache footprint in our cache tables.
948 947 */
949 948 static const int kmem_alloc_sizes[] = {
950 949 1 * 8,
951 950 2 * 8,
952 951 3 * 8,
953 952 4 * 8, 5 * 8, 6 * 8, 7 * 8,
954 953 4 * 16, 5 * 16, 6 * 16, 7 * 16,
955 954 4 * 32, 5 * 32, 6 * 32, 7 * 32,
956 955 4 * 64, 5 * 64, 6 * 64, 7 * 64,
957 956 4 * 128, 5 * 128, 6 * 128, 7 * 128,
958 957 P2ALIGN(8192 / 7, 64),
959 958 P2ALIGN(8192 / 6, 64),
960 959 P2ALIGN(8192 / 5, 64),
961 960 P2ALIGN(8192 / 4, 64),
962 961 P2ALIGN(8192 / 3, 64),
963 962 P2ALIGN(8192 / 2, 64),
964 963 };
965 964
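/*
 * To make the P2ALIGN() entries above concrete: P2ALIGN(x, 64) rounds x down
 * to a multiple of 64, so P2ALIGN(8192 / 7, 64) is P2ALIGN(1170, 64), which
 * is 1152. Each such entry is therefore 64-byte aligned while coming as close
 * as possible to an even division of 8K.
 */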
966 965 static const int kmem_big_alloc_sizes[] = {
967 966 2 * 4096, 3 * 4096,
968 967 2 * 8192, 3 * 8192,
969 968 4 * 8192, 5 * 8192, 6 * 8192, 7 * 8192,
970 969 8 * 8192, 9 * 8192, 10 * 8192, 11 * 8192,
971 970 12 * 8192, 13 * 8192, 14 * 8192, 15 * 8192,
972 971 16 * 8192
973 972 };
974 973
975 974 #define KMEM_MAXBUF 4096
976 975 #define KMEM_BIG_MAXBUF_32BIT 32768
977 976 #define KMEM_BIG_MAXBUF 131072
978 977
979 978 #define KMEM_BIG_MULTIPLE 4096 /* big_alloc_sizes must be a multiple */
980 979 #define KMEM_BIG_SHIFT 12 /* lg(KMEM_BIG_MULTIPLE) */
981 980
982 981 static kmem_cache_t *kmem_alloc_table[KMEM_MAXBUF >> KMEM_ALIGN_SHIFT];
983 982 static kmem_cache_t *kmem_big_alloc_table[KMEM_BIG_MAXBUF >> KMEM_BIG_SHIFT];
984 983
985 984 #define KMEM_ALLOC_TABLE_MAX (KMEM_MAXBUF >> KMEM_ALIGN_SHIFT)
986 985 static size_t kmem_big_alloc_table_max = 0; /* # of filled elements */
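The size classes above preserve the 64-byte alignment guarantee stated in the comment: for example, P2ALIGN(8192 / 7, 64) = P2ALIGN(1170, 64) = 1152, which is 18 * 64. As a rough illustration of how a request size is then mapped to one of these caches (the helper name below is invented; the authoritative lookup is the kmem_alloc() path later in this file), the table indexing amounts to:

static kmem_cache_t *
kmem_size_to_cache_sketch(size_t size)
{
	size_t index;

	/* <= 4K: one cache per 8-byte (KMEM_ALIGN) step */
	if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX)
		return (kmem_alloc_table[index]);

	/* > 4K: one cache per 4K (KMEM_BIG_MULTIPLE) step */
	if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
	    kmem_big_alloc_table_max)
		return (kmem_big_alloc_table[index]);

	return (NULL);	/* falls through to the oversize arena */
}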
987 986
988 987 static kmem_magtype_t kmem_magtype[] = {
989 988 { 1, 8, 3200, 65536 },
990 989 { 3, 16, 256, 32768 },
991 990 { 7, 32, 64, 16384 },
992 991 { 15, 64, 0, 8192 },
993 992 { 31, 64, 0, 4096 },
994 993 { 47, 64, 0, 2048 },
995 994 { 63, 64, 0, 1024 },
996 995 { 95, 64, 0, 512 },
997 996 { 143, 64, 0, 0 },
998 997 };
999 998
1000 999 static uint32_t kmem_reaping;
1001 1000 static uint32_t kmem_reaping_idspace;
1002 1001
1003 1002 /*
1004 1003 * kmem tunables
1005 1004 */
1006 1005 clock_t kmem_reap_interval; /* cache reaping rate [15 * HZ ticks] */
1007 1006 int kmem_depot_contention = 3; /* max failed tryenters per real interval */
1008 1007 pgcnt_t kmem_reapahead = 0; /* start reaping N pages before pageout */
1009 1008 int kmem_panic = 1; /* whether to panic on error */
1010 1009 int kmem_logging = 1; /* kmem_log_enter() override */
1011 1010 uint32_t kmem_mtbf = 0; /* mean time between failures [default: off] */
1012 1011 size_t kmem_transaction_log_size; /* transaction log size [2% of memory] */
1013 1012 size_t kmem_content_log_size; /* content log size [2% of memory] */
1014 1013 size_t kmem_failure_log_size; /* failure log [4 pages per CPU] */
1015 1014 size_t kmem_slab_log_size; /* slab create log [4 pages per CPU] */
1016 1015 size_t kmem_zerosized_log_size; /* zero-sized log [4 pages per CPU] */
1017 1016 size_t kmem_content_maxsave = 256; /* KMF_CONTENTS max bytes to log */
1018 1017 size_t kmem_lite_minsize = 0; /* minimum buffer size for KMF_LITE */
1019 1018 size_t kmem_lite_maxalign = 1024; /* maximum buffer alignment for KMF_LITE */
1020 1019 int kmem_lite_pcs = 4; /* number of PCs to store in KMF_LITE mode */
1021 1020 size_t kmem_maxverify; /* maximum bytes to inspect in debug routines */
1022 1021 size_t kmem_minfirewall; /* hardware-enforced redzone threshold */
1023 1022
1024 1023 #ifdef DEBUG
1025 1024 int kmem_warn_zerosized = 1; /* whether to warn on zero-sized KM_SLEEP */
1026 1025 #else
1027 1026 int kmem_warn_zerosized = 0; /* whether to warn on zero-sized KM_SLEEP */
1028 1027 #endif
1029 1028
1030 1029 int kmem_panic_zerosized = 0; /* whether to panic on zero-sized KM_SLEEP */
1031 1030
1032 1031 #ifdef _LP64
1033 1032 size_t kmem_max_cached = KMEM_BIG_MAXBUF; /* maximum kmem_alloc cache */
1034 1033 #else
1035 1034 size_t kmem_max_cached = KMEM_BIG_MAXBUF_32BIT; /* maximum kmem_alloc cache */
1036 1035 #endif
1037 1036
1038 1037 #ifdef DEBUG
1039 1038 int kmem_flags = KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE | KMF_CONTENTS;
1040 1039 #else
1041 1040 int kmem_flags = 0;
1042 1041 #endif
1043 1042 int kmem_ready;
1044 1043
1045 1044 static kmem_cache_t *kmem_slab_cache;
1046 1045 static kmem_cache_t *kmem_bufctl_cache;
1047 1046 static kmem_cache_t *kmem_bufctl_audit_cache;
1048 1047
1049 1048 static kmutex_t kmem_cache_lock; /* inter-cache linkage only */
1050 1049 static list_t kmem_caches;
1051 1050
1052 1051 static taskq_t *kmem_taskq;
1053 1052 static kmutex_t kmem_flags_lock;
1054 1053 static vmem_t *kmem_metadata_arena;
1055 1054 static vmem_t *kmem_msb_arena; /* arena for metadata caches */
1056 1055 static vmem_t *kmem_cache_arena;
1057 1056 static vmem_t *kmem_hash_arena;
1058 1057 static vmem_t *kmem_log_arena;
1059 1058 static vmem_t *kmem_oversize_arena;
1060 1059 static vmem_t *kmem_va_arena;
1061 1060 static vmem_t *kmem_default_arena;
1062 1061 static vmem_t *kmem_firewall_va_arena;
1063 1062 static vmem_t *kmem_firewall_arena;
1064 1063
1065 1064 static int kmem_zerosized; /* # of zero-sized allocs */
1066 1065
1067 1066 /*
1068 1067 * kmem slab consolidator thresholds (tunables)
1069 1068 */
1070 1069 size_t kmem_frag_minslabs = 101; /* minimum total slabs */
1071 1070 size_t kmem_frag_numer = 1; /* free buffers (numerator) */
1072 1071 size_t kmem_frag_denom = KMEM_VOID_FRACTION; /* buffers (denominator) */
1073 1072 /*
1074 1073 * Maximum number of slabs from which to move buffers during a single
1075 1074 * maintenance interval while the system is not low on memory.
1076 1075 */
1077 1076 size_t kmem_reclaim_max_slabs = 1;
1078 1077 /*
1079 1078 * Number of slabs to scan backwards from the end of the partial slab list
1080 1079 * when searching for buffers to relocate.
1081 1080 */
1082 1081 size_t kmem_reclaim_scan_range = 12;
1083 1082
1084 1083 /* consolidator knobs */
1085 1084 boolean_t kmem_move_noreap;
1086 1085 boolean_t kmem_move_blocked;
1087 1086 boolean_t kmem_move_fulltilt;
1088 1087 boolean_t kmem_move_any_partial;
1089 1088
1090 1089 #ifdef DEBUG
1091 1090 /*
1092 1091 * kmem consolidator debug tunables:
1093 1092 * Ensure code coverage by occasionally running the consolidator even when the
1094 1093 * caches are not fragmented (they may never be). These values are mean times
1095 1094 * expressed in cache maintenance intervals (kmem_cache_update).
1096 1095 */
1097 1096 uint32_t kmem_mtb_move = 60; /* defrag 1 slab (~15min) */
1098 1097 uint32_t kmem_mtb_reap = 1800; /* defrag all slabs (~7.5hrs) */
1099 1098 #endif /* DEBUG */
1100 1099
1101 1100 static kmem_cache_t *kmem_defrag_cache;
1102 1101 static kmem_cache_t *kmem_move_cache;
1103 1102 static taskq_t *kmem_move_taskq;
1104 1103
1105 1104 static void kmem_cache_scan(kmem_cache_t *);
1106 1105 static void kmem_cache_defrag(kmem_cache_t *);
1107 1106 static void kmem_slab_prefill(kmem_cache_t *, kmem_slab_t *);
1108 1107
1109 1108
1110 1109 kmem_log_header_t *kmem_transaction_log;
1111 1110 kmem_log_header_t *kmem_content_log;
1112 1111 kmem_log_header_t *kmem_failure_log;
1113 1112 kmem_log_header_t *kmem_slab_log;
1114 1113 kmem_log_header_t *kmem_zerosized_log;
1115 1114
1116 1115 static int kmem_lite_count; /* # of PCs in kmem_buftag_lite_t */
1117 1116
1118 1117 #define KMEM_BUFTAG_LITE_ENTER(bt, count, caller) \
1119 1118 if ((count) > 0) { \
1120 1119 pc_t *_s = ((kmem_buftag_lite_t *)(bt))->bt_history; \
1121 1120 pc_t *_e; \
1122 1121 /* memmove() the old entries down one notch */ \
1123 1122 for (_e = &_s[(count) - 1]; _e > _s; _e--) \
1124 1123 *_e = *(_e - 1); \
1125 1124 *_s = (uintptr_t)(caller); \
1126 1125 }
1127 1126
1128 1127 #define KMERR_MODIFIED 0 /* buffer modified while on freelist */
1129 1128 #define KMERR_REDZONE 1 /* redzone violation (write past end of buf) */
1130 1129 #define KMERR_DUPFREE 2 /* freed a buffer twice */
1131 1130 #define KMERR_BADADDR 3 /* freed a bad (unallocated) address */
1132 1131 #define KMERR_BADBUFTAG 4 /* buftag corrupted */
1133 1132 #define KMERR_BADBUFCTL 5 /* bufctl corrupted */
1134 1133 #define KMERR_BADCACHE 6 /* freed a buffer to the wrong cache */
1135 1134 #define KMERR_BADSIZE 7 /* alloc size != free size */
1136 1135 #define KMERR_BADBASE 8 /* buffer base address wrong */
1137 1136
1138 1137 struct {
1139 1138 hrtime_t kmp_timestamp; /* timestamp of panic */
1140 1139 int kmp_error; /* type of kmem error */
1141 1140 void *kmp_buffer; /* buffer that induced panic */
1142 1141 void *kmp_realbuf; /* real start address for buffer */
1143 1142 kmem_cache_t *kmp_cache; /* buffer's cache according to client */
1144 1143 kmem_cache_t *kmp_realcache; /* actual cache containing buffer */
1145 1144 kmem_slab_t *kmp_slab; /* slab according to kmem_findslab() */
1146 1145 kmem_bufctl_t *kmp_bufctl; /* bufctl */
1147 1146 } kmem_panic_info;
1148 1147
1149 1148
1150 1149 static void
1151 1150 copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
1152 1151 {
1153 1152 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1154 1153 uint64_t *buf = buf_arg;
1155 1154
1156 1155 while (buf < bufend)
1157 1156 *buf++ = pattern;
1158 1157 }
1159 1158
1160 1159 static void *
1161 1160 verify_pattern(uint64_t pattern, void *buf_arg, size_t size)
1162 1161 {
1163 1162 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1164 1163 uint64_t *buf;
1165 1164
1166 1165 for (buf = buf_arg; buf < bufend; buf++)
1167 1166 if (*buf != pattern)
1168 1167 return (buf);
1169 1168 return (NULL);
1170 1169 }
1171 1170
1172 1171 static void *
1173 1172 verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size)
1174 1173 {
1175 1174 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1176 1175 uint64_t *buf;
1177 1176
1178 1177 for (buf = buf_arg; buf < bufend; buf++) {
1179 1178 if (*buf != old) {
1180 1179 copy_pattern(old, buf_arg,
1181 1180 (char *)buf - (char *)buf_arg);
1182 1181 return (buf);
1183 1182 }
1184 1183 *buf = new;
1185 1184 }
1186 1185
1187 1186 return (NULL);
1188 1187 }
1189 1188
1190 1189 static void
1191 1190 kmem_cache_applyall(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
1192 1191 {
1193 1192 kmem_cache_t *cp;
1194 1193
1195 1194 mutex_enter(&kmem_cache_lock);
1196 1195 for (cp = list_head(&kmem_caches); cp != NULL;
1197 1196 cp = list_next(&kmem_caches, cp))
1198 1197 if (tq != NULL)
1199 1198 (void) taskq_dispatch(tq, (task_func_t *)func, cp,
1200 1199 tqflag);
1201 1200 else
1202 1201 func(cp);
1203 1202 mutex_exit(&kmem_cache_lock);
1204 1203 }
1205 1204
1206 1205 static void
1207 1206 kmem_cache_applyall_id(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
1208 1207 {
1209 1208 kmem_cache_t *cp;
1210 1209
1211 1210 mutex_enter(&kmem_cache_lock);
1212 1211 for (cp = list_head(&kmem_caches); cp != NULL;
1213 1212 cp = list_next(&kmem_caches, cp)) {
1214 1213 if (!(cp->cache_cflags & KMC_IDENTIFIER))
1215 1214 continue;
1216 1215 if (tq != NULL)
1217 1216 (void) taskq_dispatch(tq, (task_func_t *)func, cp,
1218 1217 tqflag);
1219 1218 else
1220 1219 func(cp);
1221 1220 }
1222 1221 mutex_exit(&kmem_cache_lock);
1223 1222 }
1224 1223
1225 1224 /*
1226 1225 * Debugging support. Given a buffer address, find its slab.
1227 1226 */
1228 1227 static kmem_slab_t *
1229 1228 kmem_findslab(kmem_cache_t *cp, void *buf)
1230 1229 {
1231 1230 kmem_slab_t *sp;
1232 1231
1233 1232 mutex_enter(&cp->cache_lock);
1234 1233 for (sp = list_head(&cp->cache_complete_slabs); sp != NULL;
1235 1234 sp = list_next(&cp->cache_complete_slabs, sp)) {
1236 1235 if (KMEM_SLAB_MEMBER(sp, buf)) {
1237 1236 mutex_exit(&cp->cache_lock);
1238 1237 return (sp);
1239 1238 }
1240 1239 }
1241 1240 for (sp = avl_first(&cp->cache_partial_slabs); sp != NULL;
1242 1241 sp = AVL_NEXT(&cp->cache_partial_slabs, sp)) {
1243 1242 if (KMEM_SLAB_MEMBER(sp, buf)) {
1244 1243 mutex_exit(&cp->cache_lock);
1245 1244 return (sp);
1246 1245 }
1247 1246 }
1248 1247 mutex_exit(&cp->cache_lock);
1249 1248
1250 1249 return (NULL);
1251 1250 }
1252 1251
1253 1252 static void
1254 1253 kmem_error(int error, kmem_cache_t *cparg, void *bufarg)
1255 1254 {
1256 1255 kmem_buftag_t *btp = NULL;
1257 1256 kmem_bufctl_t *bcp = NULL;
1258 1257 kmem_cache_t *cp = cparg;
1259 1258 kmem_slab_t *sp;
1260 1259 uint64_t *off;
1261 1260 void *buf = bufarg;
1262 1261
1263 1262 kmem_logging = 0; /* stop logging when a bad thing happens */
1264 1263
1265 1264 kmem_panic_info.kmp_timestamp = gethrtime();
1266 1265
1267 1266 sp = kmem_findslab(cp, buf);
1268 1267 if (sp == NULL) {
1269 1268 for (cp = list_tail(&kmem_caches); cp != NULL;
1270 1269 cp = list_prev(&kmem_caches, cp)) {
1271 1270 if ((sp = kmem_findslab(cp, buf)) != NULL)
1272 1271 break;
1273 1272 }
1274 1273 }
1275 1274
1276 1275 if (sp == NULL) {
1277 1276 cp = NULL;
1278 1277 error = KMERR_BADADDR;
1279 1278 } else {
1280 1279 if (cp != cparg)
1281 1280 error = KMERR_BADCACHE;
1282 1281 else
1283 1282 buf = (char *)bufarg - ((uintptr_t)bufarg -
1284 1283 (uintptr_t)sp->slab_base) % cp->cache_chunksize;
1285 1284 if (buf != bufarg)
1286 1285 error = KMERR_BADBASE;
1287 1286 if (cp->cache_flags & KMF_BUFTAG)
1288 1287 btp = KMEM_BUFTAG(cp, buf);
1289 1288 if (cp->cache_flags & KMF_HASH) {
1290 1289 mutex_enter(&cp->cache_lock);
1291 1290 for (bcp = *KMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next)
1292 1291 if (bcp->bc_addr == buf)
1293 1292 break;
1294 1293 mutex_exit(&cp->cache_lock);
1295 1294 if (bcp == NULL && btp != NULL)
1296 1295 bcp = btp->bt_bufctl;
1297 1296 if (kmem_findslab(cp->cache_bufctl_cache, bcp) ==
1298 1297 NULL || P2PHASE((uintptr_t)bcp, KMEM_ALIGN) ||
1299 1298 bcp->bc_addr != buf) {
1300 1299 error = KMERR_BADBUFCTL;
1301 1300 bcp = NULL;
1302 1301 }
1303 1302 }
1304 1303 }
1305 1304
1306 1305 kmem_panic_info.kmp_error = error;
1307 1306 kmem_panic_info.kmp_buffer = bufarg;
1308 1307 kmem_panic_info.kmp_realbuf = buf;
1309 1308 kmem_panic_info.kmp_cache = cparg;
1310 1309 kmem_panic_info.kmp_realcache = cp;
1311 1310 kmem_panic_info.kmp_slab = sp;
1312 1311 kmem_panic_info.kmp_bufctl = bcp;
1313 1312
1314 1313 printf("kernel memory allocator: ");
1315 1314
1316 1315 switch (error) {
1317 1316
1318 1317 case KMERR_MODIFIED:
1319 1318 printf("buffer modified after being freed\n");
1320 1319 off = verify_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1321 1320 if (off == NULL) /* shouldn't happen */
1322 1321 off = buf;
1323 1322 printf("modification occurred at offset 0x%lx "
1324 1323 "(0x%llx replaced by 0x%llx)\n",
1325 1324 (uintptr_t)off - (uintptr_t)buf,
1326 1325 (longlong_t)KMEM_FREE_PATTERN, (longlong_t)*off);
1327 1326 break;
1328 1327
1329 1328 case KMERR_REDZONE:
1330 1329 printf("redzone violation: write past end of buffer\n");
1331 1330 break;
1332 1331
1333 1332 case KMERR_BADADDR:
1334 1333 printf("invalid free: buffer not in cache\n");
1335 1334 break;
1336 1335
1337 1336 case KMERR_DUPFREE:
1338 1337 printf("duplicate free: buffer freed twice\n");
1339 1338 break;
1340 1339
1341 1340 case KMERR_BADBUFTAG:
1342 1341 printf("boundary tag corrupted\n");
1343 1342 printf("bcp ^ bxstat = %lx, should be %lx\n",
1344 1343 (intptr_t)btp->bt_bufctl ^ btp->bt_bxstat,
1345 1344 KMEM_BUFTAG_FREE);
1346 1345 break;
1347 1346
1348 1347 case KMERR_BADBUFCTL:
1349 1348 printf("bufctl corrupted\n");
1350 1349 break;
1351 1350
1352 1351 case KMERR_BADCACHE:
1353 1352 printf("buffer freed to wrong cache\n");
1354 1353 printf("buffer was allocated from %s,\n", cp->cache_name);
1355 1354 printf("caller attempting free to %s.\n", cparg->cache_name);
1356 1355 break;
1357 1356
1358 1357 case KMERR_BADSIZE:
1359 1358 printf("bad free: free size (%u) != alloc size (%u)\n",
1360 1359 KMEM_SIZE_DECODE(((uint32_t *)btp)[0]),
1361 1360 KMEM_SIZE_DECODE(((uint32_t *)btp)[1]));
1362 1361 break;
1363 1362
1364 1363 case KMERR_BADBASE:
1365 1364 printf("bad free: free address (%p) != alloc address (%p)\n",
1366 1365 bufarg, buf);
1367 1366 break;
1368 1367 }
1369 1368
1370 1369 printf("buffer=%p bufctl=%p cache: %s\n",
1371 1370 bufarg, (void *)bcp, cparg->cache_name);
1372 1371
1373 1372 if (bcp != NULL && (cp->cache_flags & KMF_AUDIT) &&
1374 1373 error != KMERR_BADBUFCTL) {
1375 1374 int d;
1376 1375 timestruc_t ts;
1377 1376 kmem_bufctl_audit_t *bcap = (kmem_bufctl_audit_t *)bcp;
1378 1377
1379 1378 hrt2ts(kmem_panic_info.kmp_timestamp - bcap->bc_timestamp, &ts);
1380 1379 printf("previous transaction on buffer %p:\n", buf);
1381 1380 printf("thread=%p time=T-%ld.%09ld slab=%p cache: %s\n",
1382 1381 (void *)bcap->bc_thread, ts.tv_sec, ts.tv_nsec,
1383 1382 (void *)sp, cp->cache_name);
1384 1383 for (d = 0; d < MIN(bcap->bc_depth, KMEM_STACK_DEPTH); d++) {
1385 1384 ulong_t off;
1386 1385 char *sym = kobj_getsymname(bcap->bc_stack[d], &off);
1387 1386 printf("%s+%lx\n", sym ? sym : "?", off);
1388 1387 }
1389 1388 }
1390 1389 if (kmem_panic > 0)
1391 1390 panic("kernel heap corruption detected");
1392 1391 if (kmem_panic == 0)
1393 1392 debug_enter(NULL);
1394 1393 kmem_logging = 1; /* resume logging */
1395 1394 }
1396 1395
1397 1396 static kmem_log_header_t *
1398 1397 kmem_log_init(size_t logsize)
1399 1398 {
1400 1399 kmem_log_header_t *lhp;
1401 1400 int nchunks = 4 * max_ncpus;
1402 1401 size_t lhsize = (size_t)&((kmem_log_header_t *)0)->lh_cpu[max_ncpus];
1403 1402 int i;
1404 1403
1405 1404 /*
1406 1405 * Make sure that lhp->lh_cpu[] is nicely aligned
1407 1406 * to prevent false sharing of cache lines.
1408 1407 */
1409 1408 lhsize = P2ROUNDUP(lhsize, KMEM_ALIGN);
1410 1409 lhp = vmem_xalloc(kmem_log_arena, lhsize, 64, P2NPHASE(lhsize, 64), 0,
1411 1410 NULL, NULL, VM_SLEEP);
1412 1411 bzero(lhp, lhsize);
1413 1412
1414 1413 mutex_init(&lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL);
1415 1414 lhp->lh_nchunks = nchunks;
1416 1415 lhp->lh_chunksize = P2ROUNDUP(logsize / nchunks + 1, PAGESIZE);
1417 1416 lhp->lh_base = vmem_alloc(kmem_log_arena,
1418 1417 lhp->lh_chunksize * nchunks, VM_SLEEP);
1419 1418 lhp->lh_free = vmem_alloc(kmem_log_arena,
1420 1419 nchunks * sizeof (int), VM_SLEEP);
1421 1420 bzero(lhp->lh_base, lhp->lh_chunksize * nchunks);
1422 1421
1423 1422 for (i = 0; i < max_ncpus; i++) {
1424 1423 kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[i];
1425 1424 mutex_init(&clhp->clh_lock, NULL, MUTEX_DEFAULT, NULL);
1426 1425 clhp->clh_chunk = i;
1427 1426 }
1428 1427
1429 1428 for (i = max_ncpus; i < nchunks; i++)
1430 1429 lhp->lh_free[i] = i;
1431 1430
1432 1431 lhp->lh_head = max_ncpus;
1433 1432 lhp->lh_tail = 0;
1434 1433
1435 1434 return (lhp);
1436 1435 }
1437 1436
1438 1437 static void *
1439 1438 kmem_log_enter(kmem_log_header_t *lhp, void *data, size_t size)
1440 1439 {
1441 1440 void *logspace;
1442 1441 kmem_cpu_log_header_t *clhp;
1443 1442
1444 1443 if (lhp == NULL || kmem_logging == 0 || panicstr)
1445 1444 return (NULL);
1446 1445
1447 1446 clhp = &lhp->lh_cpu[CPU->cpu_seqid];
1448 1447
1449 1448 mutex_enter(&clhp->clh_lock);
1450 1449 clhp->clh_hits++;
1451 1450 if (size > clhp->clh_avail) {
1452 1451 mutex_enter(&lhp->lh_lock);
1453 1452 lhp->lh_hits++;
1454 1453 lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk;
1455 1454 lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks;
1456 1455 clhp->clh_chunk = lhp->lh_free[lhp->lh_head];
1457 1456 lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks;
1458 1457 clhp->clh_current = lhp->lh_base +
1459 1458 clhp->clh_chunk * lhp->lh_chunksize;
1460 1459 clhp->clh_avail = lhp->lh_chunksize;
1461 1460 if (size > lhp->lh_chunksize)
1462 1461 size = lhp->lh_chunksize;
1463 1462 mutex_exit(&lhp->lh_lock);
1464 1463 }
1465 1464 logspace = clhp->clh_current;
1466 1465 clhp->clh_current += size;
1467 1466 clhp->clh_avail -= size;
1468 1467 bcopy(data, logspace, size);
1469 1468 mutex_exit(&clhp->clh_lock);
1470 1469 return (logspace);
1471 1470 }
1472 1471
1473 1472 #define KMEM_AUDIT(lp, cp, bcp) \
1474 1473 { \
1475 1474 kmem_bufctl_audit_t *_bcp = (kmem_bufctl_audit_t *)(bcp); \
1476 1475 _bcp->bc_timestamp = gethrtime(); \
1477 1476 _bcp->bc_thread = curthread; \
1478 1477 _bcp->bc_depth = getpcstack(_bcp->bc_stack, KMEM_STACK_DEPTH); \
1479 1478 _bcp->bc_lastlog = kmem_log_enter((lp), _bcp, sizeof (*_bcp)); \
1480 1479 }
1481 1480
1482 1481 static void
1483 1482 kmem_log_event(kmem_log_header_t *lp, kmem_cache_t *cp,
1484 1483 kmem_slab_t *sp, void *addr)
1485 1484 {
1486 1485 kmem_bufctl_audit_t bca;
1487 1486
1488 1487 bzero(&bca, sizeof (kmem_bufctl_audit_t));
1489 1488 bca.bc_addr = addr;
1490 1489 bca.bc_slab = sp;
1491 1490 bca.bc_cache = cp;
1492 1491 KMEM_AUDIT(lp, cp, &bca);
1493 1492 }
1494 1493
1495 1494 /*
1496 1495 * Create a new slab for cache cp.
1497 1496 */
1498 1497 static kmem_slab_t *
1499 1498 kmem_slab_create(kmem_cache_t *cp, int kmflag)
1500 1499 {
1501 1500 size_t slabsize = cp->cache_slabsize;
1502 1501 size_t chunksize = cp->cache_chunksize;
1503 1502 int cache_flags = cp->cache_flags;
1504 1503 size_t color, chunks;
1505 1504 char *buf, *slab;
1506 1505 kmem_slab_t *sp;
1507 1506 kmem_bufctl_t *bcp;
1508 1507 vmem_t *vmp = cp->cache_arena;
1509 1508
1510 1509 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1511 1510
1512 1511 color = cp->cache_color + cp->cache_align;
1513 1512 if (color > cp->cache_maxcolor)
1514 1513 color = cp->cache_mincolor;
1515 1514 cp->cache_color = color;
1516 1515
1517 1516 slab = vmem_alloc(vmp, slabsize, kmflag & KM_VMFLAGS);
1518 1517
1519 1518 if (slab == NULL)
1520 1519 goto vmem_alloc_failure;
1521 1520
1522 1521 ASSERT(P2PHASE((uintptr_t)slab, vmp->vm_quantum) == 0);
1523 1522
1524 1523 /*
1525 1524 * Reverify what was already checked in kmem_cache_set_move(), since the
1526 1525 * consolidator depends (for correctness) on slabs being initialized
1527 1526 * with the 0xbaddcafe memory pattern (setting a low order bit usable by
1528 1527 * clients to distinguish uninitialized memory from known objects).
1529 1528 */
1530 1529 ASSERT((cp->cache_move == NULL) || !(cp->cache_cflags & KMC_NOTOUCH));
1531 1530 if (!(cp->cache_cflags & KMC_NOTOUCH))
1532 1531 copy_pattern(KMEM_UNINITIALIZED_PATTERN, slab, slabsize);
1533 1532
1534 1533 if (cache_flags & KMF_HASH) {
1535 1534 if ((sp = kmem_cache_alloc(kmem_slab_cache, kmflag)) == NULL)
1536 1535 goto slab_alloc_failure;
1537 1536 chunks = (slabsize - color) / chunksize;
1538 1537 } else {
1539 1538 sp = KMEM_SLAB(cp, slab);
1540 1539 chunks = (slabsize - sizeof (kmem_slab_t) - color) / chunksize;
1541 1540 }
1542 1541
1543 1542 sp->slab_cache = cp;
1544 1543 sp->slab_head = NULL;
1545 1544 sp->slab_refcnt = 0;
1546 1545 sp->slab_base = buf = slab + color;
1547 1546 sp->slab_chunks = chunks;
1548 1547 sp->slab_stuck_offset = (uint32_t)-1;
1549 1548 sp->slab_later_count = 0;
1550 1549 sp->slab_flags = 0;
1551 1550
1552 1551 ASSERT(chunks > 0);
1553 1552 while (chunks-- != 0) {
1554 1553 if (cache_flags & KMF_HASH) {
1555 1554 bcp = kmem_cache_alloc(cp->cache_bufctl_cache, kmflag);
1556 1555 if (bcp == NULL)
1557 1556 goto bufctl_alloc_failure;
1558 1557 if (cache_flags & KMF_AUDIT) {
1559 1558 kmem_bufctl_audit_t *bcap =
1560 1559 (kmem_bufctl_audit_t *)bcp;
1561 1560 bzero(bcap, sizeof (kmem_bufctl_audit_t));
1562 1561 bcap->bc_cache = cp;
1563 1562 }
1564 1563 bcp->bc_addr = buf;
1565 1564 bcp->bc_slab = sp;
1566 1565 } else {
1567 1566 bcp = KMEM_BUFCTL(cp, buf);
1568 1567 }
1569 1568 if (cache_flags & KMF_BUFTAG) {
1570 1569 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1571 1570 btp->bt_redzone = KMEM_REDZONE_PATTERN;
1572 1571 btp->bt_bufctl = bcp;
1573 1572 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
1574 1573 if (cache_flags & KMF_DEADBEEF) {
1575 1574 copy_pattern(KMEM_FREE_PATTERN, buf,
1576 1575 cp->cache_verify);
1577 1576 }
1578 1577 }
1579 1578 bcp->bc_next = sp->slab_head;
1580 1579 sp->slab_head = bcp;
1581 1580 buf += chunksize;
1582 1581 }
1583 1582
1584 1583 kmem_log_event(kmem_slab_log, cp, sp, slab);
1585 1584
1586 1585 return (sp);
1587 1586
1588 1587 bufctl_alloc_failure:
1589 1588
1590 1589 while ((bcp = sp->slab_head) != NULL) {
1591 1590 sp->slab_head = bcp->bc_next;
1592 1591 kmem_cache_free(cp->cache_bufctl_cache, bcp);
1593 1592 }
1594 1593 kmem_cache_free(kmem_slab_cache, sp);
1595 1594
1596 1595 slab_alloc_failure:
1597 1596
1598 1597 vmem_free(vmp, slab, slabsize);
1599 1598
1600 1599 vmem_alloc_failure:
1601 1600
1602 1601 kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1603 1602 atomic_inc_64(&cp->cache_alloc_fail);
1604 1603
1605 1604 return (NULL);
1606 1605 }
1607 1606
1608 1607 /*
1609 1608 * Destroy a slab.
1610 1609 */
1611 1610 static void
1612 1611 kmem_slab_destroy(kmem_cache_t *cp, kmem_slab_t *sp)
1613 1612 {
1614 1613 vmem_t *vmp = cp->cache_arena;
1615 1614 void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum);
1616 1615
1617 1616 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1618 1617 ASSERT(sp->slab_refcnt == 0);
1619 1618
1620 1619 if (cp->cache_flags & KMF_HASH) {
1621 1620 kmem_bufctl_t *bcp;
1622 1621 while ((bcp = sp->slab_head) != NULL) {
1623 1622 sp->slab_head = bcp->bc_next;
1624 1623 kmem_cache_free(cp->cache_bufctl_cache, bcp);
1625 1624 }
1626 1625 kmem_cache_free(kmem_slab_cache, sp);
1627 1626 }
1628 1627 vmem_free(vmp, slab, cp->cache_slabsize);
1629 1628 }
1630 1629
1631 1630 static void *
1632 1631 kmem_slab_alloc_impl(kmem_cache_t *cp, kmem_slab_t *sp, boolean_t prefill)
1633 1632 {
1634 1633 kmem_bufctl_t *bcp, **hash_bucket;
1635 1634 void *buf;
1636 1635 boolean_t new_slab = (sp->slab_refcnt == 0);
1637 1636
1638 1637 ASSERT(MUTEX_HELD(&cp->cache_lock));
1639 1638 /*
1640 1639 * kmem_slab_alloc() drops cache_lock when it creates a new slab, so we
1641 1640 * can't ASSERT(avl_is_empty(&cp->cache_partial_slabs)) here when the
1642 1641 * slab is newly created.
1643 1642 */
1644 1643 ASSERT(new_slab || (KMEM_SLAB_IS_PARTIAL(sp) &&
1645 1644 (sp == avl_first(&cp->cache_partial_slabs))));
1646 1645 ASSERT(sp->slab_cache == cp);
1647 1646
1648 1647 cp->cache_slab_alloc++;
1649 1648 cp->cache_bufslab--;
1650 1649 sp->slab_refcnt++;
1651 1650
1652 1651 bcp = sp->slab_head;
1653 1652 sp->slab_head = bcp->bc_next;
1654 1653
1655 1654 if (cp->cache_flags & KMF_HASH) {
1656 1655 /*
1657 1656 * Add buffer to allocated-address hash table.
1658 1657 */
1659 1658 buf = bcp->bc_addr;
1660 1659 hash_bucket = KMEM_HASH(cp, buf);
1661 1660 bcp->bc_next = *hash_bucket;
1662 1661 *hash_bucket = bcp;
1663 1662 if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1664 1663 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1665 1664 }
1666 1665 } else {
1667 1666 buf = KMEM_BUF(cp, bcp);
1668 1667 }
1669 1668
1670 1669 ASSERT(KMEM_SLAB_MEMBER(sp, buf));
1671 1670
1672 1671 if (sp->slab_head == NULL) {
1673 1672 ASSERT(KMEM_SLAB_IS_ALL_USED(sp));
1674 1673 if (new_slab) {
1675 1674 ASSERT(sp->slab_chunks == 1);
1676 1675 } else {
1677 1676 ASSERT(sp->slab_chunks > 1); /* the slab was partial */
1678 1677 avl_remove(&cp->cache_partial_slabs, sp);
1679 1678 sp->slab_later_count = 0; /* clear history */
1680 1679 sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
1681 1680 sp->slab_stuck_offset = (uint32_t)-1;
1682 1681 }
1683 1682 list_insert_head(&cp->cache_complete_slabs, sp);
1684 1683 cp->cache_complete_slab_count++;
1685 1684 return (buf);
1686 1685 }
1687 1686
1688 1687 ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
1689 1688 /*
1690 1689 * Peek to see if the magazine layer is enabled before
1691 1690 * we prefill. We're not holding the cpu cache lock,
1692 1691 * so the peek could be wrong, but there's no harm in it.
1693 1692 */
1694 1693 if (new_slab && prefill && (cp->cache_flags & KMF_PREFILL) &&
1695 1694 (KMEM_CPU_CACHE(cp)->cc_magsize != 0)) {
1696 1695 kmem_slab_prefill(cp, sp);
1697 1696 return (buf);
1698 1697 }
1699 1698
1700 1699 if (new_slab) {
1701 1700 avl_add(&cp->cache_partial_slabs, sp);
1702 1701 return (buf);
1703 1702 }
1704 1703
1705 1704 /*
1706 1705 * The slab is now more allocated than it was, so the
1707 1706 * order remains unchanged.
1708 1707 */
1709 1708 ASSERT(!avl_update(&cp->cache_partial_slabs, sp));
1710 1709 return (buf);
1711 1710 }
1712 1711
1713 1712 /*
1714 1713 * Allocate a raw (unconstructed) buffer from cp's slab layer.
1715 1714 */
1716 1715 static void *
1717 1716 kmem_slab_alloc(kmem_cache_t *cp, int kmflag)
1718 1717 {
1719 1718 kmem_slab_t *sp;
1720 1719 void *buf;
1721 1720 boolean_t test_destructor;
1722 1721
1723 1722 mutex_enter(&cp->cache_lock);
1724 1723 test_destructor = (cp->cache_slab_alloc == 0);
1725 1724 sp = avl_first(&cp->cache_partial_slabs);
1726 1725 if (sp == NULL) {
1727 1726 ASSERT(cp->cache_bufslab == 0);
1728 1727
1729 1728 /*
1730 1729 * The freelist is empty. Create a new slab.
1731 1730 */
1732 1731 mutex_exit(&cp->cache_lock);
1733 1732 if ((sp = kmem_slab_create(cp, kmflag)) == NULL) {
1734 1733 return (NULL);
1735 1734 }
1736 1735 mutex_enter(&cp->cache_lock);
1737 1736 cp->cache_slab_create++;
1738 1737 if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax)
1739 1738 cp->cache_bufmax = cp->cache_buftotal;
1740 1739 cp->cache_bufslab += sp->slab_chunks;
1741 1740 }
1742 1741
1743 1742 buf = kmem_slab_alloc_impl(cp, sp, B_TRUE);
1744 1743 ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1745 1744 (cp->cache_complete_slab_count +
1746 1745 avl_numnodes(&cp->cache_partial_slabs) +
1747 1746 (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1748 1747 mutex_exit(&cp->cache_lock);
1749 1748
1750 1749 if (test_destructor && cp->cache_destructor != NULL) {
1751 1750 /*
1752 1751 * On the first kmem_slab_alloc(), assert that it is valid to
1753 1752 * call the destructor on a newly constructed object without any
1754 1753 * client involvement.
1755 1754 */
1756 1755 if ((cp->cache_constructor == NULL) ||
1757 1756 cp->cache_constructor(buf, cp->cache_private,
1758 1757 kmflag) == 0) {
1759 1758 cp->cache_destructor(buf, cp->cache_private);
1760 1759 }
1761 1760 copy_pattern(KMEM_UNINITIALIZED_PATTERN, buf,
1762 1761 cp->cache_bufsize);
1763 1762 if (cp->cache_flags & KMF_DEADBEEF) {
1764 1763 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1765 1764 }
1766 1765 }
1767 1766
1768 1767 return (buf);
1769 1768 }
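The destructor test above encodes the object-caching contract: a constructor must leave the buffer in a state that the destructor can tear down even if the client never used the object. A minimal hypothetical client honoring that contract (foo_t, foo_cache and the function names are invented for illustration; kmem_cache_create(), mutex_init() and mutex_destroy() are the real interfaces):

typedef struct foo {
	kmutex_t	foo_lock;	/* initialized by the constructor */
	uint64_t	foo_refcnt;
} foo_t;

static kmem_cache_t *foo_cache;

/* Constructor: establish only invariants the destructor can always undo. */
static int
foo_construct(void *buf, void *private, int kmflag)
{
	foo_t *fp = buf;

	mutex_init(&fp->foo_lock, NULL, MUTEX_DEFAULT, NULL);
	fp->foo_refcnt = 0;
	return (0);
}

/* Destructor: must be safe on any constructed object, used or not. */
static void
foo_destruct(void *buf, void *private)
{
	foo_t *fp = buf;

	ASSERT(fp->foo_refcnt == 0);
	mutex_destroy(&fp->foo_lock);
}

void
foo_init(void)
{
	foo_cache = kmem_cache_create("foo_cache", sizeof (foo_t), 0,
	    foo_construct, foo_destruct, NULL, NULL, NULL, 0);
}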
1770 1769
1771 1770 static void kmem_slab_move_yes(kmem_cache_t *, kmem_slab_t *, void *);
1772 1771
1773 1772 /*
1774 1773 * Free a raw (unconstructed) buffer to cp's slab layer.
1775 1774 */
1776 1775 static void
1777 1776 kmem_slab_free(kmem_cache_t *cp, void *buf)
1778 1777 {
1779 1778 kmem_slab_t *sp;
1780 1779 kmem_bufctl_t *bcp, **prev_bcpp;
1781 1780
1782 1781 ASSERT(buf != NULL);
1783 1782
1784 1783 mutex_enter(&cp->cache_lock);
1785 1784 cp->cache_slab_free++;
1786 1785
1787 1786 if (cp->cache_flags & KMF_HASH) {
1788 1787 /*
1789 1788 * Look up buffer in allocated-address hash table.
1790 1789 */
1791 1790 prev_bcpp = KMEM_HASH(cp, buf);
1792 1791 while ((bcp = *prev_bcpp) != NULL) {
1793 1792 if (bcp->bc_addr == buf) {
1794 1793 *prev_bcpp = bcp->bc_next;
1795 1794 sp = bcp->bc_slab;
1796 1795 break;
1797 1796 }
1798 1797 cp->cache_lookup_depth++;
1799 1798 prev_bcpp = &bcp->bc_next;
1800 1799 }
1801 1800 } else {
1802 1801 bcp = KMEM_BUFCTL(cp, buf);
1803 1802 sp = KMEM_SLAB(cp, buf);
1804 1803 }
1805 1804
1806 1805 if (bcp == NULL || sp->slab_cache != cp || !KMEM_SLAB_MEMBER(sp, buf)) {
1807 1806 mutex_exit(&cp->cache_lock);
1808 1807 kmem_error(KMERR_BADADDR, cp, buf);
1809 1808 return;
1810 1809 }
1811 1810
1812 1811 if (KMEM_SLAB_OFFSET(sp, buf) == sp->slab_stuck_offset) {
1813 1812 /*
1814 1813 * If this is the buffer that prevented the consolidator from
1815 1814 * clearing the slab, we can reset the slab flags now that the
1816 1815 * buffer is freed. (It makes sense to do this in
1817 1816 * kmem_cache_free(), where the client gives up ownership of the
1818 1817 * buffer, but on the hot path the test is too expensive.)
1819 1818 */
1820 1819 kmem_slab_move_yes(cp, sp, buf);
1821 1820 }
1822 1821
1823 1822 if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1824 1823 if (cp->cache_flags & KMF_CONTENTS)
1825 1824 ((kmem_bufctl_audit_t *)bcp)->bc_contents =
1826 1825 kmem_log_enter(kmem_content_log, buf,
1827 1826 cp->cache_contents);
1828 1827 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1829 1828 }
1830 1829
1831 1830 bcp->bc_next = sp->slab_head;
1832 1831 sp->slab_head = bcp;
1833 1832
1834 1833 cp->cache_bufslab++;
1835 1834 ASSERT(sp->slab_refcnt >= 1);
1836 1835
1837 1836 if (--sp->slab_refcnt == 0) {
1838 1837 /*
1839 1838 * There are no outstanding allocations from this slab,
1840 1839 * so we can reclaim the memory.
1841 1840 */
1842 1841 if (sp->slab_chunks == 1) {
1843 1842 list_remove(&cp->cache_complete_slabs, sp);
1844 1843 cp->cache_complete_slab_count--;
1845 1844 } else {
1846 1845 avl_remove(&cp->cache_partial_slabs, sp);
1847 1846 }
1848 1847
1849 1848 cp->cache_buftotal -= sp->slab_chunks;
1850 1849 cp->cache_bufslab -= sp->slab_chunks;
1851 1850 /*
1852 1851 * Defer releasing the slab to the virtual memory subsystem
1853 1852 * while there is a pending move callback, since we guarantee
1854 1853 * that buffers passed to the move callback have only been
1855 1854 * touched by kmem or by the client itself. Since the memory
1856 1855 * patterns baddcafe (uninitialized) and deadbeef (freed) both
1857 1856 * set at least one of the two lowest order bits, the client can
1858 1857 * test those bits in the move callback to determine whether or
1859 1858 * not it knows about the buffer (assuming that the client also
1860 1859 * sets one of those low order bits whenever it frees a buffer).
1861 1860 */
1862 1861 if (cp->cache_defrag == NULL ||
1863 1862 (avl_is_empty(&cp->cache_defrag->kmd_moves_pending) &&
1864 1863 !(sp->slab_flags & KMEM_SLAB_MOVE_PENDING))) {
1865 1864 cp->cache_slab_destroy++;
1866 1865 mutex_exit(&cp->cache_lock);
1867 1866 kmem_slab_destroy(cp, sp);
1868 1867 } else {
1869 1868 list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
1870 1869 /*
1871 1870 * Slabs are inserted at both ends of the deadlist to
1872 1871 * distinguish between slabs freed while move callbacks
1873 1872 * are pending (list head) and a slab freed while the
1874 1873 * lock is dropped in kmem_move_buffers() (list tail) so
1875 1874 * that in both cases slab_destroy() is called from the
1876 1875 * right context.
1877 1876 */
1878 1877 if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
1879 1878 list_insert_tail(deadlist, sp);
1880 1879 } else {
1881 1880 list_insert_head(deadlist, sp);
1882 1881 }
1883 1882 cp->cache_defrag->kmd_deadcount++;
1884 1883 mutex_exit(&cp->cache_lock);
1885 1884 }
1886 1885 return;
1887 1886 }
1888 1887
1889 1888 if (bcp->bc_next == NULL) {
1890 1889 /* Transition the slab from completely allocated to partial. */
1891 1890 ASSERT(sp->slab_refcnt == (sp->slab_chunks - 1));
1892 1891 ASSERT(sp->slab_chunks > 1);
1893 1892 list_remove(&cp->cache_complete_slabs, sp);
1894 1893 cp->cache_complete_slab_count--;
1895 1894 avl_add(&cp->cache_partial_slabs, sp);
1896 1895 } else {
1897 1896 (void) avl_update_gt(&cp->cache_partial_slabs, sp);
1898 1897 }
1899 1898
1900 1899 ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1901 1900 (cp->cache_complete_slab_count +
1902 1901 avl_numnodes(&cp->cache_partial_slabs) +
1903 1902 (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1904 1903 mutex_exit(&cp->cache_lock);
1905 1904 }
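The low-order-bit convention described in the comment above (0xbaddcafe and 0xdeadbeef both set one of the two lowest bits) is what lets a consolidator client reject buffers it does not own. A hypothetical move callback sketching that test (bar_t and its fields are invented; kmem_cbrc_t, the KMEM_CBRC_* results, and kmem_cache_set_move() are the real callback protocol):

typedef struct bar {
	void		*bar_owner;	/* client sets the low bit on free */
	uint64_t	bar_value;
} bar_t;

/* ARGSUSED */
static kmem_cbrc_t
bar_move(void *old, void *new, size_t bufsize, void *arg)
{
	bar_t *from = old;
	bar_t *to = new;

	/*
	 * Freed (0xdeadbeef), uninitialized (0xbaddcafe) and client-freed
	 * buffers all have a low-order bit set in bar_owner, so one test
	 * rejects everything that is not a known live object.
	 */
	if ((uintptr_t)from->bar_owner & 0x1)
		return (KMEM_CBRC_DONT_KNOW);

	to->bar_owner = from->bar_owner;
	to->bar_value = from->bar_value;
	return (KMEM_CBRC_YES);
}

The callback would be registered with kmem_cache_set_move(bar_cache, bar_move); a real client would also re-point any external references at the new buffer before returning KMEM_CBRC_YES.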
1906 1905
1907 1906 /*
1908 1907 * Return -1 if kmem_error, 1 if constructor fails, 0 if successful.
1909 1908 */
1910 1909 static int
1911 1910 kmem_cache_alloc_debug(kmem_cache_t *cp, void *buf, int kmflag, int construct,
1912 1911 caddr_t caller)
1913 1912 {
1914 1913 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1915 1914 kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
1916 1915 uint32_t mtbf;
1917 1916
1918 1917 if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
1919 1918 kmem_error(KMERR_BADBUFTAG, cp, buf);
1920 1919 return (-1);
1921 1920 }
1922 1921
1923 1922 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_ALLOC;
1924 1923
1925 1924 if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
1926 1925 kmem_error(KMERR_BADBUFCTL, cp, buf);
1927 1926 return (-1);
1928 1927 }
1929 1928
1930 1929 if (cp->cache_flags & KMF_DEADBEEF) {
1931 1930 if (!construct && (cp->cache_flags & KMF_LITE)) {
1932 1931 if (*(uint64_t *)buf != KMEM_FREE_PATTERN) {
1933 1932 kmem_error(KMERR_MODIFIED, cp, buf);
1934 1933 return (-1);
1935 1934 }
1936 1935 if (cp->cache_constructor != NULL)
1937 1936 *(uint64_t *)buf = btp->bt_redzone;
1938 1937 else
1939 1938 *(uint64_t *)buf = KMEM_UNINITIALIZED_PATTERN;
1940 1939 } else {
1941 1940 construct = 1;
1942 1941 if (verify_and_copy_pattern(KMEM_FREE_PATTERN,
1943 1942 KMEM_UNINITIALIZED_PATTERN, buf,
1944 1943 cp->cache_verify)) {
1945 1944 kmem_error(KMERR_MODIFIED, cp, buf);
1946 1945 return (-1);
1947 1946 }
1948 1947 }
1949 1948 }
1950 1949 btp->bt_redzone = KMEM_REDZONE_PATTERN;
1951 1950
1952 1951 if ((mtbf = kmem_mtbf | cp->cache_mtbf) != 0 &&
1953 1952 gethrtime() % mtbf == 0 &&
1954 1953 (kmflag & (KM_NOSLEEP | KM_PANIC)) == KM_NOSLEEP) {
1955 1954 kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1956 1955 if (!construct && cp->cache_destructor != NULL)
1957 1956 cp->cache_destructor(buf, cp->cache_private);
1958 1957 } else {
1959 1958 mtbf = 0;
1960 1959 }
1961 1960
1962 1961 if (mtbf || (construct && cp->cache_constructor != NULL &&
1963 1962 cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) {
1964 1963 atomic_inc_64(&cp->cache_alloc_fail);
1965 1964 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
1966 1965 if (cp->cache_flags & KMF_DEADBEEF)
1967 1966 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1968 1967 kmem_slab_free(cp, buf);
1969 1968 return (1);
1970 1969 }
1971 1970
1972 1971 if (cp->cache_flags & KMF_AUDIT) {
1973 1972 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1974 1973 }
1975 1974
1976 1975 if ((cp->cache_flags & KMF_LITE) &&
1977 1976 !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
1978 1977 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
1979 1978 }
1980 1979
1981 1980 return (0);
1982 1981 }
1983 1982
1984 1983 static int
1985 1984 kmem_cache_free_debug(kmem_cache_t *cp, void *buf, caddr_t caller)
1986 1985 {
1987 1986 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1988 1987 kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
1989 1988 kmem_slab_t *sp;
1990 1989
1991 1990 if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_ALLOC)) {
1992 1991 if (btp->bt_bxstat == ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
1993 1992 kmem_error(KMERR_DUPFREE, cp, buf);
1994 1993 return (-1);
1995 1994 }
1996 1995 sp = kmem_findslab(cp, buf);
1997 1996 if (sp == NULL || sp->slab_cache != cp)
1998 1997 kmem_error(KMERR_BADADDR, cp, buf);
1999 1998 else
2000 1999 kmem_error(KMERR_REDZONE, cp, buf);
2001 2000 return (-1);
2002 2001 }
2003 2002
2004 2003 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
2005 2004
2006 2005 if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
2007 2006 kmem_error(KMERR_BADBUFCTL, cp, buf);
2008 2007 return (-1);
2009 2008 }
2010 2009
2011 2010 if (btp->bt_redzone != KMEM_REDZONE_PATTERN) {
2012 2011 kmem_error(KMERR_REDZONE, cp, buf);
2013 2012 return (-1);
2014 2013 }
2015 2014
2016 2015 if (cp->cache_flags & KMF_AUDIT) {
2017 2016 if (cp->cache_flags & KMF_CONTENTS)
2018 2017 bcp->bc_contents = kmem_log_enter(kmem_content_log,
2019 2018 buf, cp->cache_contents);
2020 2019 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
2021 2020 }
2022 2021
2023 2022 if ((cp->cache_flags & KMF_LITE) &&
2024 2023 !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
2025 2024 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
2026 2025 }
2027 2026
2028 2027 if (cp->cache_flags & KMF_DEADBEEF) {
2029 2028 if (cp->cache_flags & KMF_LITE)
2030 2029 btp->bt_redzone = *(uint64_t *)buf;
2031 2030 else if (cp->cache_destructor != NULL)
2032 2031 cp->cache_destructor(buf, cp->cache_private);
2033 2032
2034 2033 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
2035 2034 }
2036 2035
2037 2036 return (0);
2038 2037 }
2039 2038
2040 2039 /*
2041 2040 * Free each object in magazine mp to cp's slab layer, and free mp itself.
2042 2041 */
2043 2042 static void
2044 2043 kmem_magazine_destroy(kmem_cache_t *cp, kmem_magazine_t *mp, int nrounds)
2045 2044 {
2046 2045 int round;
2047 2046
2048 2047 ASSERT(!list_link_active(&cp->cache_link) ||
2049 2048 taskq_member(kmem_taskq, curthread));
2050 2049
2051 2050 for (round = 0; round < nrounds; round++) {
2052 2051 void *buf = mp->mag_round[round];
2053 2052
2054 2053 if (cp->cache_flags & KMF_DEADBEEF) {
2055 2054 if (verify_pattern(KMEM_FREE_PATTERN, buf,
2056 2055 cp->cache_verify) != NULL) {
2057 2056 kmem_error(KMERR_MODIFIED, cp, buf);
2058 2057 continue;
2059 2058 }
2060 2059 if ((cp->cache_flags & KMF_LITE) &&
2061 2060 cp->cache_destructor != NULL) {
2062 2061 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2063 2062 *(uint64_t *)buf = btp->bt_redzone;
2064 2063 cp->cache_destructor(buf, cp->cache_private);
2065 2064 *(uint64_t *)buf = KMEM_FREE_PATTERN;
2066 2065 }
2067 2066 } else if (cp->cache_destructor != NULL) {
2068 2067 cp->cache_destructor(buf, cp->cache_private);
2069 2068 }
2070 2069
2071 2070 kmem_slab_free(cp, buf);
2072 2071 }
2073 2072 ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2074 2073 kmem_cache_free(cp->cache_magtype->mt_cache, mp);
2075 2074 }
2076 2075
2077 2076 /*
2078 2077 * Allocate a magazine from the depot.
2079 2078 */
2080 2079 static kmem_magazine_t *
2081 2080 kmem_depot_alloc(kmem_cache_t *cp, kmem_maglist_t *mlp)
2082 2081 {
2083 2082 kmem_magazine_t *mp;
2084 2083
2085 2084 /*
2086 2085 * If we can't get the depot lock without contention,
2087 2086 * update our contention count. We use the depot
2088 2087 * contention rate to determine whether we need to
2089 2088 * increase the magazine size for better scalability.
2090 2089 */
2091 2090 if (!mutex_tryenter(&cp->cache_depot_lock)) {
2092 2091 mutex_enter(&cp->cache_depot_lock);
2093 2092 cp->cache_depot_contention++;
2094 2093 }
2095 2094
2096 2095 if ((mp = mlp->ml_list) != NULL) {
2097 2096 ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2098 2097 mlp->ml_list = mp->mag_next;
2099 2098 if (--mlp->ml_total < mlp->ml_min)
2100 2099 mlp->ml_min = mlp->ml_total;
2101 2100 mlp->ml_alloc++;
2102 2101 }
2103 2102
2104 2103 mutex_exit(&cp->cache_depot_lock);
2105 2104
2106 2105 return (mp);
2107 2106 }
2108 2107
2109 2108 /*
2110 2109 * Free a magazine to the depot.
2111 2110 */
2112 2111 static void
2113 2112 kmem_depot_free(kmem_cache_t *cp, kmem_maglist_t *mlp, kmem_magazine_t *mp)
2114 2113 {
2115 2114 mutex_enter(&cp->cache_depot_lock);
2116 2115 ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2117 2116 mp->mag_next = mlp->ml_list;
2118 2117 mlp->ml_list = mp;
2119 2118 mlp->ml_total++;
2120 2119 mutex_exit(&cp->cache_depot_lock);
2121 2120 }
2122 2121
2123 2122 /*
2124 2123 * Update the working set statistics for cp's depot.
2125 2124 */
2126 2125 static void
2127 2126 kmem_depot_ws_update(kmem_cache_t *cp)
2128 2127 {
2129 2128 mutex_enter(&cp->cache_depot_lock);
2130 2129 cp->cache_full.ml_reaplimit = cp->cache_full.ml_min;
2131 2130 cp->cache_full.ml_min = cp->cache_full.ml_total;
2132 2131 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min;
2133 2132 cp->cache_empty.ml_min = cp->cache_empty.ml_total;
2134 2133 mutex_exit(&cp->cache_depot_lock);
2135 2134 }
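The two fields updated above drive the reap decision: after an update, ml_reaplimit holds the minimum the list reached during the interval just ended, and ml_min restarts at ml_total and tracks the current interval. kmem_depot_ws_reap() below then frees MIN(ml_reaplimit, ml_min) magazines, i.e. the magazines that went unused for two consecutive intervals. A worked example (numbers invented for illustration):

	interval 1: ml_total = 10, deepest dip leaves ml_min = 7
	ws_update:  ml_reaplimit = 7, ml_min = 10
	interval 2: deepest dip leaves ml_min = 6
	ws_reap:    reap = MIN(7, 6) = 6 magazines are eligible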
2136 2135
2137 2136 /*
2138 2137 * Set the working set statistics for cp's depot to zero. (Everything is
2139 2138 * eligible for reaping.)
2140 2139 */
2141 2140 static void
2142 2141 kmem_depot_ws_zero(kmem_cache_t *cp)
2143 2142 {
2144 2143 mutex_enter(&cp->cache_depot_lock);
2145 2144 cp->cache_full.ml_reaplimit = cp->cache_full.ml_total;
2146 2145 cp->cache_full.ml_min = cp->cache_full.ml_total;
2147 2146 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_total;
2148 2147 cp->cache_empty.ml_min = cp->cache_empty.ml_total;
2149 2148 mutex_exit(&cp->cache_depot_lock);
2150 2149 }
2151 2150
2152 2151 /*
2153 2152 * The number of bytes to reap before we call kpreempt(). The default (1MB)
2154 2153 * causes us to preempt reaping up to hundreds of times per second. Using a
2155 2154 * larger value (1GB) causes this to have virtually no effect.
2156 2155 */
2157 2156 size_t kmem_reap_preempt_bytes = 1024 * 1024;
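For a sense of scale (an invented illustration, not a measured figure): a magazine of 15 rounds of 8 KB buffers accounts for 15 * 8192 = 122,880 bytes in the loops below, so the 1 MB default triggers kpreempt(KPREEMPT_SYNC) roughly every eight or nine such magazines destroyed.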
2158 2157
2159 2158 /*
2160 2159 * Reap all magazines that have fallen out of the depot's working set.
2161 2160 */
2162 2161 static void
2163 2162 kmem_depot_ws_reap(kmem_cache_t *cp)
2164 2163 {
2165 2164 size_t bytes = 0;
2166 2165 long reap;
2167 2166 kmem_magazine_t *mp;
2168 2167
2169 2168 ASSERT(!list_link_active(&cp->cache_link) ||
2170 2169 taskq_member(kmem_taskq, curthread));
2171 2170
2172 2171 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
2173 2172 while (reap-- &&
2174 2173 (mp = kmem_depot_alloc(cp, &cp->cache_full)) != NULL) {
2175 2174 kmem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize);
2176 2175 bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize;
2177 2176 if (bytes > kmem_reap_preempt_bytes) {
2178 2177 kpreempt(KPREEMPT_SYNC);
2179 2178 bytes = 0;
2180 2179 }
2181 2180 }
2182 2181
2183 2182 reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min);
2184 2183 while (reap-- &&
2185 2184 (mp = kmem_depot_alloc(cp, &cp->cache_empty)) != NULL) {
2186 2185 kmem_magazine_destroy(cp, mp, 0);
2187 2186 bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize;
2188 2187 if (bytes > kmem_reap_preempt_bytes) {
2189 2188 kpreempt(KPREEMPT_SYNC);
2190 2189 bytes = 0;
2191 2190 }
2192 2191 }
2193 2192 }
2194 2193
2195 2194 static void
2196 2195 kmem_cpu_reload(kmem_cpu_cache_t *ccp, kmem_magazine_t *mp, int rounds)
2197 2196 {
2198 2197 ASSERT((ccp->cc_loaded == NULL && ccp->cc_rounds == -1) ||
2199 2198 (ccp->cc_loaded && ccp->cc_rounds + rounds == ccp->cc_magsize));
2200 2199 ASSERT(ccp->cc_magsize > 0);
2201 2200
2202 2201 ccp->cc_ploaded = ccp->cc_loaded;
2203 2202 ccp->cc_prounds = ccp->cc_rounds;
2204 2203 ccp->cc_loaded = mp;
2205 2204 ccp->cc_rounds = rounds;
2206 2205 }
2207 2206
2208 2207 /*
2209 2208 * Intercept kmem alloc/free calls during crash dump in order to avoid
2210 2209 * changing kmem state while memory is being saved to the dump device.
2211 2210 * Otherwise, ::kmem_verify will report "corrupt buffers". Note that
2212 2211 * there are no locks because only one CPU calls kmem during a crash
2213 2212 * dump. To enable this feature, first create the associated vmem
2214 2213 * arena with VMC_DUMPSAFE.
2215 2214 */
2216 2215 static void *kmem_dump_start; /* start of pre-reserved heap */
2217 2216 static void *kmem_dump_end; /* end of heap area */
2218 2217 static void *kmem_dump_curr; /* current free heap pointer */
2219 2218 static size_t kmem_dump_size; /* size of heap area */
2220 2219
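The VMC_DUMPSAFE opt-in mentioned in the comment above is made by the arena's owner when the arena is created; kmem then flags every cache built over that arena in kmem_dump_begin(). A hypothetical subsystem opting in (the baz_* names are invented; vmem_create(), kmem_cache_create(), segkmem_alloc/segkmem_free and heap_arena are the usual kernel interfaces, assumed here rather than taken from this change):

static vmem_t *baz_arena;
static kmem_cache_t *baz_cache;

void
baz_init(void)
{
	/* Arena whose caches may divert to the dump heap during panic. */
	baz_arena = vmem_create("baz_arena", NULL, 0, PAGESIZE,
	    segkmem_alloc, segkmem_free, heap_arena, 0,
	    VM_SLEEP | VMC_DUMPSAFE);

	/*
	 * kmem_dump_begin() sets KMF_DUMPDIVERT on caches over this arena,
	 * so their allocations during a crash dump come from the
	 * pre-reserved heap instead of mutating ordinary kmem state.
	 */
	baz_cache = kmem_cache_create("baz_cache", 64, 0,
	    NULL, NULL, NULL, NULL, baz_arena, 0);
}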
2221 2220 /* append to each buf created in the pre-reserved heap */
2222 2221 typedef struct kmem_dumpctl {
2223 2222 void *kdc_next; /* cache dump free list linkage */
2224 2223 } kmem_dumpctl_t;
2225 2224
2226 2225 #define KMEM_DUMPCTL(cp, buf) \
2227 2226 ((kmem_dumpctl_t *)P2ROUNDUP((uintptr_t)(buf) + (cp)->cache_bufsize, \
2228 2227 sizeof (void *)))
2229 2228
2230 2229 /* set non-zero for a full report */
2231 2230 uint_t kmem_dump_verbose = 0;
2232 2231
2233 2232 /* stats for oversize heap */
2234 2233 uint_t kmem_dump_oversize_allocs = 0;
2235 2234 uint_t kmem_dump_oversize_max = 0;
2236 2235
2237 2236 static void
2238 2237 kmem_dumppr(char **pp, char *e, const char *format, ...)
2239 2238 {
2240 2239 char *p = *pp;
2241 2240
2242 2241 if (p < e) {
2243 2242 int n;
2244 2243 va_list ap;
2245 2244
2246 2245 va_start(ap, format);
2247 2246 n = vsnprintf(p, e - p, format, ap);
2248 2247 va_end(ap);
2249 2248 *pp = p + n;
2250 2249 }
2251 2250 }
2252 2251
2253 2252 /*
2254 2253 * Called when dumpadm(8) configures dump parameters.
2255 2254 */
2256 2255 void
2257 2256 kmem_dump_init(size_t size)
2258 2257 {
2259 2258 /* Our caller ensures size is always set. */
2260 2259 ASSERT3U(size, >, 0);
2261 2260
2262 2261 if (kmem_dump_start != NULL)
2263 2262 kmem_free(kmem_dump_start, kmem_dump_size);
2264 2263
2265 2264 kmem_dump_start = kmem_alloc(size, KM_SLEEP);
2266 2265 kmem_dump_size = size;
2267 2266 kmem_dump_curr = kmem_dump_start;
2268 2267 kmem_dump_end = (void *)((char *)kmem_dump_start + size);
2269 2268 copy_pattern(KMEM_UNINITIALIZED_PATTERN, kmem_dump_start, size);
2270 2269 }
2271 2270
2272 2271 /*
2273 2272 * Set a flag for each kmem_cache_t if it is safe to use alternate dump
2274 2273 * memory. Called just before panic crash dump starts. Set the flag
2275 2274 * for the calling CPU.
2276 2275 */
2277 2276 void
2278 2277 kmem_dump_begin(void)
2279 2278 {
2280 2279 kmem_cache_t *cp;
2281 2280
2282 2281 ASSERT(panicstr != NULL);
2283 2282
2284 2283 for (cp = list_head(&kmem_caches); cp != NULL;
2285 2284 cp = list_next(&kmem_caches, cp)) {
2286 2285 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2287 2286
2288 2287 if (cp->cache_arena->vm_cflags & VMC_DUMPSAFE) {
2289 2288 cp->cache_flags |= KMF_DUMPDIVERT;
2290 2289 ccp->cc_flags |= KMF_DUMPDIVERT;
2291 2290 ccp->cc_dump_rounds = ccp->cc_rounds;
2292 2291 ccp->cc_dump_prounds = ccp->cc_prounds;
2293 2292 ccp->cc_rounds = ccp->cc_prounds = -1;
2294 2293 } else {
2295 2294 cp->cache_flags |= KMF_DUMPUNSAFE;
2296 2295 ccp->cc_flags |= KMF_DUMPUNSAFE;
2297 2296 }
2298 2297 }
2299 2298 }
2300 2299
2301 2300 /*
2302 2301 * finished dump intercept
2303 2302 * print any warnings on the console
2304 2303 * return verbose information to dumpsys() in the given buffer
2305 2304 */
2306 2305 size_t
2307 2306 kmem_dump_finish(char *buf, size_t size)
2308 2307 {
2309 2308 int percent = 0;
2310 2309 size_t used;
2311 2310 char *e = buf + size;
2312 2311 char *p = buf;
2313 2312
2314 2313 if (kmem_dump_curr == kmem_dump_end) {
2315 2314 cmn_err(CE_WARN, "exceeded kmem_dump space of %lu "
2316 2315 "bytes: kmem state in dump may be inconsistent",
2317 2316 kmem_dump_size);
2318 2317 }
2319 2318
2320 2319 if (kmem_dump_verbose == 0)
2321 2320 return (0);
2322 2321
2323 2322 used = (char *)kmem_dump_curr - (char *)kmem_dump_start;
2324 2323 percent = (used * 100) / kmem_dump_size;
2325 2324
2326 2325 kmem_dumppr(&p, e, "%% heap used,%d\n", percent);
2327 2326 kmem_dumppr(&p, e, "used bytes,%ld\n", used);
2328 2327 kmem_dumppr(&p, e, "heap size,%ld\n", kmem_dump_size);
2329 2328 kmem_dumppr(&p, e, "Oversize allocs,%d\n",
2330 2329 kmem_dump_oversize_allocs);
2331 2330 kmem_dumppr(&p, e, "Oversize max size,%ld\n",
2332 2331 kmem_dump_oversize_max);
2333 2332
2334 2333 /* return buffer size used */
2335 2334 if (p < e)
2336 2335 bzero(p, e - p);
2337 2336 return (p - buf);
2338 2337 }
2339 2338
2340 2339 /*
2341 2340 * Allocate a constructed object from alternate dump memory.
2342 2341 */
2343 2342 void *
2344 2343 kmem_cache_alloc_dump(kmem_cache_t *cp, int kmflag)
2345 2344 {
2346 2345 void *buf;
2347 2346 void *curr;
2348 2347 char *bufend;
2349 2348
2350 2349 /* return a constructed object */
2351 2350 if ((buf = cp->cache_dump.kd_freelist) != NULL) {
2352 2351 cp->cache_dump.kd_freelist = KMEM_DUMPCTL(cp, buf)->kdc_next;
2353 2352 return (buf);
2354 2353 }
2355 2354
2356 2355 /* create a new constructed object */
2357 2356 curr = kmem_dump_curr;
2358 2357 buf = (void *)P2ROUNDUP((uintptr_t)curr, cp->cache_align);
2359 2358 bufend = (char *)KMEM_DUMPCTL(cp, buf) + sizeof (kmem_dumpctl_t);
2360 2359
2361 2360 /* hat layer objects cannot cross a page boundary */
2362 2361 if (cp->cache_align < PAGESIZE) {
2363 2362 char *page = (char *)P2ROUNDUP((uintptr_t)buf, PAGESIZE);
2364 2363 if (bufend > page) {
2365 2364 bufend += page - (char *)buf;
2366 2365 buf = (void *)page;
2367 2366 }
2368 2367 }
2369 2368
2370 2369 /* fall back to normal alloc if reserved area is used up */
2371 2370 if (bufend > (char *)kmem_dump_end) {
2372 2371 kmem_dump_curr = kmem_dump_end;
2373 2372 cp->cache_dump.kd_alloc_fails++;
2374 2373 return (NULL);
2375 2374 }
2376 2375
2377 2376 /*
2378 2377 * Must advance curr pointer before calling a constructor that
2379 2378 * may also allocate memory.
2380 2379 */
2381 2380 kmem_dump_curr = bufend;
2382 2381
2383 2382 /* run constructor */
2384 2383 if (cp->cache_constructor != NULL &&
2385 2384 cp->cache_constructor(buf, cp->cache_private, kmflag)
2386 2385 != 0) {
2387 2386 #ifdef DEBUG
2388 2387 printf("name='%s' cache=0x%p: kmem cache constructor failed\n",
2389 2388 cp->cache_name, (void *)cp);
2390 2389 #endif
2391 2390 /* reset curr pointer iff no allocs were done */
2392 2391 if (kmem_dump_curr == bufend)
2393 2392 kmem_dump_curr = curr;
2394 2393
2395 2394 cp->cache_dump.kd_alloc_fails++;
2396 2395 /* fall back to normal alloc if the constructor fails */
2397 2396 return (NULL);
2398 2397 }
2399 2398
2400 2399 return (buf);
2401 2400 }
2402 2401
2403 2402 /*
2404 2403 * Free a constructed object in alternate dump memory.
2405 2404 */
2406 2405 int
2407 2406 kmem_cache_free_dump(kmem_cache_t *cp, void *buf)
2408 2407 {
2409 2408 /* save constructed buffers for next time */
2410 2409 if ((char *)buf >= (char *)kmem_dump_start &&
2411 2410 (char *)buf < (char *)kmem_dump_end) {
2412 2411 KMEM_DUMPCTL(cp, buf)->kdc_next = cp->cache_dump.kd_freelist;
2413 2412 cp->cache_dump.kd_freelist = buf;
2414 2413 return (0);
2415 2414 }
2416 2415
2417 2416 /* just drop buffers that were allocated before dump started */
2418 2417 if (kmem_dump_curr < kmem_dump_end)
2419 2418 return (0);
2420 2419
2421 2420 /* fall back to normal free if reserved area is used up */
2422 2421 return (1);
2423 2422 }
2424 2423
2425 2424 /*
2426 2425 * Allocate a constructed object from cache cp.
2427 2426 */
2428 2427 void *
2429 2428 kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
2430 2429 {
2431 2430 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2432 2431 kmem_magazine_t *fmp;
2433 2432 void *buf;
2434 2433
2435 2434 mutex_enter(&ccp->cc_lock);
2436 2435 for (;;) {
2437 2436 /*
2438 2437 * If there's an object available in the current CPU's
2439 2438 * loaded magazine, just take it and return.
2440 2439 */
2441 2440 if (ccp->cc_rounds > 0) {
2442 2441 buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds];
2443 2442 ccp->cc_alloc++;
2444 2443 mutex_exit(&ccp->cc_lock);
2445 2444 if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPUNSAFE)) {
2446 2445 if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2447 2446 ASSERT(!(ccp->cc_flags &
2448 2447 KMF_DUMPDIVERT));
2449 2448 cp->cache_dump.kd_unsafe++;
2450 2449 }
2451 2450 if ((ccp->cc_flags & KMF_BUFTAG) &&
2452 2451 kmem_cache_alloc_debug(cp, buf, kmflag, 0,
2453 2452 caller()) != 0) {
2454 2453 if (kmflag & KM_NOSLEEP)
2455 2454 return (NULL);
2456 2455 mutex_enter(&ccp->cc_lock);
2457 2456 continue;
2458 2457 }
2459 2458 }
2460 2459 return (buf);
2461 2460 }
2462 2461
2463 2462 /*
2464 2463 * The loaded magazine is empty. If the previously loaded
2465 2464 * magazine was full, exchange them and try again.
2466 2465 */
2467 2466 if (ccp->cc_prounds > 0) {
2468 2467 kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2469 2468 continue;
2470 2469 }
2471 2470
2472 2471 /*
2473 2472 * Return an alternate buffer at dump time to preserve
2474 2473 * the heap.
2475 2474 */
2476 2475 if (ccp->cc_flags & (KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2477 2476 if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2478 2477 ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2479 2478 /* log it so that we can warn about it */
2480 2479 cp->cache_dump.kd_unsafe++;
2481 2480 } else {
2482 2481 if ((buf = kmem_cache_alloc_dump(cp, kmflag)) !=
2483 2482 NULL) {
2484 2483 mutex_exit(&ccp->cc_lock);
2485 2484 return (buf);
2486 2485 }
2487 2486 break; /* fall back to slab layer */
2488 2487 }
2489 2488 }
2490 2489
2491 2490 /*
2492 2491 * If the magazine layer is disabled, break out now.
2493 2492 */
2494 2493 if (ccp->cc_magsize == 0)
2495 2494 break;
2496 2495
2497 2496 /*
2498 2497 * Try to get a full magazine from the depot.
2499 2498 */
2500 2499 fmp = kmem_depot_alloc(cp, &cp->cache_full);
2501 2500 if (fmp != NULL) {
2502 2501 if (ccp->cc_ploaded != NULL)
2503 2502 kmem_depot_free(cp, &cp->cache_empty,
2504 2503 ccp->cc_ploaded);
2505 2504 kmem_cpu_reload(ccp, fmp, ccp->cc_magsize);
2506 2505 continue;
2507 2506 }
2508 2507
2509 2508 /*
2510 2509 * There are no full magazines in the depot,
2511 2510 * so fall through to the slab layer.
2512 2511 */
2513 2512 break;
2514 2513 }
2515 2514 mutex_exit(&ccp->cc_lock);
2516 2515
2517 2516 /*
2518 2517 * We couldn't allocate a constructed object from the magazine layer,
2519 2518 * so get a raw buffer from the slab layer and apply its constructor.
2520 2519 */
2521 2520 buf = kmem_slab_alloc(cp, kmflag);
2522 2521
2523 2522 if (buf == NULL)
2524 2523 return (NULL);
2525 2524
2526 2525 if (cp->cache_flags & KMF_BUFTAG) {
2527 2526 /*
2528 2527 * Make kmem_cache_alloc_debug() apply the constructor for us.
2529 2528 */
2530 2529 int rc = kmem_cache_alloc_debug(cp, buf, kmflag, 1, caller());
2531 2530 if (rc != 0) {
2532 2531 if (kmflag & KM_NOSLEEP)
2533 2532 return (NULL);
2534 2533 /*
2535 2534 * kmem_cache_alloc_debug() detected corruption
2536 2535 * but didn't panic (kmem_panic <= 0). We should not be
2537 2536 * here because the constructor failed (indicated by a
2538 2537 * return code of 1). Try again.
2539 2538 */
2540 2539 ASSERT(rc == -1);
2541 2540 return (kmem_cache_alloc(cp, kmflag));
2542 2541 }
2543 2542 return (buf);
2544 2543 }
2545 2544
2546 2545 if (cp->cache_constructor != NULL &&
2547 2546 cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) {
2548 2547 atomic_inc_64(&cp->cache_alloc_fail);
2549 2548 kmem_slab_free(cp, buf);
2550 2549 return (NULL);
2551 2550 }
2552 2551
2553 2552 return (buf);
2554 2553 }
2555 2554
2556 2555 /*
2557 2556 * The freed argument tells whether or not kmem_cache_free_debug() has already
2558 2557 * been called so that we can avoid the duplicate free error. For example, a
2559 2558 * buffer on a magazine has already been freed by the client but is still
2560 2559 * constructed.
2561 2560 */
2562 2561 static void
2563 2562 kmem_slab_free_constructed(kmem_cache_t *cp, void *buf, boolean_t freed)
2564 2563 {
2565 2564 if (!freed && (cp->cache_flags & KMF_BUFTAG))
2566 2565 if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2567 2566 return;
2568 2567
2569 2568 /*
2570 2569 * Note that if KMF_DEADBEEF is in effect and KMF_LITE is not,
2571 2570 * kmem_cache_free_debug() will have already applied the destructor.
2572 2571 */
2573 2572 if ((cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) != KMF_DEADBEEF &&
2574 2573 cp->cache_destructor != NULL) {
2575 2574 if (cp->cache_flags & KMF_DEADBEEF) { /* KMF_LITE implied */
2576 2575 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2577 2576 *(uint64_t *)buf = btp->bt_redzone;
2578 2577 cp->cache_destructor(buf, cp->cache_private);
2579 2578 *(uint64_t *)buf = KMEM_FREE_PATTERN;
2580 2579 } else {
2581 2580 cp->cache_destructor(buf, cp->cache_private);
2582 2581 }
2583 2582 }
2584 2583
2585 2584 kmem_slab_free(cp, buf);
2586 2585 }
2587 2586
2588 2587 /*
2589 2588 * Used when there's no room to free a buffer to the per-CPU cache.
2590 2589 * Drops and re-acquires &ccp->cc_lock, and returns non-zero if the
2591 2590 * caller should try freeing to the per-CPU cache again.
2592 2591 * Note that we don't directly install the magazine in the cpu cache,
2593 2592 * since its state may have changed wildly while the lock was dropped.
2594 2593 */
2595 2594 static int
2596 2595 kmem_cpucache_magazine_alloc(kmem_cpu_cache_t *ccp, kmem_cache_t *cp)
2597 2596 {
2598 2597 kmem_magazine_t *emp;
2599 2598 kmem_magtype_t *mtp;
2600 2599
2601 2600 ASSERT(MUTEX_HELD(&ccp->cc_lock));
2602 2601 ASSERT(((uint_t)ccp->cc_rounds == ccp->cc_magsize ||
2603 2602 ((uint_t)ccp->cc_rounds == -1)) &&
2604 2603 ((uint_t)ccp->cc_prounds == ccp->cc_magsize ||
2605 2604 ((uint_t)ccp->cc_prounds == -1)));
2606 2605
2607 2606 emp = kmem_depot_alloc(cp, &cp->cache_empty);
2608 2607 if (emp != NULL) {
2609 2608 if (ccp->cc_ploaded != NULL)
2610 2609 kmem_depot_free(cp, &cp->cache_full,
2611 2610 ccp->cc_ploaded);
2612 2611 kmem_cpu_reload(ccp, emp, 0);
2613 2612 return (1);
2614 2613 }
2615 2614 /*
2616 2615 * There are no empty magazines in the depot,
2617 2616 * so try to allocate a new one. We must drop all locks
2618 2617 * across kmem_cache_alloc() because lower layers may
2619 2618 * attempt to allocate from this cache.
2620 2619 */
2621 2620 mtp = cp->cache_magtype;
2622 2621 mutex_exit(&ccp->cc_lock);
2623 2622 emp = kmem_cache_alloc(mtp->mt_cache, KM_NOSLEEP);
2624 2623 mutex_enter(&ccp->cc_lock);
2625 2624
2626 2625 if (emp != NULL) {
2627 2626 /*
2628 2627 * We successfully allocated an empty magazine.
2629 2628 * However, we had to drop ccp->cc_lock to do it,
2630 2629 * so the cache's magazine size may have changed.
2631 2630 * If so, free the magazine and try again.
2632 2631 */
2633 2632 if (ccp->cc_magsize != mtp->mt_magsize) {
2634 2633 mutex_exit(&ccp->cc_lock);
2635 2634 kmem_cache_free(mtp->mt_cache, emp);
2636 2635 mutex_enter(&ccp->cc_lock);
2637 2636 return (1);
2638 2637 }
2639 2638
2640 2639 /*
2641 2640 * We got a magazine of the right size. Add it to
2642 2641 * the depot and try the whole dance again.
2643 2642 */
2644 2643 kmem_depot_free(cp, &cp->cache_empty, emp);
2645 2644 return (1);
2646 2645 }
2647 2646
2648 2647 /*
2649 2648 * We couldn't allocate an empty magazine,
2650 2649 * so fall through to the slab layer.
2651 2650 */
2652 2651 return (0);
2653 2652 }
2654 2653
2655 2654 /*
2656 2655 * Free a constructed object to cache cp.
2657 2656 */
2658 2657 void
2659 2658 kmem_cache_free(kmem_cache_t *cp, void *buf)
2660 2659 {
2661 2660 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2662 2661
2663 2662 /*
2664 2663 * The client must not free either of the buffers passed to the move
2665 2664 * callback function.
2666 2665 */
2667 2666 ASSERT(cp->cache_defrag == NULL ||
2668 2667 cp->cache_defrag->kmd_thread != curthread ||
2669 2668 (buf != cp->cache_defrag->kmd_from_buf &&
2670 2669 buf != cp->cache_defrag->kmd_to_buf));
2671 2670
2672 2671 if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2673 2672 if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2674 2673 ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2675 2674 /* log it so that we can warn about it */
2676 2675 cp->cache_dump.kd_unsafe++;
2677 2676 } else if (KMEM_DUMPCC(ccp) && !kmem_cache_free_dump(cp, buf)) {
2678 2677 return;
2679 2678 }
2680 2679 if (ccp->cc_flags & KMF_BUFTAG) {
2681 2680 if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2682 2681 return;
2683 2682 }
2684 2683 }
2685 2684
2686 2685 mutex_enter(&ccp->cc_lock);
2687 2686 /*
2688 2687 * Any changes to this logic should be reflected in kmem_slab_prefill()
2689 2688 */
2690 2689 for (;;) {
2691 2690 /*
2692 2691 * If there's a slot available in the current CPU's
2693 2692 * loaded magazine, just put the object there and return.
2694 2693 */
2695 2694 if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2696 2695 ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf;
2697 2696 ccp->cc_free++;
2698 2697 mutex_exit(&ccp->cc_lock);
2699 2698 return;
2700 2699 }
2701 2700
2702 2701 /*
2703 2702 * The loaded magazine is full. If the previously loaded
2704 2703 * magazine was empty, exchange them and try again.
2705 2704 */
2706 2705 if (ccp->cc_prounds == 0) {
2707 2706 kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2708 2707 continue;
2709 2708 }
2710 2709
2711 2710 /*
2712 2711 * If the magazine layer is disabled, break out now.
2713 2712 */
2714 2713 if (ccp->cc_magsize == 0)
2715 2714 break;
2716 2715
2717 2716 if (!kmem_cpucache_magazine_alloc(ccp, cp)) {
2718 2717 /*
2719 2718 * We couldn't free our constructed object to the
2720 2719 * magazine layer, so apply its destructor and free it
2721 2720 * to the slab layer.
2722 2721 */
2723 2722 break;
2724 2723 }
2725 2724 }
2726 2725 mutex_exit(&ccp->cc_lock);
2727 2726 kmem_slab_free_constructed(cp, buf, B_TRUE);
2728 2727 }
2729 2728
2730 2729 static void
2731 2730 kmem_slab_prefill(kmem_cache_t *cp, kmem_slab_t *sp)
2732 2731 {
2733 2732 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2734 2733 int cache_flags = cp->cache_flags;
2735 2734
2736 2735 kmem_bufctl_t *next, *head;
2737 2736 size_t nbufs;
2738 2737
2739 2738 /*
2740 2739 * Completely allocate the newly created slab and put the pre-allocated
2741 2740 * buffers in magazines. Any of the buffers that cannot be put in
2742 2741 * magazines must be returned to the slab.
2743 2742 */
2744 2743 ASSERT(MUTEX_HELD(&cp->cache_lock));
2745 2744 ASSERT((cache_flags & (KMF_PREFILL|KMF_BUFTAG)) == KMF_PREFILL);
2746 2745 ASSERT(cp->cache_constructor == NULL);
2747 2746 ASSERT(sp->slab_cache == cp);
2748 2747 ASSERT(sp->slab_refcnt == 1);
2749 2748 ASSERT(sp->slab_head != NULL && sp->slab_chunks > sp->slab_refcnt);
2750 2749 ASSERT(avl_find(&cp->cache_partial_slabs, sp, NULL) == NULL);
2751 2750
2752 2751 head = sp->slab_head;
2753 2752 nbufs = (sp->slab_chunks - sp->slab_refcnt);
2754 2753 sp->slab_head = NULL;
2755 2754 sp->slab_refcnt += nbufs;
2756 2755 cp->cache_bufslab -= nbufs;
2757 2756 cp->cache_slab_alloc += nbufs;
2758 2757 list_insert_head(&cp->cache_complete_slabs, sp);
2759 2758 cp->cache_complete_slab_count++;
2760 2759 mutex_exit(&cp->cache_lock);
2761 2760 mutex_enter(&ccp->cc_lock);
2762 2761
2763 2762 while (head != NULL) {
2764 2763 void *buf = KMEM_BUF(cp, head);
2765 2764 /*
2766 2765 * If there's a slot available in the current CPU's
2767 2766 * loaded magazine, just put the object there and
2768 2767 * continue.
2769 2768 */
2770 2769 if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2771 2770 ccp->cc_loaded->mag_round[ccp->cc_rounds++] =
2772 2771 buf;
2773 2772 ccp->cc_free++;
2774 2773 nbufs--;
2775 2774 head = head->bc_next;
2776 2775 continue;
2777 2776 }
2778 2777
2779 2778 /*
2780 2779 * The loaded magazine is full. If the previously
2781 2780 * loaded magazine was empty, exchange them and try
2782 2781 * again.
2783 2782 */
2784 2783 if (ccp->cc_prounds == 0) {
2785 2784 kmem_cpu_reload(ccp, ccp->cc_ploaded,
2786 2785 ccp->cc_prounds);
2787 2786 continue;
2788 2787 }
2789 2788
2790 2789 /*
2791 2790 * If the magazine layer is disabled, break out now.
2792 2791 */
2793 2792
2794 2793 if (ccp->cc_magsize == 0) {
2795 2794 break;
2796 2795 }
2797 2796
2798 2797 if (!kmem_cpucache_magazine_alloc(ccp, cp))
2799 2798 break;
2800 2799 }
2801 2800 mutex_exit(&ccp->cc_lock);
2802 2801 if (nbufs != 0) {
2803 2802 ASSERT(head != NULL);
2804 2803
2805 2804 /*
2806 2805 * If there was a failure, return remaining objects to
2807 2806 * the slab
2808 2807 */
2809 2808 while (head != NULL) {
2810 2809 ASSERT(nbufs != 0);
2811 2810 next = head->bc_next;
2812 2811 head->bc_next = NULL;
2813 2812 kmem_slab_free(cp, KMEM_BUF(cp, head));
2814 2813 head = next;
2815 2814 nbufs--;
2816 2815 }
2817 2816 }
2818 2817 ASSERT(head == NULL);
2819 2818 ASSERT(nbufs == 0);
2820 2819 mutex_enter(&cp->cache_lock);
2821 2820 }
2822 2821
2822 +/*
2823 + * kmem_rezalloc() is currently considered private until we sort out how we want
2824 + * to handle realloc vs. reallocf style interfaces.
2825 + */
2826 +void *
2827 +kmem_rezalloc(void *oldbuf, size_t oldsize, size_t newsize, int kmflag)
2828 +{
2829 + void *newbuf = kmem_alloc(newsize, kmflag);
2830 + if (newbuf == NULL) {
2831 + return (NULL);
2832 + }
2833 +
2834 + bcopy(oldbuf, newbuf, MIN(oldsize, newsize));
2835 + if (newsize > oldsize) {
2836 + void *start = (void *)((uintptr_t)newbuf + oldsize);
2837 + bzero(start, newsize - oldsize);
2838 + }
2839 +
2840 + if (oldbuf != NULL) {
2841 + ASSERT3U(oldsize, !=, 0);
2842 + kmem_free(oldbuf, oldsize);
2843 + }
2844 +
2845 + return (newbuf);
2846 +}
2847 +
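A minimal usage sketch of the interface added above (hypothetical consumer; table_t,
entry_t, and table_grow() are illustrative and not part of this change). The caller
tracks its own allocation size, on failure the old buffer is left untouched, and
passing a NULL buffer with an old size of zero degenerates to kmem_zalloc().

	typedef struct table {
		entry_t	*t_ents;	/* current array, or NULL */
		size_t	t_nents;	/* entries currently allocated */
	} table_t;

	static int
	table_grow(table_t *tp, size_t newn)
	{
		entry_t *ents;

		ents = kmem_rezalloc(tp->t_ents,
		    tp->t_nents * sizeof (entry_t),
		    newn * sizeof (entry_t), KM_NOSLEEP);
		if (ents == NULL)
			return (ENOMEM);	/* tp->t_ents is unchanged */

		tp->t_ents = ents;
		tp->t_nents = newn;
		return (0);
	}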
2823 2848 void *
2824 2849 kmem_zalloc(size_t size, int kmflag)
2825 2850 {
2826 2851 size_t index;
2827 2852 void *buf;
2828 2853
2829 2854 if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) {
2830 2855 kmem_cache_t *cp = kmem_alloc_table[index];
2831 2856 buf = kmem_cache_alloc(cp, kmflag);
2832 2857 if (buf != NULL) {
2833 2858 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
2834 2859 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2835 2860 ((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
2836 2861 ((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
2837 2862
2838 2863 if (cp->cache_flags & KMF_LITE) {
2839 2864 KMEM_BUFTAG_LITE_ENTER(btp,
2840 2865 kmem_lite_count, caller());
2841 2866 }
2842 2867 }
2843 2868 bzero(buf, size);
2844 2869 }
2845 2870 } else {
2846 2871 buf = kmem_alloc(size, kmflag);
2847 2872 if (buf != NULL)
2848 2873 bzero(buf, size);
2849 2874 }
2850 2875 return (buf);
2851 2876 }
2852 2877
2853 2878 void *
2854 2879 kmem_alloc(size_t size, int kmflag)
2855 2880 {
2856 2881 size_t index;
2857 2882 kmem_cache_t *cp;
2858 2883 void *buf;
2859 2884
2860 2885 if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) {
2861 2886 cp = kmem_alloc_table[index];
2862 2887 /* fall through to kmem_cache_alloc() */
2863 2888
2864 2889 } else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
2865 2890 kmem_big_alloc_table_max) {
2866 2891 cp = kmem_big_alloc_table[index];
2867 2892 /* fall through to kmem_cache_alloc() */
2868 2893
2869 2894 } else {
2870 2895 if (size == 0) {
2871 2896 if (kmflag != KM_SLEEP && !(kmflag & KM_PANIC))
2872 2897 return (NULL);
2873 2898
2874 2899 /*
2875 2900 * If this is a sleeping allocation or one that has
2876 2901 * been specified to panic on allocation failure, we
2877 2902 * consider it to be deprecated behavior to allocate
2878 2903 * 0 bytes. If we have been configured to panic under
2879 2904 * this condition, we panic; if to warn, we warn -- and
2880 2905 * regardless, we log to the kmem_zerosized_log that
2881 2906 			 * this condition has occurred (which gives us
2882 2907 * enough information to be able to debug it).
2883 2908 */
2884 2909 if (kmem_panic && kmem_panic_zerosized)
2885 2910 panic("attempted to kmem_alloc() size of 0");
2886 2911
2887 2912 if (kmem_warn_zerosized) {
2888 2913 cmn_err(CE_WARN, "kmem_alloc(): sleeping "
2889 2914 "allocation with size of 0; "
2890 2915 "see kmem_zerosized_log for details");
2891 2916 }
2892 2917
2893 2918 kmem_log_event(kmem_zerosized_log, NULL, NULL, NULL);
2894 2919
2895 2920 return (NULL);
2896 2921 }
2897 2922
2898 2923 buf = vmem_alloc(kmem_oversize_arena, size,
2899 2924 kmflag & KM_VMFLAGS);
2900 2925 if (buf == NULL)
2901 2926 kmem_log_event(kmem_failure_log, NULL, NULL,
2902 2927 (void *)size);
2903 2928 else if (KMEM_DUMP(kmem_slab_cache)) {
2904 2929 /* stats for dump intercept */
2905 2930 kmem_dump_oversize_allocs++;
2906 2931 if (size > kmem_dump_oversize_max)
2907 2932 kmem_dump_oversize_max = size;
2908 2933 }
2909 2934 return (buf);
2910 2935 }
2911 2936
2912 2937 buf = kmem_cache_alloc(cp, kmflag);
2913 2938 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp) && buf != NULL) {
2914 2939 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2915 2940 ((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
2916 2941 ((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
2917 2942
2918 2943 if (cp->cache_flags & KMF_LITE) {
2919 2944 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller());
2920 2945 }
2921 2946 }
2922 2947 return (buf);
2923 2948 }
2924 2949
2925 2950 void
2926 2951 kmem_free(void *buf, size_t size)
2927 2952 {
2928 2953 size_t index;
2929 2954 kmem_cache_t *cp;
2930 2955
2931 2956 if ((index = (size - 1) >> KMEM_ALIGN_SHIFT) < KMEM_ALLOC_TABLE_MAX) {
2932 2957 cp = kmem_alloc_table[index];
2933 2958 /* fall through to kmem_cache_free() */
2934 2959
2935 2960 } else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
2936 2961 kmem_big_alloc_table_max) {
2937 2962 cp = kmem_big_alloc_table[index];
2938 2963 /* fall through to kmem_cache_free() */
2939 2964
2940 2965 } else {
2941 2966 EQUIV(buf == NULL, size == 0);
2942 2967 if (buf == NULL && size == 0)
2943 2968 return;
2944 2969 vmem_free(kmem_oversize_arena, buf, size);
2945 2970 return;
2946 2971 }
2947 2972
2948 2973 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
2949 2974 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2950 2975 uint32_t *ip = (uint32_t *)btp;
2951 2976 if (ip[1] != KMEM_SIZE_ENCODE(size)) {
2952 2977 if (*(uint64_t *)buf == KMEM_FREE_PATTERN) {
2953 2978 kmem_error(KMERR_DUPFREE, cp, buf);
2954 2979 return;
2955 2980 }
2956 2981 if (KMEM_SIZE_VALID(ip[1])) {
2957 2982 ip[0] = KMEM_SIZE_ENCODE(size);
2958 2983 kmem_error(KMERR_BADSIZE, cp, buf);
2959 2984 } else {
2960 2985 kmem_error(KMERR_REDZONE, cp, buf);
2961 2986 }
2962 2987 return;
2963 2988 }
2964 2989 if (((uint8_t *)buf)[size] != KMEM_REDZONE_BYTE) {
2965 2990 kmem_error(KMERR_REDZONE, cp, buf);
2966 2991 return;
2967 2992 }
2968 2993 btp->bt_redzone = KMEM_REDZONE_PATTERN;
2969 2994 if (cp->cache_flags & KMF_LITE) {
2970 2995 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count,
2971 2996 caller());
2972 2997 }
2973 2998 }
2974 2999 kmem_cache_free(cp, buf);
2975 3000 }
2976 3001
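A short sketch of the expected alloc/free pairing (hypothetical function, not part
of this change). Unlike a malloc/free style interface, kmem_free() takes the
original allocation size, and under KMF_BUFTAG the size encoded at allocation time
is cross-checked as shown above.

	static void
	example_roundtrip(size_t len)
	{
		char *buf = kmem_zalloc(len, KM_SLEEP);	/* zeroed; sleeps */

		/* ... use buf[0 .. len - 1] ... */

		kmem_free(buf, len);	/* size must match the allocation */
	}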
2977 3002 void *
2978 3003 kmem_firewall_va_alloc(vmem_t *vmp, size_t size, int vmflag)
2979 3004 {
2980 3005 size_t realsize = size + vmp->vm_quantum;
2981 3006 void *addr;
2982 3007
2983 3008 /*
2984 3009 * Annoying edge case: if 'size' is just shy of ULONG_MAX, adding
2985 3010 * vm_quantum will cause integer wraparound. Check for this, and
2986 3011 * blow off the firewall page in this case. Note that such a
2987 3012 * giant allocation (the entire kernel address space) can never
2988 3013 * be satisfied, so it will either fail immediately (VM_NOSLEEP)
2989 3014 * or sleep forever (VM_SLEEP). Thus, there is no need for a
2990 3015 * corresponding check in kmem_firewall_va_free().
2991 3016 */
2992 3017 if (realsize < size)
2993 3018 realsize = size;
2994 3019
2995 3020 /*
2996 3021 * While boot still owns resource management, make sure that this
2997 3022 * redzone virtual address allocation is properly accounted for in
2998 3023 	 * OBP's "virtual-memory" "available" lists because we're
2999 3024 * effectively claiming them for a red zone. If we don't do this,
3000 3025 * the available lists become too fragmented and too large for the
3001 3026 * current boot/kernel memory list interface.
3002 3027 */
3003 3028 addr = vmem_alloc(vmp, realsize, vmflag | VM_NEXTFIT);
3004 3029
3005 3030 if (addr != NULL && kvseg.s_base == NULL && realsize != size)
3006 3031 (void) boot_virt_alloc((char *)addr + size, vmp->vm_quantum);
3007 3032
3008 3033 return (addr);
3009 3034 }
3010 3035
3011 3036 void
3012 3037 kmem_firewall_va_free(vmem_t *vmp, void *addr, size_t size)
3013 3038 {
3014 3039 ASSERT((kvseg.s_base == NULL ?
3015 3040 va_to_pfn((char *)addr + size) :
3016 3041 hat_getpfnum(kas.a_hat, (caddr_t)addr + size)) == PFN_INVALID);
3017 3042
3018 3043 vmem_free(vmp, addr, size + vmp->vm_quantum);
3019 3044 }
3020 3045
3021 3046 /*
3022 3047 * Try to allocate at least `size' bytes of memory without sleeping or
3023 3048 * panicking. Return actual allocated size in `asize'. If allocation failed,
3024 3049 * try final allocation with sleep or panic allowed.
3025 3050 */
3026 3051 void *
3027 3052 kmem_alloc_tryhard(size_t size, size_t *asize, int kmflag)
3028 3053 {
3029 3054 void *p;
3030 3055
3031 3056 *asize = P2ROUNDUP(size, KMEM_ALIGN);
3032 3057 do {
3033 3058 p = kmem_alloc(*asize, (kmflag | KM_NOSLEEP) & ~KM_PANIC);
3034 3059 if (p != NULL)
3035 3060 return (p);
3036 3061 *asize += KMEM_ALIGN;
3037 3062 } while (*asize <= PAGESIZE);
3038 3063
3039 3064 *asize = P2ROUNDUP(size, KMEM_ALIGN);
3040 3065 return (kmem_alloc(*asize, kmflag));
3041 3066 }
3042 3067
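A hedged usage sketch for kmem_alloc_tryhard() (hypothetical caller). The actual
size is returned through the second argument and may be larger than the request,
so it is the value that must later be passed to kmem_free().

	static void *
	grab_scratch(size_t request, size_t *lenp)
	{
		void *p = kmem_alloc_tryhard(request, lenp, KM_SLEEP);

		/* free later with kmem_free(p, *lenp), not 'request' */
		return (p);
	}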
3043 3068 /*
3044 3069 * Reclaim all unused memory from a cache.
3045 3070 */
3046 3071 static void
3047 3072 kmem_cache_reap(kmem_cache_t *cp)
3048 3073 {
3049 3074 ASSERT(taskq_member(kmem_taskq, curthread));
3050 3075 cp->cache_reap++;
3051 3076
3052 3077 /*
3053 3078 * Ask the cache's owner to free some memory if possible.
3054 3079 * The idea is to handle things like the inode cache, which
3055 3080 * typically sits on a bunch of memory that it doesn't truly
3056 3081 * *need*. Reclaim policy is entirely up to the owner; this
3057 3082 * callback is just an advisory plea for help.
3058 3083 */
3059 3084 if (cp->cache_reclaim != NULL) {
3060 3085 long delta;
3061 3086
3062 3087 /*
3063 3088 * Reclaimed memory should be reapable (not included in the
3064 3089 * depot's working set).
3065 3090 */
3066 3091 delta = cp->cache_full.ml_total;
3067 3092 cp->cache_reclaim(cp->cache_private);
3068 3093 delta = cp->cache_full.ml_total - delta;
3069 3094 if (delta > 0) {
3070 3095 mutex_enter(&cp->cache_depot_lock);
3071 3096 cp->cache_full.ml_reaplimit += delta;
3072 3097 cp->cache_full.ml_min += delta;
3073 3098 mutex_exit(&cp->cache_depot_lock);
3074 3099 }
3075 3100 }
3076 3101
3077 3102 kmem_depot_ws_reap(cp);
3078 3103
3079 3104 if (cp->cache_defrag != NULL && !kmem_move_noreap) {
3080 3105 kmem_cache_defrag(cp);
3081 3106 }
3082 3107 }
3083 3108
3084 3109 static void
3085 3110 kmem_reap_timeout(void *flag_arg)
3086 3111 {
3087 3112 uint32_t *flag = (uint32_t *)flag_arg;
3088 3113
3089 3114 ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
3090 3115 *flag = 0;
3091 3116 }
3092 3117
3093 3118 static void
3094 3119 kmem_reap_done(void *flag)
3095 3120 {
3096 3121 if (!callout_init_done) {
3097 3122 /* can't schedule a timeout at this point */
3098 3123 kmem_reap_timeout(flag);
3099 3124 } else {
3100 3125 (void) timeout(kmem_reap_timeout, flag, kmem_reap_interval);
3101 3126 }
3102 3127 }
3103 3128
3104 3129 static void
3105 3130 kmem_reap_start(void *flag)
3106 3131 {
3107 3132 ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
3108 3133
3109 3134 if (flag == &kmem_reaping) {
3110 3135 kmem_cache_applyall(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);
3111 3136 /*
3112 3137 * if we have segkp under heap, reap segkp cache.
3113 3138 */
3114 3139 if (segkp_fromheap)
3115 3140 segkp_cache_free();
3116 3141 }
3117 3142 else
3118 3143 kmem_cache_applyall_id(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);
3119 3144
3120 3145 /*
3121 3146 * We use taskq_dispatch() to schedule a timeout to clear
3122 3147 * the flag so that kmem_reap() becomes self-throttling:
3123 3148 * we won't reap again until the current reap completes *and*
3124 3149 * at least kmem_reap_interval ticks have elapsed.
3125 3150 */
3126 3151 if (taskq_dispatch(kmem_taskq, kmem_reap_done, flag, TQ_NOSLEEP) ==
3127 3152 TASKQID_INVALID)
3128 3153 kmem_reap_done(flag);
3129 3154 }
3130 3155
3131 3156 static void
3132 3157 kmem_reap_common(void *flag_arg)
3133 3158 {
3134 3159 uint32_t *flag = (uint32_t *)flag_arg;
3135 3160
3136 3161 if (MUTEX_HELD(&kmem_cache_lock) || kmem_taskq == NULL ||
3137 3162 atomic_cas_32(flag, 0, 1) != 0)
3138 3163 return;
3139 3164
3140 3165 /*
3141 3166 * It may not be kosher to do memory allocation when a reap is called
3142 3167 * (for example, if vmem_populate() is in the call chain). So we
3143 3168 * start the reap going with a TQ_NOALLOC dispatch. If the dispatch
3144 3169 * fails, we reset the flag, and the next reap will try again.
3145 3170 */
3146 3171 if (taskq_dispatch(kmem_taskq, kmem_reap_start, flag, TQ_NOALLOC) ==
3147 3172 TASKQID_INVALID)
3148 3173 *flag = 0;
3149 3174 }
3150 3175
3151 3176 /*
3152 3177 * Reclaim all unused memory from all caches. Called from the VM system
3153 3178 * when memory gets tight.
3154 3179 */
3155 3180 void
3156 3181 kmem_reap(void)
3157 3182 {
3158 3183 kmem_reap_common(&kmem_reaping);
3159 3184 }
3160 3185
3161 3186 /*
3162 3187 * Reclaim all unused memory from identifier arenas, called when a vmem
3163 3188  * arena not backed by memory is exhausted. Since reaping memory-backed caches
3164 3189 * cannot help with identifier exhaustion, we avoid both a large amount of
3165 3190 * work and unwanted side-effects from reclaim callbacks.
3166 3191 */
3167 3192 void
3168 3193 kmem_reap_idspace(void)
3169 3194 {
3170 3195 kmem_reap_common(&kmem_reaping_idspace);
3171 3196 }
3172 3197
3173 3198 /*
3174 3199 * Purge all magazines from a cache and set its magazine limit to zero.
3175 3200 * All calls are serialized by the kmem_taskq lock, except for the final
3176 3201 * call from kmem_cache_destroy().
3177 3202 */
3178 3203 static void
3179 3204 kmem_cache_magazine_purge(kmem_cache_t *cp)
3180 3205 {
3181 3206 kmem_cpu_cache_t *ccp;
3182 3207 kmem_magazine_t *mp, *pmp;
3183 3208 int rounds, prounds, cpu_seqid;
3184 3209
3185 3210 ASSERT(!list_link_active(&cp->cache_link) ||
3186 3211 taskq_member(kmem_taskq, curthread));
3187 3212 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
3188 3213
3189 3214 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3190 3215 ccp = &cp->cache_cpu[cpu_seqid];
3191 3216
3192 3217 mutex_enter(&ccp->cc_lock);
3193 3218 mp = ccp->cc_loaded;
3194 3219 pmp = ccp->cc_ploaded;
3195 3220 rounds = ccp->cc_rounds;
3196 3221 prounds = ccp->cc_prounds;
3197 3222 ccp->cc_loaded = NULL;
3198 3223 ccp->cc_ploaded = NULL;
3199 3224 ccp->cc_rounds = -1;
3200 3225 ccp->cc_prounds = -1;
3201 3226 ccp->cc_magsize = 0;
3202 3227 mutex_exit(&ccp->cc_lock);
3203 3228
3204 3229 if (mp)
3205 3230 kmem_magazine_destroy(cp, mp, rounds);
3206 3231 if (pmp)
3207 3232 kmem_magazine_destroy(cp, pmp, prounds);
3208 3233 }
3209 3234
3210 3235 kmem_depot_ws_zero(cp);
3211 3236 kmem_depot_ws_reap(cp);
3212 3237 }
3213 3238
3214 3239 /*
3215 3240 * Enable per-cpu magazines on a cache.
3216 3241 */
3217 3242 static void
3218 3243 kmem_cache_magazine_enable(kmem_cache_t *cp)
3219 3244 {
3220 3245 int cpu_seqid;
3221 3246
3222 3247 if (cp->cache_flags & KMF_NOMAGAZINE)
3223 3248 return;
3224 3249
3225 3250 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3226 3251 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3227 3252 mutex_enter(&ccp->cc_lock);
3228 3253 ccp->cc_magsize = cp->cache_magtype->mt_magsize;
3229 3254 mutex_exit(&ccp->cc_lock);
3230 3255 }
3231 3256
3232 3257 }
3233 3258
3234 3259 /*
3235 3260 * Allow our caller to determine if there are running reaps.
3236 3261 *
3237 3262 * This call is very conservative and may return B_TRUE even when
3238 3263 * reaping activity isn't active. If it returns B_FALSE, then reaping
3239 3264 * activity is definitely inactive.
3240 3265 */
3241 3266 boolean_t
3242 3267 kmem_cache_reap_active(void)
3243 3268 {
3244 3269 return (!taskq_empty(kmem_taskq));
3245 3270 }
3246 3271
3247 3272 /*
3248 3273 * Reap (almost) everything soon.
3249 3274 *
3250 3275 * Note: this does not wait for the reap-tasks to complete. Caller
3251 3276 * should use kmem_cache_reap_active() (above) and/or moderation to
3252 3277 * avoid scheduling too many reap-tasks.
3253 3278 */
3254 3279 void
3255 3280 kmem_cache_reap_soon(kmem_cache_t *cp)
3256 3281 {
3257 3282 ASSERT(list_link_active(&cp->cache_link));
3258 3283
3259 3284 kmem_depot_ws_zero(cp);
3260 3285
3261 3286 (void) taskq_dispatch(kmem_taskq,
3262 3287 (task_func_t *)kmem_depot_ws_reap, cp, TQ_SLEEP);
3263 3288 }
3264 3289
3265 3290 /*
3266 3291 * Recompute a cache's magazine size. The trade-off is that larger magazines
3267 3292 * provide a higher transfer rate with the depot, while smaller magazines
3268 3293 * reduce memory consumption. Magazine resizing is an expensive operation;
3269 3294 * it should not be done frequently.
3270 3295 *
3271 3296 * Changes to the magazine size are serialized by the kmem_taskq lock.
3272 3297 *
3273 3298 * Note: at present this only grows the magazine size. It might be useful
3274 3299 * to allow shrinkage too.
3275 3300 */
3276 3301 static void
3277 3302 kmem_cache_magazine_resize(kmem_cache_t *cp)
3278 3303 {
3279 3304 kmem_magtype_t *mtp = cp->cache_magtype;
3280 3305
3281 3306 ASSERT(taskq_member(kmem_taskq, curthread));
3282 3307
3283 3308 if (cp->cache_chunksize < mtp->mt_maxbuf) {
3284 3309 kmem_cache_magazine_purge(cp);
3285 3310 mutex_enter(&cp->cache_depot_lock);
3286 3311 cp->cache_magtype = ++mtp;
3287 3312 cp->cache_depot_contention_prev =
3288 3313 cp->cache_depot_contention + INT_MAX;
3289 3314 mutex_exit(&cp->cache_depot_lock);
3290 3315 kmem_cache_magazine_enable(cp);
3291 3316 }
3292 3317 }
3293 3318
3294 3319 /*
3295 3320 * Rescale a cache's hash table, so that the table size is roughly the
3296 3321 * cache size. We want the average lookup time to be extremely small.
3297 3322 */
3298 3323 static void
3299 3324 kmem_hash_rescale(kmem_cache_t *cp)
3300 3325 {
3301 3326 kmem_bufctl_t **old_table, **new_table, *bcp;
3302 3327 size_t old_size, new_size, h;
3303 3328
3304 3329 ASSERT(taskq_member(kmem_taskq, curthread));
3305 3330
3306 3331 new_size = MAX(KMEM_HASH_INITIAL,
3307 3332 1 << (highbit(3 * cp->cache_buftotal + 4) - 2));
3308 3333 old_size = cp->cache_hash_mask + 1;
3309 3334
3310 3335 if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
3311 3336 return;
3312 3337
3313 3338 new_table = vmem_alloc(kmem_hash_arena, new_size * sizeof (void *),
3314 3339 VM_NOSLEEP);
3315 3340 if (new_table == NULL)
3316 3341 return;
3317 3342 bzero(new_table, new_size * sizeof (void *));
3318 3343
3319 3344 mutex_enter(&cp->cache_lock);
3320 3345
3321 3346 old_size = cp->cache_hash_mask + 1;
3322 3347 old_table = cp->cache_hash_table;
3323 3348
3324 3349 cp->cache_hash_mask = new_size - 1;
3325 3350 cp->cache_hash_table = new_table;
3326 3351 cp->cache_rescale++;
3327 3352
3328 3353 for (h = 0; h < old_size; h++) {
3329 3354 bcp = old_table[h];
3330 3355 while (bcp != NULL) {
3331 3356 void *addr = bcp->bc_addr;
3332 3357 kmem_bufctl_t *next_bcp = bcp->bc_next;
3333 3358 kmem_bufctl_t **hash_bucket = KMEM_HASH(cp, addr);
3334 3359 bcp->bc_next = *hash_bucket;
3335 3360 *hash_bucket = bcp;
3336 3361 bcp = next_bcp;
3337 3362 }
3338 3363 }
3339 3364
3340 3365 mutex_exit(&cp->cache_lock);
3341 3366
3342 3367 vmem_free(kmem_hash_arena, old_table, old_size * sizeof (void *));
3343 3368 }
3344 3369
3345 3370 /*
3346 3371 * Perform periodic maintenance on a cache: hash rescaling, depot working-set
3347 3372 * update, magazine resizing, and slab consolidation.
3348 3373 */
3349 3374 static void
3350 3375 kmem_cache_update(kmem_cache_t *cp)
3351 3376 {
3352 3377 int need_hash_rescale = 0;
3353 3378 int need_magazine_resize = 0;
3354 3379
3355 3380 ASSERT(MUTEX_HELD(&kmem_cache_lock));
3356 3381
3357 3382 /*
3358 3383 * If the cache has become much larger or smaller than its hash table,
3359 3384 * fire off a request to rescale the hash table.
3360 3385 */
3361 3386 mutex_enter(&cp->cache_lock);
3362 3387
3363 3388 if ((cp->cache_flags & KMF_HASH) &&
3364 3389 (cp->cache_buftotal > (cp->cache_hash_mask << 1) ||
3365 3390 (cp->cache_buftotal < (cp->cache_hash_mask >> 1) &&
3366 3391 cp->cache_hash_mask > KMEM_HASH_INITIAL)))
3367 3392 need_hash_rescale = 1;
3368 3393
3369 3394 mutex_exit(&cp->cache_lock);
3370 3395
3371 3396 /*
3372 3397 * Update the depot working set statistics.
3373 3398 */
3374 3399 kmem_depot_ws_update(cp);
3375 3400
3376 3401 /*
3377 3402 * If there's a lot of contention in the depot,
3378 3403 * increase the magazine size.
3379 3404 */
3380 3405 mutex_enter(&cp->cache_depot_lock);
3381 3406
3382 3407 if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf &&
3383 3408 (int)(cp->cache_depot_contention -
3384 3409 cp->cache_depot_contention_prev) > kmem_depot_contention)
3385 3410 need_magazine_resize = 1;
3386 3411
3387 3412 cp->cache_depot_contention_prev = cp->cache_depot_contention;
3388 3413
3389 3414 mutex_exit(&cp->cache_depot_lock);
3390 3415
3391 3416 if (need_hash_rescale)
3392 3417 (void) taskq_dispatch(kmem_taskq,
3393 3418 (task_func_t *)kmem_hash_rescale, cp, TQ_NOSLEEP);
3394 3419
3395 3420 if (need_magazine_resize)
3396 3421 (void) taskq_dispatch(kmem_taskq,
3397 3422 (task_func_t *)kmem_cache_magazine_resize, cp, TQ_NOSLEEP);
3398 3423
3399 3424 if (cp->cache_defrag != NULL)
3400 3425 (void) taskq_dispatch(kmem_taskq,
3401 3426 (task_func_t *)kmem_cache_scan, cp, TQ_NOSLEEP);
3402 3427 }
3403 3428
3404 3429 static void kmem_update(void *);
3405 3430
3406 3431 static void
3407 3432 kmem_update_timeout(void *dummy)
3408 3433 {
3409 3434 (void) timeout(kmem_update, dummy, kmem_reap_interval);
3410 3435 }
3411 3436
3412 3437 static void
3413 3438 kmem_update(void *dummy)
3414 3439 {
3415 3440 kmem_cache_applyall(kmem_cache_update, NULL, TQ_NOSLEEP);
3416 3441
3417 3442 /*
3418 3443 * We use taskq_dispatch() to reschedule the timeout so that
3419 3444 * kmem_update() becomes self-throttling: it won't schedule
3420 3445 * new tasks until all previous tasks have completed.
3421 3446 */
3422 3447 if (taskq_dispatch(kmem_taskq, kmem_update_timeout, dummy, TQ_NOSLEEP)
3423 3448 == TASKQID_INVALID)
3424 3449 kmem_update_timeout(NULL);
3425 3450 }
3426 3451
3427 3452 static int
3428 3453 kmem_cache_kstat_update(kstat_t *ksp, int rw)
3429 3454 {
3430 3455 struct kmem_cache_kstat *kmcp = &kmem_cache_kstat;
3431 3456 kmem_cache_t *cp = ksp->ks_private;
3432 3457 uint64_t cpu_buf_avail;
3433 3458 uint64_t buf_avail = 0;
3434 3459 int cpu_seqid;
3435 3460 long reap;
3436 3461
3437 3462 ASSERT(MUTEX_HELD(&kmem_cache_kstat_lock));
3438 3463
3439 3464 if (rw == KSTAT_WRITE)
3440 3465 return (EACCES);
3441 3466
3442 3467 mutex_enter(&cp->cache_lock);
3443 3468
3444 3469 kmcp->kmc_alloc_fail.value.ui64 = cp->cache_alloc_fail;
3445 3470 kmcp->kmc_alloc.value.ui64 = cp->cache_slab_alloc;
3446 3471 kmcp->kmc_free.value.ui64 = cp->cache_slab_free;
3447 3472 kmcp->kmc_slab_alloc.value.ui64 = cp->cache_slab_alloc;
3448 3473 kmcp->kmc_slab_free.value.ui64 = cp->cache_slab_free;
3449 3474
3450 3475 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3451 3476 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3452 3477
3453 3478 mutex_enter(&ccp->cc_lock);
3454 3479
3455 3480 cpu_buf_avail = 0;
3456 3481 if (ccp->cc_rounds > 0)
3457 3482 cpu_buf_avail += ccp->cc_rounds;
3458 3483 if (ccp->cc_prounds > 0)
3459 3484 cpu_buf_avail += ccp->cc_prounds;
3460 3485
3461 3486 kmcp->kmc_alloc.value.ui64 += ccp->cc_alloc;
3462 3487 kmcp->kmc_free.value.ui64 += ccp->cc_free;
3463 3488 buf_avail += cpu_buf_avail;
3464 3489
3465 3490 mutex_exit(&ccp->cc_lock);
3466 3491 }
3467 3492
3468 3493 mutex_enter(&cp->cache_depot_lock);
3469 3494
3470 3495 kmcp->kmc_depot_alloc.value.ui64 = cp->cache_full.ml_alloc;
3471 3496 kmcp->kmc_depot_free.value.ui64 = cp->cache_empty.ml_alloc;
3472 3497 kmcp->kmc_depot_contention.value.ui64 = cp->cache_depot_contention;
3473 3498 kmcp->kmc_full_magazines.value.ui64 = cp->cache_full.ml_total;
3474 3499 kmcp->kmc_empty_magazines.value.ui64 = cp->cache_empty.ml_total;
3475 3500 kmcp->kmc_magazine_size.value.ui64 =
3476 3501 (cp->cache_flags & KMF_NOMAGAZINE) ?
3477 3502 0 : cp->cache_magtype->mt_magsize;
3478 3503
3479 3504 kmcp->kmc_alloc.value.ui64 += cp->cache_full.ml_alloc;
3480 3505 kmcp->kmc_free.value.ui64 += cp->cache_empty.ml_alloc;
3481 3506 buf_avail += cp->cache_full.ml_total * cp->cache_magtype->mt_magsize;
3482 3507
3483 3508 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
3484 3509 reap = MIN(reap, cp->cache_full.ml_total);
3485 3510
3486 3511 mutex_exit(&cp->cache_depot_lock);
3487 3512
3488 3513 kmcp->kmc_buf_size.value.ui64 = cp->cache_bufsize;
3489 3514 kmcp->kmc_align.value.ui64 = cp->cache_align;
3490 3515 kmcp->kmc_chunk_size.value.ui64 = cp->cache_chunksize;
3491 3516 kmcp->kmc_slab_size.value.ui64 = cp->cache_slabsize;
3492 3517 kmcp->kmc_buf_constructed.value.ui64 = buf_avail;
3493 3518 buf_avail += cp->cache_bufslab;
3494 3519 kmcp->kmc_buf_avail.value.ui64 = buf_avail;
3495 3520 kmcp->kmc_buf_inuse.value.ui64 = cp->cache_buftotal - buf_avail;
3496 3521 kmcp->kmc_buf_total.value.ui64 = cp->cache_buftotal;
3497 3522 kmcp->kmc_buf_max.value.ui64 = cp->cache_bufmax;
3498 3523 kmcp->kmc_slab_create.value.ui64 = cp->cache_slab_create;
3499 3524 kmcp->kmc_slab_destroy.value.ui64 = cp->cache_slab_destroy;
3500 3525 kmcp->kmc_hash_size.value.ui64 = (cp->cache_flags & KMF_HASH) ?
3501 3526 cp->cache_hash_mask + 1 : 0;
3502 3527 kmcp->kmc_hash_lookup_depth.value.ui64 = cp->cache_lookup_depth;
3503 3528 kmcp->kmc_hash_rescale.value.ui64 = cp->cache_rescale;
3504 3529 kmcp->kmc_vmem_source.value.ui64 = cp->cache_arena->vm_id;
3505 3530 kmcp->kmc_reap.value.ui64 = cp->cache_reap;
3506 3531
3507 3532 if (cp->cache_defrag == NULL) {
3508 3533 kmcp->kmc_move_callbacks.value.ui64 = 0;
3509 3534 kmcp->kmc_move_yes.value.ui64 = 0;
3510 3535 kmcp->kmc_move_no.value.ui64 = 0;
3511 3536 kmcp->kmc_move_later.value.ui64 = 0;
3512 3537 kmcp->kmc_move_dont_need.value.ui64 = 0;
3513 3538 kmcp->kmc_move_dont_know.value.ui64 = 0;
3514 3539 kmcp->kmc_move_hunt_found.value.ui64 = 0;
3515 3540 kmcp->kmc_move_slabs_freed.value.ui64 = 0;
3516 3541 kmcp->kmc_defrag.value.ui64 = 0;
3517 3542 kmcp->kmc_scan.value.ui64 = 0;
3518 3543 kmcp->kmc_move_reclaimable.value.ui64 = 0;
3519 3544 } else {
3520 3545 int64_t reclaimable;
3521 3546
3522 3547 kmem_defrag_t *kd = cp->cache_defrag;
3523 3548 kmcp->kmc_move_callbacks.value.ui64 = kd->kmd_callbacks;
3524 3549 kmcp->kmc_move_yes.value.ui64 = kd->kmd_yes;
3525 3550 kmcp->kmc_move_no.value.ui64 = kd->kmd_no;
3526 3551 kmcp->kmc_move_later.value.ui64 = kd->kmd_later;
3527 3552 kmcp->kmc_move_dont_need.value.ui64 = kd->kmd_dont_need;
3528 3553 kmcp->kmc_move_dont_know.value.ui64 = kd->kmd_dont_know;
3529 3554 kmcp->kmc_move_hunt_found.value.ui64 = 0;
3530 3555 kmcp->kmc_move_slabs_freed.value.ui64 = kd->kmd_slabs_freed;
3531 3556 kmcp->kmc_defrag.value.ui64 = kd->kmd_defrags;
3532 3557 kmcp->kmc_scan.value.ui64 = kd->kmd_scans;
3533 3558
3534 3559 reclaimable = cp->cache_bufslab - (cp->cache_maxchunks - 1);
3535 3560 reclaimable = MAX(reclaimable, 0);
3536 3561 reclaimable += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
3537 3562 kmcp->kmc_move_reclaimable.value.ui64 = reclaimable;
3538 3563 }
3539 3564
3540 3565 mutex_exit(&cp->cache_lock);
3541 3566 return (0);
3542 3567 }
3543 3568
3544 3569 /*
3545 3570 * Return a named statistic about a particular cache.
3546 3571 * This shouldn't be called very often, so it's currently designed for
3547 3572 * simplicity (leverages existing kstat support) rather than efficiency.
3548 3573 */
3549 3574 uint64_t
3550 3575 kmem_cache_stat(kmem_cache_t *cp, char *name)
3551 3576 {
3552 3577 int i;
3553 3578 kstat_t *ksp = cp->cache_kstat;
3554 3579 kstat_named_t *knp = (kstat_named_t *)&kmem_cache_kstat;
3555 3580 uint64_t value = 0;
3556 3581
3557 3582 if (ksp != NULL) {
3558 3583 mutex_enter(&kmem_cache_kstat_lock);
3559 3584 (void) kmem_cache_kstat_update(ksp, KSTAT_READ);
3560 3585 for (i = 0; i < ksp->ks_ndata; i++) {
3561 3586 if (strcmp(knp[i].name, name) == 0) {
3562 3587 value = knp[i].value.ui64;
3563 3588 break;
3564 3589 }
3565 3590 }
3566 3591 mutex_exit(&kmem_cache_kstat_lock);
3567 3592 }
3568 3593 return (value);
3569 3594 }
3570 3595
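A small illustration of kmem_cache_stat() (hypothetical consumer; the "buf_inuse"
and "buf_total" strings are assumed to match the kstat_named entries backing
kmc_buf_inuse and kmc_buf_total above).

	static void
	report_cache_usage(kmem_cache_t *cp)
	{
		uint64_t inuse = kmem_cache_stat(cp, "buf_inuse");
		uint64_t total = kmem_cache_stat(cp, "buf_total");

		cmn_err(CE_NOTE, "%s: %llu of %llu buffers in use",
		    cp->cache_name, (u_longlong_t)inuse, (u_longlong_t)total);
	}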
3571 3596 /*
3572 3597 * Return an estimate of currently available kernel heap memory.
3573 3598 * On 32-bit systems, physical memory may exceed virtual memory,
3574 3599  * so we just truncate the result at 1GB.
3575 3600 */
3576 3601 size_t
3577 3602 kmem_avail(void)
3578 3603 {
3579 3604 spgcnt_t rmem = availrmem - tune.t_minarmem;
3580 3605 spgcnt_t fmem = freemem - minfree;
3581 3606
3582 3607 return ((size_t)ptob(MIN(MAX(MIN(rmem, fmem), 0),
3583 3608 1 << (30 - PAGESHIFT))));
3584 3609 }
3585 3610
3586 3611 /*
3587 3612 * Return the maximum amount of memory that is (in theory) allocatable
3588 3613 * from the heap. This may be used as an estimate only since there
3589 3614  * is no guarantee this space will still be available when an allocation
3590 3615 * request is made, nor that the space may be allocated in one big request
3591 3616 * due to kernel heap fragmentation.
3592 3617 */
3593 3618 size_t
3594 3619 kmem_maxavail(void)
3595 3620 {
3596 3621 spgcnt_t pmem = availrmem - tune.t_minarmem;
3597 3622 spgcnt_t vmem = btop(vmem_size(heap_arena, VMEM_FREE));
3598 3623
3599 3624 return ((size_t)ptob(MAX(MIN(pmem, vmem), 0)));
3600 3625 }
3601 3626
3602 3627 /*
3603 3628 * Indicate whether memory-intensive kmem debugging is enabled.
3604 3629 */
3605 3630 int
3606 3631 kmem_debugging(void)
3607 3632 {
3608 3633 return (kmem_flags & (KMF_AUDIT | KMF_REDZONE));
3609 3634 }
3610 3635
3611 3636 /* binning function, sorts finely at the two extremes */
3612 3637 #define KMEM_PARTIAL_SLAB_WEIGHT(sp, binshift) \
3613 3638 ((((sp)->slab_refcnt <= (binshift)) || \
3614 3639 (((sp)->slab_chunks - (sp)->slab_refcnt) <= (binshift))) \
3615 3640 ? -(sp)->slab_refcnt \
3616 3641 : -((binshift) + ((sp)->slab_refcnt >> (binshift))))
3617 3642
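A worked example of the binning weight, assuming cache_partial_binshift is 4:

	slab A: slab_chunks = 32, slab_refcnt = 30 (2 buffers free)
	        32 - 30 = 2 <= 4, so weight = -slab_refcnt = -30
	slab B: slab_chunks = 32, slab_refcnt = 16 (16 buffers free)
	        weight = -(4 + (16 >> 4)) = -5

	A (-30) sorts before B (-5), so the nearly full slab sits toward the front
	of the freelist where it is most likely to become completely allocated, as
	the comment below explains.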
3618 3643 /*
3619 3644 * Minimizing the number of partial slabs on the freelist minimizes
3620 3645 * fragmentation (the ratio of unused buffers held by the slab layer). There are
3621 3646 * two ways to get a slab off of the freelist: 1) free all the buffers on the
3622 3647 * slab, and 2) allocate all the buffers on the slab. It follows that we want
3623 3648 * the most-used slabs at the front of the list where they have the best chance
3624 3649 * of being completely allocated, and the least-used slabs at a safe distance
3625 3650 * from the front to improve the odds that the few remaining buffers will all be
3626 3651 * freed before another allocation can tie up the slab. For that reason a slab
3627 3652  * with a higher slab_refcnt sorts less than a slab with a lower
3628 3653 * slab_refcnt.
3629 3654 *
3630 3655 * However, if a slab has at least one buffer that is deemed unfreeable, we
3631 3656 * would rather have that slab at the front of the list regardless of
3632 3657 * slab_refcnt, since even one unfreeable buffer makes the entire slab
3633 3658 * unfreeable. If the client returns KMEM_CBRC_NO in response to a cache_move()
3634 3659 * callback, the slab is marked unfreeable for as long as it remains on the
3635 3660 * freelist.
3636 3661 */
3637 3662 static int
3638 3663 kmem_partial_slab_cmp(const void *p0, const void *p1)
3639 3664 {
3640 3665 const kmem_cache_t *cp;
3641 3666 const kmem_slab_t *s0 = p0;
3642 3667 const kmem_slab_t *s1 = p1;
3643 3668 int w0, w1;
3644 3669 size_t binshift;
3645 3670
3646 3671 ASSERT(KMEM_SLAB_IS_PARTIAL(s0));
3647 3672 ASSERT(KMEM_SLAB_IS_PARTIAL(s1));
3648 3673 ASSERT(s0->slab_cache == s1->slab_cache);
3649 3674 cp = s1->slab_cache;
3650 3675 ASSERT(MUTEX_HELD(&cp->cache_lock));
3651 3676 binshift = cp->cache_partial_binshift;
3652 3677
3653 3678 /* weight of first slab */
3654 3679 w0 = KMEM_PARTIAL_SLAB_WEIGHT(s0, binshift);
3655 3680 if (s0->slab_flags & KMEM_SLAB_NOMOVE) {
3656 3681 w0 -= cp->cache_maxchunks;
3657 3682 }
3658 3683
3659 3684 /* weight of second slab */
3660 3685 w1 = KMEM_PARTIAL_SLAB_WEIGHT(s1, binshift);
3661 3686 if (s1->slab_flags & KMEM_SLAB_NOMOVE) {
3662 3687 w1 -= cp->cache_maxchunks;
3663 3688 }
3664 3689
3665 3690 if (w0 < w1)
3666 3691 return (-1);
3667 3692 if (w0 > w1)
3668 3693 return (1);
3669 3694
3670 3695 /* compare pointer values */
3671 3696 if ((uintptr_t)s0 < (uintptr_t)s1)
3672 3697 return (-1);
3673 3698 if ((uintptr_t)s0 > (uintptr_t)s1)
3674 3699 return (1);
3675 3700
3676 3701 return (0);
3677 3702 }
3678 3703
3679 3704 /*
3680 3705 * It must be valid to call the destructor (if any) on a newly created object.
3681 3706 * That is, the constructor (if any) must leave the object in a valid state for
3682 3707 * the destructor.
3683 3708 */
3684 3709 kmem_cache_t *
3685 3710 kmem_cache_create(
3686 3711 char *name, /* descriptive name for this cache */
3687 3712 size_t bufsize, /* size of the objects it manages */
3688 3713 size_t align, /* required object alignment */
3689 3714 int (*constructor)(void *, void *, int), /* object constructor */
3690 3715 void (*destructor)(void *, void *), /* object destructor */
3691 3716 void (*reclaim)(void *), /* memory reclaim callback */
3692 3717 void *private, /* pass-thru arg for constr/destr/reclaim */
3693 3718 vmem_t *vmp, /* vmem source for slab allocation */
3694 3719 int cflags) /* cache creation flags */
3695 3720 {
3696 3721 int cpu_seqid;
3697 3722 size_t chunksize;
3698 3723 kmem_cache_t *cp;
3699 3724 kmem_magtype_t *mtp;
3700 3725 size_t csize = KMEM_CACHE_SIZE(max_ncpus);
3701 3726
3702 3727 #ifdef DEBUG
3703 3728 /*
3704 3729 * Cache names should conform to the rules for valid C identifiers
3705 3730 */
3706 3731 if (!strident_valid(name)) {
3707 3732 cmn_err(CE_CONT,
3708 3733 "kmem_cache_create: '%s' is an invalid cache name\n"
3709 3734 "cache names must conform to the rules for "
3710 3735 "C identifiers\n", name);
3711 3736 }
3712 3737 #endif /* DEBUG */
3713 3738
3714 3739 if (vmp == NULL)
3715 3740 vmp = kmem_default_arena;
3716 3741
3717 3742 /*
3718 3743 * If this kmem cache has an identifier vmem arena as its source, mark
3719 3744 * it such to allow kmem_reap_idspace().
3720 3745 */
3721 3746 ASSERT(!(cflags & KMC_IDENTIFIER)); /* consumer should not set this */
3722 3747 if (vmp->vm_cflags & VMC_IDENTIFIER)
3723 3748 cflags |= KMC_IDENTIFIER;
3724 3749
3725 3750 /*
3726 3751 * Get a kmem_cache structure. We arrange that cp->cache_cpu[]
3727 3752 * is aligned on a KMEM_CPU_CACHE_SIZE boundary to prevent
3728 3753 * false sharing of per-CPU data.
3729 3754 */
3730 3755 cp = vmem_xalloc(kmem_cache_arena, csize, KMEM_CPU_CACHE_SIZE,
3731 3756 P2NPHASE(csize, KMEM_CPU_CACHE_SIZE), 0, NULL, NULL, VM_SLEEP);
3732 3757 bzero(cp, csize);
3733 3758 list_link_init(&cp->cache_link);
3734 3759
3735 3760 if (align == 0)
3736 3761 align = KMEM_ALIGN;
3737 3762
3738 3763 /*
3739 3764 * If we're not at least KMEM_ALIGN aligned, we can't use free
3740 3765 * memory to hold bufctl information (because we can't safely
3741 3766 * perform word loads and stores on it).
3742 3767 */
3743 3768 if (align < KMEM_ALIGN)
3744 3769 cflags |= KMC_NOTOUCH;
3745 3770
3746 3771 if (!ISP2(align) || align > vmp->vm_quantum)
3747 3772 panic("kmem_cache_create: bad alignment %lu", align);
3748 3773
3749 3774 mutex_enter(&kmem_flags_lock);
3750 3775 if (kmem_flags & KMF_RANDOMIZE)
3751 3776 kmem_flags = (((kmem_flags | ~KMF_RANDOM) + 1) & KMF_RANDOM) |
3752 3777 KMF_RANDOMIZE;
3753 3778 cp->cache_flags = (kmem_flags | cflags) & KMF_DEBUG;
3754 3779 mutex_exit(&kmem_flags_lock);
3755 3780
3756 3781 /*
3757 3782 * Make sure all the various flags are reasonable.
3758 3783 */
3759 3784 ASSERT(!(cflags & KMC_NOHASH) || !(cflags & KMC_NOTOUCH));
3760 3785
3761 3786 if (cp->cache_flags & KMF_LITE) {
3762 3787 if (bufsize >= kmem_lite_minsize &&
3763 3788 align <= kmem_lite_maxalign &&
3764 3789 P2PHASE(bufsize, kmem_lite_maxalign) != 0) {
3765 3790 cp->cache_flags |= KMF_BUFTAG;
3766 3791 cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3767 3792 } else {
3768 3793 cp->cache_flags &= ~KMF_DEBUG;
3769 3794 }
3770 3795 }
3771 3796
3772 3797 if (cp->cache_flags & KMF_DEADBEEF)
3773 3798 cp->cache_flags |= KMF_REDZONE;
3774 3799
3775 3800 if ((cflags & KMC_QCACHE) && (cp->cache_flags & KMF_AUDIT))
3776 3801 cp->cache_flags |= KMF_NOMAGAZINE;
3777 3802
3778 3803 if (cflags & KMC_NODEBUG)
3779 3804 cp->cache_flags &= ~KMF_DEBUG;
3780 3805
3781 3806 if (cflags & KMC_NOTOUCH)
3782 3807 cp->cache_flags &= ~KMF_TOUCH;
3783 3808
3784 3809 if (cflags & KMC_PREFILL)
3785 3810 cp->cache_flags |= KMF_PREFILL;
3786 3811
3787 3812 if (cflags & KMC_NOHASH)
3788 3813 cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3789 3814
3790 3815 if (cflags & KMC_NOMAGAZINE)
3791 3816 cp->cache_flags |= KMF_NOMAGAZINE;
3792 3817
3793 3818 if ((cp->cache_flags & KMF_AUDIT) && !(cflags & KMC_NOTOUCH))
3794 3819 cp->cache_flags |= KMF_REDZONE;
3795 3820
3796 3821 if (!(cp->cache_flags & KMF_AUDIT))
3797 3822 cp->cache_flags &= ~KMF_CONTENTS;
3798 3823
3799 3824 if ((cp->cache_flags & KMF_BUFTAG) && bufsize >= kmem_minfirewall &&
3800 3825 !(cp->cache_flags & KMF_LITE) && !(cflags & KMC_NOHASH))
3801 3826 cp->cache_flags |= KMF_FIREWALL;
3802 3827
3803 3828 if (vmp != kmem_default_arena || kmem_firewall_arena == NULL)
3804 3829 cp->cache_flags &= ~KMF_FIREWALL;
3805 3830
3806 3831 if (cp->cache_flags & KMF_FIREWALL) {
3807 3832 cp->cache_flags &= ~KMF_BUFTAG;
3808 3833 cp->cache_flags |= KMF_NOMAGAZINE;
3809 3834 ASSERT(vmp == kmem_default_arena);
3810 3835 vmp = kmem_firewall_arena;
3811 3836 }
3812 3837
3813 3838 /*
3814 3839 * Set cache properties.
3815 3840 */
3816 3841 (void) strncpy(cp->cache_name, name, KMEM_CACHE_NAMELEN);
3817 3842 strident_canon(cp->cache_name, KMEM_CACHE_NAMELEN + 1);
3818 3843 cp->cache_bufsize = bufsize;
3819 3844 cp->cache_align = align;
3820 3845 cp->cache_constructor = constructor;
3821 3846 cp->cache_destructor = destructor;
3822 3847 cp->cache_reclaim = reclaim;
3823 3848 cp->cache_private = private;
3824 3849 cp->cache_arena = vmp;
3825 3850 cp->cache_cflags = cflags;
3826 3851
3827 3852 /*
3828 3853 * Determine the chunk size.
3829 3854 */
3830 3855 chunksize = bufsize;
3831 3856
3832 3857 if (align >= KMEM_ALIGN) {
3833 3858 chunksize = P2ROUNDUP(chunksize, KMEM_ALIGN);
3834 3859 cp->cache_bufctl = chunksize - KMEM_ALIGN;
3835 3860 }
3836 3861
3837 3862 if (cp->cache_flags & KMF_BUFTAG) {
3838 3863 cp->cache_bufctl = chunksize;
3839 3864 cp->cache_buftag = chunksize;
3840 3865 if (cp->cache_flags & KMF_LITE)
3841 3866 chunksize += KMEM_BUFTAG_LITE_SIZE(kmem_lite_count);
3842 3867 else
3843 3868 chunksize += sizeof (kmem_buftag_t);
3844 3869 }
3845 3870
3846 3871 if (cp->cache_flags & KMF_DEADBEEF) {
3847 3872 cp->cache_verify = MIN(cp->cache_buftag, kmem_maxverify);
3848 3873 if (cp->cache_flags & KMF_LITE)
3849 3874 cp->cache_verify = sizeof (uint64_t);
3850 3875 }
3851 3876
3852 3877 cp->cache_contents = MIN(cp->cache_bufctl, kmem_content_maxsave);
3853 3878
3854 3879 cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align);
3855 3880
3856 3881 /*
3857 3882 * Now that we know the chunk size, determine the optimal slab size.
3858 3883 */
3859 3884 if (vmp == kmem_firewall_arena) {
3860 3885 cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum);
3861 3886 cp->cache_mincolor = cp->cache_slabsize - chunksize;
3862 3887 cp->cache_maxcolor = cp->cache_mincolor;
3863 3888 cp->cache_flags |= KMF_HASH;
3864 3889 ASSERT(!(cp->cache_flags & KMF_BUFTAG));
3865 3890 } else if ((cflags & KMC_NOHASH) || (!(cflags & KMC_NOTOUCH) &&
3866 3891 !(cp->cache_flags & KMF_AUDIT) &&
3867 3892 chunksize < vmp->vm_quantum / KMEM_VOID_FRACTION)) {
3868 3893 cp->cache_slabsize = vmp->vm_quantum;
3869 3894 cp->cache_mincolor = 0;
3870 3895 cp->cache_maxcolor =
3871 3896 (cp->cache_slabsize - sizeof (kmem_slab_t)) % chunksize;
3872 3897 ASSERT(chunksize + sizeof (kmem_slab_t) <= cp->cache_slabsize);
3873 3898 ASSERT(!(cp->cache_flags & KMF_AUDIT));
3874 3899 } else {
3875 3900 size_t chunks, bestfit, waste, slabsize;
3876 3901 size_t minwaste = LONG_MAX;
3877 3902
3878 3903 bestfit = 0;
3879 3904 for (chunks = 1; chunks <= KMEM_VOID_FRACTION; chunks++) {
3880 3905 slabsize = P2ROUNDUP(chunksize * chunks,
3881 3906 vmp->vm_quantum);
3882 3907 chunks = slabsize / chunksize;
3883 3908 waste = (slabsize % chunksize) / chunks;
3884 3909 if (waste < minwaste) {
3885 3910 minwaste = waste;
3886 3911 bestfit = slabsize;
3887 3912 }
3888 3913 }
3889 3914 if (cflags & KMC_QCACHE)
3890 3915 bestfit = VMEM_QCACHE_SLABSIZE(vmp->vm_qcache_max);
3891 3916 cp->cache_slabsize = bestfit;
3892 3917 cp->cache_mincolor = 0;
3893 3918 cp->cache_maxcolor = bestfit % chunksize;
3894 3919 cp->cache_flags |= KMF_HASH;
3895 3920 }
3896 3921
3897 3922 cp->cache_maxchunks = (cp->cache_slabsize / cp->cache_chunksize);
3898 3923 cp->cache_partial_binshift = highbit(cp->cache_maxchunks / 16) + 1;
3899 3924
3900 3925 /*
3901 3926 * Disallowing prefill when either the DEBUG or HASH flag is set or when
3902 3927 * there is a constructor avoids some tricky issues with debug setup
3903 3928 * that may be revisited later. We cannot allow prefill in a
3904 3929 * metadata cache because of potential recursion.
3905 3930 */
3906 3931 if (vmp == kmem_msb_arena ||
3907 3932 cp->cache_flags & (KMF_HASH | KMF_BUFTAG) ||
3908 3933 cp->cache_constructor != NULL)
3909 3934 cp->cache_flags &= ~KMF_PREFILL;
3910 3935
3911 3936 if (cp->cache_flags & KMF_HASH) {
3912 3937 ASSERT(!(cflags & KMC_NOHASH));
3913 3938 cp->cache_bufctl_cache = (cp->cache_flags & KMF_AUDIT) ?
3914 3939 kmem_bufctl_audit_cache : kmem_bufctl_cache;
3915 3940 }
3916 3941
3917 3942 if (cp->cache_maxcolor >= vmp->vm_quantum)
3918 3943 cp->cache_maxcolor = vmp->vm_quantum - 1;
3919 3944
3920 3945 cp->cache_color = cp->cache_mincolor;
3921 3946
3922 3947 /*
3923 3948 * Initialize the rest of the slab layer.
3924 3949 */
3925 3950 mutex_init(&cp->cache_lock, NULL, MUTEX_DEFAULT, NULL);
3926 3951
3927 3952 avl_create(&cp->cache_partial_slabs, kmem_partial_slab_cmp,
3928 3953 sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
3929 3954 /* LINTED: E_TRUE_LOGICAL_EXPR */
3930 3955 ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
3931 3956 /* reuse partial slab AVL linkage for complete slab list linkage */
3932 3957 list_create(&cp->cache_complete_slabs,
3933 3958 sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
3934 3959
3935 3960 if (cp->cache_flags & KMF_HASH) {
3936 3961 cp->cache_hash_table = vmem_alloc(kmem_hash_arena,
3937 3962 KMEM_HASH_INITIAL * sizeof (void *), VM_SLEEP);
3938 3963 bzero(cp->cache_hash_table,
3939 3964 KMEM_HASH_INITIAL * sizeof (void *));
3940 3965 cp->cache_hash_mask = KMEM_HASH_INITIAL - 1;
3941 3966 cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1;
3942 3967 }
3943 3968
3944 3969 /*
3945 3970 * Initialize the depot.
3946 3971 */
3947 3972 mutex_init(&cp->cache_depot_lock, NULL, MUTEX_DEFAULT, NULL);
3948 3973
3949 3974 for (mtp = kmem_magtype; chunksize <= mtp->mt_minbuf; mtp++)
3950 3975 continue;
3951 3976
3952 3977 cp->cache_magtype = mtp;
3953 3978
3954 3979 /*
3955 3980 * Initialize the CPU layer.
3956 3981 */
3957 3982 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3958 3983 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3959 3984 mutex_init(&ccp->cc_lock, NULL, MUTEX_DEFAULT, NULL);
3960 3985 ccp->cc_flags = cp->cache_flags;
3961 3986 ccp->cc_rounds = -1;
3962 3987 ccp->cc_prounds = -1;
3963 3988 }
3964 3989
3965 3990 /*
3966 3991 * Create the cache's kstats.
3967 3992 */
3968 3993 if ((cp->cache_kstat = kstat_create("unix", 0, cp->cache_name,
3969 3994 "kmem_cache", KSTAT_TYPE_NAMED,
3970 3995 sizeof (kmem_cache_kstat) / sizeof (kstat_named_t),
3971 3996 KSTAT_FLAG_VIRTUAL)) != NULL) {
3972 3997 cp->cache_kstat->ks_data = &kmem_cache_kstat;
3973 3998 cp->cache_kstat->ks_update = kmem_cache_kstat_update;
3974 3999 cp->cache_kstat->ks_private = cp;
3975 4000 cp->cache_kstat->ks_lock = &kmem_cache_kstat_lock;
3976 4001 kstat_install(cp->cache_kstat);
3977 4002 }
3978 4003
3979 4004 /*
3980 4005 * Add the cache to the global list. This makes it visible
3981 4006 * to kmem_update(), so the cache must be ready for business.
3982 4007 */
3983 4008 mutex_enter(&kmem_cache_lock);
3984 4009 list_insert_tail(&kmem_caches, cp);
3985 4010 mutex_exit(&kmem_cache_lock);
3986 4011
3987 4012 if (kmem_ready)
3988 4013 kmem_cache_magazine_enable(cp);
3989 4014
3990 4015 return (cp);
3991 4016 }
3992 4017
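A minimal, hypothetical consumer of kmem_cache_create() (foo_t and the foo_*
functions are illustrative only). Per the note above, the destructor must be able
to run on a freshly constructed object, and, as the magazine-layer comments
earlier describe, freed objects are expected to remain in their constructed state.

	typedef struct foo {
		kmutex_t	foo_lock;
		int		foo_state;
	} foo_t;

	/*ARGSUSED*/
	static int
	foo_construct(void *buf, void *private, int kmflags)
	{
		foo_t *fp = buf;

		mutex_init(&fp->foo_lock, NULL, MUTEX_DEFAULT, NULL);
		fp->foo_state = 0;
		return (0);
	}

	/*ARGSUSED*/
	static void
	foo_destruct(void *buf, void *private)
	{
		foo_t *fp = buf;

		mutex_destroy(&fp->foo_lock);
	}

	static kmem_cache_t *foo_cache;

	static void
	foo_init(void)
	{
		foo_cache = kmem_cache_create("foo_cache", sizeof (foo_t), 0,
		    foo_construct, foo_destruct, NULL, NULL, NULL, 0);
	}

	static void
	foo_example(void)
	{
		foo_t *fp = kmem_cache_alloc(foo_cache, KM_SLEEP);

		/* foo_construct() has already run; use the object ... */

		kmem_cache_free(foo_cache, fp);	/* left constructed */
	}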
3993 4018 static int
3994 4019 kmem_move_cmp(const void *buf, const void *p)
3995 4020 {
3996 4021 const kmem_move_t *kmm = p;
3997 4022 uintptr_t v1 = (uintptr_t)buf;
3998 4023 uintptr_t v2 = (uintptr_t)kmm->kmm_from_buf;
3999 4024 return (v1 < v2 ? -1 : (v1 > v2 ? 1 : 0));
4000 4025 }
4001 4026
4002 4027 static void
4003 4028 kmem_reset_reclaim_threshold(kmem_defrag_t *kmd)
4004 4029 {
4005 4030 kmd->kmd_reclaim_numer = 1;
4006 4031 }
4007 4032
4008 4033 /*
4009 4034 * Initially, when choosing candidate slabs for buffers to move, we want to be
4010 4035 * very selective and take only slabs that are less than
4011 4036 * (1 / KMEM_VOID_FRACTION) allocated. If we have difficulty finding candidate
4012 4037 * slabs, then we raise the allocation ceiling incrementally. The reclaim
4013 4038 * threshold is reset to (1 / KMEM_VOID_FRACTION) as soon as the cache is no
4014 4039 * longer fragmented.
4015 4040 */
4016 4041 static void
4017 4042 kmem_adjust_reclaim_threshold(kmem_defrag_t *kmd, int direction)
4018 4043 {
4019 4044 if (direction > 0) {
4020 4045 /* make it easier to find a candidate slab */
4021 4046 if (kmd->kmd_reclaim_numer < (KMEM_VOID_FRACTION - 1)) {
4022 4047 kmd->kmd_reclaim_numer++;
4023 4048 }
4024 4049 } else {
4025 4050 /* be more selective */
4026 4051 if (kmd->kmd_reclaim_numer > 1) {
4027 4052 kmd->kmd_reclaim_numer--;
4028 4053 }
4029 4054 }
4030 4055 }
4031 4056
4032 4057 void
4033 4058 kmem_cache_set_move(kmem_cache_t *cp,
4034 4059 kmem_cbrc_t (*move)(void *, void *, size_t, void *))
4035 4060 {
4036 4061 kmem_defrag_t *defrag;
4037 4062
4038 4063 ASSERT(move != NULL);
4039 4064 /*
4040 4065 * The consolidator does not support NOTOUCH caches because kmem cannot
4041 4066 * initialize their slabs with the 0xbaddcafe memory pattern, which sets
4042 4067 * a low order bit usable by clients to distinguish uninitialized memory
4043 4068 * from known objects (see kmem_slab_create).
4044 4069 */
4045 4070 ASSERT(!(cp->cache_cflags & KMC_NOTOUCH));
4046 4071 ASSERT(!(cp->cache_cflags & KMC_IDENTIFIER));
4047 4072
4048 4073 /*
4049 4074 * We should not be holding anyone's cache lock when calling
4050 4075 * kmem_cache_alloc(), so allocate in all cases before acquiring the
4051 4076 * lock.
4052 4077 */
4053 4078 defrag = kmem_cache_alloc(kmem_defrag_cache, KM_SLEEP);
4054 4079
4055 4080 mutex_enter(&cp->cache_lock);
4056 4081
4057 4082 if (KMEM_IS_MOVABLE(cp)) {
4058 4083 if (cp->cache_move == NULL) {
4059 4084 ASSERT(cp->cache_slab_alloc == 0);
4060 4085
4061 4086 cp->cache_defrag = defrag;
4062 4087 defrag = NULL; /* nothing to free */
4063 4088 bzero(cp->cache_defrag, sizeof (kmem_defrag_t));
4064 4089 avl_create(&cp->cache_defrag->kmd_moves_pending,
4065 4090 kmem_move_cmp, sizeof (kmem_move_t),
4066 4091 offsetof(kmem_move_t, kmm_entry));
4067 4092 /* LINTED: E_TRUE_LOGICAL_EXPR */
4068 4093 ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
4069 4094 /* reuse the slab's AVL linkage for deadlist linkage */
4070 4095 list_create(&cp->cache_defrag->kmd_deadlist,
4071 4096 sizeof (kmem_slab_t),
4072 4097 offsetof(kmem_slab_t, slab_link));
4073 4098 kmem_reset_reclaim_threshold(cp->cache_defrag);
4074 4099 }
4075 4100 cp->cache_move = move;
4076 4101 }
4077 4102
4078 4103 mutex_exit(&cp->cache_lock);
4079 4104
4080 4105 if (defrag != NULL) {
4081 4106 kmem_cache_free(kmem_defrag_cache, defrag); /* unused */
4082 4107 }
4083 4108 }
4084 4109
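A skeleton move callback of the kind registered above (hypothetical client;
foo_cache and the foo_is_* helpers are illustrative). The return values mirror
the kmd_yes/kmd_no/kmd_later/kmd_dont_need/kmd_dont_know statistics kept by the
defrag code, and KMEM_CBRC_DONT_KNOW is the expected answer when the buffer may
still hold the uninitialized 0xbaddcafe pattern mentioned above.

	static kmem_cbrc_t
	foo_move(void *old, void *new, size_t size, void *private)
	{
		foo_t *ofp = old;
		foo_t *nfp = new;

		if (!foo_is_known_object(ofp))
			return (KMEM_CBRC_DONT_KNOW);
		if (foo_is_busy(ofp))
			return (KMEM_CBRC_LATER);	/* retry on a later pass */

		bcopy(ofp, nfp, size);
		/* re-point any external references from ofp to nfp here */
		return (KMEM_CBRC_YES);
	}

	static void
	foo_enable_move(void)
	{
		kmem_cache_set_move(foo_cache, foo_move);
	}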
4085 4110 void
4086 4111 kmem_cache_destroy(kmem_cache_t *cp)
4087 4112 {
4088 4113 int cpu_seqid;
4089 4114
4090 4115 /*
4091 4116 * Remove the cache from the global cache list so that no one else
4092 4117 * can schedule tasks on its behalf, wait for any pending tasks to
4093 4118 * complete, purge the cache, and then destroy it.
4094 4119 */
4095 4120 mutex_enter(&kmem_cache_lock);
4096 4121 list_remove(&kmem_caches, cp);
4097 4122 mutex_exit(&kmem_cache_lock);
4098 4123
4099 4124 if (kmem_taskq != NULL)
4100 4125 taskq_wait(kmem_taskq);
4101 4126
4102 4127 if (kmem_move_taskq != NULL && cp->cache_defrag != NULL)
4103 4128 taskq_wait(kmem_move_taskq);
4104 4129
4105 4130 kmem_cache_magazine_purge(cp);
4106 4131
4107 4132 mutex_enter(&cp->cache_lock);
4108 4133 if (cp->cache_buftotal != 0)
4109 4134 cmn_err(CE_WARN, "kmem_cache_destroy: '%s' (%p) not empty",
4110 4135 cp->cache_name, (void *)cp);
4111 4136 if (cp->cache_defrag != NULL) {
4112 4137 avl_destroy(&cp->cache_defrag->kmd_moves_pending);
4113 4138 list_destroy(&cp->cache_defrag->kmd_deadlist);
4114 4139 kmem_cache_free(kmem_defrag_cache, cp->cache_defrag);
4115 4140 cp->cache_defrag = NULL;
4116 4141 }
4117 4142 /*
4118 4143 * The cache is now dead. There should be no further activity. We
4119 4144 * enforce this by setting land mines in the constructor, destructor,
4120 4145 * reclaim, and move routines that induce a kernel text fault if
4121 4146 * invoked.
4122 4147 */
4123 4148 cp->cache_constructor = (int (*)(void *, void *, int))1;
4124 4149 cp->cache_destructor = (void (*)(void *, void *))2;
4125 4150 cp->cache_reclaim = (void (*)(void *))3;
4126 4151 cp->cache_move = (kmem_cbrc_t (*)(void *, void *, size_t, void *))4;
4127 4152 mutex_exit(&cp->cache_lock);
4128 4153
4129 4154 kstat_delete(cp->cache_kstat);
4130 4155
4131 4156 if (cp->cache_hash_table != NULL)
4132 4157 vmem_free(kmem_hash_arena, cp->cache_hash_table,
4133 4158 (cp->cache_hash_mask + 1) * sizeof (void *));
4134 4159
4135 4160 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++)
4136 4161 mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock);
4137 4162
4138 4163 mutex_destroy(&cp->cache_depot_lock);
4139 4164 mutex_destroy(&cp->cache_lock);
4140 4165
4141 4166 vmem_free(kmem_cache_arena, cp, KMEM_CACHE_SIZE(max_ncpus));
4142 4167 }
4143 4168
4144 4169 /*ARGSUSED*/
4145 4170 static int
4146 4171 kmem_cpu_setup(cpu_setup_t what, int id, void *arg)
4147 4172 {
4148 4173 ASSERT(MUTEX_HELD(&cpu_lock));
4149 4174 if (what == CPU_UNCONFIG) {
4150 4175 kmem_cache_applyall(kmem_cache_magazine_purge,
4151 4176 kmem_taskq, TQ_SLEEP);
4152 4177 kmem_cache_applyall(kmem_cache_magazine_enable,
4153 4178 kmem_taskq, TQ_SLEEP);
4154 4179 }
4155 4180 return (0);
4156 4181 }
4157 4182
4158 4183 static void
4159 4184 kmem_alloc_caches_create(const int *array, size_t count,
4160 4185 kmem_cache_t **alloc_table, size_t maxbuf, uint_t shift)
4161 4186 {
4162 4187 char name[KMEM_CACHE_NAMELEN + 1];
4163 4188 size_t table_unit = (1 << shift); /* range of one alloc_table entry */
4164 4189 size_t size = table_unit;
4165 4190 int i;
4166 4191
4167 4192 for (i = 0; i < count; i++) {
4168 4193 size_t cache_size = array[i];
4169 4194 size_t align = KMEM_ALIGN;
4170 4195 kmem_cache_t *cp;
4171 4196
4172 4197 /* if the table has an entry for maxbuf, we're done */
4173 4198 if (size > maxbuf)
4174 4199 break;
4175 4200
4176 4201 /* cache size must be a multiple of the table unit */
4177 4202 ASSERT(P2PHASE(cache_size, table_unit) == 0);
4178 4203
4179 4204 /*
4180 4205 * If they allocate a multiple of the coherency granularity,
4181 4206 * they get a coherency-granularity-aligned address.
4182 4207 */
4183 4208 if (IS_P2ALIGNED(cache_size, 64))
4184 4209 align = 64;
4185 4210 if (IS_P2ALIGNED(cache_size, PAGESIZE))
4186 4211 align = PAGESIZE;
4187 4212 (void) snprintf(name, sizeof (name),
4188 4213 "kmem_alloc_%lu", cache_size);
4189 4214 cp = kmem_cache_create(name, cache_size, align,
4190 4215 NULL, NULL, NULL, NULL, NULL, KMC_KMEM_ALLOC);
4191 4216
4192 4217 while (size <= cache_size) {
4193 4218 alloc_table[(size - 1) >> shift] = cp;
4194 4219 size += table_unit;
4195 4220 }
4196 4221 }
4197 4222
4198 4223 ASSERT(size > maxbuf); /* i.e. maxbuf <= max(cache_size) */
4199 4224 }
4200 4225
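As a concrete reading of the table fill above (the sizes here are only an example): with shift == KMEM_ALIGN_SHIFT the table unit is 8, so if 128 and 160 are adjacent entries in the size array, the inner while loop stores the 160-byte cache in alloc_table[16] through alloc_table[19]. A later kmem_alloc() of anywhere from 129 to 160 bytes indexes the table as (size - 1) >> shift and is satisfied from kmem_alloc_160.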
4201 4226 static void
4202 4227 kmem_cache_init(int pass, int use_large_pages)
4203 4228 {
4204 4229 int i;
4205 4230 size_t maxbuf;
4206 4231 kmem_magtype_t *mtp;
4207 4232
4208 4233 for (i = 0; i < sizeof (kmem_magtype) / sizeof (*mtp); i++) {
4209 4234 char name[KMEM_CACHE_NAMELEN + 1];
4210 4235
4211 4236 mtp = &kmem_magtype[i];
4212 4237 (void) sprintf(name, "kmem_magazine_%d", mtp->mt_magsize);
4213 4238 mtp->mt_cache = kmem_cache_create(name,
4214 4239 (mtp->mt_magsize + 1) * sizeof (void *),
4215 4240 mtp->mt_align, NULL, NULL, NULL, NULL,
4216 4241 kmem_msb_arena, KMC_NOHASH);
4217 4242 }
4218 4243
4219 4244 kmem_slab_cache = kmem_cache_create("kmem_slab_cache",
4220 4245 sizeof (kmem_slab_t), 0, NULL, NULL, NULL, NULL,
4221 4246 kmem_msb_arena, KMC_NOHASH);
4222 4247
4223 4248 kmem_bufctl_cache = kmem_cache_create("kmem_bufctl_cache",
4224 4249 sizeof (kmem_bufctl_t), 0, NULL, NULL, NULL, NULL,
4225 4250 kmem_msb_arena, KMC_NOHASH);
4226 4251
4227 4252 kmem_bufctl_audit_cache = kmem_cache_create("kmem_bufctl_audit_cache",
4228 4253 sizeof (kmem_bufctl_audit_t), 0, NULL, NULL, NULL, NULL,
4229 4254 kmem_msb_arena, KMC_NOHASH);
4230 4255
4231 4256 if (pass == 2) {
4232 4257 kmem_va_arena = vmem_create("kmem_va",
4233 4258 NULL, 0, PAGESIZE,
4234 4259 vmem_alloc, vmem_free, heap_arena,
4235 4260 8 * PAGESIZE, VM_SLEEP);
4236 4261
4237 4262 if (use_large_pages) {
4238 4263 kmem_default_arena = vmem_xcreate("kmem_default",
4239 4264 NULL, 0, PAGESIZE,
4240 4265 segkmem_alloc_lp, segkmem_free_lp, kmem_va_arena,
4241 4266 0, VMC_DUMPSAFE | VM_SLEEP);
4242 4267 } else {
4243 4268 kmem_default_arena = vmem_create("kmem_default",
4244 4269 NULL, 0, PAGESIZE,
4245 4270 segkmem_alloc, segkmem_free, kmem_va_arena,
4246 4271 0, VMC_DUMPSAFE | VM_SLEEP);
4247 4272 }
4248 4273
4249 4274 /* Figure out what our maximum cache size is */
4250 4275 maxbuf = kmem_max_cached;
4251 4276 if (maxbuf <= KMEM_MAXBUF) {
4252 4277 maxbuf = 0;
4253 4278 kmem_max_cached = KMEM_MAXBUF;
4254 4279 } else {
4255 4280 size_t size = 0;
4256 4281 size_t max =
4257 4282 sizeof (kmem_big_alloc_sizes) / sizeof (int);
4258 4283 /*
4259 4284 * Round maxbuf up to an existing cache size. If maxbuf
4260 4285 * is larger than the largest cache, we truncate it to
4261 4286 * the largest cache's size.
4262 4287 */
4263 4288 for (i = 0; i < max; i++) {
4264 4289 size = kmem_big_alloc_sizes[i];
4265 4290 if (maxbuf <= size)
4266 4291 break;
4267 4292 }
4268 4293 kmem_max_cached = maxbuf = size;
4269 4294 }
4270 4295
4271 4296 /*
4272 4297 * The big alloc table may not be completely overwritten, so
4273 4298 * we clear out any stale cache pointers from the first pass.
4274 4299 */
4275 4300 bzero(kmem_big_alloc_table, sizeof (kmem_big_alloc_table));
4276 4301 } else {
4277 4302 /*
4278 4303 * During the first pass, the kmem_alloc_* caches
4279 4304 * are treated as metadata.
4280 4305 */
4281 4306 kmem_default_arena = kmem_msb_arena;
4282 4307 maxbuf = KMEM_BIG_MAXBUF_32BIT;
4283 4308 }
4284 4309
4285 4310 /*
4286 4311 * Set up the default caches to back kmem_alloc()
4287 4312 */
4288 4313 kmem_alloc_caches_create(
4289 4314 kmem_alloc_sizes, sizeof (kmem_alloc_sizes) / sizeof (int),
4290 4315 kmem_alloc_table, KMEM_MAXBUF, KMEM_ALIGN_SHIFT);
4291 4316
4292 4317 kmem_alloc_caches_create(
4293 4318 kmem_big_alloc_sizes, sizeof (kmem_big_alloc_sizes) / sizeof (int),
4294 4319 kmem_big_alloc_table, maxbuf, KMEM_BIG_SHIFT);
4295 4320
4296 4321 kmem_big_alloc_table_max = maxbuf >> KMEM_BIG_SHIFT;
4297 4322 }
4298 4323
4299 4324 void
4300 4325 kmem_init(void)
4301 4326 {
4302 4327 kmem_cache_t *cp;
4303 4328 int old_kmem_flags = kmem_flags;
4304 4329 int use_large_pages = 0;
4305 4330 size_t maxverify, minfirewall;
4306 4331
4307 4332 kstat_init();
4308 4333
4309 4334 /*
4310 4335 * Don't do firewalled allocations if the heap is less than 1TB
4311 4336 * (i.e. on a 32-bit kernel).
4312 4337 * The resulting VM_NEXTFIT allocations would create too much
4313 4338 * fragmentation in a small heap.
4314 4339 */
4315 4340 #if defined(_LP64)
4316 4341 maxverify = minfirewall = PAGESIZE / 2;
4317 4342 #else
4318 4343 maxverify = minfirewall = ULONG_MAX;
4319 4344 #endif
4320 4345
4321 4346 /* LINTED */
4322 4347 ASSERT(sizeof (kmem_cpu_cache_t) == KMEM_CPU_CACHE_SIZE);
4323 4348
4324 4349 list_create(&kmem_caches, sizeof (kmem_cache_t),
4325 4350 offsetof(kmem_cache_t, cache_link));
4326 4351
4327 4352 kmem_metadata_arena = vmem_create("kmem_metadata", NULL, 0, PAGESIZE,
4328 4353 vmem_alloc, vmem_free, heap_arena, 8 * PAGESIZE,
4329 4354 VM_SLEEP | VMC_NO_QCACHE);
4330 4355
4331 4356 kmem_msb_arena = vmem_create("kmem_msb", NULL, 0,
4332 4357 PAGESIZE, segkmem_alloc, segkmem_free, kmem_metadata_arena, 0,
4333 4358 VMC_DUMPSAFE | VM_SLEEP);
4334 4359
4335 4360 kmem_cache_arena = vmem_create("kmem_cache", NULL, 0, KMEM_ALIGN,
4336 4361 segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
4337 4362
4338 4363 kmem_hash_arena = vmem_create("kmem_hash", NULL, 0, KMEM_ALIGN,
4339 4364 segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
4340 4365
4341 4366 kmem_log_arena = vmem_create("kmem_log", NULL, 0, KMEM_ALIGN,
4342 4367 segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
4343 4368
4344 4369 kmem_firewall_va_arena = vmem_create("kmem_firewall_va",
4345 4370 NULL, 0, PAGESIZE,
4346 4371 kmem_firewall_va_alloc, kmem_firewall_va_free, heap_arena,
4347 4372 0, VM_SLEEP);
4348 4373
4349 4374 kmem_firewall_arena = vmem_create("kmem_firewall", NULL, 0, PAGESIZE,
4350 4375 segkmem_alloc, segkmem_free, kmem_firewall_va_arena, 0,
4351 4376 VMC_DUMPSAFE | VM_SLEEP);
4352 4377
4353 4378 /* temporary oversize arena for mod_read_system_file */
4354 4379 kmem_oversize_arena = vmem_create("kmem_oversize", NULL, 0, PAGESIZE,
4355 4380 segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
4356 4381
4357 4382 kmem_reap_interval = 15 * hz;
4358 4383
4359 4384 /*
4360 4385 * Read /etc/system. This is a chicken-and-egg problem because
4361 4386 * kmem_flags may be set in /etc/system, but mod_read_system_file()
4362 4387 * needs to use the allocator. The simplest solution is to create
4363 4388 * all the standard kmem caches, read /etc/system, destroy all the
4364 4389 * caches we just created, and then create them all again in light
4365 4390 * of the (possibly) new kmem_flags and other kmem tunables.
4366 4391 */
4367 4392 kmem_cache_init(1, 0);
4368 4393
4369 4394 mod_read_system_file(boothowto & RB_ASKNAME);
4370 4395
4371 4396 while ((cp = list_tail(&kmem_caches)) != NULL)
4372 4397 kmem_cache_destroy(cp);
4373 4398
4374 4399 vmem_destroy(kmem_oversize_arena);
4375 4400
4376 4401 if (old_kmem_flags & KMF_STICKY)
4377 4402 kmem_flags = old_kmem_flags;
4378 4403
4379 4404 if (!(kmem_flags & KMF_AUDIT))
4380 4405 vmem_seg_size = offsetof(vmem_seg_t, vs_thread);
4381 4406
4382 4407 if (kmem_maxverify == 0)
4383 4408 kmem_maxverify = maxverify;
4384 4409
4385 4410 if (kmem_minfirewall == 0)
4386 4411 kmem_minfirewall = minfirewall;
4387 4412
4388 4413 /*
4389 4414 * give segkmem a chance to figure out if we are using large pages
4390 4415 * for the kernel heap
4391 4416 */
4392 4417 use_large_pages = segkmem_lpsetup();
4393 4418
4394 4419 /*
4395 4420 * To protect against corruption, we keep the actual number of callers
4396 4421 * KMF_LITE records separate from the tunable. We arbitrarily clamp
4397 4422 * to 16, since the overhead for small buffers quickly gets out of
4398 4423 * hand.
4399 4424 *
4400 4425 * The real limit would depend on the needs of the largest KMC_NOHASH
4401 4426 * cache.
4402 4427 */
4403 4428 kmem_lite_count = MIN(MAX(0, kmem_lite_pcs), 16);
4404 4429 kmem_lite_pcs = kmem_lite_count;
4405 4430
4406 4431 /*
4407 4432 * Normally, we firewall oversized allocations when possible, but
4408 4433 * if we are using large pages for kernel memory, and we don't have
4409 4434 * any non-LITE debugging flags set, we want to allocate oversized
4410 4435 * buffers from large pages, and so skip the firewalling.
4411 4436 */
4412 4437 if (use_large_pages &&
4413 4438 ((kmem_flags & KMF_LITE) || !(kmem_flags & KMF_DEBUG))) {
4414 4439 kmem_oversize_arena = vmem_xcreate("kmem_oversize", NULL, 0,
4415 4440 PAGESIZE, segkmem_alloc_lp, segkmem_free_lp, heap_arena,
4416 4441 0, VMC_DUMPSAFE | VM_SLEEP);
4417 4442 } else {
4418 4443 kmem_oversize_arena = vmem_create("kmem_oversize",
4419 4444 NULL, 0, PAGESIZE,
4420 4445 segkmem_alloc, segkmem_free, kmem_minfirewall < ULONG_MAX?
4421 4446 kmem_firewall_va_arena : heap_arena, 0, VMC_DUMPSAFE |
4422 4447 VM_SLEEP);
4423 4448 }
4424 4449
4425 4450 kmem_cache_init(2, use_large_pages);
4426 4451
4427 4452 if (kmem_flags & (KMF_AUDIT | KMF_RANDOMIZE)) {
4428 4453 if (kmem_transaction_log_size == 0)
4429 4454 kmem_transaction_log_size = kmem_maxavail() / 50;
4430 4455 kmem_transaction_log = kmem_log_init(kmem_transaction_log_size);
4431 4456 }
4432 4457
4433 4458 if (kmem_flags & (KMF_CONTENTS | KMF_RANDOMIZE)) {
4434 4459 if (kmem_content_log_size == 0)
4435 4460 kmem_content_log_size = kmem_maxavail() / 50;
4436 4461 kmem_content_log = kmem_log_init(kmem_content_log_size);
4437 4462 }
4438 4463
4439 4464 kmem_failure_log = kmem_log_init(kmem_failure_log_size);
4440 4465 kmem_slab_log = kmem_log_init(kmem_slab_log_size);
4441 4466 kmem_zerosized_log = kmem_log_init(kmem_zerosized_log_size);
4442 4467
4443 4468 /*
4444 4469 * Initialize STREAMS message caches so allocb() is available.
4445 4470 * This allows us to initialize the logging framework (cmn_err(9F),
4446 4471 * strlog(9F), etc) so we can start recording messages.
4447 4472 */
4448 4473 streams_msg_init();
4449 4474
4450 4475 /*
4451 4476 * Initialize the ZSD framework in Zones so modules loaded henceforth
4452 4477 * can register their callbacks.
4453 4478 */
4454 4479 zone_zsd_init();
4455 4480
4456 4481 log_init();
4457 4482 taskq_init();
4458 4483
4459 4484 /*
4460 4485 * Warn about invalid or dangerous values of kmem_flags.
4461 4486 * Always warn about unsupported values.
4462 4487 */
4463 4488 if (((kmem_flags & ~(KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE |
4464 4489 KMF_CONTENTS | KMF_LITE)) != 0) ||
4465 4490 ((kmem_flags & KMF_LITE) && kmem_flags != KMF_LITE))
4466 4491 cmn_err(CE_WARN, "kmem_flags set to unsupported value 0x%x.",
4467 4492 kmem_flags);
4468 4493
4469 4494 #ifdef DEBUG
4470 4495 if ((kmem_flags & KMF_DEBUG) == 0)
4471 4496 cmn_err(CE_NOTE, "kmem debugging disabled.");
4472 4497 #else
4473 4498 /*
4474 4499 * For non-debug kernels, the only "normal" flags are 0, KMF_LITE,
4475 4500 * KMF_REDZONE, and KMF_CONTENTS (the last because it is only enabled
4476 4501 * if KMF_AUDIT is set). We should warn the user about the performance
4477 4502 * penalty of KMF_AUDIT or KMF_DEADBEEF if they are set and KMF_LITE
4478 4503 * isn't set (since that disables AUDIT).
4479 4504 */
4480 4505 if (!(kmem_flags & KMF_LITE) &&
4481 4506 (kmem_flags & (KMF_AUDIT | KMF_DEADBEEF)) != 0)
4482 4507 cmn_err(CE_WARN, "High-overhead kmem debugging features "
4483 4508 "enabled (kmem_flags = 0x%x). Performance degradation "
4484 4509 "and large memory overhead possible.", kmem_flags);
4485 4510 #endif /* not DEBUG */
4486 4511
4487 4512 kmem_cache_applyall(kmem_cache_magazine_enable, NULL, TQ_SLEEP);
4488 4513
4489 4514 kmem_ready = 1;
4490 4515
4491 4516 /*
4492 4517 * Initialize the platform-specific aligned/DMA memory allocator.
4493 4518 */
4494 4519 ka_init();
4495 4520
4496 4521 /*
4497 4522 * Initialize 32-bit ID cache.
4498 4523 */
4499 4524 id32_init();
4500 4525
4501 4526 /*
4502 4527 * Initialize the networking stack so modules loaded can
4503 4528 * register their callbacks.
4504 4529 */
4505 4530 netstack_init();
4506 4531 }
4507 4532
4508 4533 static void
4509 4534 kmem_move_init(void)
4510 4535 {
4511 4536 kmem_defrag_cache = kmem_cache_create("kmem_defrag_cache",
4512 4537 sizeof (kmem_defrag_t), 0, NULL, NULL, NULL, NULL,
4513 4538 kmem_msb_arena, KMC_NOHASH);
4514 4539 kmem_move_cache = kmem_cache_create("kmem_move_cache",
4515 4540 sizeof (kmem_move_t), 0, NULL, NULL, NULL, NULL,
4516 4541 kmem_msb_arena, KMC_NOHASH);
4517 4542
4518 4543 /*
4519 4544 * kmem guarantees that move callbacks are sequential and that even
4520 4545 * across multiple caches no two moves ever execute simultaneously.
4521 4546 * Move callbacks are processed on a separate taskq so that client code
4522 4547 * does not interfere with internal maintenance tasks.
4523 4548 */
4524 4549 kmem_move_taskq = taskq_create_instance("kmem_move_taskq", 0, 1,
4525 4550 minclsyspri, 100, INT_MAX, TASKQ_PREPOPULATE);
4526 4551 }
4527 4552
4528 4553 void
4529 4554 kmem_thread_init(void)
4530 4555 {
4531 4556 kmem_move_init();
4532 4557
4533 4558 /*
4534 4559 * This taskq is used for various kmem maintenance functions, including
4535 4560 * kmem_reap(). When maintenance is required on every cache,
4536 4561 * kmem_cache_applyall() dispatches one task per cache onto this queue.
4537 4562 *
4538 4563 * In the case of kmem_reap(), the system may be under increasingly
4539 4564 * dire memory pressure and may not be able to allocate a new task
4540 4565 * entry. The count of entries to prepopulate (below) should cover at
4541 4566 * least as many caches as we generally expect to exist on the system
4542 4567 * so that they may all be scheduled for reaping under those
4543 4568 * conditions.
4544 4569 */
4545 4570 kmem_taskq = taskq_create_instance("kmem_taskq", 0, 1, minclsyspri,
4546 4571 600, INT_MAX, TASKQ_PREPOPULATE);
4547 4572 }
4548 4573
4549 4574 void
4550 4575 kmem_mp_init(void)
4551 4576 {
4552 4577 mutex_enter(&cpu_lock);
4553 4578 register_cpu_setup_func(kmem_cpu_setup, NULL);
4554 4579 mutex_exit(&cpu_lock);
4555 4580
4556 4581 kmem_update_timeout(NULL);
4557 4582
4558 4583 taskq_mp_init();
4559 4584 }
4560 4585
4561 4586 /*
4562 4587 * Return the slab of the allocated buffer, or NULL if the buffer is not
4563 4588 * allocated. This function may be called with a known slab address to determine
4564 4589 * whether or not the buffer is allocated, or with a NULL slab address to obtain
4565 4590 * an allocated buffer's slab.
4566 4591 */
4567 4592 static kmem_slab_t *
4568 4593 kmem_slab_allocated(kmem_cache_t *cp, kmem_slab_t *sp, void *buf)
4569 4594 {
4570 4595 kmem_bufctl_t *bcp, *bufbcp;
4571 4596
4572 4597 ASSERT(MUTEX_HELD(&cp->cache_lock));
4573 4598 ASSERT(sp == NULL || KMEM_SLAB_MEMBER(sp, buf));
4574 4599
4575 4600 if (cp->cache_flags & KMF_HASH) {
4576 4601 for (bcp = *KMEM_HASH(cp, buf);
4577 4602 (bcp != NULL) && (bcp->bc_addr != buf);
4578 4603 bcp = bcp->bc_next) {
4579 4604 continue;
4580 4605 }
4581 4606 ASSERT(sp != NULL && bcp != NULL ? sp == bcp->bc_slab : 1);
4582 4607 return (bcp == NULL ? NULL : bcp->bc_slab);
4583 4608 }
4584 4609
4585 4610 if (sp == NULL) {
4586 4611 sp = KMEM_SLAB(cp, buf);
4587 4612 }
4588 4613 bufbcp = KMEM_BUFCTL(cp, buf);
4589 4614 for (bcp = sp->slab_head;
4590 4615 (bcp != NULL) && (bcp != bufbcp);
4591 4616 bcp = bcp->bc_next) {
4592 4617 continue;
4593 4618 }
4594 4619 return (bcp == NULL ? sp : NULL);
4595 4620 }
4596 4621
4597 4622 static boolean_t
4598 4623 kmem_slab_is_reclaimable(kmem_cache_t *cp, kmem_slab_t *sp, int flags)
4599 4624 {
4600 4625 long refcnt = sp->slab_refcnt;
4601 4626
4602 4627 ASSERT(cp->cache_defrag != NULL);
4603 4628
4604 4629 /*
4605 4630 * For code coverage we want to be able to move an object within the
4606 4631 * same slab (the only partial slab) even if allocating the destination
4607 4632 * buffer resulted in a completely allocated slab.
4608 4633 */
4609 4634 if (flags & KMM_DEBUG) {
4610 4635 return ((flags & KMM_DESPERATE) ||
4611 4636 ((sp->slab_flags & KMEM_SLAB_NOMOVE) == 0));
4612 4637 }
4613 4638
4614 4639 /* If we're desperate, we don't care if the client said NO. */
4615 4640 if (flags & KMM_DESPERATE) {
4616 4641 return (refcnt < sp->slab_chunks); /* any partial */
4617 4642 }
4618 4643
4619 4644 if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
4620 4645 return (B_FALSE);
4621 4646 }
4622 4647
4623 4648 if ((refcnt == 1) || kmem_move_any_partial) {
4624 4649 return (refcnt < sp->slab_chunks);
4625 4650 }
4626 4651
4627 4652 /*
4628 4653 * The reclaim threshold is adjusted at each kmem_cache_scan() so that
4629 4654 * slabs with a progressively higher percentage of used buffers can be
4630 4655 * reclaimed until the cache as a whole is no longer fragmented.
4631 4656 *
4632 4657 * sp->slab_refcnt kmd_reclaim_numer
4633 4658 * --------------- < ------------------
4634 4659 * sp->slab_chunks KMEM_VOID_FRACTION
4635 4660 */
4636 4661 return ((refcnt * KMEM_VOID_FRACTION) <
4637 4662 (sp->slab_chunks * cp->cache_defrag->kmd_reclaim_numer));
4638 4663 }
4639 4664
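To put numbers on the final comparison (taking KMEM_VOID_FRACTION as 8): right after kmem_reset_reclaim_threshold() the numerator is 1, so a slab with slab_chunks == 64 is reclaimable only while refcnt * 8 < 64 * 1, i.e. while fewer than 8 of its 64 buffers are allocated. Each kmem_adjust_reclaim_threshold(kmd, 1) call raises that bound by another eighth, to at most 7/8.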
4640 4665 /*
4641 4666 * May be called from the kmem_move_taskq, from kmem_cache_move_notify_task(),
4642 4667 * or when the buffer is freed.
4643 4668 */
4644 4669 static void
4645 4670 kmem_slab_move_yes(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4646 4671 {
4647 4672 ASSERT(MUTEX_HELD(&cp->cache_lock));
4648 4673 ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));
4649 4674
4650 4675 if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4651 4676 return;
4652 4677 }
4653 4678
4654 4679 if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
4655 4680 if (KMEM_SLAB_OFFSET(sp, from_buf) == sp->slab_stuck_offset) {
4656 4681 avl_remove(&cp->cache_partial_slabs, sp);
4657 4682 sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
4658 4683 sp->slab_stuck_offset = (uint32_t)-1;
4659 4684 avl_add(&cp->cache_partial_slabs, sp);
4660 4685 }
4661 4686 } else {
4662 4687 sp->slab_later_count = 0;
4663 4688 sp->slab_stuck_offset = (uint32_t)-1;
4664 4689 }
4665 4690 }
4666 4691
4667 4692 static void
4668 4693 kmem_slab_move_no(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4669 4694 {
4670 4695 ASSERT(taskq_member(kmem_move_taskq, curthread));
4671 4696 ASSERT(MUTEX_HELD(&cp->cache_lock));
4672 4697 ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));
4673 4698
4674 4699 if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4675 4700 return;
4676 4701 }
4677 4702
4678 4703 avl_remove(&cp->cache_partial_slabs, sp);
4679 4704 sp->slab_later_count = 0;
4680 4705 sp->slab_flags |= KMEM_SLAB_NOMOVE;
4681 4706 sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp, from_buf);
4682 4707 avl_add(&cp->cache_partial_slabs, sp);
4683 4708 }
4684 4709
4685 4710 static void kmem_move_end(kmem_cache_t *, kmem_move_t *);
4686 4711
4687 4712 /*
4688 4713 * The move callback takes two buffer addresses, the buffer to be moved, and a
4689 4714 * newly allocated and constructed buffer selected by kmem as the destination.
4690 4715 * It also takes the size of the buffer and an optional user argument specified
4691 4716 * at cache creation time. kmem guarantees that the buffer to be moved has not
4692 4717 * been unmapped by the virtual memory subsystem. Beyond that, it cannot
4693 4718 * guarantee the present whereabouts of the buffer to be moved, so it is up to
4694 4719 * the client to safely determine whether or not it is still using the buffer.
4695 4720 * The client must not free either of the buffers passed to the move callback,
4696 4721 * since kmem wants to free them directly to the slab layer. The client response
4697 4722 * tells kmem which of the two buffers to free:
4698 4723 *
4699 4724 * YES kmem frees the old buffer (the move was successful)
4700 4725 * NO kmem frees the new buffer, marks the slab of the old buffer
4701 4726 * non-reclaimable to avoid bothering the client again
4702 4727 * LATER kmem frees the new buffer, increments slab_later_count
4703 4728 * DONT_KNOW kmem frees the new buffer
4704 4729 * DONT_NEED kmem frees both the old buffer and the new buffer
4705 4730 *
4706 4731 * The pending callback argument now being processed contains both of the
4707 4732 * buffers (old and new) passed to the move callback function, the slab of the
4708 4733 * old buffer, and flags related to the move request, such as whether or not the
4709 4734 * system was desperate for memory.
4710 4735 *
4711 4736 * Slabs are not freed while there is a pending callback, but instead are kept
4712 4737 * on a deadlist, which is drained after the last callback completes. This means
4713 4738 * that slabs are safe to access until kmem_move_end(), no matter how many of
4714 4739 * their buffers have been freed. Once slab_refcnt reaches zero, it stays at
4715 4740 * zero for as long as the slab remains on the deadlist and until the slab is
4716 4741 * freed.
4717 4742 */
4718 4743 static void
4719 4744 kmem_move_buffer(kmem_move_t *callback)
4720 4745 {
4721 4746 kmem_cbrc_t response;
4722 4747 kmem_slab_t *sp = callback->kmm_from_slab;
4723 4748 kmem_cache_t *cp = sp->slab_cache;
4724 4749 boolean_t free_on_slab;
4725 4750
4726 4751 ASSERT(taskq_member(kmem_move_taskq, curthread));
4727 4752 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4728 4753 ASSERT(KMEM_SLAB_MEMBER(sp, callback->kmm_from_buf));
4729 4754
4730 4755 /*
4731 4756 * The number of allocated buffers on the slab may have changed since we
4732 4757 * last checked the slab's reclaimability (when the pending move was
4733 4758 * enqueued), or the client may have responded NO when asked to move
4734 4759 * another buffer on the same slab.
4735 4760 */
4736 4761 if (!kmem_slab_is_reclaimable(cp, sp, callback->kmm_flags)) {
4737 4762 kmem_slab_free(cp, callback->kmm_to_buf);
4738 4763 kmem_move_end(cp, callback);
4739 4764 return;
4740 4765 }
4741 4766
4742 4767 /*
4743 4768 * Checking the slab layer is easy, so we might as well do that here
4744 4769 * in case we can avoid bothering the client.
4745 4770 */
4746 4771 mutex_enter(&cp->cache_lock);
4747 4772 free_on_slab = (kmem_slab_allocated(cp, sp,
4748 4773 callback->kmm_from_buf) == NULL);
4749 4774 mutex_exit(&cp->cache_lock);
4750 4775
4751 4776 if (free_on_slab) {
4752 4777 kmem_slab_free(cp, callback->kmm_to_buf);
4753 4778 kmem_move_end(cp, callback);
4754 4779 return;
4755 4780 }
4756 4781
4757 4782 if (cp->cache_flags & KMF_BUFTAG) {
4758 4783 /*
4759 4784 * Make kmem_cache_alloc_debug() apply the constructor for us.
4760 4785 */
4761 4786 if (kmem_cache_alloc_debug(cp, callback->kmm_to_buf,
4762 4787 KM_NOSLEEP, 1, caller()) != 0) {
4763 4788 kmem_move_end(cp, callback);
4764 4789 return;
4765 4790 }
4766 4791 } else if (cp->cache_constructor != NULL &&
4767 4792 cp->cache_constructor(callback->kmm_to_buf, cp->cache_private,
4768 4793 KM_NOSLEEP) != 0) {
4769 4794 atomic_inc_64(&cp->cache_alloc_fail);
4770 4795 kmem_slab_free(cp, callback->kmm_to_buf);
4771 4796 kmem_move_end(cp, callback);
4772 4797 return;
4773 4798 }
4774 4799
4775 4800 cp->cache_defrag->kmd_callbacks++;
4776 4801 cp->cache_defrag->kmd_thread = curthread;
4777 4802 cp->cache_defrag->kmd_from_buf = callback->kmm_from_buf;
4778 4803 cp->cache_defrag->kmd_to_buf = callback->kmm_to_buf;
4779 4804 DTRACE_PROBE2(kmem__move__start, kmem_cache_t *, cp, kmem_move_t *,
4780 4805 callback);
4781 4806
4782 4807 response = cp->cache_move(callback->kmm_from_buf,
4783 4808 callback->kmm_to_buf, cp->cache_bufsize, cp->cache_private);
4784 4809
4785 4810 DTRACE_PROBE3(kmem__move__end, kmem_cache_t *, cp, kmem_move_t *,
4786 4811 callback, kmem_cbrc_t, response);
4787 4812 cp->cache_defrag->kmd_thread = NULL;
4788 4813 cp->cache_defrag->kmd_from_buf = NULL;
4789 4814 cp->cache_defrag->kmd_to_buf = NULL;
4790 4815
4791 4816 if (response == KMEM_CBRC_YES) {
4792 4817 cp->cache_defrag->kmd_yes++;
4793 4818 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4794 4819 /* slab safe to access until kmem_move_end() */
4795 4820 if (sp->slab_refcnt == 0)
4796 4821 cp->cache_defrag->kmd_slabs_freed++;
4797 4822 mutex_enter(&cp->cache_lock);
4798 4823 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4799 4824 mutex_exit(&cp->cache_lock);
4800 4825 kmem_move_end(cp, callback);
4801 4826 return;
4802 4827 }
4803 4828
4804 4829 switch (response) {
4805 4830 case KMEM_CBRC_NO:
4806 4831 cp->cache_defrag->kmd_no++;
4807 4832 mutex_enter(&cp->cache_lock);
4808 4833 kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4809 4834 mutex_exit(&cp->cache_lock);
4810 4835 break;
4811 4836 case KMEM_CBRC_LATER:
4812 4837 cp->cache_defrag->kmd_later++;
4813 4838 mutex_enter(&cp->cache_lock);
4814 4839 if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4815 4840 mutex_exit(&cp->cache_lock);
4816 4841 break;
4817 4842 }
4818 4843
4819 4844 if (++sp->slab_later_count >= KMEM_DISBELIEF) {
4820 4845 kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4821 4846 } else if (!(sp->slab_flags & KMEM_SLAB_NOMOVE)) {
4822 4847 sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp,
4823 4848 callback->kmm_from_buf);
4824 4849 }
4825 4850 mutex_exit(&cp->cache_lock);
4826 4851 break;
4827 4852 case KMEM_CBRC_DONT_NEED:
4828 4853 cp->cache_defrag->kmd_dont_need++;
4829 4854 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4830 4855 if (sp->slab_refcnt == 0)
4831 4856 cp->cache_defrag->kmd_slabs_freed++;
4832 4857 mutex_enter(&cp->cache_lock);
4833 4858 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4834 4859 mutex_exit(&cp->cache_lock);
4835 4860 break;
4836 4861 case KMEM_CBRC_DONT_KNOW:
4837 4862 /*
4838 4863 * If we don't know if we can move this buffer or not, we'll
4839 4864 * just assume that we can't: if the buffer is in fact free,
4840 4865 * then it is sitting in one of the per-CPU magazines or in
4841 4866 * a full magazine in the depot layer. Either way, because
4842 4867 * defrag is induced in the same logic that reaps a cache,
4843 4868 * it's likely that full magazines will be returned to the
4844 4869 * system soon (thereby accomplishing what we're trying to
4845 4870 * accomplish here: return those magazines to their slabs).
4846 4871 * Given this, any work that we might do now to locate a buffer
4847 4872 * in a magazine is wasted (and expensive!) work; we bump
4848 4873 * a counter in this case and otherwise assume that we can't
4849 4874 * move it.
4850 4875 */
4851 4876 cp->cache_defrag->kmd_dont_know++;
4852 4877 break;
4853 4878 default:
4854 4879 panic("'%s' (%p) unexpected move callback response %d\n",
4855 4880 cp->cache_name, (void *)cp, response);
4856 4881 }
4857 4882
4858 4883 kmem_slab_free_constructed(cp, callback->kmm_to_buf, B_FALSE);
4859 4884 kmem_move_end(cp, callback);
4860 4885 }
4861 4886
4862 4887 /* Return B_FALSE if there is insufficient memory for the move request. */
4863 4888 static boolean_t
4864 4889 kmem_move_begin(kmem_cache_t *cp, kmem_slab_t *sp, void *buf, int flags)
4865 4890 {
4866 4891 void *to_buf;
4867 4892 avl_index_t index;
4868 4893 kmem_move_t *callback, *pending;
4869 4894 ulong_t n;
4870 4895
4871 4896 ASSERT(taskq_member(kmem_taskq, curthread));
4872 4897 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4873 4898 ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
4874 4899
4875 4900 callback = kmem_cache_alloc(kmem_move_cache, KM_NOSLEEP);
4876 4901
4877 4902 if (callback == NULL)
4878 4903 return (B_FALSE);
4879 4904
4880 4905 callback->kmm_from_slab = sp;
4881 4906 callback->kmm_from_buf = buf;
4882 4907 callback->kmm_flags = flags;
4883 4908
4884 4909 mutex_enter(&cp->cache_lock);
4885 4910
4886 4911 n = avl_numnodes(&cp->cache_partial_slabs);
4887 4912 if ((n == 0) || ((n == 1) && !(flags & KMM_DEBUG))) {
4888 4913 mutex_exit(&cp->cache_lock);
4889 4914 kmem_cache_free(kmem_move_cache, callback);
4890 4915 return (B_TRUE); /* there is no need for the move request */
4891 4916 }
4892 4917
4893 4918 pending = avl_find(&cp->cache_defrag->kmd_moves_pending, buf, &index);
4894 4919 if (pending != NULL) {
4895 4920 /*
4896 4921 * If the move is already pending and we're desperate now,
4897 4922 * update the move flags.
4898 4923 */
4899 4924 if (flags & KMM_DESPERATE) {
4900 4925 pending->kmm_flags |= KMM_DESPERATE;
4901 4926 }
4902 4927 mutex_exit(&cp->cache_lock);
4903 4928 kmem_cache_free(kmem_move_cache, callback);
4904 4929 return (B_TRUE);
4905 4930 }
4906 4931
4907 4932 to_buf = kmem_slab_alloc_impl(cp, avl_first(&cp->cache_partial_slabs),
4908 4933 B_FALSE);
4909 4934 callback->kmm_to_buf = to_buf;
4910 4935 avl_insert(&cp->cache_defrag->kmd_moves_pending, callback, index);
4911 4936
4912 4937 mutex_exit(&cp->cache_lock);
4913 4938
4914 4939 if (taskq_dispatch(kmem_move_taskq, (task_func_t *)kmem_move_buffer,
4915 4940 callback, TQ_NOSLEEP) == TASKQID_INVALID) {
4916 4941 mutex_enter(&cp->cache_lock);
4917 4942 avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
4918 4943 mutex_exit(&cp->cache_lock);
4919 4944 kmem_slab_free(cp, to_buf);
4920 4945 kmem_cache_free(kmem_move_cache, callback);
4921 4946 return (B_FALSE);
4922 4947 }
4923 4948
4924 4949 return (B_TRUE);
4925 4950 }
4926 4951
4927 4952 static void
4928 4953 kmem_move_end(kmem_cache_t *cp, kmem_move_t *callback)
4929 4954 {
4930 4955 avl_index_t index;
4931 4956
4932 4957 ASSERT(cp->cache_defrag != NULL);
4933 4958 ASSERT(taskq_member(kmem_move_taskq, curthread));
4934 4959 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4935 4960
4936 4961 mutex_enter(&cp->cache_lock);
4937 4962 VERIFY(avl_find(&cp->cache_defrag->kmd_moves_pending,
4938 4963 callback->kmm_from_buf, &index) != NULL);
4939 4964 avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
4940 4965 if (avl_is_empty(&cp->cache_defrag->kmd_moves_pending)) {
4941 4966 list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
4942 4967 kmem_slab_t *sp;
4943 4968
4944 4969 /*
4945 4970 * The last pending move completed. Release all slabs from the
4946 4971 * front of the dead list except for any slab at the tail that
4947 4972 * needs to be released from the context of kmem_move_buffers().
4948 4973 * kmem deferred unmapping the buffers on these slabs in order
4949 4974 * to guarantee that buffers passed to the move callback have
4950 4975 * been touched only by kmem or by the client itself.
4951 4976 */
4952 4977 while ((sp = list_remove_head(deadlist)) != NULL) {
4953 4978 if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
4954 4979 list_insert_tail(deadlist, sp);
4955 4980 break;
4956 4981 }
4957 4982 cp->cache_defrag->kmd_deadcount--;
4958 4983 cp->cache_slab_destroy++;
4959 4984 mutex_exit(&cp->cache_lock);
4960 4985 kmem_slab_destroy(cp, sp);
4961 4986 mutex_enter(&cp->cache_lock);
4962 4987 }
4963 4988 }
4964 4989 mutex_exit(&cp->cache_lock);
4965 4990 kmem_cache_free(kmem_move_cache, callback);
4966 4991 }
4967 4992
4968 4993 /*
4969 4994 * Move buffers from least used slabs first by scanning backwards from the end
4970 4995 * of the partial slab list. Scan at most max_scan candidate slabs and move
4971 4996 * buffers from at most max_slabs slabs (0 for all partial slabs in both cases).
4972 4997 * If desperate to reclaim memory, move buffers from any partial slab, otherwise
4973 4998 * skip slabs with a ratio of allocated buffers at or above the current
4974 4999 * threshold. Return the number of unskipped slabs (at most max_slabs, -1 if the
4975 5000 * scan is aborted) so that the caller can adjust the reclaimability threshold
4976 5001 * depending on how many reclaimable slabs it finds.
4977 5002 *
4978 5003 * kmem_move_buffers() drops and reacquires cache_lock every time it issues a
4979 5004 * move request, since it is not valid for kmem_move_begin() to call
4980 5005 * kmem_cache_alloc() or taskq_dispatch() with cache_lock held.
4981 5006 */
4982 5007 static int
4983 5008 kmem_move_buffers(kmem_cache_t *cp, size_t max_scan, size_t max_slabs,
4984 5009 int flags)
4985 5010 {
4986 5011 kmem_slab_t *sp;
4987 5012 void *buf;
4988 5013 int i, j; /* slab index, buffer index */
4989 5014 int s; /* reclaimable slabs */
4990 5015 int b; /* allocated (movable) buffers on reclaimable slab */
4991 5016 boolean_t success;
4992 5017 int refcnt;
4993 5018 int nomove;
4994 5019
4995 5020 ASSERT(taskq_member(kmem_taskq, curthread));
4996 5021 ASSERT(MUTEX_HELD(&cp->cache_lock));
4997 5022 ASSERT(kmem_move_cache != NULL);
4998 5023 ASSERT(cp->cache_move != NULL && cp->cache_defrag != NULL);
4999 5024 ASSERT((flags & KMM_DEBUG) ? !avl_is_empty(&cp->cache_partial_slabs) :
5000 5025 avl_numnodes(&cp->cache_partial_slabs) > 1);
5001 5026
5002 5027 if (kmem_move_blocked) {
5003 5028 return (0);
5004 5029 }
5005 5030
5006 5031 if (kmem_move_fulltilt) {
5007 5032 flags |= KMM_DESPERATE;
5008 5033 }
5009 5034
5010 5035 if (max_scan == 0 || (flags & KMM_DESPERATE)) {
5011 5036 /*
5012 5037 * Scan as many slabs as needed to find the desired number of
5013 5038 * candidate slabs.
5014 5039 */
5015 5040 max_scan = (size_t)-1;
5016 5041 }
5017 5042
5018 5043 if (max_slabs == 0 || (flags & KMM_DESPERATE)) {
5019 5044 /* Find as many candidate slabs as possible. */
5020 5045 max_slabs = (size_t)-1;
5021 5046 }
5022 5047
5023 5048 sp = avl_last(&cp->cache_partial_slabs);
5024 5049 ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
5025 5050 for (i = 0, s = 0; (i < max_scan) && (s < max_slabs) && (sp != NULL) &&
5026 5051 ((sp != avl_first(&cp->cache_partial_slabs)) ||
5027 5052 (flags & KMM_DEBUG));
5028 5053 sp = AVL_PREV(&cp->cache_partial_slabs, sp), i++) {
5029 5054
5030 5055 if (!kmem_slab_is_reclaimable(cp, sp, flags)) {
5031 5056 continue;
5032 5057 }
5033 5058 s++;
5034 5059
5035 5060 /* Look for allocated buffers to move. */
5036 5061 for (j = 0, b = 0, buf = sp->slab_base;
5037 5062 (j < sp->slab_chunks) && (b < sp->slab_refcnt);
5038 5063 buf = (((char *)buf) + cp->cache_chunksize), j++) {
5039 5064
5040 5065 if (kmem_slab_allocated(cp, sp, buf) == NULL) {
5041 5066 continue;
5042 5067 }
5043 5068
5044 5069 b++;
5045 5070
5046 5071 /*
5047 5072 * Prevent the slab from being destroyed while we drop
5048 5073 * cache_lock and while the pending move is not yet
5049 5074 * registered. Flag the pending move while
5050 5075 * kmd_moves_pending may still be empty, since we can't
5051 5076 * yet rely on a non-zero pending move count to prevent
5052 5077 * the slab from being destroyed.
5053 5078 */
5054 5079 ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
5055 5080 sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
5056 5081 /*
5057 5082 * Recheck refcnt and nomove after reacquiring the lock,
5058 5083 * since these control the order of partial slabs, and
5059 5084 * we want to know if we can pick up the scan where we
5060 5085 * left off.
5061 5086 */
5062 5087 refcnt = sp->slab_refcnt;
5063 5088 nomove = (sp->slab_flags & KMEM_SLAB_NOMOVE);
5064 5089 mutex_exit(&cp->cache_lock);
5065 5090
5066 5091 success = kmem_move_begin(cp, sp, buf, flags);
5067 5092
5068 5093 /*
5069 5094 * Now, before the lock is reacquired, kmem could
5070 5095 * process all pending move requests and purge the
5071 5096 * deadlist, so that upon reacquiring the lock, sp has
5072 5097 * been remapped. Or, the client may free all the
5073 5098 * objects on the slab while the pending moves are still
5074 5099 * on the taskq. Therefore, the KMEM_SLAB_MOVE_PENDING
5075 5100 * flag causes the slab to be put at the end of the
5076 5101 * deadlist and prevents it from being destroyed, since
5077 5102 * we plan to destroy it here after reacquiring the
5078 5103 * lock.
5079 5104 */
5080 5105 mutex_enter(&cp->cache_lock);
5081 5106 ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
5082 5107 sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;
5083 5108
5084 5109 if (sp->slab_refcnt == 0) {
5085 5110 list_t *deadlist =
5086 5111 &cp->cache_defrag->kmd_deadlist;
5087 5112 list_remove(deadlist, sp);
5088 5113
5089 5114 if (!avl_is_empty(
5090 5115 &cp->cache_defrag->kmd_moves_pending)) {
5091 5116 /*
5092 5117 * A pending move makes it unsafe to
5093 5118 * destroy the slab, because even though
5094 5119 * the move is no longer needed, the
5095 5120 * context where that is determined
5096 5121 * requires the slab to exist.
5097 5122 * Fortunately, a pending move also
5098 5123 * means we don't need to destroy the
5099 5124 * slab here, since it will get
5100 5125 * destroyed along with any other slabs
5101 5126 * on the deadlist after the last
5102 5127 * pending move completes.
5103 5128 */
5104 5129 list_insert_head(deadlist, sp);
5105 5130 return (-1);
5106 5131 }
5107 5132
5108 5133 /*
5109 5134 * Destroy the slab now if it was completely
5110 5135 * freed while we dropped cache_lock and there
5111 5136 * are no pending moves. Since slab_refcnt
5112 5137 * cannot change once it reaches zero, no new
5113 5138 * pending moves from that slab are possible.
5114 5139 */
5115 5140 cp->cache_defrag->kmd_deadcount--;
5116 5141 cp->cache_slab_destroy++;
5117 5142 mutex_exit(&cp->cache_lock);
5118 5143 kmem_slab_destroy(cp, sp);
5119 5144 mutex_enter(&cp->cache_lock);
5120 5145 /*
5121 5146 * Since we can't pick up the scan where we left
5122 5147 * off, abort the scan and say nothing about the
5123 5148 * number of reclaimable slabs.
5124 5149 */
5125 5150 return (-1);
5126 5151 }
5127 5152
5128 5153 if (!success) {
5129 5154 /*
5130 5155 * Abort the scan if there is not enough memory
5131 5156 * for the request and say nothing about the
5132 5157 * number of reclaimable slabs.
5133 5158 */
5134 5159 return (-1);
5135 5160 }
5136 5161
5137 5162 /*
5138 5163 * The slab's position changed while the lock was
5139 5164 * dropped, so we don't know where we are in the
5140 5165 * sequence any more.
5141 5166 */
5142 5167 if (sp->slab_refcnt != refcnt) {
5143 5168 /*
5144 5169 * If this is a KMM_DEBUG move, the slab_refcnt
5145 5170 * may have changed because we allocated a
5146 5171 * destination buffer on the same slab. In that
5147 5172 * case, we're not interested in counting it.
5148 5173 */
5149 5174 return (-1);
5150 5175 }
5151 5176 if ((sp->slab_flags & KMEM_SLAB_NOMOVE) != nomove)
5152 5177 return (-1);
5153 5178
5154 5179 /*
5155 5180 * Generating a move request allocates a destination
5156 5181 * buffer from the slab layer, bumping the first partial
5157 5182 * slab if it is completely allocated. If the current
5158 5183 * slab becomes the first partial slab as a result, we
5159 5184 * can't continue to scan backwards.
5160 5185 *
5161 5186 * If this is a KMM_DEBUG move and we allocated the
5162 5187 * destination buffer from the last partial slab, then
5163 5188 * the buffer we're moving is on the same slab and our
5164 5189 * slab_refcnt has changed, causing us to return before
5165 5190 * reaching here if there are no partial slabs left.
5166 5191 */
5167 5192 ASSERT(!avl_is_empty(&cp->cache_partial_slabs));
5168 5193 if (sp == avl_first(&cp->cache_partial_slabs)) {
5169 5194 /*
5170 5195 * We're not interested in a second KMM_DEBUG
5171 5196 * move.
5172 5197 */
5173 5198 goto end_scan;
5174 5199 }
5175 5200 }
5176 5201 }
5177 5202 end_scan:
5178 5203
5179 5204 return (s);
5180 5205 }
5181 5206
5182 5207 typedef struct kmem_move_notify_args {
5183 5208 kmem_cache_t *kmna_cache;
5184 5209 void *kmna_buf;
5185 5210 } kmem_move_notify_args_t;
5186 5211
5187 5212 static void
5188 5213 kmem_cache_move_notify_task(void *arg)
5189 5214 {
5190 5215 kmem_move_notify_args_t *args = arg;
5191 5216 kmem_cache_t *cp = args->kmna_cache;
5192 5217 void *buf = args->kmna_buf;
5193 5218 kmem_slab_t *sp;
5194 5219
5195 5220 ASSERT(taskq_member(kmem_taskq, curthread));
5196 5221 ASSERT(list_link_active(&cp->cache_link));
5197 5222
5198 5223 kmem_free(args, sizeof (kmem_move_notify_args_t));
5199 5224 mutex_enter(&cp->cache_lock);
5200 5225 sp = kmem_slab_allocated(cp, NULL, buf);
5201 5226
5202 5227 /* Ignore the notification if the buffer is no longer allocated. */
5203 5228 if (sp == NULL) {
5204 5229 mutex_exit(&cp->cache_lock);
5205 5230 return;
5206 5231 }
5207 5232
5208 5233 /* Ignore the notification if there's no reason to move the buffer. */
5209 5234 if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
5210 5235 /*
5211 5236 * So far the notification is not ignored. Ignore the
5212 5237 * notification if the slab is not marked by an earlier refusal
5213 5238 * to move a buffer.
5214 5239 */
5215 5240 if (!(sp->slab_flags & KMEM_SLAB_NOMOVE) &&
5216 5241 (sp->slab_later_count == 0)) {
5217 5242 mutex_exit(&cp->cache_lock);
5218 5243 return;
5219 5244 }
5220 5245
5221 5246 kmem_slab_move_yes(cp, sp, buf);
5222 5247 ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
5223 5248 sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
5224 5249 mutex_exit(&cp->cache_lock);
5225 5250 /* see kmem_move_buffers() about dropping the lock */
5226 5251 (void) kmem_move_begin(cp, sp, buf, KMM_NOTIFY);
5227 5252 mutex_enter(&cp->cache_lock);
5228 5253 ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
5229 5254 sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;
5230 5255 if (sp->slab_refcnt == 0) {
5231 5256 list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
5232 5257 list_remove(deadlist, sp);
5233 5258
5234 5259 if (!avl_is_empty(
5235 5260 &cp->cache_defrag->kmd_moves_pending)) {
5236 5261 list_insert_head(deadlist, sp);
5237 5262 mutex_exit(&cp->cache_lock);
5238 5263 return;
5239 5264 }
5240 5265
5241 5266 cp->cache_defrag->kmd_deadcount--;
5242 5267 cp->cache_slab_destroy++;
5243 5268 mutex_exit(&cp->cache_lock);
5244 5269 kmem_slab_destroy(cp, sp);
5245 5270 return;
5246 5271 }
5247 5272 } else {
5248 5273 kmem_slab_move_yes(cp, sp, buf);
5249 5274 }
5250 5275 mutex_exit(&cp->cache_lock);
5251 5276 }
5252 5277
5253 5278 void
5254 5279 kmem_cache_move_notify(kmem_cache_t *cp, void *buf)
5255 5280 {
5256 5281 kmem_move_notify_args_t *args;
5257 5282
5258 5283 args = kmem_alloc(sizeof (kmem_move_notify_args_t), KM_NOSLEEP);
5259 5284 if (args != NULL) {
5260 5285 args->kmna_cache = cp;
5261 5286 args->kmna_buf = buf;
5262 5287 if (taskq_dispatch(kmem_taskq,
5263 5288 (task_func_t *)kmem_cache_move_notify_task, args,
5264 5289 TQ_NOSLEEP) == TASKQID_INVALID)
5265 5290 kmem_free(args, sizeof (kmem_move_notify_args_t));
5266 5291 }
5267 5292 }
5268 5293
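Continuing the hypothetical foo sketch from kmem_cache_set_move() above (names are illustrative only, and foo_t is assumed to carry a uint32_t foo_holds hold count): a client that previously had to answer NO or LATER can call kmem_cache_move_notify() as soon as the obstacle disappears, for example when the last hold on the object is dropped.

	void
	foo_rele(foo_t *fp)
	{
		if (atomic_dec_32_nv(&fp->foo_holds) == 0)
			kmem_cache_move_notify(foo_cache, fp);
	}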
5269 5294 static void
5270 5295 kmem_cache_defrag(kmem_cache_t *cp)
5271 5296 {
5272 5297 size_t n;
5273 5298
5274 5299 ASSERT(cp->cache_defrag != NULL);
5275 5300
5276 5301 mutex_enter(&cp->cache_lock);
5277 5302 n = avl_numnodes(&cp->cache_partial_slabs);
5278 5303 if (n > 1) {
5279 5304 /* kmem_move_buffers() drops and reacquires cache_lock */
5280 5305 cp->cache_defrag->kmd_defrags++;
5281 5306 (void) kmem_move_buffers(cp, n, 0, KMM_DESPERATE);
5282 5307 }
5283 5308 mutex_exit(&cp->cache_lock);
5284 5309 }
5285 5310
5286 5311 /* Is this cache above the fragmentation threshold? */
5287 5312 static boolean_t
5288 5313 kmem_cache_frag_threshold(kmem_cache_t *cp, uint64_t nfree)
5289 5314 {
5290 5315 /*
5291 5316 * nfree kmem_frag_numer
5292 5317 * ------------------ > ---------------
5293 5318 * cp->cache_buftotal kmem_frag_denom
5294 5319 */
5295 5320 return ((nfree * kmem_frag_denom) >
5296 5321 (cp->cache_buftotal * kmem_frag_numer));
5297 5322 }
5298 5323
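As a worked example of this threshold (tunable values illustrative only): if kmem_frag_numer / kmem_frag_denom were tuned to 1/8 and cache_buftotal were 10000, the cache would count as fragmented once nfree * 8 > 10000 * 1, i.e. once more than 1250 of its buffers sit free at the slab layer.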
5299 5324 static boolean_t
5300 5325 kmem_cache_is_fragmented(kmem_cache_t *cp, boolean_t *doreap)
5301 5326 {
5302 5327 boolean_t fragmented;
5303 5328 uint64_t nfree;
5304 5329
5305 5330 ASSERT(MUTEX_HELD(&cp->cache_lock));
5306 5331 *doreap = B_FALSE;
5307 5332
5308 5333 if (kmem_move_fulltilt) {
5309 5334 if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
5310 5335 return (B_TRUE);
5311 5336 }
5312 5337 } else {
5313 5338 if ((cp->cache_complete_slab_count + avl_numnodes(
5314 5339 &cp->cache_partial_slabs)) < kmem_frag_minslabs) {
5315 5340 return (B_FALSE);
5316 5341 }
5317 5342 }
5318 5343
5319 5344 nfree = cp->cache_bufslab;
5320 5345 fragmented = ((avl_numnodes(&cp->cache_partial_slabs) > 1) &&
5321 5346 kmem_cache_frag_threshold(cp, nfree));
5322 5347
5323 5348 /*
5324 5349 * Free buffers in the magazine layer appear allocated from the point of
5325 5350 * view of the slab layer. We want to know if the slab layer would
5326 5351 * appear fragmented if we included free buffers from magazines that
5327 5352 * have fallen out of the working set.
5328 5353 */
5329 5354 if (!fragmented) {
5330 5355 long reap;
5331 5356
5332 5357 mutex_enter(&cp->cache_depot_lock);
5333 5358 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
5334 5359 reap = MIN(reap, cp->cache_full.ml_total);
5335 5360 mutex_exit(&cp->cache_depot_lock);
5336 5361
5337 5362 nfree += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
5338 5363 if (kmem_cache_frag_threshold(cp, nfree)) {
5339 5364 *doreap = B_TRUE;
5340 5365 }
5341 5366 }
5342 5367
5343 5368 return (fragmented);
5344 5369 }
5345 5370
5346 5371 /* Called periodically from kmem_taskq */
5347 5372 static void
5348 5373 kmem_cache_scan(kmem_cache_t *cp)
5349 5374 {
5350 5375 boolean_t reap = B_FALSE;
5351 5376 kmem_defrag_t *kmd;
5352 5377
5353 5378 ASSERT(taskq_member(kmem_taskq, curthread));
5354 5379
5355 5380 mutex_enter(&cp->cache_lock);
5356 5381
5357 5382 kmd = cp->cache_defrag;
5358 5383 if (kmd->kmd_consolidate > 0) {
5359 5384 kmd->kmd_consolidate--;
5360 5385 mutex_exit(&cp->cache_lock);
5361 5386 kmem_cache_reap(cp);
5362 5387 return;
5363 5388 }
5364 5389
5365 5390 if (kmem_cache_is_fragmented(cp, &reap)) {
5366 5391 int slabs_found;
5367 5392
5368 5393 /*
5369 5394 * Consolidate reclaimable slabs from the end of the partial
5370 5395 * slab list (scan at most kmem_reclaim_scan_range slabs to find
5371 5396 * reclaimable slabs). Keep track of how many candidate slabs we
5372 5397 * looked for and how many we actually found so we can adjust
5373 5398 * the definition of a candidate slab if we're having trouble
5374 5399 * finding them.
5375 5400 *
5376 5401 * kmem_move_buffers() drops and reacquires cache_lock.
5377 5402 */
5378 5403 kmd->kmd_scans++;
5379 5404 slabs_found = kmem_move_buffers(cp, kmem_reclaim_scan_range,
5380 5405 kmem_reclaim_max_slabs, 0);
5381 5406 if (slabs_found >= 0) {
5382 5407 kmd->kmd_slabs_sought += kmem_reclaim_max_slabs;
5383 5408 kmd->kmd_slabs_found += slabs_found;
5384 5409 }
5385 5410
5386 5411 if (++kmd->kmd_tries >= kmem_reclaim_scan_range) {
5387 5412 kmd->kmd_tries = 0;
5388 5413
5389 5414 /*
5390 5415 * If we had difficulty finding candidate slabs in
5391 5416 * previous scans, adjust the threshold so that
5392 5417 * candidates are easier to find.
5393 5418 */
5394 5419 if (kmd->kmd_slabs_found == kmd->kmd_slabs_sought) {
5395 5420 kmem_adjust_reclaim_threshold(kmd, -1);
5396 5421 } else if ((kmd->kmd_slabs_found * 2) <
5397 5422 kmd->kmd_slabs_sought) {
5398 5423 kmem_adjust_reclaim_threshold(kmd, 1);
5399 5424 }
5400 5425 kmd->kmd_slabs_sought = 0;
5401 5426 kmd->kmd_slabs_found = 0;
5402 5427 }
5403 5428 } else {
5404 5429 kmem_reset_reclaim_threshold(cp->cache_defrag);
5405 5430 #ifdef DEBUG
5406 5431 if (!avl_is_empty(&cp->cache_partial_slabs)) {
5407 5432 /*
5408 5433 * In a debug kernel we want the consolidator to
5409 5434 * run occasionally even when there is plenty of
5410 5435 * memory.
5411 5436 */
5412 5437 uint16_t debug_rand;
5413 5438
5414 5439 (void) random_get_bytes((uint8_t *)&debug_rand, 2);
5415 5440 if (!kmem_move_noreap &&
5416 5441 ((debug_rand % kmem_mtb_reap) == 0)) {
5417 5442 mutex_exit(&cp->cache_lock);
5418 5443 kmem_cache_reap(cp);
5419 5444 return;
5420 5445 } else if ((debug_rand % kmem_mtb_move) == 0) {
5421 5446 kmd->kmd_scans++;
5422 5447 (void) kmem_move_buffers(cp,
5423 5448 kmem_reclaim_scan_range, 1, KMM_DEBUG);
5424 5449 }
5425 5450 }
5426 5451 #endif /* DEBUG */
5427 5452 }
5428 5453
5429 5454 mutex_exit(&cp->cache_lock);
5430 5455
5431 5456 if (reap)
5432 5457 kmem_depot_ws_reap(cp);
5433 5458 }
|
↓ open down ↓ |
2601 lines elided |
↑ open up ↑ |