1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
24 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
25 * Copyright 2018, Joyent, Inc.
26 */
27
28 /*
29 * Kernel memory allocator, as described in the following two papers and a
30 * statement about the consolidator:
31 *
32 * Jeff Bonwick,
33 * The Slab Allocator: An Object-Caching Kernel Memory Allocator.
34 * Proceedings of the Summer 1994 Usenix Conference.
35 * Available as /shared/sac/PSARC/1994/028/materials/kmem.pdf.
36 *
37 * Jeff Bonwick and Jonathan Adams,
38 * Magazines and vmem: Extending the Slab Allocator to Many CPUs and
39 * Arbitrary Resources.
40 * Proceedings of the 2001 Usenix Conference.
41 * Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf.
42 *
43 * kmem Slab Consolidator Big Theory Statement:
44 *
45 * 1. Motivation
46 *
47 * As stated in Bonwick94, slabs provide the following advantages over other
48 * allocation structures in terms of memory fragmentation:
49 *
50 * - Internal fragmentation (per-buffer wasted space) is minimal.
51 * - Severe external fragmentation (unused buffers on the free list) is
52 * unlikely.
53 *
54 * Segregating objects by size eliminates one source of external fragmentation,
55 * and according to Bonwick:
56 *
57 * The other reason that slabs reduce external fragmentation is that all
58 * objects in a slab are of the same type, so they have the same lifetime
59 * distribution. The resulting segregation of short-lived and long-lived
60 * objects at slab granularity reduces the likelihood of an entire page being
61 * held hostage due to a single long-lived allocation [Barrett93, Hanson90].
62 *
63 * While unlikely, severe external fragmentation remains possible. Clients that
64 * allocate both short- and long-lived objects from the same cache cannot
65 * anticipate the distribution of long-lived objects within the allocator's slab
66 * implementation. Even a small percentage of long-lived objects distributed
67 * randomly across many slabs can lead to a worst case scenario where the client
68 * frees the majority of its objects and the system gets back almost none of the
69 * slabs. Despite the client doing what it reasonably can to help the system
70 * reclaim memory, the allocator cannot shake free enough slabs because of
71 * lonely allocations stubbornly hanging on. Although the allocator is in a
72 * position to diagnose the fragmentation, there is nothing that the allocator
73 * by itself can do about it. It only takes a single allocated object to prevent
74 * an entire slab from being reclaimed, and any object handed out by
75 * kmem_cache_alloc() is by definition in the client's control. Conversely,
76 * although the client is in a position to move a long-lived object, it has no
77 * way of knowing if the object is causing fragmentation, and if so, where to
78 * move it. A solution necessarily requires further cooperation between the
79 * allocator and the client.
80 *
81 * 2. Move Callback
82 *
83 * The kmem slab consolidator therefore adds a move callback to the
84 * allocator/client interface, improving worst-case external fragmentation in
85 * kmem caches that supply a function to move objects from one memory location
86  * to another. In a situation of low memory, kmem attempts to consolidate all of
87 * a cache's slabs at once; otherwise it works slowly to bring external
88 * fragmentation within the 1/8 limit guaranteed for internal fragmentation,
89 * thereby helping to avoid a low memory situation in the future.
90 *
91 * The callback has the following signature:
92 *
93 * kmem_cbrc_t move(void *old, void *new, size_t size, void *user_arg)
94 *
95 * It supplies the kmem client with two addresses: the allocated object that
96 * kmem wants to move and a buffer selected by kmem for the client to use as the
97 * copy destination. The callback is kmem's way of saying "Please get off of
98 * this buffer and use this one instead." kmem knows where it wants to move the
99 * object in order to best reduce fragmentation. All the client needs to know
100 * about the second argument (void *new) is that it is an allocated, constructed
101 * object ready to take the contents of the old object. When the move function
102 * is called, the system is likely to be low on memory, and the new object
103 * spares the client from having to worry about allocating memory for the
104 * requested move. The third argument supplies the size of the object, in case a
105 * single move function handles multiple caches whose objects differ only in
106 * size (such as zio_buf_512, zio_buf_1024, etc). Finally, the same optional
107 * user argument passed to the constructor, destructor, and reclaim functions is
108 * also passed to the move callback.
109 *
110 * 2.1 Setting the Move Callback
111 *
112 * The client sets the move callback after creating the cache and before
113 * allocating from it:
114 *
115 * object_cache = kmem_cache_create(...);
116 * kmem_cache_set_move(object_cache, object_move);
117 *
118 * 2.2 Move Callback Return Values
119 *
120  * Only the client knows about its own data and when it is a good time to move it.
121 * The client is cooperating with kmem to return unused memory to the system,
122 * and kmem respectfully accepts this help at the client's convenience. When
123 * asked to move an object, the client can respond with any of the following:
124 *
125 * typedef enum kmem_cbrc {
126 * KMEM_CBRC_YES,
127 * KMEM_CBRC_NO,
128 * KMEM_CBRC_LATER,
129 * KMEM_CBRC_DONT_NEED,
130 * KMEM_CBRC_DONT_KNOW
131 * } kmem_cbrc_t;
132 *
133 * The client must not explicitly kmem_cache_free() either of the objects passed
134 * to the callback, since kmem wants to free them directly to the slab layer
135 * (bypassing the per-CPU magazine layer). The response tells kmem which of the
136 * objects to free:
137 *
138 * YES: (Did it) The client moved the object, so kmem frees the old one.
139 * NO: (Never) The client refused, so kmem frees the new object (the
140 * unused copy destination). kmem also marks the slab of the old
141 * object so as not to bother the client with further callbacks for
142 * that object as long as the slab remains on the partial slab list.
143 * (The system won't be getting the slab back as long as the
144 * immovable object holds it hostage, so there's no point in moving
145 * any of its objects.)
146 * LATER: The client is using the object and cannot move it now, so kmem
147 * frees the new object (the unused copy destination). kmem still
148 * attempts to move other objects off the slab, since it expects to
149 * succeed in clearing the slab in a later callback. The client
150 * should use LATER instead of NO if the object is likely to become
151 * movable very soon.
152 * DONT_NEED: The client no longer needs the object, so kmem frees the old along
153 * with the new object (the unused copy destination). This response
154 * is the client's opportunity to be a model citizen and give back as
155 * much as it can.
156 * DONT_KNOW: The client does not know about the object because
157 * a) the client has just allocated the object and not yet put it
158 * wherever it expects to find known objects
159 * b) the client has removed the object from wherever it expects to
160 * find known objects and is about to free it, or
161 * c) the client has freed the object.
162 * In all these cases (a, b, and c) kmem frees the new object (the
163 * unused copy destination). In the first case, the object is in
164 * use and the correct action is that for LATER; in the latter two
165 * cases, we know that the object is either freed or about to be
166 * freed, in which case it is either already in a magazine or about
167 * to be in one. In these cases, we know that the object will either
168 * be reallocated and reused, or it will end up in a full magazine
169 * that will be reaped (thereby liberating the slab). Because it
170 * is prohibitively expensive to differentiate these cases, and
171 * because the defrag code is executed when we're low on memory
172 * (thereby biasing the system to reclaim full magazines) we treat
173 * all DONT_KNOW cases as LATER and rely on cache reaping to
174 * generally clean up full magazines. While we take the same action
175 * for these cases, we maintain their semantic distinction: if
176 * defragmentation is not occurring, it is useful to know if this
177 * is due to objects in use (LATER) or objects in an unknown state
178 * of transition (DONT_KNOW).
179 *
180 * 2.3 Object States
181 *
182 * Neither kmem nor the client can be assumed to know the object's whereabouts
183 * at the time of the callback. An object belonging to a kmem cache may be in
184 * any of the following states:
185 *
186 * 1. Uninitialized on the slab
187 * 2. Allocated from the slab but not constructed (still uninitialized)
188 * 3. Allocated from the slab, constructed, but not yet ready for business
189 * (not in a valid state for the move callback)
190 * 4. In use (valid and known to the client)
191 * 5. About to be freed (no longer in a valid state for the move callback)
192 * 6. Freed to a magazine (still constructed)
193 * 7. Allocated from a magazine, not yet ready for business (not in a valid
194 * state for the move callback), and about to return to state #4
195 * 8. Deconstructed on a magazine that is about to be freed
196 * 9. Freed to the slab
197 *
198 * Since the move callback may be called at any time while the object is in any
199 * of the above states (except state #1), the client needs a safe way to
200 * determine whether or not it knows about the object. Specifically, the client
201 * needs to know whether or not the object is in state #4, the only state in
202 * which a move is valid. If the object is in any other state, the client should
203 * immediately return KMEM_CBRC_DONT_KNOW, since it is unsafe to access any of
204 * the object's fields.
205 *
206 * Note that although an object may be in state #4 when kmem initiates the move
207 * request, the object may no longer be in that state by the time kmem actually
208 * calls the move function. Not only does the client free objects
209  * asynchronously, kmem itself puts move requests on a queue where they are
210 * pending until kmem processes them from another context. Also, objects freed
211 * to a magazine appear allocated from the point of view of the slab layer, so
212 * kmem may even initiate requests for objects in a state other than state #4.
213 *
214 * 2.3.1 Magazine Layer
215 *
216 * An important insight revealed by the states listed above is that the magazine
217 * layer is populated only by kmem_cache_free(). Magazines of constructed
218 * objects are never populated directly from the slab layer (which contains raw,
219 * unconstructed objects). Whenever an allocation request cannot be satisfied
220 * from the magazine layer, the magazines are bypassed and the request is
221 * satisfied from the slab layer (creating a new slab if necessary). kmem calls
222 * the object constructor only when allocating from the slab layer, and only in
223 * response to kmem_cache_alloc() or to prepare the destination buffer passed in
224 * the move callback. kmem does not preconstruct objects in anticipation of
225 * kmem_cache_alloc().
226 *
227 * 2.3.2 Object Constructor and Destructor
228 *
229 * If the client supplies a destructor, it must be valid to call the destructor
230 * on a newly created object (immediately after the constructor).
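 *
 * As a hypothetical sketch (object_t, o_data, o_size, and o_lock are
 * illustrative names, not part of the kmem interface), the destructor below is
 * safe to call on a freshly constructed object because the constructor leaves
 * every field it does not fully initialize in a state the destructor checks
 * for:
 *
 *      static int
 *      object_construct(void *buf, void *arg, int kmflags)
 *      {
 *              object_t *op = buf;
 *
 *              op->o_data = NULL;      // allocated lazily, on first use
 *              mutex_init(&op->o_lock, NULL, MUTEX_DEFAULT, NULL);
 *              return (0);
 *      }
 *
 *      static void
 *      object_destruct(void *buf, void *arg)
 *      {
 *              object_t *op = buf;
 *
 *              if (op->o_data != NULL) // NULL on a freshly constructed object
 *                      kmem_free(op->o_data, op->o_size);
 *              mutex_destroy(&op->o_lock);
 *      }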
231 *
232 * 2.4 Recognizing Known Objects
233 *
234 * There is a simple test to determine safely whether or not the client knows
235 * about a given object in the move callback. It relies on the fact that kmem
236 * guarantees that the object of the move callback has only been touched by the
237 * client itself or else by kmem. kmem does this by ensuring that none of the
238 * cache's slabs are freed to the virtual memory (VM) subsystem while a move
239 * callback is pending. When the last object on a slab is freed, if there is a
240 * pending move, kmem puts the slab on a per-cache dead list and defers freeing
241 * slabs on that list until all pending callbacks are completed. That way,
242 * clients can be certain that the object of a move callback is in one of the
243 * states listed above, making it possible to distinguish known objects (in
244 * state #4) using the two low order bits of any pointer member (with the
245 * exception of 'char *' or 'short *' which may not be 4-byte aligned on some
246 * platforms).
247 *
248 * The test works as long as the client always transitions objects from state #4
249 * (known, in use) to state #5 (about to be freed, invalid) by setting the low
250 * order bit of the client-designated pointer member. Since kmem only writes
251 * invalid memory patterns, such as 0xbaddcafe to uninitialized memory and
252 * 0xdeadbeef to freed memory, any scribbling on the object done by kmem is
253 * guaranteed to set at least one of the two low order bits. Therefore, given an
254 * object with a back pointer to a 'container_t *o_container', the client can
255 * test
256 *
257 * container_t *container = object->o_container;
258 * if ((uintptr_t)container & 0x3) {
259 * return (KMEM_CBRC_DONT_KNOW);
260 * }
261 *
262 * Typically, an object will have a pointer to some structure with a list or
263 * hash where objects from the cache are kept while in use. Assuming that the
264 * client has some way of knowing that the container structure is valid and will
265 * not go away during the move, and assuming that the structure includes a lock
266 * to protect whatever collection is used, then the client would continue as
267 * follows:
268 *
269 * // Ensure that the container structure does not go away.
270 * if (container_hold(container) == 0) {
271 * return (KMEM_CBRC_DONT_KNOW);
272 * }
273 * mutex_enter(&container->c_objects_lock);
274 * if (container != object->o_container) {
275 * mutex_exit(&container->c_objects_lock);
276 * container_rele(container);
277 * return (KMEM_CBRC_DONT_KNOW);
278 * }
279 *
280 * At this point the client knows that the object cannot be freed as long as
281 * c_objects_lock is held. Note that after acquiring the lock, the client must
282 * recheck the o_container pointer in case the object was removed just before
283 * acquiring the lock.
284 *
285 * When the client is about to free an object, it must first remove that object
286 * from the list, hash, or other structure where it is kept. At that time, to
287 * mark the object so it can be distinguished from the remaining, known objects,
288 * the client sets the designated low order bit:
289 *
290 * mutex_enter(&container->c_objects_lock);
291 * object->o_container = (void *)((uintptr_t)object->o_container | 0x1);
292 * list_remove(&container->c_objects, object);
293 * mutex_exit(&container->c_objects_lock);
294 *
295 * In the common case, the object is freed to the magazine layer, where it may
296 * be reused on a subsequent allocation without the overhead of calling the
297 * constructor. While in the magazine it appears allocated from the point of
298 * view of the slab layer, making it a candidate for the move callback. Most
299 * objects unrecognized by the client in the move callback fall into this
300 * category and are cheaply distinguished from known objects by the test
301 * described earlier. Because searching magazines is prohibitively expensive
302 * for kmem, clients that do not mark freed objects (and therefore return
303 * KMEM_CBRC_DONT_KNOW for large numbers of objects) may find defragmentation
304 * efficacy reduced.
305 *
306 * Invalidating the designated pointer member before freeing the object marks
307 * the object to be avoided in the callback, and conversely, assigning a valid
308 * value to the designated pointer member after allocating the object makes the
309 * object fair game for the callback:
310 *
311 * ... allocate object ...
312 * ... set any initial state not set by the constructor ...
313 *
314 * mutex_enter(&container->c_objects_lock);
315 * list_insert_tail(&container->c_objects, object);
316 * membar_producer();
317 * object->o_container = container;
318 * mutex_exit(&container->c_objects_lock);
319 *
320 * Note that everything else must be valid before setting o_container makes the
321 * object fair game for the move callback. The membar_producer() call ensures
322 * that all the object's state is written to memory before setting the pointer
323 * that transitions the object from state #3 or #7 (allocated, constructed, not
324 * yet in use) to state #4 (in use, valid). That's important because the move
325 * function has to check the validity of the pointer before it can safely
326 * acquire the lock protecting the collection where it expects to find known
327 * objects.
328 *
329 * This method of distinguishing known objects observes the usual symmetry:
330 * invalidating the designated pointer is the first thing the client does before
331 * freeing the object, and setting the designated pointer is the last thing the
332 * client does after allocating the object. Of course, the client is not
333 * required to use this method. Fundamentally, how the client recognizes known
334 * objects is completely up to the client, but this method is recommended as an
335 * efficient and safe way to take advantage of the guarantees made by kmem. If
336 * the entire object is arbitrary data without any markable bits from a suitable
337 * pointer member, then the client must find some other method, such as
338 * searching a hash table of known objects.
339 *
340 * 2.5 Preventing Objects From Moving
341 *
342 * Besides a way to distinguish known objects, the other thing that the client
343 * needs is a strategy to ensure that an object will not move while the client
344 * is actively using it. The details of satisfying this requirement tend to be
345 * highly cache-specific. It might seem that the same rules that let a client
346 * remove an object safely should also decide when an object can be moved
347 * safely. However, any object state that makes a removal attempt invalid is
348 * likely to be long-lasting for objects that the client does not expect to
349 * remove. kmem knows nothing about the object state and is equally likely (from
350 * the client's point of view) to request a move for any object in the cache,
351 * whether prepared for removal or not. Even a low percentage of objects stuck
352 * in place by unremovability will defeat the consolidator if the stuck objects
353 * are the same long-lived allocations likely to hold slabs hostage.
354 * Fundamentally, the consolidator is not aimed at common cases. Severe external
355 * fragmentation is a worst case scenario manifested as sparsely allocated
356 * slabs, by definition a low percentage of the cache's objects. When deciding
357 * what makes an object movable, keep in mind the goal of the consolidator: to
358 * bring worst-case external fragmentation within the limits guaranteed for
359 * internal fragmentation. Removability is a poor criterion if it is likely to
360 * exclude more than an insignificant percentage of objects for long periods of
361 * time.
362 *
363 * A tricky general solution exists, and it has the advantage of letting you
364 * move any object at almost any moment, practically eliminating the likelihood
365 * that an object can hold a slab hostage. However, if there is a cache-specific
366 * way to ensure that an object is not actively in use in the vast majority of
367 * cases, a simpler solution that leverages this cache-specific knowledge is
368 * preferred.
369 *
370 * 2.5.1 Cache-Specific Solution
371 *
372 * As an example of a cache-specific solution, the ZFS znode cache takes
373 * advantage of the fact that the vast majority of znodes are only being
374 * referenced from the DNLC. (A typical case might be a few hundred in active
375 * use and a hundred thousand in the DNLC.) In the move callback, after the ZFS
376 * client has established that it recognizes the znode and can access its fields
377 * safely (using the method described earlier), it then tests whether the znode
378 * is referenced by anything other than the DNLC. If so, it assumes that the
379 * znode may be in active use and is unsafe to move, so it drops its locks and
380 * returns KMEM_CBRC_LATER. The advantage of this strategy is that everywhere
381 * else znodes are used, no change is needed to protect against the possibility
382 * of the znode moving. The disadvantage is that it remains possible for an
383 * application to hold a znode slab hostage with an open file descriptor.
384 * However, this case ought to be rare and the consolidator has a way to deal
385 * with it: If the client responds KMEM_CBRC_LATER repeatedly for the same
386 * object, kmem eventually stops believing it and treats the slab as if the
387 * client had responded KMEM_CBRC_NO. Having marked the hostage slab, kmem can
388 * then focus on getting it off of the partial slab list by allocating rather
389 * than freeing all of its objects. (Either way of getting a slab off the
390 * free list reduces fragmentation.)
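 *
 * A sketch of this style of check (illustrative only; znode_held_elsewhere()
 * is a hypothetical helper standing in for the reference-count and DNLC tests
 * that the actual ZFS move callback performs):
 *
 *      // The callback has already verified that it knows this znode and
 *      // holds the locks needed to examine it safely.
 *      if (znode_held_elsewhere(zp)) {
 *              // Possibly in active use; don't risk moving it now.
 *              ... drop locks ...
 *              return (KMEM_CBRC_LATER);
 *      }
 *      ... copy the znode to the new buffer and update back pointers ...
 *      ... drop locks ...
 *      return (KMEM_CBRC_YES);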
391 *
392 * 2.5.2 General Solution
393 *
394 * The general solution, on the other hand, requires an explicit hold everywhere
395 * the object is used to prevent it from moving. To keep the client locking
396 * strategy as uncomplicated as possible, kmem guarantees the simplifying
397 * assumption that move callbacks are sequential, even across multiple caches.
398 * Internally, a global queue processed by a single thread supports all caches
399 * implementing the callback function. No matter how many caches supply a move
400 * function, the consolidator never moves more than one object at a time, so the
401 * client does not have to worry about tricky lock ordering involving several
402 * related objects from different kmem caches.
403 *
404 * The general solution implements the explicit hold as a read-write lock, which
405 * allows multiple readers to access an object from the cache simultaneously
406 * while a single writer is excluded from moving it. A single rwlock for the
407 * entire cache would lock out all threads from using any of the cache's objects
408 * even though only a single object is being moved, so to reduce contention,
409 * the client can fan out the single rwlock into an array of rwlocks hashed by
410 * the object address, making it probable that moving one object will not
411 * prevent other threads from using a different object. The rwlock cannot be a
412 * member of the object itself, because the possibility of the object moving
413 * makes it unsafe to access any of the object's fields until the lock is
414 * acquired.
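 *
 * As a sketch, OBJECT_RWLOCK might simply hash the object address into a small
 * static array of rwlocks (the array size and hash shift here are arbitrary,
 * illustrative choices; each lock would be rw_init()'d at cache creation time,
 * not shown):
 *
 *      #define OBJECT_LOCK_COUNT       64      // any power of two
 *
 *      static krwlock_t object_rwlock[OBJECT_LOCK_COUNT];
 *
 *      #define OBJECT_RWLOCK(op)       (&object_rwlock[                \
 *          ((uintptr_t)(op) >> 6) & (OBJECT_LOCK_COUNT - 1)])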
415 *
416 * Assuming a small, fixed number of locks, it's possible that multiple objects
417 * will hash to the same lock. A thread that needs to use multiple objects in
418 * the same function may acquire the same lock multiple times. Since rwlocks are
419 * reentrant for readers, and since there is never more than a single writer at
420 * a time (assuming that the client acquires the lock as a writer only when
421 * moving an object inside the callback), there would seem to be no problem.
422 * However, a client locking multiple objects in the same function must handle
423 * one case of potential deadlock: Assume that thread A needs to prevent both
424 * object 1 and object 2 from moving, and thread B, the callback, meanwhile
425 * tries to move object 3. It's possible, if objects 1, 2, and 3 all hash to the
426 * same lock, that thread A will acquire the lock for object 1 as a reader
427 * before thread B sets the lock's write-wanted bit, preventing thread A from
428 * reacquiring the lock for object 2 as a reader. Unable to make forward
429 * progress, thread A will never release the lock for object 1, resulting in
430 * deadlock.
431 *
432 * There are two ways of avoiding the deadlock just described. The first is to
433 * use rw_tryenter() rather than rw_enter() in the callback function when
434 * attempting to acquire the lock as a writer. If tryenter discovers that the
435 * same object (or another object hashed to the same lock) is already in use, it
436 * aborts the callback and returns KMEM_CBRC_LATER. The second way is to use
437 * rprwlock_t (declared in common/fs/zfs/sys/rprwlock.h) instead of rwlock_t,
438 * since it allows a thread to acquire the lock as a reader in spite of a
439 * waiting writer. This second approach insists on moving the object now, no
440 * matter how many readers the move function must wait for in order to do so,
441 * and could delay the completion of the callback indefinitely (blocking
442 * callbacks to other clients). In practice, a less insistent callback using
443 * rw_tryenter() returns KMEM_CBRC_LATER infrequently enough that there seems
444 * little reason to use anything else.
445 *
446 * Avoiding deadlock is not the only problem that an implementation using an
447 * explicit hold needs to solve. Locking the object in the first place (to
448 * prevent it from moving) remains a problem, since the object could move
449 * between the time you obtain a pointer to the object and the time you acquire
450 * the rwlock hashed to that pointer value. Therefore the client needs to
451 * recheck the value of the pointer after acquiring the lock, drop the lock if
452 * the value has changed, and try again. This requires a level of indirection:
453 * something that points to the object rather than the object itself, that the
454 * client can access safely while attempting to acquire the lock. (The object
455 * itself cannot be referenced safely because it can move at any time.)
456 * The following lock-acquisition function takes whatever is safe to reference
457 * (arg), follows its pointer to the object (using function f), and tries as
458 * often as necessary to acquire the hashed lock and verify that the object
459 * still has not moved:
460 *
461 * object_t *
462 * object_hold(object_f f, void *arg)
463 * {
464 * object_t *op;
465 *
466 * op = f(arg);
467 * if (op == NULL) {
468 * return (NULL);
469 * }
470 *
471 * rw_enter(OBJECT_RWLOCK(op), RW_READER);
472 * while (op != f(arg)) {
473 * rw_exit(OBJECT_RWLOCK(op));
474 * op = f(arg);
475 * if (op == NULL) {
476 * break;
477 * }
478 * rw_enter(OBJECT_RWLOCK(op), RW_READER);
479 * }
480 *
481 * return (op);
482 * }
483 *
484 * The OBJECT_RWLOCK macro hashes the object address to obtain the rwlock. The
485 * lock reacquisition loop, while necessary, almost never executes. The function
486 * pointer f (used to obtain the object pointer from arg) has the following type
487 * definition:
488 *
489 * typedef object_t *(*object_f)(void *arg);
490 *
491 * An object_f implementation is likely to be as simple as accessing a structure
492 * member:
493 *
494 * object_t *
495 * s_object(void *arg)
496 * {
497 * something_t *sp = arg;
498 * return (sp->s_object);
499 * }
500 *
501 * The flexibility of a function pointer allows the path to the object to be
502 * arbitrarily complex and also supports the notion that depending on where you
503 * are using the object, you may need to get it from someplace different.
504 *
505 * The function that releases the explicit hold is simpler because it does not
506 * have to worry about the object moving:
507 *
508 * void
509 * object_rele(object_t *op)
510 * {
511 * rw_exit(OBJECT_RWLOCK(op));
512 * }
513 *
514 * The caller is spared these details so that obtaining and releasing an
515 * explicit hold feels like a simple mutex_enter()/mutex_exit() pair. The caller
516 * of object_hold() only needs to know that the returned object pointer is valid
517 * if not NULL and that the object will not move until released.
518 *
519 * Although object_hold() prevents an object from moving, it does not prevent it
520 * from being freed. The caller must take measures before calling object_hold()
521 * (afterwards is too late) to ensure that the held object cannot be freed. The
522 * caller must do so without accessing the unsafe object reference, so any lock
523 * or reference count used to ensure the continued existence of the object must
524 * live outside the object itself.
525 *
526 * Obtaining a new object is a special case where an explicit hold is impossible
527 * for the caller. Any function that returns a newly allocated object (either as
528  * a return value, or as an in-out parameter) must return it already held; once
529  * the caller gets it, it is too late, since the object cannot be safely accessed
530 * without the level of indirection described earlier. The following
531 * object_alloc() example uses the same code shown earlier to transition a new
532 * object into the state of being recognized (by the client) as a known object.
533 * The function must acquire the hold (rw_enter) before that state transition
534 * makes the object movable:
535 *
536 * static object_t *
537 * object_alloc(container_t *container)
538 * {
539 * object_t *object = kmem_cache_alloc(object_cache, 0);
540 * ... set any initial state not set by the constructor ...
541 * rw_enter(OBJECT_RWLOCK(object), RW_READER);
542 * mutex_enter(&container->c_objects_lock);
543 * list_insert_tail(&container->c_objects, object);
544 * membar_producer();
545 * object->o_container = container;
546 * mutex_exit(&container->c_objects_lock);
547 * return (object);
548 * }
549 *
550 * Functions that implicitly acquire an object hold (any function that calls
551 * object_alloc() to supply an object for the caller) need to be carefully noted
552 * so that the matching object_rele() is not neglected. Otherwise, leaked holds
553 * prevent all objects hashed to the affected rwlocks from ever being moved.
554 *
555 * The pointer to a held object can be hashed to the holding rwlock even after
556 * the object has been freed. Although it is possible to release the hold
557 * after freeing the object, you may decide to release the hold implicitly in
558 * whatever function frees the object, so as to release the hold as soon as
559 * possible, and for the sake of symmetry with the function that implicitly
560 * acquires the hold when it allocates the object. Here, object_free() releases
561 * the hold acquired by object_alloc(). Its implicit object_rele() forms a
562 * matching pair with object_hold():
563 *
564 * void
565 * object_free(object_t *object)
566 * {
567 * container_t *container;
568 *
569 * ASSERT(object_held(object));
570 * container = object->o_container;
571 * mutex_enter(&container->c_objects_lock);
572 * object->o_container =
573 * (void *)((uintptr_t)object->o_container | 0x1);
574 * list_remove(&container->c_objects, object);
575 * mutex_exit(&container->c_objects_lock);
576 * object_rele(object);
577 * kmem_cache_free(object_cache, object);
578 * }
579 *
580 * Note that object_free() cannot safely accept an object pointer as an argument
581 * unless the object is already held. Any function that calls object_free()
582 * needs to be carefully noted since it similarly forms a matching pair with
583 * object_hold().
584 *
585 * To complete the picture, the following callback function implements the
586 * general solution by moving objects only if they are currently unheld:
587 *
588 * static kmem_cbrc_t
589 * object_move(void *buf, void *newbuf, size_t size, void *arg)
590 * {
591 * object_t *op = buf, *np = newbuf;
592 * container_t *container;
593 *
594 * container = op->o_container;
595 * if ((uintptr_t)container & 0x3) {
596 * return (KMEM_CBRC_DONT_KNOW);
597 * }
598 *
599 * // Ensure that the container structure does not go away.
600 * if (container_hold(container) == 0) {
601 * return (KMEM_CBRC_DONT_KNOW);
602 * }
603 *
604 * mutex_enter(&container->c_objects_lock);
605 * if (container != op->o_container) {
606 * mutex_exit(&container->c_objects_lock);
607 * container_rele(container);
608 * return (KMEM_CBRC_DONT_KNOW);
609 * }
610 *
611 * if (rw_tryenter(OBJECT_RWLOCK(op), RW_WRITER) == 0) {
612 * mutex_exit(&container->c_objects_lock);
613 * container_rele(container);
614 * return (KMEM_CBRC_LATER);
615 * }
616 *
617 * object_move_impl(op, np); // critical section
618 * rw_exit(OBJECT_RWLOCK(op));
619 *
620 * op->o_container = (void *)((uintptr_t)op->o_container | 0x1);
621 * list_link_replace(&op->o_link_node, &np->o_link_node);
622 * mutex_exit(&container->c_objects_lock);
623 * container_rele(container);
624 * return (KMEM_CBRC_YES);
625 * }
626 *
627 * Note that object_move() must invalidate the designated o_container pointer of
628 * the old object in the same way that object_free() does, since kmem will free
629 * the object in response to the KMEM_CBRC_YES return value.
630 *
631 * The lock order in object_move() differs from object_alloc(), which locks
632 * OBJECT_RWLOCK first and &container->c_objects_lock second, but as long as the
633 * callback uses rw_tryenter() (preventing the deadlock described earlier), it's
634 * not a problem. Holding the lock on the object list in the example above
635 * through the entire callback not only prevents the object from going away, it
636 * also allows you to lock the list elsewhere and know that none of its elements
637 * will move during iteration.
638 *
639 * Adding an explicit hold everywhere an object from the cache is used is tricky
640 * and involves much more change to client code than a cache-specific solution
641 * that leverages existing state to decide whether or not an object is
642 * movable. However, this approach has the advantage that no object remains
643 * immovable for any significant length of time, making it extremely unlikely
644 * that long-lived allocations can continue holding slabs hostage; and it works
645 * for any cache.
646 *
647 * 3. Consolidator Implementation
648 *
649 * Once the client supplies a move function that a) recognizes known objects and
650 * b) avoids moving objects that are actively in use, the remaining work is up
651 * to the consolidator to decide which objects to move and when to issue
652 * callbacks.
653 *
654 * The consolidator relies on the fact that a cache's slabs are ordered by
655 * usage. Each slab has a fixed number of objects. Depending on the slab's
656 * "color" (the offset of the first object from the beginning of the slab;
657 * offsets are staggered to mitigate false sharing of cache lines) it is either
658 * the maximum number of objects per slab determined at cache creation time or
659 * else the number closest to the maximum that fits within the space remaining
660 * after the initial offset. A completely allocated slab may contribute some
661 * internal fragmentation (per-slab overhead) but no external fragmentation, so
662 * it is of no interest to the consolidator. At the other extreme, slabs whose
663 * objects have all been freed to the slab are released to the virtual memory
664 * (VM) subsystem (objects freed to magazines are still allocated as far as the
665 * slab is concerned). External fragmentation exists when there are slabs
666 * somewhere between these extremes. A partial slab has at least one but not all
667 * of its objects allocated. The more partial slabs, and the fewer allocated
668 * objects on each of them, the higher the fragmentation. Hence the
669 * consolidator's overall strategy is to reduce the number of partial slabs by
670 * moving allocated objects from the least allocated slabs to the most allocated
671 * slabs.
672 *
673 * Partial slabs are kept in an AVL tree ordered by usage. Completely allocated
674 * slabs are kept separately in an unordered list. Since the majority of slabs
675 * tend to be completely allocated (a typical unfragmented cache may have
676 * thousands of complete slabs and only a single partial slab), separating
677 * complete slabs improves the efficiency of partial slab ordering, since the
678 * complete slabs do not affect the depth or balance of the AVL tree. This
679 * ordered sequence of partial slabs acts as a "free list" supplying objects for
680 * allocation requests.
681 *
682 * Objects are always allocated from the first partial slab in the free list,
683 * where the allocation is most likely to eliminate a partial slab (by
684 * completely allocating it). Conversely, when a single object from a completely
685 * allocated slab is freed to the slab, that slab is added to the front of the
686 * free list. Since most free list activity involves highly allocated slabs
687 * coming and going at the front of the list, slabs tend naturally toward the
688 * ideal order: highly allocated at the front, sparsely allocated at the back.
689 * Slabs with few allocated objects are likely to become completely free if they
690 * keep a safe distance away from the front of the free list. Slab misorders
691 * interfere with the natural tendency of slabs to become completely free or
692 * completely allocated. For example, a slab with a single allocated object
693 * needs only a single free to escape the cache; its natural desire is
694 * frustrated when it finds itself at the front of the list where a second
695 * allocation happens just before the free could have released it. Another slab
696 * with all but one object allocated might have supplied the buffer instead, so
697 * that both (as opposed to neither) of the slabs would have been taken off the
698 * free list.
699 *
700 * Although slabs tend naturally toward the ideal order, misorders allowed by a
701 * simple list implementation defeat the consolidator's strategy of merging
702 * least- and most-allocated slabs. Without an AVL tree to guarantee order, kmem
703 * needs another way to fix misorders to optimize its callback strategy. One
704 * approach is to periodically scan a limited number of slabs, advancing a
705 * marker to hold the current scan position, and to move extreme misorders to
706 * the front or back of the free list and to the front or back of the current
707 * scan range. By making consecutive scan ranges overlap by one slab, the least
708 * allocated slab in the current range can be carried along from the end of one
709 * scan to the start of the next.
710 *
711 * Maintaining partial slabs in an AVL tree relieves kmem of this additional
712 * task, however. Since most of the cache's activity is in the magazine layer,
713 * and allocations from the slab layer represent only a startup cost, the
714 * overhead of maintaining a balanced tree is not a significant concern compared
715 * to the opportunity of reducing complexity by eliminating the partial slab
716 * scanner just described. The overhead of an AVL tree is minimized by
717 * maintaining only partial slabs in the tree and keeping completely allocated
718 * slabs separately in a list. To avoid increasing the size of the slab
719 * structure the AVL linkage pointers are reused for the slab's list linkage,
720 * since the slab will always be either partial or complete, never stored both
721 * ways at the same time. To further minimize the overhead of the AVL tree the
722 * compare function that orders partial slabs by usage divides the range of
723 * allocated object counts into bins such that counts within the same bin are
724 * considered equal. Binning partial slabs makes it less likely that allocating
725 * or freeing a single object will change the slab's order, requiring a tree
726 * reinsertion (an avl_remove() followed by an avl_add(), both potentially
727 * requiring some rebalancing of the tree). Allocation counts closest to
728 * completely free and completely allocated are left unbinned (finely sorted) to
729 * better support the consolidator's strategy of merging slabs at either
730 * extreme.
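 *
 * A simplified sketch of such a compare function follows (binned_usage() is a
 * hypothetical helper that maps a slab's allocated-object count into its bin;
 * the real comparator in this file also accounts for slabs marked
 * non-reclaimable, as described in section 3.3):
 *
 *      static int
 *      slab_usage_cmp(const void *l, const void *r)
 *      {
 *              size_t lw = binned_usage(l);    // bin of allocated objects
 *              size_t rw = binned_usage(r);
 *
 *              if (lw != rw)
 *                      return (lw < rw ? -1 : 1);
 *              // break ties by address so equal bins remain distinct nodes
 *              return ((uintptr_t)l < (uintptr_t)r ? -1 :
 *                  ((uintptr_t)l > (uintptr_t)r ? 1 : 0));
 *      }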
731 *
732 * 3.1 Assessing Fragmentation and Selecting Candidate Slabs
733 *
734 * The consolidator piggybacks on the kmem maintenance thread and is called on
735 * the same interval as kmem_cache_update(), once per cache every fifteen
736 * seconds. kmem maintains a running count of unallocated objects in the slab
737 * layer (cache_bufslab). The consolidator checks whether that number exceeds
738 * 12.5% (1/8) of the total objects in the cache (cache_buftotal), and whether
739 * there is a significant number of slabs in the cache (arbitrarily a minimum
740 * 101 total slabs). Unused objects that have fallen out of the magazine layer's
741 * working set are included in the assessment, and magazines in the depot are
742 * reaped if those objects would lift cache_bufslab above the fragmentation
743 * threshold. Once the consolidator decides that a cache is fragmented, it looks
744 * for a candidate slab to reclaim, starting at the end of the partial slab free
745 * list and scanning backwards. At first the consolidator is choosy: only a slab
746 * with fewer than 12.5% (1/8) of its objects allocated qualifies (or else a
747 * single allocated object, regardless of percentage). If there is difficulty
748 * finding a candidate slab, kmem raises the allocation threshold incrementally,
749 * up to a maximum 87.5% (7/8), so that eventually the consolidator will reduce
750 * external fragmentation (unused objects on the free list) below 12.5% (1/8),
751 * even in the worst case of every slab in the cache being almost 7/8 allocated.
752 * The threshold can also be lowered incrementally when candidate slabs are easy
753 * to find, and the threshold is reset to the minimum 1/8 as soon as the cache
754 * is no longer fragmented.
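 *
 * In outline, the threshold test amounts to the following sketch (cache_bufslab
 * and cache_buftotal are the counts described above; cache_slab_create and
 * cache_slab_destroy track slabs created and destroyed; the tunables are
 * declared later in this file; the real code folds in depot reaping and the
 * sliding allocation threshold):
 *
 *      // free objects in the slab layer exceed 1/8 of all objects, and
 *      // the cache has enough slabs to be worth defragmenting
 *      if ((cp->cache_bufslab * kmem_frag_denom >
 *          cp->cache_buftotal * kmem_frag_numer) &&
 *          (cp->cache_slab_create - cp->cache_slab_destroy >
 *          kmem_frag_minslabs))
 *              ... scan backwards for a candidate slab to reclaim ...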
755 *
756 * 3.2 Generating Callbacks
757 *
758 * Once an eligible slab is chosen, a callback is generated for every allocated
759 * object on the slab, in the hope that the client will move everything off the
760 * slab and make it reclaimable. Objects selected as move destinations are
761 * chosen from slabs at the front of the free list. Assuming slabs in the ideal
762 * order (most allocated at the front, least allocated at the back) and a
763 * cooperative client, the consolidator will succeed in removing slabs from both
764 * ends of the free list, completely allocating on the one hand and completely
765 * freeing on the other. Objects selected as move destinations are allocated in
766 * the kmem maintenance thread where move requests are enqueued. A separate
767 * callback thread removes pending callbacks from the queue and calls the
768 * client. The separate thread ensures that client code (the move function) does
769 * not interfere with internal kmem maintenance tasks. A map of pending
770 * callbacks keyed by object address (the object to be moved) is checked to
771 * ensure that duplicate callbacks are not generated for the same object.
772 * Allocating the move destination (the object to move to) prevents subsequent
773 * callbacks from selecting the same destination as an earlier pending callback.
774 *
775 * Move requests can also be generated by kmem_cache_reap() when the system is
776 * desperate for memory and by kmem_cache_move_notify(), called by the client to
777 * notify kmem that a move refused earlier with KMEM_CBRC_LATER is now possible.
778 * The map of pending callbacks is protected by the same lock that protects the
779 * slab layer.
780 *
781 * When the system is desperate for memory, kmem does not bother to determine
782 * whether or not the cache exceeds the fragmentation threshold, but tries to
783 * consolidate as many slabs as possible. Normally, the consolidator chews
784 * slowly, one sparsely allocated slab at a time during each maintenance
785 * interval that the cache is fragmented. When desperate, the consolidator
786 * starts at the last partial slab and enqueues callbacks for every allocated
787 * object on every partial slab, working backwards until it reaches the first
788 * partial slab. The first partial slab, meanwhile, advances in pace with the
789 * consolidator as allocations to supply move destinations for the enqueued
790 * callbacks use up the highly allocated slabs at the front of the free list.
791 * Ideally, the overgrown free list collapses like an accordion, starting at
792 * both ends and ending at the center with a single partial slab.
793 *
794 * 3.3 Client Responses
795 *
796 * When the client returns KMEM_CBRC_NO in response to the move callback, kmem
797 * marks the slab that supplied the stuck object non-reclaimable and moves it to
798  * the front of the free list. The slab remains marked as long as it remains on the
799 * free list, and it appears more allocated to the partial slab compare function
800 * than any unmarked slab, no matter how many of its objects are allocated.
801 * Since even one immovable object ties up the entire slab, the goal is to
802 * completely allocate any slab that cannot be completely freed. kmem does not
803 * bother generating callbacks to move objects from a marked slab unless the
804 * system is desperate.
805 *
806 * When the client responds KMEM_CBRC_LATER, kmem increments a count for the
807 * slab. If the client responds LATER too many times, kmem disbelieves and
808 * treats the response as a NO. The count is cleared when the slab is taken off
809 * the partial slab list or when the client moves one of the slab's objects.
810 *
811 * 4. Observability
812 *
813 * A kmem cache's external fragmentation is best observed with 'mdb -k' using
814 * the ::kmem_slabs dcmd. For a complete description of the command, enter
815 * '::help kmem_slabs' at the mdb prompt.
816 */
817
818 #include <sys/kmem_impl.h>
819 #include <sys/vmem_impl.h>
820 #include <sys/param.h>
821 #include <sys/sysmacros.h>
822 #include <sys/vm.h>
823 #include <sys/proc.h>
824 #include <sys/tuneable.h>
825 #include <sys/systm.h>
826 #include <sys/cmn_err.h>
827 #include <sys/debug.h>
828 #include <sys/sdt.h>
829 #include <sys/mutex.h>
830 #include <sys/bitmap.h>
831 #include <sys/atomic.h>
832 #include <sys/kobj.h>
833 #include <sys/disp.h>
834 #include <vm/seg_kmem.h>
835 #include <sys/log.h>
836 #include <sys/callb.h>
837 #include <sys/taskq.h>
838 #include <sys/modctl.h>
839 #include <sys/reboot.h>
840 #include <sys/id32.h>
841 #include <sys/zone.h>
842 #include <sys/netstack.h>
843 #ifdef DEBUG
844 #include <sys/random.h>
845 #endif
846
847 extern void streams_msg_init(void);
848 extern int segkp_fromheap;
849 extern void segkp_cache_free(void);
850 extern int callout_init_done;
851
852 struct kmem_cache_kstat {
853 kstat_named_t kmc_buf_size;
854 kstat_named_t kmc_align;
855 kstat_named_t kmc_chunk_size;
856 kstat_named_t kmc_slab_size;
857 kstat_named_t kmc_alloc;
858 kstat_named_t kmc_alloc_fail;
859 kstat_named_t kmc_free;
860 kstat_named_t kmc_depot_alloc;
861 kstat_named_t kmc_depot_free;
862 kstat_named_t kmc_depot_contention;
863 kstat_named_t kmc_slab_alloc;
864 kstat_named_t kmc_slab_free;
865 kstat_named_t kmc_buf_constructed;
866 kstat_named_t kmc_buf_avail;
867 kstat_named_t kmc_buf_inuse;
868 kstat_named_t kmc_buf_total;
869 kstat_named_t kmc_buf_max;
870 kstat_named_t kmc_slab_create;
871 kstat_named_t kmc_slab_destroy;
872 kstat_named_t kmc_vmem_source;
873 kstat_named_t kmc_hash_size;
874 kstat_named_t kmc_hash_lookup_depth;
875 kstat_named_t kmc_hash_rescale;
876 kstat_named_t kmc_full_magazines;
877 kstat_named_t kmc_empty_magazines;
878 kstat_named_t kmc_magazine_size;
879 kstat_named_t kmc_reap; /* number of kmem_cache_reap() calls */
880 kstat_named_t kmc_defrag; /* attempts to defrag all partial slabs */
881 kstat_named_t kmc_scan; /* attempts to defrag one partial slab */
882 kstat_named_t kmc_move_callbacks; /* sum of yes, no, later, dn, dk */
883 kstat_named_t kmc_move_yes;
884 kstat_named_t kmc_move_no;
885 kstat_named_t kmc_move_later;
886 kstat_named_t kmc_move_dont_need;
887 kstat_named_t kmc_move_dont_know; /* obj unrecognized by client ... */
888 kstat_named_t kmc_move_hunt_found; /* ... but found in mag layer */
889 kstat_named_t kmc_move_slabs_freed; /* slabs freed by consolidator */
890 kstat_named_t kmc_move_reclaimable; /* buffers, if consolidator ran */
891 } kmem_cache_kstat = {
892 { "buf_size", KSTAT_DATA_UINT64 },
893 { "align", KSTAT_DATA_UINT64 },
894 { "chunk_size", KSTAT_DATA_UINT64 },
895 { "slab_size", KSTAT_DATA_UINT64 },
896 { "alloc", KSTAT_DATA_UINT64 },
897 { "alloc_fail", KSTAT_DATA_UINT64 },
898 { "free", KSTAT_DATA_UINT64 },
899 { "depot_alloc", KSTAT_DATA_UINT64 },
900 { "depot_free", KSTAT_DATA_UINT64 },
901 { "depot_contention", KSTAT_DATA_UINT64 },
902 { "slab_alloc", KSTAT_DATA_UINT64 },
903 { "slab_free", KSTAT_DATA_UINT64 },
904 { "buf_constructed", KSTAT_DATA_UINT64 },
905 { "buf_avail", KSTAT_DATA_UINT64 },
906 { "buf_inuse", KSTAT_DATA_UINT64 },
907 { "buf_total", KSTAT_DATA_UINT64 },
908 { "buf_max", KSTAT_DATA_UINT64 },
909 { "slab_create", KSTAT_DATA_UINT64 },
910 { "slab_destroy", KSTAT_DATA_UINT64 },
911 { "vmem_source", KSTAT_DATA_UINT64 },
912 { "hash_size", KSTAT_DATA_UINT64 },
913 { "hash_lookup_depth", KSTAT_DATA_UINT64 },
914 { "hash_rescale", KSTAT_DATA_UINT64 },
915 { "full_magazines", KSTAT_DATA_UINT64 },
916 { "empty_magazines", KSTAT_DATA_UINT64 },
917 { "magazine_size", KSTAT_DATA_UINT64 },
918 { "reap", KSTAT_DATA_UINT64 },
919 { "defrag", KSTAT_DATA_UINT64 },
920 { "scan", KSTAT_DATA_UINT64 },
921 { "move_callbacks", KSTAT_DATA_UINT64 },
922 { "move_yes", KSTAT_DATA_UINT64 },
923 { "move_no", KSTAT_DATA_UINT64 },
924 { "move_later", KSTAT_DATA_UINT64 },
925 { "move_dont_need", KSTAT_DATA_UINT64 },
926 { "move_dont_know", KSTAT_DATA_UINT64 },
927 { "move_hunt_found", KSTAT_DATA_UINT64 },
928 { "move_slabs_freed", KSTAT_DATA_UINT64 },
929 { "move_reclaimable", KSTAT_DATA_UINT64 },
930 };
931
932 static kmutex_t kmem_cache_kstat_lock;
933
934 /*
935 * The default set of caches to back kmem_alloc().
936 * These sizes should be reevaluated periodically.
937 *
938 * We want allocations that are multiples of the coherency granularity
939 * (64 bytes) to be satisfied from a cache which is a multiple of 64
940 * bytes, so that it will be 64-byte aligned. For all multiples of 64,
941 * the next kmem_cache_size greater than or equal to it must be a
942 * multiple of 64.
943 *
944 * We split the table into two sections: size <= 4k and size > 4k. This
945 * saves a lot of space and cache footprint in our cache tables.
946 */
947 static const int kmem_alloc_sizes[] = {
948 1 * 8,
949 2 * 8,
950 3 * 8,
951 4 * 8, 5 * 8, 6 * 8, 7 * 8,
952 4 * 16, 5 * 16, 6 * 16, 7 * 16,
953 4 * 32, 5 * 32, 6 * 32, 7 * 32,
954 4 * 64, 5 * 64, 6 * 64, 7 * 64,
955 4 * 128, 5 * 128, 6 * 128, 7 * 128,
956 P2ALIGN(8192 / 7, 64),
957 P2ALIGN(8192 / 6, 64),
958 P2ALIGN(8192 / 5, 64),
959 P2ALIGN(8192 / 4, 64),
960 P2ALIGN(8192 / 3, 64),
961 P2ALIGN(8192 / 2, 64),
962 };
963
964 static const int kmem_big_alloc_sizes[] = {
965 2 * 4096, 3 * 4096,
966 2 * 8192, 3 * 8192,
967 4 * 8192, 5 * 8192, 6 * 8192, 7 * 8192,
968 8 * 8192, 9 * 8192, 10 * 8192, 11 * 8192,
969 12 * 8192, 13 * 8192, 14 * 8192, 15 * 8192,
970 16 * 8192
971 };
972
973 #define KMEM_MAXBUF 4096
974 #define KMEM_BIG_MAXBUF_32BIT 32768
975 #define KMEM_BIG_MAXBUF 131072
976
977 #define KMEM_BIG_MULTIPLE 4096 /* big_alloc_sizes must be a multiple */
978 #define KMEM_BIG_SHIFT 12 /* lg(KMEM_BIG_MULTIPLE) */
979
980 static kmem_cache_t *kmem_alloc_table[KMEM_MAXBUF >> KMEM_ALIGN_SHIFT];
981 static kmem_cache_t *kmem_big_alloc_table[KMEM_BIG_MAXBUF >> KMEM_BIG_SHIFT];
982
983 #define KMEM_ALLOC_TABLE_MAX (KMEM_MAXBUF >> KMEM_ALIGN_SHIFT)
984 static size_t kmem_big_alloc_table_max = 0; /* # of filled elements */
985
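/*
 * Each entry is { mt_magsize, mt_align, mt_minbuf, mt_maxbuf }; see
 * kmem_magtype_t in <sys/kmem_impl.h>.
 */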
986 static kmem_magtype_t kmem_magtype[] = {
987 { 1, 8, 3200, 65536 },
988 { 3, 16, 256, 32768 },
989 { 7, 32, 64, 16384 },
990 { 15, 64, 0, 8192 },
991 { 31, 64, 0, 4096 },
992 { 47, 64, 0, 2048 },
993 { 63, 64, 0, 1024 },
994 { 95, 64, 0, 512 },
995 { 143, 64, 0, 0 },
996 };
997
998 static uint32_t kmem_reaping;
999 static uint32_t kmem_reaping_idspace;
1000
1001 /*
1002 * kmem tunables
1003 */
1004 clock_t kmem_reap_interval; /* cache reaping rate [15 * HZ ticks] */
1005 int kmem_depot_contention = 3; /* max failed tryenters per real interval */
1006 pgcnt_t kmem_reapahead = 0; /* start reaping N pages before pageout */
1007 int kmem_panic = 1; /* whether to panic on error */
1008 int kmem_logging = 1; /* kmem_log_enter() override */
1009 uint32_t kmem_mtbf = 0; /* mean time between failures [default: off] */
1010 size_t kmem_transaction_log_size; /* transaction log size [2% of memory] */
1011 size_t kmem_content_log_size; /* content log size [2% of memory] */
1012 size_t kmem_failure_log_size; /* failure log [4 pages per CPU] */
1013 size_t kmem_slab_log_size; /* slab create log [4 pages per CPU] */
1014 size_t kmem_zerosized_log_size; /* zero-sized log [4 pages per CPU] */
1015 size_t kmem_content_maxsave = 256; /* KMF_CONTENTS max bytes to log */
1016 size_t kmem_lite_minsize = 0; /* minimum buffer size for KMF_LITE */
1017 size_t kmem_lite_maxalign = 1024; /* maximum buffer alignment for KMF_LITE */
1018 int kmem_lite_pcs = 4; /* number of PCs to store in KMF_LITE mode */
1019 size_t kmem_maxverify; /* maximum bytes to inspect in debug routines */
1020 size_t kmem_minfirewall; /* hardware-enforced redzone threshold */
1021
1022 #ifdef DEBUG
1023 int kmem_warn_zerosized = 1; /* whether to warn on zero-sized KM_SLEEP */
1024 #else
1025 int kmem_warn_zerosized = 0; /* whether to warn on zero-sized KM_SLEEP */
1026 #endif
1027
1028 int kmem_panic_zerosized = 0; /* whether to panic on zero-sized KM_SLEEP */
1029
1030 #ifdef _LP64
1031 size_t kmem_max_cached = KMEM_BIG_MAXBUF; /* maximum kmem_alloc cache */
1032 #else
1033 size_t kmem_max_cached = KMEM_BIG_MAXBUF_32BIT; /* maximum kmem_alloc cache */
1034 #endif
1035
1036 #ifdef DEBUG
1037 int kmem_flags = KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE | KMF_CONTENTS;
1038 #else
1039 int kmem_flags = 0;
1040 #endif
1041 int kmem_ready;
1042
1043 static kmem_cache_t *kmem_slab_cache;
1044 static kmem_cache_t *kmem_bufctl_cache;
1045 static kmem_cache_t *kmem_bufctl_audit_cache;
1046
1047 static kmutex_t kmem_cache_lock; /* inter-cache linkage only */
1048 static list_t kmem_caches;
1049
1050 static taskq_t *kmem_taskq;
1051 static kmutex_t kmem_flags_lock;
1052 static vmem_t *kmem_metadata_arena;
1053 static vmem_t *kmem_msb_arena; /* arena for metadata caches */
1054 static vmem_t *kmem_cache_arena;
1055 static vmem_t *kmem_hash_arena;
1056 static vmem_t *kmem_log_arena;
1057 static vmem_t *kmem_oversize_arena;
1058 static vmem_t *kmem_va_arena;
1059 static vmem_t *kmem_default_arena;
1060 static vmem_t *kmem_firewall_va_arena;
1061 static vmem_t *kmem_firewall_arena;
1062
1063 static int kmem_zerosized; /* # of zero-sized allocs */
1064
1065 /*
1066 * kmem slab consolidator thresholds (tunables)
1067 */
1068 size_t kmem_frag_minslabs = 101; /* minimum total slabs */
1069 size_t kmem_frag_numer = 1; /* free buffers (numerator) */
1070 size_t kmem_frag_denom = KMEM_VOID_FRACTION; /* buffers (denominator) */
1071 /*
1072 * Maximum number of slabs from which to move buffers during a single
1073 * maintenance interval while the system is not low on memory.
1074 */
1075 size_t kmem_reclaim_max_slabs = 1;
1076 /*
1077 * Number of slabs to scan backwards from the end of the partial slab list
1078 * when searching for buffers to relocate.
1079 */
1080 size_t kmem_reclaim_scan_range = 12;
1081
1082 /* consolidator knobs */
1083 boolean_t kmem_move_noreap;
1084 boolean_t kmem_move_blocked;
1085 boolean_t kmem_move_fulltilt;
1086 boolean_t kmem_move_any_partial;
1087
1088 #ifdef DEBUG
1089 /*
1090 * kmem consolidator debug tunables:
1091 * Ensure code coverage by occasionally running the consolidator even when the
1092 * caches are not fragmented (they may never be). These intervals are mean time
1093 * in cache maintenance intervals (kmem_cache_update).
1094 */
1095 uint32_t kmem_mtb_move = 60; /* defrag 1 slab (~15min) */
1096 uint32_t kmem_mtb_reap = 1800; /* defrag all slabs (~7.5hrs) */
1097 #endif /* DEBUG */
1098
1099 static kmem_cache_t *kmem_defrag_cache;
1100 static kmem_cache_t *kmem_move_cache;
1101 static taskq_t *kmem_move_taskq;
1102
1103 static void kmem_cache_scan(kmem_cache_t *);
1104 static void kmem_cache_defrag(kmem_cache_t *);
1105 static void kmem_slab_prefill(kmem_cache_t *, kmem_slab_t *);
1106
1107
1108 kmem_log_header_t *kmem_transaction_log;
1109 kmem_log_header_t *kmem_content_log;
1110 kmem_log_header_t *kmem_failure_log;
1111 kmem_log_header_t *kmem_slab_log;
1112 kmem_log_header_t *kmem_zerosized_log;
1113
1114 static int kmem_lite_count; /* # of PCs in kmem_buftag_lite_t */
1115
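/*
 * Record the caller's PC in the KMF_LITE buftag's bt_history array,
 * shifting the existing entries down one slot so that the most recent
 * caller is always first.
 */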
1116 #define KMEM_BUFTAG_LITE_ENTER(bt, count, caller) \
1117 if ((count) > 0) { \
1118 pc_t *_s = ((kmem_buftag_lite_t *)(bt))->bt_history; \
1119 pc_t *_e; \
1120 /* memmove() the old entries down one notch */ \
1121 for (_e = &_s[(count) - 1]; _e > _s; _e--) \
1122 *_e = *(_e - 1); \
1123 *_s = (uintptr_t)(caller); \
1124 }
1125
1126 #define KMERR_MODIFIED 0 /* buffer modified while on freelist */
1127 #define KMERR_REDZONE 1 /* redzone violation (write past end of buf) */
1128 #define KMERR_DUPFREE 2 /* freed a buffer twice */
1129 #define KMERR_BADADDR 3 /* freed a bad (unallocated) address */
1130 #define KMERR_BADBUFTAG 4 /* buftag corrupted */
1131 #define KMERR_BADBUFCTL 5 /* bufctl corrupted */
1132 #define KMERR_BADCACHE 6 /* freed a buffer to the wrong cache */
1133 #define KMERR_BADSIZE 7 /* alloc size != free size */
1134 #define KMERR_BADBASE 8 /* buffer base address wrong */
1135
1136 struct {
1137 hrtime_t kmp_timestamp; /* timestamp of panic */
1138 int kmp_error; /* type of kmem error */
1139 void *kmp_buffer; /* buffer that induced panic */
1140 void *kmp_realbuf; /* real start address for buffer */
1141 kmem_cache_t *kmp_cache; /* buffer's cache according to client */
1142 kmem_cache_t *kmp_realcache; /* actual cache containing buffer */
1143 	kmem_slab_t *kmp_slab;	/* slab according to kmem_findslab() */
1144 kmem_bufctl_t *kmp_bufctl; /* bufctl */
1145 } kmem_panic_info;
1146
1147
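/*
 * Debug pattern helpers: copy_pattern() fills a buffer with the given
 * 64-bit pattern, verify_pattern() returns a pointer to the first 64-bit
 * word that does not match the pattern (or NULL if the buffer is intact),
 * and verify_and_copy_pattern() verifies the old pattern while replacing
 * it with the new one, restoring the old pattern up to the point of
 * mismatch if verification fails.
 */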
1148 static void
1149 copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
1150 {
1151 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1152 uint64_t *buf = buf_arg;
1153
1154 while (buf < bufend)
1155 *buf++ = pattern;
1156 }
1157
1158 static void *
1159 verify_pattern(uint64_t pattern, void *buf_arg, size_t size)
1160 {
1161 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1162 uint64_t *buf;
1163
1164 for (buf = buf_arg; buf < bufend; buf++)
1165 if (*buf != pattern)
1166 return (buf);
1167 return (NULL);
1168 }
1169
1170 static void *
1171 verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size)
1172 {
1173 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1174 uint64_t *buf;
1175
1176 for (buf = buf_arg; buf < bufend; buf++) {
1177 if (*buf != old) {
1178 copy_pattern(old, buf_arg,
1179 (char *)buf - (char *)buf_arg);
1180 return (buf);
1181 }
1182 *buf = new;
1183 }
1184
1185 return (NULL);
1186 }
1187
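/*
 * Apply func to every cache on the global kmem_caches list, either inline
 * or, if tq is non-NULL, dispatched to that taskq as a separate task per
 * cache.  kmem_cache_applyall_id() below is identical except that it only
 * visits caches created with KMC_IDENTIFIER.
 */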
1188 static void
1189 kmem_cache_applyall(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
1190 {
1191 kmem_cache_t *cp;
1192
1193 mutex_enter(&kmem_cache_lock);
1194 for (cp = list_head(&kmem_caches); cp != NULL;
1195 cp = list_next(&kmem_caches, cp))
1196 if (tq != NULL)
1197 (void) taskq_dispatch(tq, (task_func_t *)func, cp,
1198 tqflag);
1199 else
1200 func(cp);
1201 mutex_exit(&kmem_cache_lock);
1202 }
1203
1204 static void
1205 kmem_cache_applyall_id(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
1206 {
1207 kmem_cache_t *cp;
1208
1209 mutex_enter(&kmem_cache_lock);
1210 for (cp = list_head(&kmem_caches); cp != NULL;
1211 cp = list_next(&kmem_caches, cp)) {
1212 if (!(cp->cache_cflags & KMC_IDENTIFIER))
1213 continue;
1214 if (tq != NULL)
1215 (void) taskq_dispatch(tq, (task_func_t *)func, cp,
1216 tqflag);
1217 else
1218 func(cp);
1219 }
1220 mutex_exit(&kmem_cache_lock);
1221 }
1222
1223 /*
1224 * Debugging support. Given a buffer address, find its slab.
1225 */
1226 static kmem_slab_t *
1227 kmem_findslab(kmem_cache_t *cp, void *buf)
1228 {
1229 kmem_slab_t *sp;
1230
1231 mutex_enter(&cp->cache_lock);
1232 for (sp = list_head(&cp->cache_complete_slabs); sp != NULL;
1233 sp = list_next(&cp->cache_complete_slabs, sp)) {
1234 if (KMEM_SLAB_MEMBER(sp, buf)) {
1235 mutex_exit(&cp->cache_lock);
1236 return (sp);
1237 }
1238 }
1239 for (sp = avl_first(&cp->cache_partial_slabs); sp != NULL;
1240 sp = AVL_NEXT(&cp->cache_partial_slabs, sp)) {
1241 if (KMEM_SLAB_MEMBER(sp, buf)) {
1242 mutex_exit(&cp->cache_lock);
1243 return (sp);
1244 }
1245 }
1246 mutex_exit(&cp->cache_lock);
1247
1248 return (NULL);
1249 }
1250
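/*
 * Report a kmem error of the given type involving buffer bufarg from cache
 * cparg: capture everything we can determine about the buffer in
 * kmem_panic_info, print a description to the console, and then either
 * panic or enter the debugger depending on kmem_panic.
 */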
1251 static void
1252 kmem_error(int error, kmem_cache_t *cparg, void *bufarg)
1253 {
1254 kmem_buftag_t *btp = NULL;
1255 kmem_bufctl_t *bcp = NULL;
1256 kmem_cache_t *cp = cparg;
1257 kmem_slab_t *sp;
1258 uint64_t *off;
1259 void *buf = bufarg;
1260
1261 kmem_logging = 0; /* stop logging when a bad thing happens */
1262
1263 kmem_panic_info.kmp_timestamp = gethrtime();
1264
1265 sp = kmem_findslab(cp, buf);
1266 if (sp == NULL) {
1267 for (cp = list_tail(&kmem_caches); cp != NULL;
1268 cp = list_prev(&kmem_caches, cp)) {
1269 if ((sp = kmem_findslab(cp, buf)) != NULL)
1270 break;
1271 }
1272 }
1273
1274 if (sp == NULL) {
1275 cp = NULL;
1276 error = KMERR_BADADDR;
1277 } else {
1278 if (cp != cparg)
1279 error = KMERR_BADCACHE;
1280 else
1281 buf = (char *)bufarg - ((uintptr_t)bufarg -
1282 (uintptr_t)sp->slab_base) % cp->cache_chunksize;
1283 if (buf != bufarg)
1284 error = KMERR_BADBASE;
1285 if (cp->cache_flags & KMF_BUFTAG)
1286 btp = KMEM_BUFTAG(cp, buf);
1287 if (cp->cache_flags & KMF_HASH) {
1288 mutex_enter(&cp->cache_lock);
1289 for (bcp = *KMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next)
1290 if (bcp->bc_addr == buf)
1291 break;
1292 mutex_exit(&cp->cache_lock);
1293 if (bcp == NULL && btp != NULL)
1294 bcp = btp->bt_bufctl;
1295 if (kmem_findslab(cp->cache_bufctl_cache, bcp) ==
1296 NULL || P2PHASE((uintptr_t)bcp, KMEM_ALIGN) ||
1297 bcp->bc_addr != buf) {
1298 error = KMERR_BADBUFCTL;
1299 bcp = NULL;
1300 }
1301 }
1302 }
1303
1304 kmem_panic_info.kmp_error = error;
1305 kmem_panic_info.kmp_buffer = bufarg;
1306 kmem_panic_info.kmp_realbuf = buf;
1307 kmem_panic_info.kmp_cache = cparg;
1308 kmem_panic_info.kmp_realcache = cp;
1309 kmem_panic_info.kmp_slab = sp;
1310 kmem_panic_info.kmp_bufctl = bcp;
1311
1312 printf("kernel memory allocator: ");
1313
1314 switch (error) {
1315
1316 case KMERR_MODIFIED:
1317 printf("buffer modified after being freed\n");
1318 off = verify_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1319 if (off == NULL) /* shouldn't happen */
1320 off = buf;
1321 printf("modification occurred at offset 0x%lx "
1322 "(0x%llx replaced by 0x%llx)\n",
1323 (uintptr_t)off - (uintptr_t)buf,
1324 (longlong_t)KMEM_FREE_PATTERN, (longlong_t)*off);
1325 break;
1326
1327 case KMERR_REDZONE:
1328 printf("redzone violation: write past end of buffer\n");
1329 break;
1330
1331 case KMERR_BADADDR:
1332 printf("invalid free: buffer not in cache\n");
1333 break;
1334
1335 case KMERR_DUPFREE:
1336 printf("duplicate free: buffer freed twice\n");
1337 break;
1338
1339 case KMERR_BADBUFTAG:
1340 printf("boundary tag corrupted\n");
1341 printf("bcp ^ bxstat = %lx, should be %lx\n",
1342 (intptr_t)btp->bt_bufctl ^ btp->bt_bxstat,
1343 KMEM_BUFTAG_FREE);
1344 break;
1345
1346 case KMERR_BADBUFCTL:
1347 printf("bufctl corrupted\n");
1348 break;
1349
1350 case KMERR_BADCACHE:
1351 printf("buffer freed to wrong cache\n");
1352 printf("buffer was allocated from %s,\n", cp->cache_name);
1353 printf("caller attempting free to %s.\n", cparg->cache_name);
1354 break;
1355
1356 case KMERR_BADSIZE:
1357 printf("bad free: free size (%u) != alloc size (%u)\n",
1358 KMEM_SIZE_DECODE(((uint32_t *)btp)[0]),
1359 KMEM_SIZE_DECODE(((uint32_t *)btp)[1]));
1360 break;
1361
1362 case KMERR_BADBASE:
1363 printf("bad free: free address (%p) != alloc address (%p)\n",
1364 bufarg, buf);
1365 break;
1366 }
1367
1368 printf("buffer=%p bufctl=%p cache: %s\n",
1369 bufarg, (void *)bcp, cparg->cache_name);
1370
1371 if (bcp != NULL && (cp->cache_flags & KMF_AUDIT) &&
1372 error != KMERR_BADBUFCTL) {
1373 int d;
1374 timestruc_t ts;
1375 kmem_bufctl_audit_t *bcap = (kmem_bufctl_audit_t *)bcp;
1376
1377 hrt2ts(kmem_panic_info.kmp_timestamp - bcap->bc_timestamp, &ts);
1378 printf("previous transaction on buffer %p:\n", buf);
1379 printf("thread=%p time=T-%ld.%09ld slab=%p cache: %s\n",
1380 (void *)bcap->bc_thread, ts.tv_sec, ts.tv_nsec,
1381 (void *)sp, cp->cache_name);
1382 for (d = 0; d < MIN(bcap->bc_depth, KMEM_STACK_DEPTH); d++) {
1383 ulong_t off;
1384 char *sym = kobj_getsymname(bcap->bc_stack[d], &off);
1385 printf("%s+%lx\n", sym ? sym : "?", off);
1386 }
1387 }
1388 if (kmem_panic > 0)
1389 panic("kernel heap corruption detected");
1390 if (kmem_panic == 0)
1391 debug_enter(NULL);
1392 kmem_logging = 1; /* resume logging */
1393 }
1394
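/*
 * Create a log of at least logsize bytes, divided into 4 * max_ncpus
 * chunks.  Each CPU owns one chunk at a time and trades it for a fresh
 * chunk from the global free list when it fills (see kmem_log_enter()),
 * so most log writes take only the per-CPU lock.
 */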
1395 static kmem_log_header_t *
1396 kmem_log_init(size_t logsize)
1397 {
1398 kmem_log_header_t *lhp;
1399 int nchunks = 4 * max_ncpus;
1400 size_t lhsize = (size_t)&((kmem_log_header_t *)0)->lh_cpu[max_ncpus];
1401 int i;
1402
1403 /*
1404 * Make sure that lhp->lh_cpu[] is nicely aligned
1405 * to prevent false sharing of cache lines.
1406 */
1407 lhsize = P2ROUNDUP(lhsize, KMEM_ALIGN);
1408 lhp = vmem_xalloc(kmem_log_arena, lhsize, 64, P2NPHASE(lhsize, 64), 0,
1409 NULL, NULL, VM_SLEEP);
1410 bzero(lhp, lhsize);
1411
1412 mutex_init(&lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL);
1413 lhp->lh_nchunks = nchunks;
1414 lhp->lh_chunksize = P2ROUNDUP(logsize / nchunks + 1, PAGESIZE);
1415 lhp->lh_base = vmem_alloc(kmem_log_arena,
1416 lhp->lh_chunksize * nchunks, VM_SLEEP);
1417 lhp->lh_free = vmem_alloc(kmem_log_arena,
1418 nchunks * sizeof (int), VM_SLEEP);
1419 bzero(lhp->lh_base, lhp->lh_chunksize * nchunks);
1420
1421 for (i = 0; i < max_ncpus; i++) {
1422 kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[i];
1423 mutex_init(&clhp->clh_lock, NULL, MUTEX_DEFAULT, NULL);
1424 clhp->clh_chunk = i;
1425 }
1426
1427 for (i = max_ncpus; i < nchunks; i++)
1428 lhp->lh_free[i] = i;
1429
1430 lhp->lh_head = max_ncpus;
1431 lhp->lh_tail = 0;
1432
1433 return (lhp);
1434 }
1435
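/*
 * Copy size bytes of data into the current CPU's chunk of lhp's log.
 * If the chunk lacks room, the oldest chunk on the global free list is
 * recycled to this CPU.  Returns the address of the logged copy, or NULL
 * if logging is disabled or the system is panicking.
 */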
1436 static void *
1437 kmem_log_enter(kmem_log_header_t *lhp, void *data, size_t size)
1438 {
1439 void *logspace;
1440 kmem_cpu_log_header_t *clhp;
1441
1442 if (lhp == NULL || kmem_logging == 0 || panicstr)
1443 return (NULL);
1444
1445 clhp = &lhp->lh_cpu[CPU->cpu_seqid];
1446
1447 mutex_enter(&clhp->clh_lock);
1448 clhp->clh_hits++;
1449 if (size > clhp->clh_avail) {
1450 mutex_enter(&lhp->lh_lock);
1451 lhp->lh_hits++;
1452 lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk;
1453 lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks;
1454 clhp->clh_chunk = lhp->lh_free[lhp->lh_head];
1455 lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks;
1456 clhp->clh_current = lhp->lh_base +
1457 clhp->clh_chunk * lhp->lh_chunksize;
1458 clhp->clh_avail = lhp->lh_chunksize;
1459 if (size > lhp->lh_chunksize)
1460 size = lhp->lh_chunksize;
1461 mutex_exit(&lhp->lh_lock);
1462 }
1463 logspace = clhp->clh_current;
1464 clhp->clh_current += size;
1465 clhp->clh_avail -= size;
1466 bcopy(data, logspace, size);
1467 mutex_exit(&clhp->clh_lock);
1468 return (logspace);
1469 }
1470
1471 #define KMEM_AUDIT(lp, cp, bcp) \
1472 { \
1473 kmem_bufctl_audit_t *_bcp = (kmem_bufctl_audit_t *)(bcp); \
1474 _bcp->bc_timestamp = gethrtime(); \
1475 _bcp->bc_thread = curthread; \
1476 _bcp->bc_depth = getpcstack(_bcp->bc_stack, KMEM_STACK_DEPTH); \
1477 _bcp->bc_lastlog = kmem_log_enter((lp), _bcp, sizeof (*_bcp)); \
1478 }
1479
1480 static void
1481 kmem_log_event(kmem_log_header_t *lp, kmem_cache_t *cp,
1482 kmem_slab_t *sp, void *addr)
1483 {
1484 kmem_bufctl_audit_t bca;
1485
1486 bzero(&bca, sizeof (kmem_bufctl_audit_t));
1487 bca.bc_addr = addr;
1488 bca.bc_slab = sp;
1489 bca.bc_cache = cp;
1490 KMEM_AUDIT(lp, cp, &bca);
1491 }
1492
1493 /*
1494 * Create a new slab for cache cp.
1495 */
1496 static kmem_slab_t *
1497 kmem_slab_create(kmem_cache_t *cp, int kmflag)
1498 {
1499 size_t slabsize = cp->cache_slabsize;
1500 size_t chunksize = cp->cache_chunksize;
1501 int cache_flags = cp->cache_flags;
1502 size_t color, chunks;
1503 char *buf, *slab;
1504 kmem_slab_t *sp;
1505 kmem_bufctl_t *bcp;
1506 vmem_t *vmp = cp->cache_arena;
1507
1508 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1509
1510 color = cp->cache_color + cp->cache_align;
1511 if (color > cp->cache_maxcolor)
1512 color = cp->cache_mincolor;
1513 cp->cache_color = color;
1514
1515 slab = vmem_alloc(vmp, slabsize, kmflag & KM_VMFLAGS);
1516
1517 if (slab == NULL)
1518 goto vmem_alloc_failure;
1519
1520 ASSERT(P2PHASE((uintptr_t)slab, vmp->vm_quantum) == 0);
1521
1522 /*
1523 * Reverify what was already checked in kmem_cache_set_move(), since the
1524 * consolidator depends (for correctness) on slabs being initialized
1525 * with the 0xbaddcafe memory pattern (setting a low order bit usable by
1526 * clients to distinguish uninitialized memory from known objects).
1527 */
1528 ASSERT((cp->cache_move == NULL) || !(cp->cache_cflags & KMC_NOTOUCH));
1529 if (!(cp->cache_cflags & KMC_NOTOUCH))
1530 copy_pattern(KMEM_UNINITIALIZED_PATTERN, slab, slabsize);
1531
1532 if (cache_flags & KMF_HASH) {
1533 if ((sp = kmem_cache_alloc(kmem_slab_cache, kmflag)) == NULL)
1534 goto slab_alloc_failure;
1535 chunks = (slabsize - color) / chunksize;
1536 } else {
1537 sp = KMEM_SLAB(cp, slab);
1538 chunks = (slabsize - sizeof (kmem_slab_t) - color) / chunksize;
1539 }
1540
1541 sp->slab_cache = cp;
1542 sp->slab_head = NULL;
1543 sp->slab_refcnt = 0;
1544 sp->slab_base = buf = slab + color;
1545 sp->slab_chunks = chunks;
1546 sp->slab_stuck_offset = (uint32_t)-1;
1547 sp->slab_later_count = 0;
1548 sp->slab_flags = 0;
1549
1550 ASSERT(chunks > 0);
1551 while (chunks-- != 0) {
1552 if (cache_flags & KMF_HASH) {
1553 bcp = kmem_cache_alloc(cp->cache_bufctl_cache, kmflag);
1554 if (bcp == NULL)
1555 goto bufctl_alloc_failure;
1556 if (cache_flags & KMF_AUDIT) {
1557 kmem_bufctl_audit_t *bcap =
1558 (kmem_bufctl_audit_t *)bcp;
1559 bzero(bcap, sizeof (kmem_bufctl_audit_t));
1560 bcap->bc_cache = cp;
1561 }
1562 bcp->bc_addr = buf;
1563 bcp->bc_slab = sp;
1564 } else {
1565 bcp = KMEM_BUFCTL(cp, buf);
1566 }
1567 if (cache_flags & KMF_BUFTAG) {
1568 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1569 btp->bt_redzone = KMEM_REDZONE_PATTERN;
1570 btp->bt_bufctl = bcp;
1571 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
1572 if (cache_flags & KMF_DEADBEEF) {
1573 copy_pattern(KMEM_FREE_PATTERN, buf,
1574 cp->cache_verify);
1575 }
1576 }
1577 bcp->bc_next = sp->slab_head;
1578 sp->slab_head = bcp;
1579 buf += chunksize;
1580 }
1581
1582 kmem_log_event(kmem_slab_log, cp, sp, slab);
1583
1584 return (sp);
1585
1586 bufctl_alloc_failure:
1587
1588 while ((bcp = sp->slab_head) != NULL) {
1589 sp->slab_head = bcp->bc_next;
1590 kmem_cache_free(cp->cache_bufctl_cache, bcp);
1591 }
1592 kmem_cache_free(kmem_slab_cache, sp);
1593
1594 slab_alloc_failure:
1595
1596 vmem_free(vmp, slab, slabsize);
1597
1598 vmem_alloc_failure:
1599
1600 kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1601 atomic_inc_64(&cp->cache_alloc_fail);
1602
1603 return (NULL);
1604 }
1605
1606 /*
1607 * Destroy a slab.
1608 */
1609 static void
1610 kmem_slab_destroy(kmem_cache_t *cp, kmem_slab_t *sp)
1611 {
1612 vmem_t *vmp = cp->cache_arena;
1613 void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum);
1614
1615 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1616 ASSERT(sp->slab_refcnt == 0);
1617
1618 if (cp->cache_flags & KMF_HASH) {
1619 kmem_bufctl_t *bcp;
1620 while ((bcp = sp->slab_head) != NULL) {
1621 sp->slab_head = bcp->bc_next;
1622 kmem_cache_free(cp->cache_bufctl_cache, bcp);
1623 }
1624 kmem_cache_free(kmem_slab_cache, sp);
1625 }
1626 vmem_free(vmp, slab, cp->cache_slabsize);
1627 }
1628
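/*
 * Allocate one buffer from slab sp, which must either be newly created or
 * be the first slab on the cache's partial-slab AVL tree.  For KMF_HASH
 * caches the buffer is entered in the allocated-address hash table, and
 * the slab is moved to the complete-slab list once its last buffer has
 * been handed out.  Called with cache_lock held.
 */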
1629 static void *
1630 kmem_slab_alloc_impl(kmem_cache_t *cp, kmem_slab_t *sp, boolean_t prefill)
1631 {
1632 kmem_bufctl_t *bcp, **hash_bucket;
1633 void *buf;
1634 boolean_t new_slab = (sp->slab_refcnt == 0);
1635
1636 ASSERT(MUTEX_HELD(&cp->cache_lock));
1637 /*
1638 * kmem_slab_alloc() drops cache_lock when it creates a new slab, so we
1639 * can't ASSERT(avl_is_empty(&cp->cache_partial_slabs)) here when the
1640 * slab is newly created.
1641 */
1642 ASSERT(new_slab || (KMEM_SLAB_IS_PARTIAL(sp) &&
1643 (sp == avl_first(&cp->cache_partial_slabs))));
1644 ASSERT(sp->slab_cache == cp);
1645
1646 cp->cache_slab_alloc++;
1647 cp->cache_bufslab--;
1648 sp->slab_refcnt++;
1649
1650 bcp = sp->slab_head;
1651 sp->slab_head = bcp->bc_next;
1652
1653 if (cp->cache_flags & KMF_HASH) {
1654 /*
1655 * Add buffer to allocated-address hash table.
1656 */
1657 buf = bcp->bc_addr;
1658 hash_bucket = KMEM_HASH(cp, buf);
1659 bcp->bc_next = *hash_bucket;
1660 *hash_bucket = bcp;
1661 if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1662 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1663 }
1664 } else {
1665 buf = KMEM_BUF(cp, bcp);
1666 }
1667
1668 ASSERT(KMEM_SLAB_MEMBER(sp, buf));
1669
1670 if (sp->slab_head == NULL) {
1671 ASSERT(KMEM_SLAB_IS_ALL_USED(sp));
1672 if (new_slab) {
1673 ASSERT(sp->slab_chunks == 1);
1674 } else {
1675 ASSERT(sp->slab_chunks > 1); /* the slab was partial */
1676 avl_remove(&cp->cache_partial_slabs, sp);
1677 sp->slab_later_count = 0; /* clear history */
1678 sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
1679 sp->slab_stuck_offset = (uint32_t)-1;
1680 }
1681 list_insert_head(&cp->cache_complete_slabs, sp);
1682 cp->cache_complete_slab_count++;
1683 return (buf);
1684 }
1685
1686 ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
1687 /*
1688 * Peek to see if the magazine layer is enabled before
1689 * we prefill. We're not holding the cpu cache lock,
1690 * so the peek could be wrong, but there's no harm in it.
1691 */
1692 if (new_slab && prefill && (cp->cache_flags & KMF_PREFILL) &&
1693 (KMEM_CPU_CACHE(cp)->cc_magsize != 0)) {
1694 kmem_slab_prefill(cp, sp);
1695 return (buf);
1696 }
1697
1698 if (new_slab) {
1699 avl_add(&cp->cache_partial_slabs, sp);
1700 return (buf);
1701 }
1702
1703 /*
1704 * The slab is now more allocated than it was, so the
1705 * order remains unchanged.
1706 */
1707 ASSERT(!avl_update(&cp->cache_partial_slabs, sp));
1708 return (buf);
1709 }
1710
1711 /*
1712 * Allocate a raw (unconstructed) buffer from cp's slab layer.
1713 */
1714 static void *
1715 kmem_slab_alloc(kmem_cache_t *cp, int kmflag)
1716 {
1717 kmem_slab_t *sp;
1718 void *buf;
1719 boolean_t test_destructor;
1720
1721 mutex_enter(&cp->cache_lock);
1722 test_destructor = (cp->cache_slab_alloc == 0);
1723 sp = avl_first(&cp->cache_partial_slabs);
1724 if (sp == NULL) {
1725 ASSERT(cp->cache_bufslab == 0);
1726
1727 /*
1728 * The freelist is empty. Create a new slab.
1729 */
1730 mutex_exit(&cp->cache_lock);
1731 if ((sp = kmem_slab_create(cp, kmflag)) == NULL) {
1732 return (NULL);
1733 }
1734 mutex_enter(&cp->cache_lock);
1735 cp->cache_slab_create++;
1736 if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax)
1737 cp->cache_bufmax = cp->cache_buftotal;
1738 cp->cache_bufslab += sp->slab_chunks;
1739 }
1740
1741 buf = kmem_slab_alloc_impl(cp, sp, B_TRUE);
1742 ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1743 (cp->cache_complete_slab_count +
1744 avl_numnodes(&cp->cache_partial_slabs) +
1745 (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1746 mutex_exit(&cp->cache_lock);
1747
1748 if (test_destructor && cp->cache_destructor != NULL) {
1749 /*
1750 * On the first kmem_slab_alloc(), assert that it is valid to
1751 * call the destructor on a newly constructed object without any
1752 * client involvement.
1753 */
1754 if ((cp->cache_constructor == NULL) ||
1755 cp->cache_constructor(buf, cp->cache_private,
1756 kmflag) == 0) {
1757 cp->cache_destructor(buf, cp->cache_private);
1758 }
1759 copy_pattern(KMEM_UNINITIALIZED_PATTERN, buf,
1760 cp->cache_bufsize);
1761 if (cp->cache_flags & KMF_DEADBEEF) {
1762 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1763 }
1764 }
1765
1766 return (buf);
1767 }
1768
1769 static void kmem_slab_move_yes(kmem_cache_t *, kmem_slab_t *, void *);
1770
1771 /*
1772 * Free a raw (unconstructed) buffer to cp's slab layer.
1773 */
1774 static void
1775 kmem_slab_free(kmem_cache_t *cp, void *buf)
1776 {
1777 kmem_slab_t *sp;
1778 kmem_bufctl_t *bcp, **prev_bcpp;
1779
1780 ASSERT(buf != NULL);
1781
1782 mutex_enter(&cp->cache_lock);
1783 cp->cache_slab_free++;
1784
1785 if (cp->cache_flags & KMF_HASH) {
1786 /*
1787 * Look up buffer in allocated-address hash table.
1788 */
1789 prev_bcpp = KMEM_HASH(cp, buf);
1790 while ((bcp = *prev_bcpp) != NULL) {
1791 if (bcp->bc_addr == buf) {
1792 *prev_bcpp = bcp->bc_next;
1793 sp = bcp->bc_slab;
1794 break;
1795 }
1796 cp->cache_lookup_depth++;
1797 prev_bcpp = &bcp->bc_next;
1798 }
1799 } else {
1800 bcp = KMEM_BUFCTL(cp, buf);
1801 sp = KMEM_SLAB(cp, buf);
1802 }
1803
1804 if (bcp == NULL || sp->slab_cache != cp || !KMEM_SLAB_MEMBER(sp, buf)) {
1805 mutex_exit(&cp->cache_lock);
1806 kmem_error(KMERR_BADADDR, cp, buf);
1807 return;
1808 }
1809
1810 if (KMEM_SLAB_OFFSET(sp, buf) == sp->slab_stuck_offset) {
1811 /*
1812 * If this is the buffer that prevented the consolidator from
1813 * clearing the slab, we can reset the slab flags now that the
1814 * buffer is freed. (It makes sense to do this in
1815 * kmem_cache_free(), where the client gives up ownership of the
1816 * buffer, but on the hot path the test is too expensive.)
1817 */
1818 kmem_slab_move_yes(cp, sp, buf);
1819 }
1820
1821 if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1822 if (cp->cache_flags & KMF_CONTENTS)
1823 ((kmem_bufctl_audit_t *)bcp)->bc_contents =
1824 kmem_log_enter(kmem_content_log, buf,
1825 cp->cache_contents);
1826 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1827 }
1828
1829 bcp->bc_next = sp->slab_head;
1830 sp->slab_head = bcp;
1831
1832 cp->cache_bufslab++;
1833 ASSERT(sp->slab_refcnt >= 1);
1834
1835 if (--sp->slab_refcnt == 0) {
1836 /*
1837 * There are no outstanding allocations from this slab,
1838 * so we can reclaim the memory.
1839 */
1840 if (sp->slab_chunks == 1) {
1841 list_remove(&cp->cache_complete_slabs, sp);
1842 cp->cache_complete_slab_count--;
1843 } else {
1844 avl_remove(&cp->cache_partial_slabs, sp);
1845 }
1846
1847 cp->cache_buftotal -= sp->slab_chunks;
1848 cp->cache_bufslab -= sp->slab_chunks;
1849 /*
1850 * Defer releasing the slab to the virtual memory subsystem
1851 * while there is a pending move callback, since we guarantee
1852 * that buffers passed to the move callback have only been
1853 * touched by kmem or by the client itself. Since the memory
1854 * patterns baddcafe (uninitialized) and deadbeef (freed) both
1855 * set at least one of the two lowest order bits, the client can
1856 * test those bits in the move callback to determine whether or
1857 * not it knows about the buffer (assuming that the client also
1858 * sets one of those low order bits whenever it frees a buffer).
1859 */
1860 if (cp->cache_defrag == NULL ||
1861 (avl_is_empty(&cp->cache_defrag->kmd_moves_pending) &&
1862 !(sp->slab_flags & KMEM_SLAB_MOVE_PENDING))) {
1863 cp->cache_slab_destroy++;
1864 mutex_exit(&cp->cache_lock);
1865 kmem_slab_destroy(cp, sp);
1866 } else {
1867 list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
1868 /*
1869 * Slabs are inserted at both ends of the deadlist to
1870 * distinguish between slabs freed while move callbacks
1871 * are pending (list head) and a slab freed while the
1872 * lock is dropped in kmem_move_buffers() (list tail) so
1873 * that in both cases slab_destroy() is called from the
1874 * right context.
1875 */
1876 if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
1877 list_insert_tail(deadlist, sp);
1878 } else {
1879 list_insert_head(deadlist, sp);
1880 }
1881 cp->cache_defrag->kmd_deadcount++;
1882 mutex_exit(&cp->cache_lock);
1883 }
1884 return;
1885 }
1886
1887 if (bcp->bc_next == NULL) {
1888 /* Transition the slab from completely allocated to partial. */
1889 ASSERT(sp->slab_refcnt == (sp->slab_chunks - 1));
1890 ASSERT(sp->slab_chunks > 1);
1891 list_remove(&cp->cache_complete_slabs, sp);
1892 cp->cache_complete_slab_count--;
1893 avl_add(&cp->cache_partial_slabs, sp);
1894 } else {
1895 (void) avl_update_gt(&cp->cache_partial_slabs, sp);
1896 }
1897
1898 ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1899 (cp->cache_complete_slab_count +
1900 avl_numnodes(&cp->cache_partial_slabs) +
1901 (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1902 mutex_exit(&cp->cache_lock);
1903 }
1904
1905 /*
1906 * Return -1 if kmem_error, 1 if constructor fails, 0 if successful.
1907 */
1908 static int
1909 kmem_cache_alloc_debug(kmem_cache_t *cp, void *buf, int kmflag, int construct,
1910 caddr_t caller)
1911 {
1912 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1913 kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
1914 uint32_t mtbf;
1915
1916 if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
1917 kmem_error(KMERR_BADBUFTAG, cp, buf);
1918 return (-1);
1919 }
1920
1921 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_ALLOC;
1922
1923 if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
1924 kmem_error(KMERR_BADBUFCTL, cp, buf);
1925 return (-1);
1926 }
1927
1928 if (cp->cache_flags & KMF_DEADBEEF) {
1929 if (!construct && (cp->cache_flags & KMF_LITE)) {
1930 if (*(uint64_t *)buf != KMEM_FREE_PATTERN) {
1931 kmem_error(KMERR_MODIFIED, cp, buf);
1932 return (-1);
1933 }
1934 if (cp->cache_constructor != NULL)
1935 *(uint64_t *)buf = btp->bt_redzone;
1936 else
1937 *(uint64_t *)buf = KMEM_UNINITIALIZED_PATTERN;
1938 } else {
1939 construct = 1;
1940 if (verify_and_copy_pattern(KMEM_FREE_PATTERN,
1941 KMEM_UNINITIALIZED_PATTERN, buf,
1942 cp->cache_verify)) {
1943 kmem_error(KMERR_MODIFIED, cp, buf);
1944 return (-1);
1945 }
1946 }
1947 }
1948 btp->bt_redzone = KMEM_REDZONE_PATTERN;
1949
1950 if ((mtbf = kmem_mtbf | cp->cache_mtbf) != 0 &&
1951 gethrtime() % mtbf == 0 &&
1952 (kmflag & (KM_NOSLEEP | KM_PANIC)) == KM_NOSLEEP) {
1953 kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1954 if (!construct && cp->cache_destructor != NULL)
1955 cp->cache_destructor(buf, cp->cache_private);
1956 } else {
1957 mtbf = 0;
1958 }
1959
1960 if (mtbf || (construct && cp->cache_constructor != NULL &&
1961 cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) {
1962 atomic_inc_64(&cp->cache_alloc_fail);
1963 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
1964 if (cp->cache_flags & KMF_DEADBEEF)
1965 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1966 kmem_slab_free(cp, buf);
1967 return (1);
1968 }
1969
1970 if (cp->cache_flags & KMF_AUDIT) {
1971 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1972 }
1973
1974 if ((cp->cache_flags & KMF_LITE) &&
1975 !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
1976 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
1977 }
1978
1979 return (0);
1980 }
1981
1982 static int
1983 kmem_cache_free_debug(kmem_cache_t *cp, void *buf, caddr_t caller)
1984 {
1985 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1986 kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
1987 kmem_slab_t *sp;
1988
1989 if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_ALLOC)) {
1990 if (btp->bt_bxstat == ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
1991 kmem_error(KMERR_DUPFREE, cp, buf);
1992 return (-1);
1993 }
1994 sp = kmem_findslab(cp, buf);
1995 if (sp == NULL || sp->slab_cache != cp)
1996 kmem_error(KMERR_BADADDR, cp, buf);
1997 else
1998 kmem_error(KMERR_REDZONE, cp, buf);
1999 return (-1);
2000 }
2001
2002 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
2003
2004 if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
2005 kmem_error(KMERR_BADBUFCTL, cp, buf);
2006 return (-1);
2007 }
2008
2009 if (btp->bt_redzone != KMEM_REDZONE_PATTERN) {
2010 kmem_error(KMERR_REDZONE, cp, buf);
2011 return (-1);
2012 }
2013
2014 if (cp->cache_flags & KMF_AUDIT) {
2015 if (cp->cache_flags & KMF_CONTENTS)
2016 bcp->bc_contents = kmem_log_enter(kmem_content_log,
2017 buf, cp->cache_contents);
2018 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
2019 }
2020
2021 if ((cp->cache_flags & KMF_LITE) &&
2022 !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
2023 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
2024 }
2025
2026 if (cp->cache_flags & KMF_DEADBEEF) {
2027 if (cp->cache_flags & KMF_LITE)
2028 btp->bt_redzone = *(uint64_t *)buf;
2029 else if (cp->cache_destructor != NULL)
2030 cp->cache_destructor(buf, cp->cache_private);
2031
2032 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
2033 }
2034
2035 return (0);
2036 }
2037
2038 /*
2039 * Free each object in magazine mp to cp's slab layer, and free mp itself.
2040 */
2041 static void
2042 kmem_magazine_destroy(kmem_cache_t *cp, kmem_magazine_t *mp, int nrounds)
2043 {
2044 int round;
2045
2046 ASSERT(!list_link_active(&cp->cache_link) ||
2047 taskq_member(kmem_taskq, curthread));
2048
2049 for (round = 0; round < nrounds; round++) {
2050 void *buf = mp->mag_round[round];
2051
2052 if (cp->cache_flags & KMF_DEADBEEF) {
2053 if (verify_pattern(KMEM_FREE_PATTERN, buf,
2054 cp->cache_verify) != NULL) {
2055 kmem_error(KMERR_MODIFIED, cp, buf);
2056 continue;
2057 }
2058 if ((cp->cache_flags & KMF_LITE) &&
2059 cp->cache_destructor != NULL) {
2060 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2061 *(uint64_t *)buf = btp->bt_redzone;
2062 cp->cache_destructor(buf, cp->cache_private);
2063 *(uint64_t *)buf = KMEM_FREE_PATTERN;
2064 }
2065 } else if (cp->cache_destructor != NULL) {
2066 cp->cache_destructor(buf, cp->cache_private);
2067 }
2068
2069 kmem_slab_free(cp, buf);
2070 }
2071 ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2072 kmem_cache_free(cp->cache_magtype->mt_cache, mp);
2073 }
2074
2075 /*
2076 * Allocate a magazine from the depot.
2077 */
2078 static kmem_magazine_t *
2079 kmem_depot_alloc(kmem_cache_t *cp, kmem_maglist_t *mlp)
2080 {
2081 kmem_magazine_t *mp;
2082
2083 /*
2084 * If we can't get the depot lock without contention,
2085 * update our contention count. We use the depot
2086 * contention rate to determine whether we need to
2087 * increase the magazine size for better scalability.
2088 */
2089 if (!mutex_tryenter(&cp->cache_depot_lock)) {
2090 mutex_enter(&cp->cache_depot_lock);
2091 cp->cache_depot_contention++;
2092 }
2093
2094 if ((mp = mlp->ml_list) != NULL) {
2095 ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2096 mlp->ml_list = mp->mag_next;
2097 if (--mlp->ml_total < mlp->ml_min)
2098 mlp->ml_min = mlp->ml_total;
2099 mlp->ml_alloc++;
2100 }
2101
2102 mutex_exit(&cp->cache_depot_lock);
2103
2104 return (mp);
2105 }
2106
2107 /*
2108 * Free a magazine to the depot.
2109 */
2110 static void
2111 kmem_depot_free(kmem_cache_t *cp, kmem_maglist_t *mlp, kmem_magazine_t *mp)
2112 {
2113 mutex_enter(&cp->cache_depot_lock);
2114 ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2115 mp->mag_next = mlp->ml_list;
2116 mlp->ml_list = mp;
2117 mlp->ml_total++;
2118 mutex_exit(&cp->cache_depot_lock);
2119 }
2120
2121 /*
2122 * Update the working set statistics for cp's depot.
2123 */
2124 static void
2125 kmem_depot_ws_update(kmem_cache_t *cp)
2126 {
2127 mutex_enter(&cp->cache_depot_lock);
2128 cp->cache_full.ml_reaplimit = cp->cache_full.ml_min;
2129 cp->cache_full.ml_min = cp->cache_full.ml_total;
2130 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min;
2131 cp->cache_empty.ml_min = cp->cache_empty.ml_total;
2132 mutex_exit(&cp->cache_depot_lock);
2133 }
2134
2135 /*
2136 * Set the working set statistics for cp's depot to zero. (Everything is
2137 * eligible for reaping.)
2138 */
2139 static void
2140 kmem_depot_ws_zero(kmem_cache_t *cp)
2141 {
2142 mutex_enter(&cp->cache_depot_lock);
2143 cp->cache_full.ml_reaplimit = cp->cache_full.ml_total;
2144 cp->cache_full.ml_min = cp->cache_full.ml_total;
2145 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_total;
2146 cp->cache_empty.ml_min = cp->cache_empty.ml_total;
2147 mutex_exit(&cp->cache_depot_lock);
2148 }
2149
2150 /*
2151 * The number of bytes to reap before we call kpreempt(). The default (1MB)
2152 * causes us to preempt reaping up to hundreds of times per second. Using a
2153 * larger value (1GB) causes this to have virtually no effect.
2154 */
2155 size_t kmem_reap_preempt_bytes = 1024 * 1024;
2156
2157 /*
2158 * Reap all magazines that have fallen out of the depot's working set.
2159 */
2160 static void
2161 kmem_depot_ws_reap(kmem_cache_t *cp)
2162 {
2163 size_t bytes = 0;
2164 long reap;
2165 kmem_magazine_t *mp;
2166
2167 ASSERT(!list_link_active(&cp->cache_link) ||
2168 taskq_member(kmem_taskq, curthread));
2169
2170 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
2171 while (reap-- &&
2172 (mp = kmem_depot_alloc(cp, &cp->cache_full)) != NULL) {
2173 kmem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize);
2174 bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize;
2175 if (bytes > kmem_reap_preempt_bytes) {
2176 kpreempt(KPREEMPT_SYNC);
2177 bytes = 0;
2178 }
2179 }
2180
2181 reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min);
2182 while (reap-- &&
2183 (mp = kmem_depot_alloc(cp, &cp->cache_empty)) != NULL) {
2184 kmem_magazine_destroy(cp, mp, 0);
2185 bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize;
2186 if (bytes > kmem_reap_preempt_bytes) {
2187 kpreempt(KPREEMPT_SYNC);
2188 bytes = 0;
2189 }
2190 }
2191 }
2192
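/*
 * Make mp (holding the given number of rounds) the CPU cache's loaded
 * magazine, and demote the current loaded magazine to the previously
 * loaded slot.
 */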
2193 static void
2194 kmem_cpu_reload(kmem_cpu_cache_t *ccp, kmem_magazine_t *mp, int rounds)
2195 {
2196 ASSERT((ccp->cc_loaded == NULL && ccp->cc_rounds == -1) ||
2197 (ccp->cc_loaded && ccp->cc_rounds + rounds == ccp->cc_magsize));
2198 ASSERT(ccp->cc_magsize > 0);
2199
2200 ccp->cc_ploaded = ccp->cc_loaded;
2201 ccp->cc_prounds = ccp->cc_rounds;
2202 ccp->cc_loaded = mp;
2203 ccp->cc_rounds = rounds;
2204 }
2205
2206 /*
2207 * Intercept kmem alloc/free calls during crash dump in order to avoid
2208 * changing kmem state while memory is being saved to the dump device.
2209 * Otherwise, ::kmem_verify will report "corrupt buffers". Note that
2210 * there are no locks because only one CPU calls kmem during a crash
2211 * dump. To enable this feature, first create the associated vmem
2212 * arena with VMC_DUMPSAFE.
2213 */
2214 static void *kmem_dump_start; /* start of pre-reserved heap */
2215 static void *kmem_dump_end; /* end of heap area */
2216 static void *kmem_dump_curr; /* current free heap pointer */
2217 static size_t kmem_dump_size; /* size of heap area */
2218
2219 /* appended to each buf created in the pre-reserved heap */
2220 typedef struct kmem_dumpctl {
2221 void *kdc_next; /* cache dump free list linkage */
2222 } kmem_dumpctl_t;
2223
2224 #define KMEM_DUMPCTL(cp, buf) \
2225 ((kmem_dumpctl_t *)P2ROUNDUP((uintptr_t)(buf) + (cp)->cache_bufsize, \
2226 sizeof (void *)))
2227
2228 /* set non-zero for a full report */
2229 uint_t kmem_dump_verbose = 0;
2230
2231 /* stats for oversize heap */
2232 uint_t kmem_dump_oversize_allocs = 0;
2233 uint_t kmem_dump_oversize_max = 0;
2234
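/*
 * printf()-style helper used by kmem_dump_finish(): appends formatted
 * output at *pp, never writing past e, and advances *pp.
 */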
2235 static void
2236 kmem_dumppr(char **pp, char *e, const char *format, ...)
2237 {
2238 char *p = *pp;
2239
2240 if (p < e) {
2241 int n;
2242 va_list ap;
2243
2244 va_start(ap, format);
2245 n = vsnprintf(p, e - p, format, ap);
2246 va_end(ap);
2247 *pp = p + n;
2248 }
2249 }
2250
2251 /*
2252 * Called when dumpadm(1M) configures dump parameters.
2253 */
2254 void
2255 kmem_dump_init(size_t size)
2256 {
2257 /* Our caller ensures size is always set. */
2258 ASSERT3U(size, >, 0);
2259
2260 if (kmem_dump_start != NULL)
2261 kmem_free(kmem_dump_start, kmem_dump_size);
2262
2263 kmem_dump_start = kmem_alloc(size, KM_SLEEP);
2264 kmem_dump_size = size;
2265 kmem_dump_curr = kmem_dump_start;
2266 kmem_dump_end = (void *)((char *)kmem_dump_start + size);
2267 copy_pattern(KMEM_UNINITIALIZED_PATTERN, kmem_dump_start, size);
2268 }
2269
2270 /*
2271  * Set a flag in each kmem_cache_t indicating whether it is safe to use
2272  * alternate dump memory. Called just before a panic crash dump starts.
2273  * The flag is set for the calling CPU.
2274 */
2275 void
2276 kmem_dump_begin(void)
2277 {
2278 kmem_cache_t *cp;
2279
2280 ASSERT(panicstr != NULL);
2281
2282 for (cp = list_head(&kmem_caches); cp != NULL;
2283 cp = list_next(&kmem_caches, cp)) {
2284 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2285
2286 if (cp->cache_arena->vm_cflags & VMC_DUMPSAFE) {
2287 cp->cache_flags |= KMF_DUMPDIVERT;
2288 ccp->cc_flags |= KMF_DUMPDIVERT;
2289 ccp->cc_dump_rounds = ccp->cc_rounds;
2290 ccp->cc_dump_prounds = ccp->cc_prounds;
2291 ccp->cc_rounds = ccp->cc_prounds = -1;
2292 } else {
2293 cp->cache_flags |= KMF_DUMPUNSAFE;
2294 ccp->cc_flags |= KMF_DUMPUNSAFE;
2295 }
2296 }
2297 }
2298
2299 /*
2300 * finished dump intercept
2301 * print any warnings on the console
2302 * return verbose information to dumpsys() in the given buffer
2303 */
2304 size_t
2305 kmem_dump_finish(char *buf, size_t size)
2306 {
2307 int percent = 0;
2308 size_t used;
2309 char *e = buf + size;
2310 char *p = buf;
2311
2312 if (kmem_dump_curr == kmem_dump_end) {
2313 cmn_err(CE_WARN, "exceeded kmem_dump space of %lu "
2314 "bytes: kmem state in dump may be inconsistent",
2315 kmem_dump_size);
2316 }
2317
2318 if (kmem_dump_verbose == 0)
2319 return (0);
2320
2321 used = (char *)kmem_dump_curr - (char *)kmem_dump_start;
2322 percent = (used * 100) / kmem_dump_size;
2323
2324 kmem_dumppr(&p, e, "%% heap used,%d\n", percent);
2325 kmem_dumppr(&p, e, "used bytes,%ld\n", used);
2326 kmem_dumppr(&p, e, "heap size,%ld\n", kmem_dump_size);
2327 kmem_dumppr(&p, e, "Oversize allocs,%d\n",
2328 kmem_dump_oversize_allocs);
2329 kmem_dumppr(&p, e, "Oversize max size,%ld\n",
2330 kmem_dump_oversize_max);
2331
2332 /* return buffer size used */
2333 if (p < e)
2334 bzero(p, e - p);
2335 return (p - buf);
2336 }
2337
2338 /*
2339 * Allocate a constructed object from alternate dump memory.
2340 */
2341 void *
2342 kmem_cache_alloc_dump(kmem_cache_t *cp, int kmflag)
2343 {
2344 void *buf;
2345 void *curr;
2346 char *bufend;
2347
2348 /* return a constructed object */
2349 if ((buf = cp->cache_dump.kd_freelist) != NULL) {
2350 cp->cache_dump.kd_freelist = KMEM_DUMPCTL(cp, buf)->kdc_next;
2351 return (buf);
2352 }
2353
2354 /* create a new constructed object */
2355 curr = kmem_dump_curr;
2356 buf = (void *)P2ROUNDUP((uintptr_t)curr, cp->cache_align);
2357 bufend = (char *)KMEM_DUMPCTL(cp, buf) + sizeof (kmem_dumpctl_t);
2358
2359 /* hat layer objects cannot cross a page boundary */
2360 if (cp->cache_align < PAGESIZE) {
2361 char *page = (char *)P2ROUNDUP((uintptr_t)buf, PAGESIZE);
2362 if (bufend > page) {
2363 bufend += page - (char *)buf;
2364 buf = (void *)page;
2365 }
2366 }
2367
2368 /* fall back to normal alloc if reserved area is used up */
2369 if (bufend > (char *)kmem_dump_end) {
2370 kmem_dump_curr = kmem_dump_end;
2371 cp->cache_dump.kd_alloc_fails++;
2372 return (NULL);
2373 }
2374
2375 /*
2376 * Must advance curr pointer before calling a constructor that
2377 * may also allocate memory.
2378 */
2379 kmem_dump_curr = bufend;
2380
2381 /* run constructor */
2382 if (cp->cache_constructor != NULL &&
2383 cp->cache_constructor(buf, cp->cache_private, kmflag)
2384 != 0) {
2385 #ifdef DEBUG
2386 printf("name='%s' cache=0x%p: kmem cache constructor failed\n",
2387 cp->cache_name, (void *)cp);
2388 #endif
2389 /* reset curr pointer iff no allocs were done */
2390 if (kmem_dump_curr == bufend)
2391 kmem_dump_curr = curr;
2392
2393 cp->cache_dump.kd_alloc_fails++;
2394 /* fall back to normal alloc if the constructor fails */
2395 return (NULL);
2396 }
2397
2398 return (buf);
2399 }
2400
2401 /*
2402 * Free a constructed object in alternate dump memory.
2403 */
2404 int
2405 kmem_cache_free_dump(kmem_cache_t *cp, void *buf)
2406 {
2407 /* save constructed buffers for next time */
2408 if ((char *)buf >= (char *)kmem_dump_start &&
2409 (char *)buf < (char *)kmem_dump_end) {
2410 KMEM_DUMPCTL(cp, buf)->kdc_next = cp->cache_dump.kd_freelist;
2411 cp->cache_dump.kd_freelist = buf;
2412 return (0);
2413 }
2414
2415 /* just drop buffers that were allocated before dump started */
2416 if (kmem_dump_curr < kmem_dump_end)
2417 return (0);
2418
2419 /* fall back to normal free if reserved area is used up */
2420 return (1);
2421 }
2422
2423 /*
2424 * Allocate a constructed object from cache cp.
2425 */
2426 void *
2427 kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
2428 {
2429 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2430 kmem_magazine_t *fmp;
2431 void *buf;
2432
2433 mutex_enter(&ccp->cc_lock);
2434 for (;;) {
2435 /*
2436 * If there's an object available in the current CPU's
2437 * loaded magazine, just take it and return.
2438 */
2439 if (ccp->cc_rounds > 0) {
2440 buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds];
2441 ccp->cc_alloc++;
2442 mutex_exit(&ccp->cc_lock);
2443 if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPUNSAFE)) {
2444 if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2445 ASSERT(!(ccp->cc_flags &
2446 KMF_DUMPDIVERT));
2447 cp->cache_dump.kd_unsafe++;
2448 }
2449 if ((ccp->cc_flags & KMF_BUFTAG) &&
2450 kmem_cache_alloc_debug(cp, buf, kmflag, 0,
2451 caller()) != 0) {
2452 if (kmflag & KM_NOSLEEP)
2453 return (NULL);
2454 mutex_enter(&ccp->cc_lock);
2455 continue;
2456 }
2457 }
2458 return (buf);
2459 }
2460
2461 /*
2462 * The loaded magazine is empty. If the previously loaded
2463 * magazine was full, exchange them and try again.
2464 */
2465 if (ccp->cc_prounds > 0) {
2466 kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2467 continue;
2468 }
2469
2470 /*
2471 * Return an alternate buffer at dump time to preserve
2472 * the heap.
2473 */
2474 if (ccp->cc_flags & (KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2475 if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2476 ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2477 /* log it so that we can warn about it */
2478 cp->cache_dump.kd_unsafe++;
2479 } else {
2480 if ((buf = kmem_cache_alloc_dump(cp, kmflag)) !=
2481 NULL) {
2482 mutex_exit(&ccp->cc_lock);
2483 return (buf);
2484 }
2485 break; /* fall back to slab layer */
2486 }
2487 }
2488
2489 /*
2490 * If the magazine layer is disabled, break out now.
2491 */
2492 if (ccp->cc_magsize == 0)
2493 break;
2494
2495 /*
2496 * Try to get a full magazine from the depot.
2497 */
2498 fmp = kmem_depot_alloc(cp, &cp->cache_full);
2499 if (fmp != NULL) {
2500 if (ccp->cc_ploaded != NULL)
2501 kmem_depot_free(cp, &cp->cache_empty,
2502 ccp->cc_ploaded);
2503 kmem_cpu_reload(ccp, fmp, ccp->cc_magsize);
2504 continue;
2505 }
2506
2507 /*
2508 * There are no full magazines in the depot,
2509 * so fall through to the slab layer.
2510 */
2511 break;
2512 }
2513 mutex_exit(&ccp->cc_lock);
2514
2515 /*
2516 * We couldn't allocate a constructed object from the magazine layer,
2517 * so get a raw buffer from the slab layer and apply its constructor.
2518 */
2519 buf = kmem_slab_alloc(cp, kmflag);
2520
2521 if (buf == NULL)
2522 return (NULL);
2523
2524 if (cp->cache_flags & KMF_BUFTAG) {
2525 /*
2526 * Make kmem_cache_alloc_debug() apply the constructor for us.
2527 */
2528 int rc = kmem_cache_alloc_debug(cp, buf, kmflag, 1, caller());
2529 if (rc != 0) {
2530 if (kmflag & KM_NOSLEEP)
2531 return (NULL);
2532 /*
2533 * kmem_cache_alloc_debug() detected corruption
2534 * but didn't panic (kmem_panic <= 0). We should not be
2535 * here because the constructor failed (indicated by a
2536 * return code of 1). Try again.
2537 */
2538 ASSERT(rc == -1);
2539 return (kmem_cache_alloc(cp, kmflag));
2540 }
2541 return (buf);
2542 }
2543
2544 if (cp->cache_constructor != NULL &&
2545 cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) {
2546 atomic_inc_64(&cp->cache_alloc_fail);
2547 kmem_slab_free(cp, buf);
2548 return (NULL);
2549 }
2550
2551 return (buf);
2552 }
2553
2554 /*
2555 * The freed argument tells whether or not kmem_cache_free_debug() has already
2556 * been called so that we can avoid the duplicate free error. For example, a
2557 * buffer on a magazine has already been freed by the client but is still
2558 * constructed.
2559 */
2560 static void
2561 kmem_slab_free_constructed(kmem_cache_t *cp, void *buf, boolean_t freed)
2562 {
2563 if (!freed && (cp->cache_flags & KMF_BUFTAG))
2564 if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2565 return;
2566
2567 /*
2568 * Note that if KMF_DEADBEEF is in effect and KMF_LITE is not,
2569 * kmem_cache_free_debug() will have already applied the destructor.
2570 */
2571 if ((cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) != KMF_DEADBEEF &&
2572 cp->cache_destructor != NULL) {
2573 if (cp->cache_flags & KMF_DEADBEEF) { /* KMF_LITE implied */
2574 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2575 *(uint64_t *)buf = btp->bt_redzone;
2576 cp->cache_destructor(buf, cp->cache_private);
2577 *(uint64_t *)buf = KMEM_FREE_PATTERN;
2578 } else {
2579 cp->cache_destructor(buf, cp->cache_private);
2580 }
2581 }
2582
2583 kmem_slab_free(cp, buf);
2584 }
2585
2586 /*
2587 * Used when there's no room to free a buffer to the per-CPU cache.
2588 * Drops and re-acquires &ccp->cc_lock, and returns non-zero if the
2589 * caller should try freeing to the per-CPU cache again.
2590 * Note that we don't directly install the magazine in the cpu cache,
2591 * since its state may have changed wildly while the lock was dropped.
2592 */
2593 static int
2594 kmem_cpucache_magazine_alloc(kmem_cpu_cache_t *ccp, kmem_cache_t *cp)
2595 {
2596 kmem_magazine_t *emp;
2597 kmem_magtype_t *mtp;
2598
2599 ASSERT(MUTEX_HELD(&ccp->cc_lock));
2600 ASSERT(((uint_t)ccp->cc_rounds == ccp->cc_magsize ||
2601 ((uint_t)ccp->cc_rounds == -1)) &&
2602 ((uint_t)ccp->cc_prounds == ccp->cc_magsize ||
2603 ((uint_t)ccp->cc_prounds == -1)));
2604
2605 emp = kmem_depot_alloc(cp, &cp->cache_empty);
2606 if (emp != NULL) {
2607 if (ccp->cc_ploaded != NULL)
2608 kmem_depot_free(cp, &cp->cache_full,
2609 ccp->cc_ploaded);
2610 kmem_cpu_reload(ccp, emp, 0);
2611 return (1);
2612 }
2613 /*
2614 * There are no empty magazines in the depot,
2615 * so try to allocate a new one. We must drop all locks
2616 * across kmem_cache_alloc() because lower layers may
2617 * attempt to allocate from this cache.
2618 */
2619 mtp = cp->cache_magtype;
2620 mutex_exit(&ccp->cc_lock);
2621 emp = kmem_cache_alloc(mtp->mt_cache, KM_NOSLEEP);
2622 mutex_enter(&ccp->cc_lock);
2623
2624 if (emp != NULL) {
2625 /*
2626 * We successfully allocated an empty magazine.
2627 * However, we had to drop ccp->cc_lock to do it,
2628 * so the cache's magazine size may have changed.
2629 * If so, free the magazine and try again.
2630 */
2631 if (ccp->cc_magsize != mtp->mt_magsize) {
2632 mutex_exit(&ccp->cc_lock);
2633 kmem_cache_free(mtp->mt_cache, emp);
2634 mutex_enter(&ccp->cc_lock);
2635 return (1);
2636 }
2637
2638 /*
2639 * We got a magazine of the right size. Add it to
2640 * the depot and try the whole dance again.
2641 */
2642 kmem_depot_free(cp, &cp->cache_empty, emp);
2643 return (1);
2644 }
2645
2646 /*
2647 * We couldn't allocate an empty magazine,
2648 * so fall through to the slab layer.
2649 */
2650 return (0);
2651 }
2652
2653 /*
2654 * Free a constructed object to cache cp.
2655 */
2656 void
2657 kmem_cache_free(kmem_cache_t *cp, void *buf)
2658 {
2659 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2660
2661 /*
2662 * The client must not free either of the buffers passed to the move
2663 * callback function.
2664 */
2665 ASSERT(cp->cache_defrag == NULL ||
2666 cp->cache_defrag->kmd_thread != curthread ||
2667 (buf != cp->cache_defrag->kmd_from_buf &&
2668 buf != cp->cache_defrag->kmd_to_buf));
2669
2670 if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2671 if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2672 ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2673 /* log it so that we can warn about it */
2674 cp->cache_dump.kd_unsafe++;
2675 } else if (KMEM_DUMPCC(ccp) && !kmem_cache_free_dump(cp, buf)) {
2676 return;
2677 }
2678 if (ccp->cc_flags & KMF_BUFTAG) {
2679 if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2680 return;
2681 }
2682 }
2683
2684 mutex_enter(&ccp->cc_lock);
2685 /*
2686 * Any changes to this logic should be reflected in kmem_slab_prefill()
2687 */
2688 for (;;) {
2689 /*
2690 * If there's a slot available in the current CPU's
2691 * loaded magazine, just put the object there and return.
2692 */
2693 if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2694 ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf;
2695 ccp->cc_free++;
2696 mutex_exit(&ccp->cc_lock);
2697 return;
2698 }
2699
2700 /*
2701 * The loaded magazine is full. If the previously loaded
2702 * magazine was empty, exchange them and try again.
2703 */
2704 if (ccp->cc_prounds == 0) {
2705 kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2706 continue;
2707 }
2708
2709 /*
2710 * If the magazine layer is disabled, break out now.
2711 */
2712 if (ccp->cc_magsize == 0)
2713 break;
2714
2715 if (!kmem_cpucache_magazine_alloc(ccp, cp)) {
2716 /*
2717 * We couldn't free our constructed object to the
2718 * magazine layer, so apply its destructor and free it
2719 * to the slab layer.
2720 */
2721 break;
2722 }
2723 }
2724 mutex_exit(&ccp->cc_lock);
2725 kmem_slab_free_constructed(cp, buf, B_TRUE);
2726 }
2727
2728 static void
2729 kmem_slab_prefill(kmem_cache_t *cp, kmem_slab_t *sp)
2730 {
2731 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2732 int cache_flags = cp->cache_flags;
2733
2734 kmem_bufctl_t *next, *head;
2735 size_t nbufs;
2736
2737 /*
2738 * Completely allocate the newly created slab and put the pre-allocated
2739 * buffers in magazines. Any of the buffers that cannot be put in
2740 * magazines must be returned to the slab.
2741 */
2742 ASSERT(MUTEX_HELD(&cp->cache_lock));
2743 ASSERT((cache_flags & (KMF_PREFILL|KMF_BUFTAG)) == KMF_PREFILL);
2744 ASSERT(cp->cache_constructor == NULL);
2745 ASSERT(sp->slab_cache == cp);
2746 ASSERT(sp->slab_refcnt == 1);
2747 ASSERT(sp->slab_head != NULL && sp->slab_chunks > sp->slab_refcnt);
2748 ASSERT(avl_find(&cp->cache_partial_slabs, sp, NULL) == NULL);
2749
2750 head = sp->slab_head;
2751 nbufs = (sp->slab_chunks - sp->slab_refcnt);
2752 sp->slab_head = NULL;
2753 sp->slab_refcnt += nbufs;
2754 cp->cache_bufslab -= nbufs;
2755 cp->cache_slab_alloc += nbufs;
2756 list_insert_head(&cp->cache_complete_slabs, sp);
2757 cp->cache_complete_slab_count++;
2758 mutex_exit(&cp->cache_lock);
2759 mutex_enter(&ccp->cc_lock);
2760
2761 while (head != NULL) {
2762 void *buf = KMEM_BUF(cp, head);
2763 /*
2764 * If there's a slot available in the current CPU's
2765 * loaded magazine, just put the object there and
2766 * continue.
2767 */
2768 if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2769 ccp->cc_loaded->mag_round[ccp->cc_rounds++] =
2770 buf;
2771 ccp->cc_free++;
2772 nbufs--;
2773 head = head->bc_next;
2774 continue;
2775 }
2776
2777 /*
2778 * The loaded magazine is full. If the previously
2779 * loaded magazine was empty, exchange them and try
2780 * again.
2781 */
2782 if (ccp->cc_prounds == 0) {
2783 kmem_cpu_reload(ccp, ccp->cc_ploaded,
2784 ccp->cc_prounds);
2785 continue;
2786 }
2787
2788 /*
2789 * If the magazine layer is disabled, break out now.
2790 */
2791
2792 if (ccp->cc_magsize == 0) {
2793 break;
2794 }
2795
2796 if (!kmem_cpucache_magazine_alloc(ccp, cp))
2797 break;
2798 }
2799 mutex_exit(&ccp->cc_lock);
2800 if (nbufs != 0) {
2801 ASSERT(head != NULL);
2802
2803 /*
2804 * If there was a failure, return remaining objects to
2805 * the slab
2806 */
2807 while (head != NULL) {
2808 ASSERT(nbufs != 0);
2809 next = head->bc_next;
2810 head->bc_next = NULL;
2811 kmem_slab_free(cp, KMEM_BUF(cp, head));
2812 head = next;
2813 nbufs--;
2814 }
2815 }
2816 ASSERT(head == NULL);
2817 ASSERT(nbufs == 0);
2818 mutex_enter(&cp->cache_lock);
2819 }
2820
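/*
 * Allocate size bytes of zeroed kernel memory.  Small requests are
 * satisfied directly from the fixed-size kmem_alloc caches (updating the
 * redzone byte and encoded size when KMF_BUFTAG is set); anything larger
 * falls through to kmem_alloc() and is bzero()ed afterward.
 */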
2821 void *
2822 kmem_zalloc(size_t size, int kmflag)
2823 {
2824 size_t index;
2825 void *buf;
2826
2827 if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) {
2828 kmem_cache_t *cp = kmem_alloc_table[index];
2829 buf = kmem_cache_alloc(cp, kmflag);
2830 if (buf != NULL) {
2831 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
2832 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2833 ((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
2834 ((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
2835
2836 if (cp->cache_flags & KMF_LITE) {
2837 KMEM_BUFTAG_LITE_ENTER(btp,
2838 kmem_lite_count, caller());
2839 }
2840 }
2841 bzero(buf, size);
2842 }
2843 } else {
2844 buf = kmem_alloc(size, kmflag);
2845 if (buf != NULL)
2846 bzero(buf, size);
2847 }
2848 return (buf);
2849 }
2850
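/*
 * Allocate size bytes of kernel memory.  Requests that map into the
 * kmem_alloc_table or kmem_big_alloc_table caches are satisfied there;
 * larger requests go directly to kmem_oversize_arena.  Zero-sized KM_SLEEP
 * (or KM_PANIC) allocations are logged and return NULL (or panic, if
 * kmem_panic_zerosized is set).  Illustrative usage -- the matching free
 * must pass the same size:
 *
 *	buf = kmem_alloc(len, KM_SLEEP);
 *	...
 *	kmem_free(buf, len);
 */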
2851 void *
2852 kmem_alloc(size_t size, int kmflag)
2853 {
2854 size_t index;
2855 kmem_cache_t *cp;
2856 void *buf;
2857
2858 if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) {
2859 cp = kmem_alloc_table[index];
2860 /* fall through to kmem_cache_alloc() */
2861
2862 } else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
2863 kmem_big_alloc_table_max) {
2864 cp = kmem_big_alloc_table[index];
2865 /* fall through to kmem_cache_alloc() */
2866
2867 } else {
2868 if (size == 0) {
2869 if (kmflag != KM_SLEEP && !(kmflag & KM_PANIC))
2870 return (NULL);
2871
2872 /*
2873 * If this is a sleeping allocation or one that has
2874 * been specified to panic on allocation failure, we
2875 * consider it to be deprecated behavior to allocate
2876 * 0 bytes. If we have been configured to panic under
2877 * this condition, we panic; if to warn, we warn -- and
2878 * regardless, we log to the kmem_zerosized_log that
2879 			 * this condition has occurred (which gives us
2880 * enough information to be able to debug it).
2881 */
2882 if (kmem_panic && kmem_panic_zerosized)
2883 panic("attempted to kmem_alloc() size of 0");
2884
2885 if (kmem_warn_zerosized) {
2886 cmn_err(CE_WARN, "kmem_alloc(): sleeping "
2887 "allocation with size of 0; "
2888 "see kmem_zerosized_log for details");
2889 }
2890
2891 kmem_log_event(kmem_zerosized_log, NULL, NULL, NULL);
2892
2893 return (NULL);
2894 }
2895
2896 buf = vmem_alloc(kmem_oversize_arena, size,
2897 kmflag & KM_VMFLAGS);
2898 if (buf == NULL)
2899 kmem_log_event(kmem_failure_log, NULL, NULL,
2900 (void *)size);
2901 else if (KMEM_DUMP(kmem_slab_cache)) {
2902 /* stats for dump intercept */
2903 kmem_dump_oversize_allocs++;
2904 if (size > kmem_dump_oversize_max)
2905 kmem_dump_oversize_max = size;
2906 }
2907 return (buf);
2908 }
2909
2910 buf = kmem_cache_alloc(cp, kmflag);
2911 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp) && buf != NULL) {
2912 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2913 ((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
2914 ((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
2915
2916 if (cp->cache_flags & KMF_LITE) {
2917 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller());
2918 }
2919 }
2920 return (buf);
2921 }
2922
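/*
 * Free memory previously allocated with kmem_alloc() or kmem_zalloc().
 * The size argument must equal the original allocation size; when
 * KMF_BUFTAG is in effect the encoded size and redzone byte are checked
 * so that mismatched sizes and duplicate frees are reported.
 */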
2923 void
2924 kmem_free(void *buf, size_t size)
2925 {
2926 size_t index;
2927 kmem_cache_t *cp;
2928
2929 if ((index = (size - 1) >> KMEM_ALIGN_SHIFT) < KMEM_ALLOC_TABLE_MAX) {
2930 cp = kmem_alloc_table[index];
2931 /* fall through to kmem_cache_free() */
2932
2933 } else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
2934 kmem_big_alloc_table_max) {
2935 cp = kmem_big_alloc_table[index];
2936 /* fall through to kmem_cache_free() */
2937
2938 } else {
2939 EQUIV(buf == NULL, size == 0);
2940 if (buf == NULL && size == 0)
2941 return;
2942 vmem_free(kmem_oversize_arena, buf, size);
2943 return;
2944 }
2945
2946 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
2947 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2948 uint32_t *ip = (uint32_t *)btp;
2949 if (ip[1] != KMEM_SIZE_ENCODE(size)) {
2950 if (*(uint64_t *)buf == KMEM_FREE_PATTERN) {
2951 kmem_error(KMERR_DUPFREE, cp, buf);
2952 return;
2953 }
2954 if (KMEM_SIZE_VALID(ip[1])) {
2955 ip[0] = KMEM_SIZE_ENCODE(size);
2956 kmem_error(KMERR_BADSIZE, cp, buf);
2957 } else {
2958 kmem_error(KMERR_REDZONE, cp, buf);
2959 }
2960 return;
2961 }
2962 if (((uint8_t *)buf)[size] != KMEM_REDZONE_BYTE) {
2963 kmem_error(KMERR_REDZONE, cp, buf);
2964 return;
2965 }
2966 btp->bt_redzone = KMEM_REDZONE_PATTERN;
2967 if (cp->cache_flags & KMF_LITE) {
2968 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count,
2969 caller());
2970 }
2971 }
2972 kmem_cache_free(cp, buf);
2973 }
2974
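/*
 * Allocate size bytes of virtual address space followed by one quantum of
 * red zone that is left unmapped, so that a stray access just past the end
 * of the buffer faults.  kmem_firewall_va_free() asserts that the red zone
 * is still unmapped before returning the range to the arena.
 */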
2975 void *
2976 kmem_firewall_va_alloc(vmem_t *vmp, size_t size, int vmflag)
2977 {
2978 size_t realsize = size + vmp->vm_quantum;
2979 void *addr;
2980
2981 /*
2982 * Annoying edge case: if 'size' is just shy of ULONG_MAX, adding
2983 * vm_quantum will cause integer wraparound. Check for this, and
2984 * blow off the firewall page in this case. Note that such a
2985 * giant allocation (the entire kernel address space) can never
2986 * be satisfied, so it will either fail immediately (VM_NOSLEEP)
2987 * or sleep forever (VM_SLEEP). Thus, there is no need for a
2988 * corresponding check in kmem_firewall_va_free().
2989 */
2990 if (realsize < size)
2991 realsize = size;
2992
2993 /*
2994 * While boot still owns resource management, make sure that this
2995 * redzone virtual address allocation is properly accounted for in
2996 * OBPs "virtual-memory" "available" lists because we're
2997 * effectively claiming them for a red zone. If we don't do this,
2998 * the available lists become too fragmented and too large for the
2999 * current boot/kernel memory list interface.
3000 */
3001 addr = vmem_alloc(vmp, realsize, vmflag | VM_NEXTFIT);
3002
3003 if (addr != NULL && kvseg.s_base == NULL && realsize != size)
3004 (void) boot_virt_alloc((char *)addr + size, vmp->vm_quantum);
3005
3006 return (addr);
3007 }
3008
3009 void
3010 kmem_firewall_va_free(vmem_t *vmp, void *addr, size_t size)
3011 {
3012 ASSERT((kvseg.s_base == NULL ?
3013 va_to_pfn((char *)addr + size) :
3014 hat_getpfnum(kas.a_hat, (caddr_t)addr + size)) == PFN_INVALID);
3015
3016 vmem_free(vmp, addr, size + vmp->vm_quantum);
3017 }
3018
3019 /*
3020 * Try to allocate at least `size' bytes of memory without sleeping or
3021 * panicking. Return the actual allocated size in `asize'. If all attempts
3022 * fail, try a final allocation with sleep or panic allowed.
3023 */
3024 void *
3025 kmem_alloc_tryhard(size_t size, size_t *asize, int kmflag)
3026 {
3027 void *p;
3028
3029 *asize = P2ROUNDUP(size, KMEM_ALIGN);
3030 do {
3031 p = kmem_alloc(*asize, (kmflag | KM_NOSLEEP) & ~KM_PANIC);
3032 if (p != NULL)
3033 return (p);
3034 *asize += KMEM_ALIGN;
3035 } while (*asize <= PAGESIZE);
3036
3037 *asize = P2ROUNDUP(size, KMEM_ALIGN);
3038 return (kmem_alloc(*asize, kmflag));
3039 }
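/*
 * Illustrative sketch (an assumption about typical use, not taken from this
 * file): because kmem_alloc_tryhard() may return more than the requested
 * size, a caller must remember the returned 'asize' and pass that back to
 * kmem_free():
 *
 *	size_t asize;
 *	void *buf = kmem_alloc_tryhard(len, &asize, KM_SLEEP);
 *	...
 *	kmem_free(buf, asize);
 *
 * 'len' is a hypothetical caller-supplied length.
 */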
3040
3041 /*
3042 * Reclaim all unused memory from a cache.
3043 */
3044 static void
3045 kmem_cache_reap(kmem_cache_t *cp)
3046 {
3047 ASSERT(taskq_member(kmem_taskq, curthread));
3048 cp->cache_reap++;
3049
3050 /*
3051 * Ask the cache's owner to free some memory if possible.
3052 * The idea is to handle things like the inode cache, which
3053 * typically sits on a bunch of memory that it doesn't truly
3054 * *need*. Reclaim policy is entirely up to the owner; this
3055 * callback is just an advisory plea for help.
3056 */
3057 if (cp->cache_reclaim != NULL) {
3058 long delta;
3059
3060 /*
3061 * Reclaimed memory should be reapable (not included in the
3062 * depot's working set).
3063 */
3064 delta = cp->cache_full.ml_total;
3065 cp->cache_reclaim(cp->cache_private);
3066 delta = cp->cache_full.ml_total - delta;
3067 if (delta > 0) {
3068 mutex_enter(&cp->cache_depot_lock);
3069 cp->cache_full.ml_reaplimit += delta;
3070 cp->cache_full.ml_min += delta;
3071 mutex_exit(&cp->cache_depot_lock);
3072 }
3073 }
3074
3075 kmem_depot_ws_reap(cp);
3076
3077 if (cp->cache_defrag != NULL && !kmem_move_noreap) {
3078 kmem_cache_defrag(cp);
3079 }
3080 }
3081
3082 static void
3083 kmem_reap_timeout(void *flag_arg)
3084 {
3085 uint32_t *flag = (uint32_t *)flag_arg;
3086
3087 ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
3088 *flag = 0;
3089 }
3090
3091 static void
3092 kmem_reap_done(void *flag)
3093 {
3094 if (!callout_init_done) {
3095 /* can't schedule a timeout at this point */
3096 kmem_reap_timeout(flag);
3097 } else {
3098 (void) timeout(kmem_reap_timeout, flag, kmem_reap_interval);
3099 }
3100 }
3101
3102 static void
3103 kmem_reap_start(void *flag)
3104 {
3105 ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
3106
3107 if (flag == &kmem_reaping) {
3108 kmem_cache_applyall(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);
3109 /*
3110 * if we have segkp under heap, reap segkp cache.
3111 */
3112 if (segkp_fromheap)
3113 segkp_cache_free();
3114 } else {
3115 kmem_cache_applyall_id(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);
3116 }
3117
3118 /*
3119 * We use taskq_dispatch() to schedule a timeout to clear
3120 * the flag so that kmem_reap() becomes self-throttling:
3121 * we won't reap again until the current reap completes *and*
3122 * at least kmem_reap_interval ticks have elapsed.
3123 */
3124 if (taskq_dispatch(kmem_taskq, kmem_reap_done, flag, TQ_NOSLEEP) ==
3125 TASKQID_INVALID)
3126 kmem_reap_done(flag);
3127 }
3128
3129 static void
3130 kmem_reap_common(void *flag_arg)
3131 {
3132 uint32_t *flag = (uint32_t *)flag_arg;
3133
3134 if (MUTEX_HELD(&kmem_cache_lock) || kmem_taskq == NULL ||
3135 atomic_cas_32(flag, 0, 1) != 0)
3136 return;
3137
3138 /*
3139 * It may not be kosher to do memory allocation when a reap is called
3140 * (for example, if vmem_populate() is in the call chain). So we
3141 * start the reap going with a TQ_NOALLOC dispatch. If the dispatch
3142 * fails, we reset the flag, and the next reap will try again.
3143 */
3144 if (taskq_dispatch(kmem_taskq, kmem_reap_start, flag, TQ_NOALLOC) ==
3145 TASKQID_INVALID)
3146 *flag = 0;
3147 }
3148
3149 /*
3150 * Reclaim all unused memory from all caches. Called from the VM system
3151 * when memory gets tight.
3152 */
3153 void
3154 kmem_reap(void)
3155 {
3156 kmem_reap_common(&kmem_reaping);
3157 }
3158
3159 /*
3160 * Reclaim all unused memory from identifier arenas, called when a vmem
3161 * arena not backed by memory is exhausted. Since reaping memory-backed caches
3162 * cannot help with identifier exhaustion, we avoid both a large amount of
3163 * work and unwanted side-effects from reclaim callbacks.
3164 */
3165 void
3166 kmem_reap_idspace(void)
3167 {
3168 kmem_reap_common(&kmem_reaping_idspace);
3169 }
3170
3171 /*
3172 * Purge all magazines from a cache and set its magazine limit to zero.
3173 * All calls are serialized by the kmem_taskq lock, except for the final
3174 * call from kmem_cache_destroy().
3175 */
3176 static void
3177 kmem_cache_magazine_purge(kmem_cache_t *cp)
3178 {
3179 kmem_cpu_cache_t *ccp;
3180 kmem_magazine_t *mp, *pmp;
3181 int rounds, prounds, cpu_seqid;
3182
3183 ASSERT(!list_link_active(&cp->cache_link) ||
3184 taskq_member(kmem_taskq, curthread));
3185 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
3186
3187 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3188 ccp = &cp->cache_cpu[cpu_seqid];
3189
3190 mutex_enter(&ccp->cc_lock);
3191 mp = ccp->cc_loaded;
3192 pmp = ccp->cc_ploaded;
3193 rounds = ccp->cc_rounds;
3194 prounds = ccp->cc_prounds;
3195 ccp->cc_loaded = NULL;
3196 ccp->cc_ploaded = NULL;
3197 ccp->cc_rounds = -1;
3198 ccp->cc_prounds = -1;
3199 ccp->cc_magsize = 0;
3200 mutex_exit(&ccp->cc_lock);
3201
3202 if (mp)
3203 kmem_magazine_destroy(cp, mp, rounds);
3204 if (pmp)
3205 kmem_magazine_destroy(cp, pmp, prounds);
3206 }
3207
3208 kmem_depot_ws_zero(cp);
3209 kmem_depot_ws_reap(cp);
3210 }
3211
3212 /*
3213 * Enable per-cpu magazines on a cache.
3214 */
3215 static void
3216 kmem_cache_magazine_enable(kmem_cache_t *cp)
3217 {
3218 int cpu_seqid;
3219
3220 if (cp->cache_flags & KMF_NOMAGAZINE)
3221 return;
3222
3223 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3224 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3225 mutex_enter(&ccp->cc_lock);
3226 ccp->cc_magsize = cp->cache_magtype->mt_magsize;
3227 mutex_exit(&ccp->cc_lock);
3228 }
3229
3230 }
3231
3232 /*
3233 * Allow our caller to determine if there are running reaps.
3234 *
3235 * This call is very conservative and may return B_TRUE even when no
3236 * reaping activity is actually in progress. If it returns B_FALSE, then
3237 * reaping activity is definitely inactive.
3238 */
3239 boolean_t
3240 kmem_cache_reap_active(void)
3241 {
3242 return (!taskq_empty(kmem_taskq));
3243 }
3244
3245 /*
3246 * Reap (almost) everything soon.
3247 *
3248 * Note: this does not wait for the reap-tasks to complete. Caller
3249 * should use kmem_cache_reap_active() (above) and/or moderation to
3250 * avoid scheduling too many reap-tasks.
3251 */
3252 void
3253 kmem_cache_reap_soon(kmem_cache_t *cp)
3254 {
3255 ASSERT(list_link_active(&cp->cache_link));
3256
3257 kmem_depot_ws_zero(cp);
3258
3259 (void) taskq_dispatch(kmem_taskq,
3260 (task_func_t *)kmem_depot_ws_reap, cp, TQ_SLEEP);
3261 }
3262
3263 /*
3264 * Recompute a cache's magazine size. The trade-off is that larger magazines
3265 * provide a higher transfer rate with the depot, while smaller magazines
3266 * reduce memory consumption. Magazine resizing is an expensive operation;
3267 * it should not be done frequently.
3268 *
3269 * Changes to the magazine size are serialized by the kmem_taskq lock.
3270 *
3271 * Note: at present this only grows the magazine size. It might be useful
3272 * to allow shrinkage too.
3273 */
3274 static void
3275 kmem_cache_magazine_resize(kmem_cache_t *cp)
3276 {
3277 kmem_magtype_t *mtp = cp->cache_magtype;
3278
3279 ASSERT(taskq_member(kmem_taskq, curthread));
3280
3281 if (cp->cache_chunksize < mtp->mt_maxbuf) {
3282 kmem_cache_magazine_purge(cp);
3283 mutex_enter(&cp->cache_depot_lock);
3284 cp->cache_magtype = ++mtp;
3285 cp->cache_depot_contention_prev =
3286 cp->cache_depot_contention + INT_MAX;
3287 mutex_exit(&cp->cache_depot_lock);
3288 kmem_cache_magazine_enable(cp);
3289 }
3290 }
3291
3292 /*
3293 * Rescale a cache's hash table so that the table size roughly tracks the
3294 * number of buffers in the cache, keeping the average lookup time very small.
3295 */
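/*
 * A worked example of the sizing rule below (illustrative only): with a
 * cache_buftotal of about 10000, highbit(3 * 10000 + 4) is 15, so the new
 * table size is 1 << 13 = 8192 buckets (assuming KMEM_HASH_INITIAL is
 * smaller), i.e. on the order of one hash bucket per allocated buffer.
 */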
3296 static void
3297 kmem_hash_rescale(kmem_cache_t *cp)
3298 {
3299 kmem_bufctl_t **old_table, **new_table, *bcp;
3300 size_t old_size, new_size, h;
3301
3302 ASSERT(taskq_member(kmem_taskq, curthread));
3303
3304 new_size = MAX(KMEM_HASH_INITIAL,
3305 1 << (highbit(3 * cp->cache_buftotal + 4) - 2));
3306 old_size = cp->cache_hash_mask + 1;
3307
3308 if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
3309 return;
3310
3311 new_table = vmem_alloc(kmem_hash_arena, new_size * sizeof (void *),
3312 VM_NOSLEEP);
3313 if (new_table == NULL)
3314 return;
3315 bzero(new_table, new_size * sizeof (void *));
3316
3317 mutex_enter(&cp->cache_lock);
3318
3319 old_size = cp->cache_hash_mask + 1;
3320 old_table = cp->cache_hash_table;
3321
3322 cp->cache_hash_mask = new_size - 1;
3323 cp->cache_hash_table = new_table;
3324 cp->cache_rescale++;
3325
3326 for (h = 0; h < old_size; h++) {
3327 bcp = old_table[h];
3328 while (bcp != NULL) {
3329 void *addr = bcp->bc_addr;
3330 kmem_bufctl_t *next_bcp = bcp->bc_next;
3331 kmem_bufctl_t **hash_bucket = KMEM_HASH(cp, addr);
3332 bcp->bc_next = *hash_bucket;
3333 *hash_bucket = bcp;
3334 bcp = next_bcp;
3335 }
3336 }
3337
3338 mutex_exit(&cp->cache_lock);
3339
3340 vmem_free(kmem_hash_arena, old_table, old_size * sizeof (void *));
3341 }
3342
3343 /*
3344 * Perform periodic maintenance on a cache: hash rescaling, depot working-set
3345 * update, magazine resizing, and slab consolidation.
3346 */
3347 static void
3348 kmem_cache_update(kmem_cache_t *cp)
3349 {
3350 int need_hash_rescale = 0;
3351 int need_magazine_resize = 0;
3352
3353 ASSERT(MUTEX_HELD(&kmem_cache_lock));
3354
3355 /*
3356 * If the cache has become much larger or smaller than its hash table,
3357 * fire off a request to rescale the hash table.
3358 */
3359 mutex_enter(&cp->cache_lock);
3360
3361 if ((cp->cache_flags & KMF_HASH) &&
3362 (cp->cache_buftotal > (cp->cache_hash_mask << 1) ||
3363 (cp->cache_buftotal < (cp->cache_hash_mask >> 1) &&
3364 cp->cache_hash_mask > KMEM_HASH_INITIAL)))
3365 need_hash_rescale = 1;
3366
3367 mutex_exit(&cp->cache_lock);
3368
3369 /*
3370 * Update the depot working set statistics.
3371 */
3372 kmem_depot_ws_update(cp);
3373
3374 /*
3375 * If there's a lot of contention in the depot,
3376 * increase the magazine size.
3377 */
3378 mutex_enter(&cp->cache_depot_lock);
3379
3380 if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf &&
3381 (int)(cp->cache_depot_contention -
3382 cp->cache_depot_contention_prev) > kmem_depot_contention)
3383 need_magazine_resize = 1;
3384
3385 cp->cache_depot_contention_prev = cp->cache_depot_contention;
3386
3387 mutex_exit(&cp->cache_depot_lock);
3388
3389 if (need_hash_rescale)
3390 (void) taskq_dispatch(kmem_taskq,
3391 (task_func_t *)kmem_hash_rescale, cp, TQ_NOSLEEP);
3392
3393 if (need_magazine_resize)
3394 (void) taskq_dispatch(kmem_taskq,
3395 (task_func_t *)kmem_cache_magazine_resize, cp, TQ_NOSLEEP);
3396
3397 if (cp->cache_defrag != NULL)
3398 (void) taskq_dispatch(kmem_taskq,
3399 (task_func_t *)kmem_cache_scan, cp, TQ_NOSLEEP);
3400 }
3401
3402 static void kmem_update(void *);
3403
3404 static void
3405 kmem_update_timeout(void *dummy)
3406 {
3407 (void) timeout(kmem_update, dummy, kmem_reap_interval);
3408 }
3409
3410 static void
3411 kmem_update(void *dummy)
3412 {
3413 kmem_cache_applyall(kmem_cache_update, NULL, TQ_NOSLEEP);
3414
3415 /*
3416 * We use taskq_dispatch() to reschedule the timeout so that
3417 * kmem_update() becomes self-throttling: it won't schedule
3418 * new tasks until all previous tasks have completed.
3419 */
3420 if (taskq_dispatch(kmem_taskq, kmem_update_timeout, dummy, TQ_NOSLEEP)
3421 == TASKQID_INVALID)
3422 kmem_update_timeout(NULL);
3423 }
3424
3425 static int
3426 kmem_cache_kstat_update(kstat_t *ksp, int rw)
3427 {
3428 struct kmem_cache_kstat *kmcp = &kmem_cache_kstat;
3429 kmem_cache_t *cp = ksp->ks_private;
3430 uint64_t cpu_buf_avail;
3431 uint64_t buf_avail = 0;
3432 int cpu_seqid;
3433 long reap;
3434
3435 ASSERT(MUTEX_HELD(&kmem_cache_kstat_lock));
3436
3437 if (rw == KSTAT_WRITE)
3438 return (EACCES);
3439
3440 mutex_enter(&cp->cache_lock);
3441
3442 kmcp->kmc_alloc_fail.value.ui64 = cp->cache_alloc_fail;
3443 kmcp->kmc_alloc.value.ui64 = cp->cache_slab_alloc;
3444 kmcp->kmc_free.value.ui64 = cp->cache_slab_free;
3445 kmcp->kmc_slab_alloc.value.ui64 = cp->cache_slab_alloc;
3446 kmcp->kmc_slab_free.value.ui64 = cp->cache_slab_free;
3447
3448 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3449 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3450
3451 mutex_enter(&ccp->cc_lock);
3452
3453 cpu_buf_avail = 0;
3454 if (ccp->cc_rounds > 0)
3455 cpu_buf_avail += ccp->cc_rounds;
3456 if (ccp->cc_prounds > 0)
3457 cpu_buf_avail += ccp->cc_prounds;
3458
3459 kmcp->kmc_alloc.value.ui64 += ccp->cc_alloc;
3460 kmcp->kmc_free.value.ui64 += ccp->cc_free;
3461 buf_avail += cpu_buf_avail;
3462
3463 mutex_exit(&ccp->cc_lock);
3464 }
3465
3466 mutex_enter(&cp->cache_depot_lock);
3467
3468 kmcp->kmc_depot_alloc.value.ui64 = cp->cache_full.ml_alloc;
3469 kmcp->kmc_depot_free.value.ui64 = cp->cache_empty.ml_alloc;
3470 kmcp->kmc_depot_contention.value.ui64 = cp->cache_depot_contention;
3471 kmcp->kmc_full_magazines.value.ui64 = cp->cache_full.ml_total;
3472 kmcp->kmc_empty_magazines.value.ui64 = cp->cache_empty.ml_total;
3473 kmcp->kmc_magazine_size.value.ui64 =
3474 (cp->cache_flags & KMF_NOMAGAZINE) ?
3475 0 : cp->cache_magtype->mt_magsize;
3476
3477 kmcp->kmc_alloc.value.ui64 += cp->cache_full.ml_alloc;
3478 kmcp->kmc_free.value.ui64 += cp->cache_empty.ml_alloc;
3479 buf_avail += cp->cache_full.ml_total * cp->cache_magtype->mt_magsize;
3480
3481 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
3482 reap = MIN(reap, cp->cache_full.ml_total);
3483
3484 mutex_exit(&cp->cache_depot_lock);
3485
3486 kmcp->kmc_buf_size.value.ui64 = cp->cache_bufsize;
3487 kmcp->kmc_align.value.ui64 = cp->cache_align;
3488 kmcp->kmc_chunk_size.value.ui64 = cp->cache_chunksize;
3489 kmcp->kmc_slab_size.value.ui64 = cp->cache_slabsize;
3490 kmcp->kmc_buf_constructed.value.ui64 = buf_avail;
3491 buf_avail += cp->cache_bufslab;
3492 kmcp->kmc_buf_avail.value.ui64 = buf_avail;
3493 kmcp->kmc_buf_inuse.value.ui64 = cp->cache_buftotal - buf_avail;
3494 kmcp->kmc_buf_total.value.ui64 = cp->cache_buftotal;
3495 kmcp->kmc_buf_max.value.ui64 = cp->cache_bufmax;
3496 kmcp->kmc_slab_create.value.ui64 = cp->cache_slab_create;
3497 kmcp->kmc_slab_destroy.value.ui64 = cp->cache_slab_destroy;
3498 kmcp->kmc_hash_size.value.ui64 = (cp->cache_flags & KMF_HASH) ?
3499 cp->cache_hash_mask + 1 : 0;
3500 kmcp->kmc_hash_lookup_depth.value.ui64 = cp->cache_lookup_depth;
3501 kmcp->kmc_hash_rescale.value.ui64 = cp->cache_rescale;
3502 kmcp->kmc_vmem_source.value.ui64 = cp->cache_arena->vm_id;
3503 kmcp->kmc_reap.value.ui64 = cp->cache_reap;
3504
3505 if (cp->cache_defrag == NULL) {
3506 kmcp->kmc_move_callbacks.value.ui64 = 0;
3507 kmcp->kmc_move_yes.value.ui64 = 0;
3508 kmcp->kmc_move_no.value.ui64 = 0;
3509 kmcp->kmc_move_later.value.ui64 = 0;
3510 kmcp->kmc_move_dont_need.value.ui64 = 0;
3511 kmcp->kmc_move_dont_know.value.ui64 = 0;
3512 kmcp->kmc_move_hunt_found.value.ui64 = 0;
3513 kmcp->kmc_move_slabs_freed.value.ui64 = 0;
3514 kmcp->kmc_defrag.value.ui64 = 0;
3515 kmcp->kmc_scan.value.ui64 = 0;
3516 kmcp->kmc_move_reclaimable.value.ui64 = 0;
3517 } else {
3518 int64_t reclaimable;
3519
3520 kmem_defrag_t *kd = cp->cache_defrag;
3521 kmcp->kmc_move_callbacks.value.ui64 = kd->kmd_callbacks;
3522 kmcp->kmc_move_yes.value.ui64 = kd->kmd_yes;
3523 kmcp->kmc_move_no.value.ui64 = kd->kmd_no;
3524 kmcp->kmc_move_later.value.ui64 = kd->kmd_later;
3525 kmcp->kmc_move_dont_need.value.ui64 = kd->kmd_dont_need;
3526 kmcp->kmc_move_dont_know.value.ui64 = kd->kmd_dont_know;
3527 kmcp->kmc_move_hunt_found.value.ui64 = 0;
3528 kmcp->kmc_move_slabs_freed.value.ui64 = kd->kmd_slabs_freed;
3529 kmcp->kmc_defrag.value.ui64 = kd->kmd_defrags;
3530 kmcp->kmc_scan.value.ui64 = kd->kmd_scans;
3531
3532 reclaimable = cp->cache_bufslab - (cp->cache_maxchunks - 1);
3533 reclaimable = MAX(reclaimable, 0);
3534 reclaimable += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
3535 kmcp->kmc_move_reclaimable.value.ui64 = reclaimable;
3536 }
3537
3538 mutex_exit(&cp->cache_lock);
3539 return (0);
3540 }
3541
3542 /*
3543 * Return a named statistic about a particular cache.
3544 * This shouldn't be called very often, so it's currently designed for
3545 * simplicity (leverages existing kstat support) rather than efficiency.
3546 */
3547 uint64_t
3548 kmem_cache_stat(kmem_cache_t *cp, char *name)
3549 {
3550 int i;
3551 kstat_t *ksp = cp->cache_kstat;
3552 kstat_named_t *knp = (kstat_named_t *)&kmem_cache_kstat;
3553 uint64_t value = 0;
3554
3555 if (ksp != NULL) {
3556 mutex_enter(&kmem_cache_kstat_lock);
3557 (void) kmem_cache_kstat_update(ksp, KSTAT_READ);
3558 for (i = 0; i < ksp->ks_ndata; i++) {
3559 if (strcmp(knp[i].name, name) == 0) {
3560 value = knp[i].value.ui64;
3561 break;
3562 }
3563 }
3564 mutex_exit(&kmem_cache_kstat_lock);
3565 }
3566 return (value);
3567 }
3568
3569 /*
3570 * Return an estimate of currently available kernel heap memory.
3571 * On 32-bit systems, physical memory may exceed virtual memory, so
3572 * we just truncate the result at 1GB.
3573 */
3574 size_t
3575 kmem_avail(void)
3576 {
3577 spgcnt_t rmem = availrmem - tune.t_minarmem;
3578 spgcnt_t fmem = freemem - minfree;
3579
3580 return ((size_t)ptob(MIN(MAX(MIN(rmem, fmem), 0),
3581 1 << (30 - PAGESHIFT))));
3582 }
3583
3584 /*
3585 * Return the maximum amount of memory that is (in theory) allocatable
3586 * from the heap. This may be used as an estimate only since there
3587 * is no guarantee this space will still be available when an allocation
3588 * request is made, nor that the space can be allocated in one big request
3589 * due to kernel heap fragmentation.
3590 */
3591 size_t
3592 kmem_maxavail(void)
3593 {
3594 spgcnt_t pmem = availrmem - tune.t_minarmem;
3595 spgcnt_t vmem = btop(vmem_size(heap_arena, VMEM_FREE));
3596
3597 return ((size_t)ptob(MAX(MIN(pmem, vmem), 0)));
3598 }
3599
3600 /*
3601 * Indicate whether memory-intensive kmem debugging is enabled.
3602 */
3603 int
3604 kmem_debugging(void)
3605 {
3606 return (kmem_flags & (KMF_AUDIT | KMF_REDZONE));
3607 }
3608
3609 /* binning function, sorts finely at the two extremes */
3610 #define KMEM_PARTIAL_SLAB_WEIGHT(sp, binshift) \
3611 ((((sp)->slab_refcnt <= (binshift)) || \
3612 (((sp)->slab_chunks - (sp)->slab_refcnt) <= (binshift))) \
3613 ? -(sp)->slab_refcnt \
3614 : -((binshift) + ((sp)->slab_refcnt >> (binshift))))
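/*
 * Worked example of the binning above (illustrative only): with a binshift
 * of 4 and a slab of 64 chunks, slab_refcnt values of 1..4 and 60..63 sort
 * by their exact refcnt (the fine-grained extremes), while a middling
 * slab_refcnt of 40 bins to -(4 + (40 >> 4)) = -6, the same weight as any
 * refcnt from 32 to 47.
 */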
3615
3616 /*
3617 * Minimizing the number of partial slabs on the freelist minimizes
3618 * fragmentation (the ratio of unused buffers held by the slab layer). There are
3619 * two ways to get a slab off of the freelist: 1) free all the buffers on the
3620 * slab, and 2) allocate all the buffers on the slab. It follows that we want
3621 * the most-used slabs at the front of the list where they have the best chance
3622 * of being completely allocated, and the least-used slabs at a safe distance
3623 * from the front to improve the odds that the few remaining buffers will all be
3624 * freed before another allocation can tie up the slab. For that reason a slab
3625 * with a higher slab_refcnt sorts less than a slab with a lower
3626 * slab_refcnt.
3627 *
3628 * However, if a slab has at least one buffer that is deemed unfreeable, we
3629 * would rather have that slab at the front of the list regardless of
3630 * slab_refcnt, since even one unfreeable buffer makes the entire slab
3631 * unfreeable. If the client returns KMEM_CBRC_NO in response to a cache_move()
3632 * callback, the slab is marked unfreeable for as long as it remains on the
3633 * freelist.
3634 */
3635 static int
3636 kmem_partial_slab_cmp(const void *p0, const void *p1)
3637 {
3638 const kmem_cache_t *cp;
3639 const kmem_slab_t *s0 = p0;
3640 const kmem_slab_t *s1 = p1;
3641 int w0, w1;
3642 size_t binshift;
3643
3644 ASSERT(KMEM_SLAB_IS_PARTIAL(s0));
3645 ASSERT(KMEM_SLAB_IS_PARTIAL(s1));
3646 ASSERT(s0->slab_cache == s1->slab_cache);
3647 cp = s1->slab_cache;
3648 ASSERT(MUTEX_HELD(&cp->cache_lock));
3649 binshift = cp->cache_partial_binshift;
3650
3651 /* weight of first slab */
3652 w0 = KMEM_PARTIAL_SLAB_WEIGHT(s0, binshift);
3653 if (s0->slab_flags & KMEM_SLAB_NOMOVE) {
3654 w0 -= cp->cache_maxchunks;
3655 }
3656
3657 /* weight of second slab */
3658 w1 = KMEM_PARTIAL_SLAB_WEIGHT(s1, binshift);
3659 if (s1->slab_flags & KMEM_SLAB_NOMOVE) {
3660 w1 -= cp->cache_maxchunks;
3661 }
3662
3663 if (w0 < w1)
3664 return (-1);
3665 if (w0 > w1)
3666 return (1);
3667
3668 /* compare pointer values */
3669 if ((uintptr_t)s0 < (uintptr_t)s1)
3670 return (-1);
3671 if ((uintptr_t)s0 > (uintptr_t)s1)
3672 return (1);
3673
3674 return (0);
3675 }
3676
3677 /*
3678 * It must be valid to call the destructor (if any) on a newly created object.
3679 * That is, the constructor (if any) must leave the object in a valid state for
3680 * the destructor.
3681 */
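/*
 * For illustration only (a hypothetical client, not part of this file): a
 * typical cache with a constructor/destructor pair might be created as
 *
 *	foo_cache = kmem_cache_create("foo_cache", sizeof (foo_t), 0,
 *	    foo_constructor, foo_destructor, NULL, NULL, NULL, 0);
 *
 * where foo_constructor() must leave a foo_t in a state that
 * foo_destructor() can safely tear down, per the rule above. All 'foo'
 * names are invented for the example.
 */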
3682 kmem_cache_t *
3683 kmem_cache_create(
3684 char *name, /* descriptive name for this cache */
3685 size_t bufsize, /* size of the objects it manages */
3686 size_t align, /* required object alignment */
3687 int (*constructor)(void *, void *, int), /* object constructor */
3688 void (*destructor)(void *, void *), /* object destructor */
3689 void (*reclaim)(void *), /* memory reclaim callback */
3690 void *private, /* pass-thru arg for constr/destr/reclaim */
3691 vmem_t *vmp, /* vmem source for slab allocation */
3692 int cflags) /* cache creation flags */
3693 {
3694 int cpu_seqid;
3695 size_t chunksize;
3696 kmem_cache_t *cp;
3697 kmem_magtype_t *mtp;
3698 size_t csize = KMEM_CACHE_SIZE(max_ncpus);
3699
3700 #ifdef DEBUG
3701 /*
3702 * Cache names should conform to the rules for valid C identifiers
3703 */
3704 if (!strident_valid(name)) {
3705 cmn_err(CE_CONT,
3706 "kmem_cache_create: '%s' is an invalid cache name\n"
3707 "cache names must conform to the rules for "
3708 "C identifiers\n", name);
3709 }
3710 #endif /* DEBUG */
3711
3712 if (vmp == NULL)
3713 vmp = kmem_default_arena;
3714
3715 /*
3716 * If this kmem cache has an identifier vmem arena as its source, mark
3717 * it such to allow kmem_reap_idspace().
3718 */
3719 ASSERT(!(cflags & KMC_IDENTIFIER)); /* consumer should not set this */
3720 if (vmp->vm_cflags & VMC_IDENTIFIER)
3721 cflags |= KMC_IDENTIFIER;
3722
3723 /*
3724 * Get a kmem_cache structure. We arrange that cp->cache_cpu[]
3725 * is aligned on a KMEM_CPU_CACHE_SIZE boundary to prevent
3726 * false sharing of per-CPU data.
3727 */
3728 cp = vmem_xalloc(kmem_cache_arena, csize, KMEM_CPU_CACHE_SIZE,
3729 P2NPHASE(csize, KMEM_CPU_CACHE_SIZE), 0, NULL, NULL, VM_SLEEP);
3730 bzero(cp, csize);
3731 list_link_init(&cp->cache_link);
3732
3733 if (align == 0)
3734 align = KMEM_ALIGN;
3735
3736 /*
3737 * If we're not at least KMEM_ALIGN aligned, we can't use free
3738 * memory to hold bufctl information (because we can't safely
3739 * perform word loads and stores on it).
3740 */
3741 if (align < KMEM_ALIGN)
3742 cflags |= KMC_NOTOUCH;
3743
3744 if (!ISP2(align) || align > vmp->vm_quantum)
3745 panic("kmem_cache_create: bad alignment %lu", align);
3746
3747 mutex_enter(&kmem_flags_lock);
3748 if (kmem_flags & KMF_RANDOMIZE)
3749 kmem_flags = (((kmem_flags | ~KMF_RANDOM) + 1) & KMF_RANDOM) |
3750 KMF_RANDOMIZE;
3751 cp->cache_flags = (kmem_flags | cflags) & KMF_DEBUG;
3752 mutex_exit(&kmem_flags_lock);
3753
3754 /*
3755 * Make sure all the various flags are reasonable.
3756 */
3757 ASSERT(!(cflags & KMC_NOHASH) || !(cflags & KMC_NOTOUCH));
3758
3759 if (cp->cache_flags & KMF_LITE) {
3760 if (bufsize >= kmem_lite_minsize &&
3761 align <= kmem_lite_maxalign &&
3762 P2PHASE(bufsize, kmem_lite_maxalign) != 0) {
3763 cp->cache_flags |= KMF_BUFTAG;
3764 cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3765 } else {
3766 cp->cache_flags &= ~KMF_DEBUG;
3767 }
3768 }
3769
3770 if (cp->cache_flags & KMF_DEADBEEF)
3771 cp->cache_flags |= KMF_REDZONE;
3772
3773 if ((cflags & KMC_QCACHE) && (cp->cache_flags & KMF_AUDIT))
3774 cp->cache_flags |= KMF_NOMAGAZINE;
3775
3776 if (cflags & KMC_NODEBUG)
3777 cp->cache_flags &= ~KMF_DEBUG;
3778
3779 if (cflags & KMC_NOTOUCH)
3780 cp->cache_flags &= ~KMF_TOUCH;
3781
3782 if (cflags & KMC_PREFILL)
3783 cp->cache_flags |= KMF_PREFILL;
3784
3785 if (cflags & KMC_NOHASH)
3786 cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3787
3788 if (cflags & KMC_NOMAGAZINE)
3789 cp->cache_flags |= KMF_NOMAGAZINE;
3790
3791 if ((cp->cache_flags & KMF_AUDIT) && !(cflags & KMC_NOTOUCH))
3792 cp->cache_flags |= KMF_REDZONE;
3793
3794 if (!(cp->cache_flags & KMF_AUDIT))
3795 cp->cache_flags &= ~KMF_CONTENTS;
3796
3797 if ((cp->cache_flags & KMF_BUFTAG) && bufsize >= kmem_minfirewall &&
3798 !(cp->cache_flags & KMF_LITE) && !(cflags & KMC_NOHASH))
3799 cp->cache_flags |= KMF_FIREWALL;
3800
3801 if (vmp != kmem_default_arena || kmem_firewall_arena == NULL)
3802 cp->cache_flags &= ~KMF_FIREWALL;
3803
3804 if (cp->cache_flags & KMF_FIREWALL) {
3805 cp->cache_flags &= ~KMF_BUFTAG;
3806 cp->cache_flags |= KMF_NOMAGAZINE;
3807 ASSERT(vmp == kmem_default_arena);
3808 vmp = kmem_firewall_arena;
3809 }
3810
3811 /*
3812 * Set cache properties.
3813 */
3814 (void) strncpy(cp->cache_name, name, KMEM_CACHE_NAMELEN);
3815 strident_canon(cp->cache_name, KMEM_CACHE_NAMELEN + 1);
3816 cp->cache_bufsize = bufsize;
3817 cp->cache_align = align;
3818 cp->cache_constructor = constructor;
3819 cp->cache_destructor = destructor;
3820 cp->cache_reclaim = reclaim;
3821 cp->cache_private = private;
3822 cp->cache_arena = vmp;
3823 cp->cache_cflags = cflags;
3824
3825 /*
3826 * Determine the chunk size.
3827 */
3828 chunksize = bufsize;
3829
3830 if (align >= KMEM_ALIGN) {
3831 chunksize = P2ROUNDUP(chunksize, KMEM_ALIGN);
3832 cp->cache_bufctl = chunksize - KMEM_ALIGN;
3833 }
3834
3835 if (cp->cache_flags & KMF_BUFTAG) {
3836 cp->cache_bufctl = chunksize;
3837 cp->cache_buftag = chunksize;
3838 if (cp->cache_flags & KMF_LITE)
3839 chunksize += KMEM_BUFTAG_LITE_SIZE(kmem_lite_count);
3840 else
3841 chunksize += sizeof (kmem_buftag_t);
3842 }
3843
3844 if (cp->cache_flags & KMF_DEADBEEF) {
3845 cp->cache_verify = MIN(cp->cache_buftag, kmem_maxverify);
3846 if (cp->cache_flags & KMF_LITE)
3847 cp->cache_verify = sizeof (uint64_t);
3848 }
3849
3850 cp->cache_contents = MIN(cp->cache_bufctl, kmem_content_maxsave);
3851
3852 cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align);
3853
3854 /*
3855 * Now that we know the chunk size, determine the optimal slab size.
3856 */
3857 if (vmp == kmem_firewall_arena) {
3858 cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum);
3859 cp->cache_mincolor = cp->cache_slabsize - chunksize;
3860 cp->cache_maxcolor = cp->cache_mincolor;
3861 cp->cache_flags |= KMF_HASH;
3862 ASSERT(!(cp->cache_flags & KMF_BUFTAG));
3863 } else if ((cflags & KMC_NOHASH) || (!(cflags & KMC_NOTOUCH) &&
3864 !(cp->cache_flags & KMF_AUDIT) &&
3865 chunksize < vmp->vm_quantum / KMEM_VOID_FRACTION)) {
3866 cp->cache_slabsize = vmp->vm_quantum;
3867 cp->cache_mincolor = 0;
3868 cp->cache_maxcolor =
3869 (cp->cache_slabsize - sizeof (kmem_slab_t)) % chunksize;
3870 ASSERT(chunksize + sizeof (kmem_slab_t) <= cp->cache_slabsize);
3871 ASSERT(!(cp->cache_flags & KMF_AUDIT));
3872 } else {
3873 size_t chunks, bestfit, waste, slabsize;
3874 size_t minwaste = LONG_MAX;
3875
3876 for (chunks = 1; chunks <= KMEM_VOID_FRACTION; chunks++) {
3877 slabsize = P2ROUNDUP(chunksize * chunks,
3878 vmp->vm_quantum);
3879 chunks = slabsize / chunksize;
3880 waste = (slabsize % chunksize) / chunks;
3881 if (waste < minwaste) {
3882 minwaste = waste;
3883 bestfit = slabsize;
3884 }
3885 }
3886 if (cflags & KMC_QCACHE)
3887 bestfit = VMEM_QCACHE_SLABSIZE(vmp->vm_qcache_max);
3888 cp->cache_slabsize = bestfit;
3889 cp->cache_mincolor = 0;
3890 cp->cache_maxcolor = bestfit % chunksize;
3891 cp->cache_flags |= KMF_HASH;
3892 }
3893
3894 cp->cache_maxchunks = (cp->cache_slabsize / cp->cache_chunksize);
3895 cp->cache_partial_binshift = highbit(cp->cache_maxchunks / 16) + 1;
3896
3897 /*
3898 * Disallowing prefill when either the DEBUG or HASH flag is set or when
3899 * there is a constructor avoids some tricky issues with debug setup
3900 * that may be revisited later. We cannot allow prefill in a
3901 * metadata cache because of potential recursion.
3902 */
3903 if (vmp == kmem_msb_arena ||
3904 cp->cache_flags & (KMF_HASH | KMF_BUFTAG) ||
3905 cp->cache_constructor != NULL)
3906 cp->cache_flags &= ~KMF_PREFILL;
3907
3908 if (cp->cache_flags & KMF_HASH) {
3909 ASSERT(!(cflags & KMC_NOHASH));
3910 cp->cache_bufctl_cache = (cp->cache_flags & KMF_AUDIT) ?
3911 kmem_bufctl_audit_cache : kmem_bufctl_cache;
3912 }
3913
3914 if (cp->cache_maxcolor >= vmp->vm_quantum)
3915 cp->cache_maxcolor = vmp->vm_quantum - 1;
3916
3917 cp->cache_color = cp->cache_mincolor;
3918
3919 /*
3920 * Initialize the rest of the slab layer.
3921 */
3922 mutex_init(&cp->cache_lock, NULL, MUTEX_DEFAULT, NULL);
3923
3924 avl_create(&cp->cache_partial_slabs, kmem_partial_slab_cmp,
3925 sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
3926 /* LINTED: E_TRUE_LOGICAL_EXPR */
3927 ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
3928 /* reuse partial slab AVL linkage for complete slab list linkage */
3929 list_create(&cp->cache_complete_slabs,
3930 sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
3931
3932 if (cp->cache_flags & KMF_HASH) {
3933 cp->cache_hash_table = vmem_alloc(kmem_hash_arena,
3934 KMEM_HASH_INITIAL * sizeof (void *), VM_SLEEP);
3935 bzero(cp->cache_hash_table,
3936 KMEM_HASH_INITIAL * sizeof (void *));
3937 cp->cache_hash_mask = KMEM_HASH_INITIAL - 1;
3938 cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1;
3939 }
3940
3941 /*
3942 * Initialize the depot.
3943 */
3944 mutex_init(&cp->cache_depot_lock, NULL, MUTEX_DEFAULT, NULL);
3945
3946 for (mtp = kmem_magtype; chunksize <= mtp->mt_minbuf; mtp++)
3947 continue;
3948
3949 cp->cache_magtype = mtp;
3950
3951 /*
3952 * Initialize the CPU layer.
3953 */
3954 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3955 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3956 mutex_init(&ccp->cc_lock, NULL, MUTEX_DEFAULT, NULL);
3957 ccp->cc_flags = cp->cache_flags;
3958 ccp->cc_rounds = -1;
3959 ccp->cc_prounds = -1;
3960 }
3961
3962 /*
3963 * Create the cache's kstats.
3964 */
3965 if ((cp->cache_kstat = kstat_create("unix", 0, cp->cache_name,
3966 "kmem_cache", KSTAT_TYPE_NAMED,
3967 sizeof (kmem_cache_kstat) / sizeof (kstat_named_t),
3968 KSTAT_FLAG_VIRTUAL)) != NULL) {
3969 cp->cache_kstat->ks_data = &kmem_cache_kstat;
3970 cp->cache_kstat->ks_update = kmem_cache_kstat_update;
3971 cp->cache_kstat->ks_private = cp;
3972 cp->cache_kstat->ks_lock = &kmem_cache_kstat_lock;
3973 kstat_install(cp->cache_kstat);
3974 }
3975
3976 /*
3977 * Add the cache to the global list. This makes it visible
3978 * to kmem_update(), so the cache must be ready for business.
3979 */
3980 mutex_enter(&kmem_cache_lock);
3981 list_insert_tail(&kmem_caches, cp);
3982 mutex_exit(&kmem_cache_lock);
3983
3984 if (kmem_ready)
3985 kmem_cache_magazine_enable(cp);
3986
3987 return (cp);
3988 }
3989
3990 static int
3991 kmem_move_cmp(const void *buf, const void *p)
3992 {
3993 const kmem_move_t *kmm = p;
3994 uintptr_t v1 = (uintptr_t)buf;
3995 uintptr_t v2 = (uintptr_t)kmm->kmm_from_buf;
3996 return (v1 < v2 ? -1 : (v1 > v2 ? 1 : 0));
3997 }
3998
3999 static void
4000 kmem_reset_reclaim_threshold(kmem_defrag_t *kmd)
4001 {
4002 kmd->kmd_reclaim_numer = 1;
4003 }
4004
4005 /*
4006 * Initially, when choosing candidate slabs for buffers to move, we want to be
4007 * very selective and take only slabs that are less than
4008 * (1 / KMEM_VOID_FRACTION) allocated. If we have difficulty finding candidate
4009 * slabs, then we raise the allocation ceiling incrementally. The reclaim
4010 * threshold is reset to (1 / KMEM_VOID_FRACTION) as soon as the cache is no
4011 * longer fragmented.
4012 */
4013 static void
4014 kmem_adjust_reclaim_threshold(kmem_defrag_t *kmd, int direction)
4015 {
4016 if (direction > 0) {
4017 /* make it easier to find a candidate slab */
4018 if (kmd->kmd_reclaim_numer < (KMEM_VOID_FRACTION - 1)) {
4019 kmd->kmd_reclaim_numer++;
4020 }
4021 } else {
4022 /* be more selective */
4023 if (kmd->kmd_reclaim_numer > 1) {
4024 kmd->kmd_reclaim_numer--;
4025 }
4026 }
4027 }
4028
4029 void
4030 kmem_cache_set_move(kmem_cache_t *cp,
4031 kmem_cbrc_t (*move)(void *, void *, size_t, void *))
4032 {
4033 kmem_defrag_t *defrag;
4034
4035 ASSERT(move != NULL);
4036 /*
4037 * The consolidator does not support NOTOUCH caches because kmem cannot
4038 * initialize their slabs with the 0xbaddcafe memory pattern, which sets
4039 * a low order bit usable by clients to distinguish uninitialized memory
4040 * from known objects (see kmem_slab_create).
4041 */
4042 ASSERT(!(cp->cache_cflags & KMC_NOTOUCH));
4043 ASSERT(!(cp->cache_cflags & KMC_IDENTIFIER));
4044
4045 /*
4046 * We should not be holding anyone's cache lock when calling
4047 * kmem_cache_alloc(), so allocate in all cases before acquiring the
4048 * lock.
4049 */
4050 defrag = kmem_cache_alloc(kmem_defrag_cache, KM_SLEEP);
4051
4052 mutex_enter(&cp->cache_lock);
4053
4054 if (KMEM_IS_MOVABLE(cp)) {
4055 if (cp->cache_move == NULL) {
4056 ASSERT(cp->cache_slab_alloc == 0);
4057
4058 cp->cache_defrag = defrag;
4059 defrag = NULL; /* nothing to free */
4060 bzero(cp->cache_defrag, sizeof (kmem_defrag_t));
4061 avl_create(&cp->cache_defrag->kmd_moves_pending,
4062 kmem_move_cmp, sizeof (kmem_move_t),
4063 offsetof(kmem_move_t, kmm_entry));
4064 /* LINTED: E_TRUE_LOGICAL_EXPR */
4065 ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
4066 /* reuse the slab's AVL linkage for deadlist linkage */
4067 list_create(&cp->cache_defrag->kmd_deadlist,
4068 sizeof (kmem_slab_t),
4069 offsetof(kmem_slab_t, slab_link));
4070 kmem_reset_reclaim_threshold(cp->cache_defrag);
4071 }
4072 cp->cache_move = move;
4073 }
4074
4075 mutex_exit(&cp->cache_lock);
4076
4077 if (defrag != NULL) {
4078 kmem_cache_free(kmem_defrag_cache, defrag); /* unused */
4079 }
4080 }
4081
4082 void
4083 kmem_cache_destroy(kmem_cache_t *cp)
4084 {
4085 int cpu_seqid;
4086
4087 /*
4088 * Remove the cache from the global cache list so that no one else
4089 * can schedule tasks on its behalf, wait for any pending tasks to
4090 * complete, purge the cache, and then destroy it.
4091 */
4092 mutex_enter(&kmem_cache_lock);
4093 list_remove(&kmem_caches, cp);
4094 mutex_exit(&kmem_cache_lock);
4095
4096 if (kmem_taskq != NULL)
4097 taskq_wait(kmem_taskq);
4098
4099 if (kmem_move_taskq != NULL && cp->cache_defrag != NULL)
4100 taskq_wait(kmem_move_taskq);
4101
4102 kmem_cache_magazine_purge(cp);
4103
4104 mutex_enter(&cp->cache_lock);
4105 if (cp->cache_buftotal != 0)
4106 cmn_err(CE_WARN, "kmem_cache_destroy: '%s' (%p) not empty",
4107 cp->cache_name, (void *)cp);
4108 if (cp->cache_defrag != NULL) {
4109 avl_destroy(&cp->cache_defrag->kmd_moves_pending);
4110 list_destroy(&cp->cache_defrag->kmd_deadlist);
4111 kmem_cache_free(kmem_defrag_cache, cp->cache_defrag);
4112 cp->cache_defrag = NULL;
4113 }
4114 /*
4115 * The cache is now dead. There should be no further activity. We
4116 * enforce this by setting land mines in the constructor, destructor,
4117 * reclaim, and move routines that induce a kernel text fault if
4118 * invoked.
4119 */
4120 cp->cache_constructor = (int (*)(void *, void *, int))1;
4121 cp->cache_destructor = (void (*)(void *, void *))2;
4122 cp->cache_reclaim = (void (*)(void *))3;
4123 cp->cache_move = (kmem_cbrc_t (*)(void *, void *, size_t, void *))4;
4124 mutex_exit(&cp->cache_lock);
4125
4126 kstat_delete(cp->cache_kstat);
4127
4128 if (cp->cache_hash_table != NULL)
4129 vmem_free(kmem_hash_arena, cp->cache_hash_table,
4130 (cp->cache_hash_mask + 1) * sizeof (void *));
4131
4132 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++)
4133 mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock);
4134
4135 mutex_destroy(&cp->cache_depot_lock);
4136 mutex_destroy(&cp->cache_lock);
4137
4138 vmem_free(kmem_cache_arena, cp, KMEM_CACHE_SIZE(max_ncpus));
4139 }
4140
4141 /*ARGSUSED*/
4142 static int
4143 kmem_cpu_setup(cpu_setup_t what, int id, void *arg)
4144 {
4145 ASSERT(MUTEX_HELD(&cpu_lock));
4146 if (what == CPU_UNCONFIG) {
4147 kmem_cache_applyall(kmem_cache_magazine_purge,
4148 kmem_taskq, TQ_SLEEP);
4149 kmem_cache_applyall(kmem_cache_magazine_enable,
4150 kmem_taskq, TQ_SLEEP);
4151 }
4152 return (0);
4153 }
4154
4155 static void
4156 kmem_alloc_caches_create(const int *array, size_t count,
4157 kmem_cache_t **alloc_table, size_t maxbuf, uint_t shift)
4158 {
4159 char name[KMEM_CACHE_NAMELEN + 1];
4160 size_t table_unit = (1 << shift); /* range of one alloc_table entry */
4161 size_t size = table_unit;
4162 int i;
4163
4164 for (i = 0; i < count; i++) {
4165 size_t cache_size = array[i];
4166 size_t align = KMEM_ALIGN;
4167 kmem_cache_t *cp;
4168
4169 /* if the table has an entry for maxbuf, we're done */
4170 if (size > maxbuf)
4171 break;
4172
4173 /* cache size must be a multiple of the table unit */
4174 ASSERT(P2PHASE(cache_size, table_unit) == 0);
4175
4176 /*
4177 * If they allocate a multiple of the coherency granularity,
4178 * they get a coherency-granularity-aligned address.
4179 */
4180 if (IS_P2ALIGNED(cache_size, 64))
4181 align = 64;
4182 if (IS_P2ALIGNED(cache_size, PAGESIZE))
4183 align = PAGESIZE;
4184 (void) snprintf(name, sizeof (name),
4185 "kmem_alloc_%lu", cache_size);
4186 cp = kmem_cache_create(name, cache_size, align,
4187 NULL, NULL, NULL, NULL, NULL, KMC_KMEM_ALLOC);
4188
4189 while (size <= cache_size) {
4190 alloc_table[(size - 1) >> shift] = cp;
4191 size += table_unit;
4192 }
4193 }
4194
4195 ASSERT(size > maxbuf); /* i.e. maxbuf <= max(cache_size) */
4196 }
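/*
 * Worked example (illustrative; the actual size arrays are defined elsewhere
 * in this file): if the size array begins 8, 16, 24 and the table unit
 * (1 << shift) is 8, then alloc_table[0] points at kmem_alloc_8,
 * alloc_table[1] at kmem_alloc_16, and alloc_table[2] at kmem_alloc_24, so
 * a request for any size in (16, 24] resolves to the kmem_alloc_24 cache.
 */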
4197
4198 static void
4199 kmem_cache_init(int pass, int use_large_pages)
4200 {
4201 int i;
4202 size_t maxbuf;
4203 kmem_magtype_t *mtp;
4204
4205 for (i = 0; i < sizeof (kmem_magtype) / sizeof (*mtp); i++) {
4206 char name[KMEM_CACHE_NAMELEN + 1];
4207
4208 mtp = &kmem_magtype[i];
4209 (void) sprintf(name, "kmem_magazine_%d", mtp->mt_magsize);
4210 mtp->mt_cache = kmem_cache_create(name,
4211 (mtp->mt_magsize + 1) * sizeof (void *),
4212 mtp->mt_align, NULL, NULL, NULL, NULL,
4213 kmem_msb_arena, KMC_NOHASH);
4214 }
4215
4216 kmem_slab_cache = kmem_cache_create("kmem_slab_cache",
4217 sizeof (kmem_slab_t), 0, NULL, NULL, NULL, NULL,
4218 kmem_msb_arena, KMC_NOHASH);
4219
4220 kmem_bufctl_cache = kmem_cache_create("kmem_bufctl_cache",
4221 sizeof (kmem_bufctl_t), 0, NULL, NULL, NULL, NULL,
4222 kmem_msb_arena, KMC_NOHASH);
4223
4224 kmem_bufctl_audit_cache = kmem_cache_create("kmem_bufctl_audit_cache",
4225 sizeof (kmem_bufctl_audit_t), 0, NULL, NULL, NULL, NULL,
4226 kmem_msb_arena, KMC_NOHASH);
4227
4228 if (pass == 2) {
4229 kmem_va_arena = vmem_create("kmem_va",
4230 NULL, 0, PAGESIZE,
4231 vmem_alloc, vmem_free, heap_arena,
4232 8 * PAGESIZE, VM_SLEEP);
4233
4234 if (use_large_pages) {
4235 kmem_default_arena = vmem_xcreate("kmem_default",
4236 NULL, 0, PAGESIZE,
4237 segkmem_alloc_lp, segkmem_free_lp, kmem_va_arena,
4238 0, VMC_DUMPSAFE | VM_SLEEP);
4239 } else {
4240 kmem_default_arena = vmem_create("kmem_default",
4241 NULL, 0, PAGESIZE,
4242 segkmem_alloc, segkmem_free, kmem_va_arena,
4243 0, VMC_DUMPSAFE | VM_SLEEP);
4244 }
4245
4246 /* Figure out what our maximum cache size is */
4247 maxbuf = kmem_max_cached;
4248 if (maxbuf <= KMEM_MAXBUF) {
4249 maxbuf = 0;
4250 kmem_max_cached = KMEM_MAXBUF;
4251 } else {
4252 size_t size = 0;
4253 size_t max =
4254 sizeof (kmem_big_alloc_sizes) / sizeof (int);
4255 /*
4256 * Round maxbuf up to an existing cache size. If maxbuf
4257 * is larger than the largest cache, we truncate it to
4258 * the largest cache's size.
4259 */
4260 for (i = 0; i < max; i++) {
4261 size = kmem_big_alloc_sizes[i];
4262 if (maxbuf <= size)
4263 break;
4264 }
4265 kmem_max_cached = maxbuf = size;
4266 }
4267
4268 /*
4269 * The big alloc table may not be completely overwritten, so
4270 * we clear out any stale cache pointers from the first pass.
4271 */
4272 bzero(kmem_big_alloc_table, sizeof (kmem_big_alloc_table));
4273 } else {
4274 /*
4275 * During the first pass, the kmem_alloc_* caches
4276 * are treated as metadata.
4277 */
4278 kmem_default_arena = kmem_msb_arena;
4279 maxbuf = KMEM_BIG_MAXBUF_32BIT;
4280 }
4281
4282 /*
4283 * Set up the default caches to back kmem_alloc()
4284 */
4285 kmem_alloc_caches_create(
4286 kmem_alloc_sizes, sizeof (kmem_alloc_sizes) / sizeof (int),
4287 kmem_alloc_table, KMEM_MAXBUF, KMEM_ALIGN_SHIFT);
4288
4289 kmem_alloc_caches_create(
4290 kmem_big_alloc_sizes, sizeof (kmem_big_alloc_sizes) / sizeof (int),
4291 kmem_big_alloc_table, maxbuf, KMEM_BIG_SHIFT);
4292
4293 kmem_big_alloc_table_max = maxbuf >> KMEM_BIG_SHIFT;
4294 }
4295
4296 void
4297 kmem_init(void)
4298 {
4299 kmem_cache_t *cp;
4300 int old_kmem_flags = kmem_flags;
4301 int use_large_pages = 0;
4302 size_t maxverify, minfirewall;
4303
4304 kstat_init();
4305
4306 /*
4307 * Don't do firewalled allocations if the heap is less than 1TB
4308 * (i.e. on a 32-bit kernel).
4309 * The resulting VM_NEXTFIT allocations would create too much
4310 * fragmentation in a small heap.
4311 */
4312 #if defined(_LP64)
4313 maxverify = minfirewall = PAGESIZE / 2;
4314 #else
4315 maxverify = minfirewall = ULONG_MAX;
4316 #endif
4317
4318 /* LINTED */
4319 ASSERT(sizeof (kmem_cpu_cache_t) == KMEM_CPU_CACHE_SIZE);
4320
4321 list_create(&kmem_caches, sizeof (kmem_cache_t),
4322 offsetof(kmem_cache_t, cache_link));
4323
4324 kmem_metadata_arena = vmem_create("kmem_metadata", NULL, 0, PAGESIZE,
4325 vmem_alloc, vmem_free, heap_arena, 8 * PAGESIZE,
4326 VM_SLEEP | VMC_NO_QCACHE);
4327
4328 kmem_msb_arena = vmem_create("kmem_msb", NULL, 0,
4329 PAGESIZE, segkmem_alloc, segkmem_free, kmem_metadata_arena, 0,
4330 VMC_DUMPSAFE | VM_SLEEP);
4331
4332 kmem_cache_arena = vmem_create("kmem_cache", NULL, 0, KMEM_ALIGN,
4333 segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
4334
4335 kmem_hash_arena = vmem_create("kmem_hash", NULL, 0, KMEM_ALIGN,
4336 segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
4337
4338 kmem_log_arena = vmem_create("kmem_log", NULL, 0, KMEM_ALIGN,
4339 segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
4340
4341 kmem_firewall_va_arena = vmem_create("kmem_firewall_va",
4342 NULL, 0, PAGESIZE,
4343 kmem_firewall_va_alloc, kmem_firewall_va_free, heap_arena,
4344 0, VM_SLEEP);
4345
4346 kmem_firewall_arena = vmem_create("kmem_firewall", NULL, 0, PAGESIZE,
4347 segkmem_alloc, segkmem_free, kmem_firewall_va_arena, 0,
4348 VMC_DUMPSAFE | VM_SLEEP);
4349
4350 /* temporary oversize arena for mod_read_system_file */
4351 kmem_oversize_arena = vmem_create("kmem_oversize", NULL, 0, PAGESIZE,
4352 segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
4353
4354 kmem_reap_interval = 15 * hz;
4355
4356 /*
4357 * Read /etc/system. This is a chicken-and-egg problem because
4358 * kmem_flags may be set in /etc/system, but mod_read_system_file()
4359 * needs to use the allocator. The simplest solution is to create
4360 * all the standard kmem caches, read /etc/system, destroy all the
4361 * caches we just created, and then create them all again in light
4362 * of the (possibly) new kmem_flags and other kmem tunables.
4363 */
4364 kmem_cache_init(1, 0);
4365
4366 mod_read_system_file(boothowto & RB_ASKNAME);
4367
4368 while ((cp = list_tail(&kmem_caches)) != NULL)
4369 kmem_cache_destroy(cp);
4370
4371 vmem_destroy(kmem_oversize_arena);
4372
4373 if (old_kmem_flags & KMF_STICKY)
4374 kmem_flags = old_kmem_flags;
4375
4376 if (!(kmem_flags & KMF_AUDIT))
4377 vmem_seg_size = offsetof(vmem_seg_t, vs_thread);
4378
4379 if (kmem_maxverify == 0)
4380 kmem_maxverify = maxverify;
4381
4382 if (kmem_minfirewall == 0)
4383 kmem_minfirewall = minfirewall;
4384
4385 /*
4386 * give segkmem a chance to figure out if we are using large pages
4387 * for the kernel heap
4388 */
4389 use_large_pages = segkmem_lpsetup();
4390
4391 /*
4392 * To protect against corruption, we keep the actual number of caller
4393 * PCs that KMF_LITE records separate from the tunable. We arbitrarily
4394 * clamp it to 16, since the overhead for small buffers quickly gets
4395 * out of hand.
4396 *
4397 * The real limit would depend on the needs of the largest KMC_NOHASH
4398 * cache.
4399 */
4400 kmem_lite_count = MIN(MAX(0, kmem_lite_pcs), 16);
4401 kmem_lite_pcs = kmem_lite_count;
4402
4403 /*
4404 * Normally, we firewall oversized allocations when possible, but
4405 * if we are using large pages for kernel memory, and we don't have
4406 * any non-LITE debugging flags set, we want to allocate oversized
4407 * buffers from large pages, and so skip the firewalling.
4408 */
4409 if (use_large_pages &&
4410 ((kmem_flags & KMF_LITE) || !(kmem_flags & KMF_DEBUG))) {
4411 kmem_oversize_arena = vmem_xcreate("kmem_oversize", NULL, 0,
4412 PAGESIZE, segkmem_alloc_lp, segkmem_free_lp, heap_arena,
4413 0, VMC_DUMPSAFE | VM_SLEEP);
4414 } else {
4415 kmem_oversize_arena = vmem_create("kmem_oversize",
4416 NULL, 0, PAGESIZE,
4417 segkmem_alloc, segkmem_free, kmem_minfirewall < ULONG_MAX?
4418 kmem_firewall_va_arena : heap_arena, 0, VMC_DUMPSAFE |
4419 VM_SLEEP);
4420 }
4421
4422 kmem_cache_init(2, use_large_pages);
4423
4424 if (kmem_flags & (KMF_AUDIT | KMF_RANDOMIZE)) {
4425 if (kmem_transaction_log_size == 0)
4426 kmem_transaction_log_size = kmem_maxavail() / 50;
4427 kmem_transaction_log = kmem_log_init(kmem_transaction_log_size);
4428 }
4429
4430 if (kmem_flags & (KMF_CONTENTS | KMF_RANDOMIZE)) {
4431 if (kmem_content_log_size == 0)
4432 kmem_content_log_size = kmem_maxavail() / 50;
4433 kmem_content_log = kmem_log_init(kmem_content_log_size);
4434 }
4435
4436 kmem_failure_log = kmem_log_init(kmem_failure_log_size);
4437 kmem_slab_log = kmem_log_init(kmem_slab_log_size);
4438 kmem_zerosized_log = kmem_log_init(kmem_zerosized_log_size);
4439
4440 /*
4441 * Initialize STREAMS message caches so allocb() is available.
4442 * This allows us to initialize the logging framework (cmn_err(9F),
4443 * strlog(9F), etc) so we can start recording messages.
4444 */
4445 streams_msg_init();
4446
4447 /*
4448 * Initialize the ZSD framework in Zones so modules loaded henceforth
4449 * can register their callbacks.
4450 */
4451 zone_zsd_init();
4452
4453 log_init();
4454 taskq_init();
4455
4456 /*
4457 * Warn about invalid or dangerous values of kmem_flags.
4458 * Always warn about unsupported values.
4459 */
4460 if (((kmem_flags & ~(KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE |
4461 KMF_CONTENTS | KMF_LITE)) != 0) ||
4462 ((kmem_flags & KMF_LITE) && kmem_flags != KMF_LITE))
4463 cmn_err(CE_WARN, "kmem_flags set to unsupported value 0x%x. "
4464 "See the Solaris Tunable Parameters Reference Manual.",
4465 kmem_flags);
4466
4467 #ifdef DEBUG
4468 if ((kmem_flags & KMF_DEBUG) == 0)
4469 cmn_err(CE_NOTE, "kmem debugging disabled.");
4470 #else
4471 /*
4472 * For non-debug kernels, the only "normal" flags are 0, KMF_LITE,
4473 * KMF_REDZONE, and KMF_CONTENTS (the last because it is only enabled
4474 * if KMF_AUDIT is set). We should warn the user about the performance
4475 * penalty of KMF_AUDIT or KMF_DEADBEEF if they are set and KMF_LITE
4476 * isn't set (since that disables AUDIT).
4477 */
4478 if (!(kmem_flags & KMF_LITE) &&
4479 (kmem_flags & (KMF_AUDIT | KMF_DEADBEEF)) != 0)
4480 cmn_err(CE_WARN, "High-overhead kmem debugging features "
4481 "enabled (kmem_flags = 0x%x). Performance degradation "
4482 "and large memory overhead possible. See the Solaris "
4483 "Tunable Parameters Reference Manual.", kmem_flags);
4484 #endif /* not DEBUG */
4485
4486 kmem_cache_applyall(kmem_cache_magazine_enable, NULL, TQ_SLEEP);
4487
4488 kmem_ready = 1;
4489
4490 /*
4491 * Initialize the platform-specific aligned/DMA memory allocator.
4492 */
4493 ka_init();
4494
4495 /*
4496 * Initialize 32-bit ID cache.
4497 */
4498 id32_init();
4499
4500 /*
4501 * Initialize the networking stack so modules loaded can
4502 * register their callbacks.
4503 */
4504 netstack_init();
4505 }
4506
4507 static void
4508 kmem_move_init(void)
4509 {
4510 kmem_defrag_cache = kmem_cache_create("kmem_defrag_cache",
4511 sizeof (kmem_defrag_t), 0, NULL, NULL, NULL, NULL,
4512 kmem_msb_arena, KMC_NOHASH);
4513 kmem_move_cache = kmem_cache_create("kmem_move_cache",
4514 sizeof (kmem_move_t), 0, NULL, NULL, NULL, NULL,
4515 kmem_msb_arena, KMC_NOHASH);
4516
4517 /*
4518 * kmem guarantees that move callbacks are sequential and that even
4519 * across multiple caches no two moves ever execute simultaneously.
4520 * Move callbacks are processed on a separate taskq so that client code
4521 * does not interfere with internal maintenance tasks.
4522 */
4523 kmem_move_taskq = taskq_create_instance("kmem_move_taskq", 0, 1,
4524 minclsyspri, 100, INT_MAX, TASKQ_PREPOPULATE);
4525 }
4526
4527 void
4528 kmem_thread_init(void)
4529 {
4530 kmem_move_init();
4531 kmem_taskq = taskq_create_instance("kmem_taskq", 0, 1, minclsyspri,
4532 300, INT_MAX, TASKQ_PREPOPULATE);
4533 }
4534
4535 void
4536 kmem_mp_init(void)
4537 {
4538 mutex_enter(&cpu_lock);
4539 register_cpu_setup_func(kmem_cpu_setup, NULL);
4540 mutex_exit(&cpu_lock);
4541
4542 kmem_update_timeout(NULL);
4543
4544 taskq_mp_init();
4545 }
4546
4547 /*
4548 * Return the slab of the allocated buffer, or NULL if the buffer is not
4549 * allocated. This function may be called with a known slab address to determine
4550 * whether or not the buffer is allocated, or with a NULL slab address to obtain
4551 * an allocated buffer's slab.
4552 */
4553 static kmem_slab_t *
4554 kmem_slab_allocated(kmem_cache_t *cp, kmem_slab_t *sp, void *buf)
4555 {
4556 kmem_bufctl_t *bcp, *bufbcp;
4557
4558 ASSERT(MUTEX_HELD(&cp->cache_lock));
4559 ASSERT(sp == NULL || KMEM_SLAB_MEMBER(sp, buf));
4560
4561 if (cp->cache_flags & KMF_HASH) {
4562 for (bcp = *KMEM_HASH(cp, buf);
4563 (bcp != NULL) && (bcp->bc_addr != buf);
4564 bcp = bcp->bc_next) {
4565 continue;
4566 }
4567 ASSERT(sp != NULL && bcp != NULL ? sp == bcp->bc_slab : 1);
4568 return (bcp == NULL ? NULL : bcp->bc_slab);
4569 }
4570
4571 if (sp == NULL) {
4572 sp = KMEM_SLAB(cp, buf);
4573 }
4574 bufbcp = KMEM_BUFCTL(cp, buf);
4575 for (bcp = sp->slab_head;
4576 (bcp != NULL) && (bcp != bufbcp);
4577 bcp = bcp->bc_next) {
4578 continue;
4579 }
4580 return (bcp == NULL ? sp : NULL);
4581 }
4582
4583 static boolean_t
4584 kmem_slab_is_reclaimable(kmem_cache_t *cp, kmem_slab_t *sp, int flags)
4585 {
4586 long refcnt = sp->slab_refcnt;
4587
4588 ASSERT(cp->cache_defrag != NULL);
4589
4590 /*
4591 * For code coverage we want to be able to move an object within the
4592 * same slab (the only partial slab) even if allocating the destination
4593 * buffer resulted in a completely allocated slab.
4594 */
4595 if (flags & KMM_DEBUG) {
4596 return ((flags & KMM_DESPERATE) ||
4597 ((sp->slab_flags & KMEM_SLAB_NOMOVE) == 0));
4598 }
4599
4600 /* If we're desperate, we don't care if the client said NO. */
4601 if (flags & KMM_DESPERATE) {
4602 return (refcnt < sp->slab_chunks); /* any partial */
4603 }
4604
4605 if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
4606 return (B_FALSE);
4607 }
4608
4609 if ((refcnt == 1) || kmem_move_any_partial) {
4610 return (refcnt < sp->slab_chunks);
4611 }
4612
4613 /*
4614 * The reclaim threshold is adjusted at each kmem_cache_scan() so that
4615 * slabs with a progressively higher percentage of used buffers can be
4616 * reclaimed until the cache as a whole is no longer fragmented.
4617 *
4618 * sp->slab_refcnt kmd_reclaim_numer
4619 * --------------- < ------------------
4620 * sp->slab_chunks KMEM_VOID_FRACTION
4621 */
4622 return ((refcnt * KMEM_VOID_FRACTION) <
4623 (sp->slab_chunks * cp->cache_defrag->kmd_reclaim_numer));
4624 }
4625
4626 /*
4627 * May be called from the kmem_move_taskq, from kmem_cache_move_notify_task(),
4628 * or when the buffer is freed.
4629 */
4630 static void
4631 kmem_slab_move_yes(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4632 {
4633 ASSERT(MUTEX_HELD(&cp->cache_lock));
4634 ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));
4635
4636 if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4637 return;
4638 }
4639
4640 if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
4641 if (KMEM_SLAB_OFFSET(sp, from_buf) == sp->slab_stuck_offset) {
4642 avl_remove(&cp->cache_partial_slabs, sp);
4643 sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
4644 sp->slab_stuck_offset = (uint32_t)-1;
4645 avl_add(&cp->cache_partial_slabs, sp);
4646 }
4647 } else {
4648 sp->slab_later_count = 0;
4649 sp->slab_stuck_offset = (uint32_t)-1;
4650 }
4651 }
4652
4653 static void
4654 kmem_slab_move_no(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4655 {
4656 ASSERT(taskq_member(kmem_move_taskq, curthread));
4657 ASSERT(MUTEX_HELD(&cp->cache_lock));
4658 ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));
4659
4660 if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4661 return;
4662 }
4663
4664 avl_remove(&cp->cache_partial_slabs, sp);
4665 sp->slab_later_count = 0;
4666 sp->slab_flags |= KMEM_SLAB_NOMOVE;
4667 sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp, from_buf);
4668 avl_add(&cp->cache_partial_slabs, sp);
4669 }
4670
4671 static void kmem_move_end(kmem_cache_t *, kmem_move_t *);
4672
4673 /*
4674 * The move callback takes two buffer addresses, the buffer to be moved, and a
4675 * newly allocated and constructed buffer selected by kmem as the destination.
4676 * It also takes the size of the buffer and an optional user argument specified
4677 * at cache creation time. kmem guarantees that the buffer to be moved has not
4678 * been unmapped by the virtual memory subsystem. Beyond that, it cannot
4679 * guarantee the present whereabouts of the buffer to be moved, so it is up to
4680 * the client to safely determine whether or not it is still using the buffer.
4681 * The client must not free either of the buffers passed to the move callback,
4682 * since kmem wants to free them directly to the slab layer. The client response
4683 * tells kmem which of the two buffers to free:
4684 *
4685 * YES kmem frees the old buffer (the move was successful)
4686 * NO kmem frees the new buffer, marks the slab of the old buffer
4687 * non-reclaimable to avoid bothering the client again
4688 * LATER kmem frees the new buffer, increments slab_later_count
4689 * DONT_KNOW kmem frees the new buffer
4690 * DONT_NEED kmem frees both the old buffer and the new buffer
4691 *
4692 * The pending callback argument now being processed contains both of the
4693 * buffers (old and new) passed to the move callback function, the slab of the
4694 * old buffer, and flags related to the move request, such as whether or not the
4695 * system was desperate for memory.
4696 *
4697 * Slabs are not freed while there is a pending callback, but instead are kept
4698 * on a deadlist, which is drained after the last callback completes. This means
4699 * that slabs are safe to access until kmem_move_end(), no matter how many of
4700 * their buffers have been freed. Once slab_refcnt reaches zero, it stays at
4701 * zero for as long as the slab remains on the deadlist and until the slab is
4702 * freed.
4703 */
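/*
 * Illustrative sketch of a client-side callback (hypothetical code, not part
 * of this file): a client registered via kmem_cache_set_move() typically
 * verifies that it still owns the old buffer, copies it if so, and answers
 * with one of the kmem_cbrc_t values described above:
 *
 *	static kmem_cbrc_t
 *	foo_move(void *old, void *new, size_t size, void *arg)
 *	{
 *		foo_t *fp = old;
 *
 *		if (!foo_still_in_use(fp))
 *			return (KMEM_CBRC_DONT_NEED);
 *		if (!foo_can_lock(fp))
 *			return (KMEM_CBRC_LATER);
 *		bcopy(old, new, size);
 *		foo_fixup_references(fp, new);
 *		return (KMEM_CBRC_YES);
 *	}
 *
 * foo_move() and the foo_* helpers are invented for the example; the real
 * policy is entirely up to the client.
 */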
4704 static void
4705 kmem_move_buffer(kmem_move_t *callback)
4706 {
4707 kmem_cbrc_t response;
4708 kmem_slab_t *sp = callback->kmm_from_slab;
4709 kmem_cache_t *cp = sp->slab_cache;
4710 boolean_t free_on_slab;
4711
4712 ASSERT(taskq_member(kmem_move_taskq, curthread));
4713 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4714 ASSERT(KMEM_SLAB_MEMBER(sp, callback->kmm_from_buf));
4715
4716 /*
4717 * The number of allocated buffers on the slab may have changed since we
4718 * last checked the slab's reclaimability (when the pending move was
4719 * enqueued), or the client may have responded NO when asked to move
4720 * another buffer on the same slab.
4721 */
4722 if (!kmem_slab_is_reclaimable(cp, sp, callback->kmm_flags)) {
4723 kmem_slab_free(cp, callback->kmm_to_buf);
4724 kmem_move_end(cp, callback);
4725 return;
4726 }
4727
4728 /*
4729 * Checking the slab layer is easy, so we might as well do that here
4730 * in case we can avoid bothering the client.
4731 */
4732 mutex_enter(&cp->cache_lock);
4733 free_on_slab = (kmem_slab_allocated(cp, sp,
4734 callback->kmm_from_buf) == NULL);
4735 mutex_exit(&cp->cache_lock);
4736
4737 if (free_on_slab) {
4738 kmem_slab_free(cp, callback->kmm_to_buf);
4739 kmem_move_end(cp, callback);
4740 return;
4741 }
4742
4743 if (cp->cache_flags & KMF_BUFTAG) {
4744 /*
4745 * Make kmem_cache_alloc_debug() apply the constructor for us.
4746 */
4747 if (kmem_cache_alloc_debug(cp, callback->kmm_to_buf,
4748 KM_NOSLEEP, 1, caller()) != 0) {
4749 kmem_move_end(cp, callback);
4750 return;
4751 }
4752 } else if (cp->cache_constructor != NULL &&
4753 cp->cache_constructor(callback->kmm_to_buf, cp->cache_private,
4754 KM_NOSLEEP) != 0) {
4755 atomic_inc_64(&cp->cache_alloc_fail);
4756 kmem_slab_free(cp, callback->kmm_to_buf);
4757 kmem_move_end(cp, callback);
4758 return;
4759 }
4760
4761 cp->cache_defrag->kmd_callbacks++;
4762 cp->cache_defrag->kmd_thread = curthread;
4763 cp->cache_defrag->kmd_from_buf = callback->kmm_from_buf;
4764 cp->cache_defrag->kmd_to_buf = callback->kmm_to_buf;
4765 DTRACE_PROBE2(kmem__move__start, kmem_cache_t *, cp, kmem_move_t *,
4766 callback);
4767
4768 response = cp->cache_move(callback->kmm_from_buf,
4769 callback->kmm_to_buf, cp->cache_bufsize, cp->cache_private);
4770
4771 DTRACE_PROBE3(kmem__move__end, kmem_cache_t *, cp, kmem_move_t *,
4772 callback, kmem_cbrc_t, response);
4773 cp->cache_defrag->kmd_thread = NULL;
4774 cp->cache_defrag->kmd_from_buf = NULL;
4775 cp->cache_defrag->kmd_to_buf = NULL;
4776
4777 if (response == KMEM_CBRC_YES) {
4778 cp->cache_defrag->kmd_yes++;
4779 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4780 /* slab safe to access until kmem_move_end() */
4781 if (sp->slab_refcnt == 0)
4782 cp->cache_defrag->kmd_slabs_freed++;
4783 mutex_enter(&cp->cache_lock);
4784 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4785 mutex_exit(&cp->cache_lock);
4786 kmem_move_end(cp, callback);
4787 return;
4788 }
4789
4790 switch (response) {
4791 case KMEM_CBRC_NO:
4792 cp->cache_defrag->kmd_no++;
4793 mutex_enter(&cp->cache_lock);
4794 kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4795 mutex_exit(&cp->cache_lock);
4796 break;
4797 case KMEM_CBRC_LATER:
4798 cp->cache_defrag->kmd_later++;
4799 mutex_enter(&cp->cache_lock);
4800 if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4801 mutex_exit(&cp->cache_lock);
4802 break;
4803 }
4804
4805 if (++sp->slab_later_count >= KMEM_DISBELIEF) {
4806 kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4807 } else if (!(sp->slab_flags & KMEM_SLAB_NOMOVE)) {
4808 sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp,
4809 callback->kmm_from_buf);
4810 }
4811 mutex_exit(&cp->cache_lock);
4812 break;
4813 case KMEM_CBRC_DONT_NEED:
4814 cp->cache_defrag->kmd_dont_need++;
4815 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4816 if (sp->slab_refcnt == 0)
4817 cp->cache_defrag->kmd_slabs_freed++;
4818 mutex_enter(&cp->cache_lock);
4819 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4820 mutex_exit(&cp->cache_lock);
4821 break;
4822 case KMEM_CBRC_DONT_KNOW:
4823 /*
4824 * If we don't know if we can move this buffer or not, we'll
4825 * just assume that we can't: if the buffer is in fact free,
4826 * then it is sitting in one of the per-CPU magazines or in
4827 * a full magazine in the depot layer. Either way, because
4828 * defrag is induced in the same logic that reaps a cache,
4829 * it's likely that full magazines will be returned to the
4830 * system soon (thereby accomplishing what we're trying to
4831 * accomplish here: return those magazines to their slabs).
		 * Given this, any effort we might expend now to locate the
		 * buffer in a magazine would be wasted (and expensive!); we
		 * bump a counter in this case and otherwise assume that we
		 * can't move it.
4836 */
4837 cp->cache_defrag->kmd_dont_know++;
4838 break;
4839 default:
4840 panic("'%s' (%p) unexpected move callback response %d\n",
4841 cp->cache_name, (void *)cp, response);
4842 }
4843
4844 kmem_slab_free_constructed(cp, callback->kmm_to_buf, B_FALSE);
4845 kmem_move_end(cp, callback);
4846 }
4847
4848 /* Return B_FALSE if there is insufficient memory for the move request. */
4849 static boolean_t
4850 kmem_move_begin(kmem_cache_t *cp, kmem_slab_t *sp, void *buf, int flags)
4851 {
4852 void *to_buf;
4853 avl_index_t index;
4854 kmem_move_t *callback, *pending;
4855 ulong_t n;
4856
4857 ASSERT(taskq_member(kmem_taskq, curthread));
4858 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4859 ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
4860
4861 callback = kmem_cache_alloc(kmem_move_cache, KM_NOSLEEP);
4862
4863 if (callback == NULL)
4864 return (B_FALSE);
4865
4866 callback->kmm_from_slab = sp;
4867 callback->kmm_from_buf = buf;
4868 callback->kmm_flags = flags;
4869
4870 mutex_enter(&cp->cache_lock);
4871
4872 n = avl_numnodes(&cp->cache_partial_slabs);
4873 if ((n == 0) || ((n == 1) && !(flags & KMM_DEBUG))) {
4874 mutex_exit(&cp->cache_lock);
4875 kmem_cache_free(kmem_move_cache, callback);
4876 return (B_TRUE); /* there is no need for the move request */
4877 }
4878
4879 pending = avl_find(&cp->cache_defrag->kmd_moves_pending, buf, &index);
4880 if (pending != NULL) {
4881 /*
4882 * If the move is already pending and we're desperate now,
4883 * update the move flags.
4884 */
4885 if (flags & KMM_DESPERATE) {
4886 pending->kmm_flags |= KMM_DESPERATE;
4887 }
4888 mutex_exit(&cp->cache_lock);
4889 kmem_cache_free(kmem_move_cache, callback);
4890 return (B_TRUE);
4891 }
4892
4893 to_buf = kmem_slab_alloc_impl(cp, avl_first(&cp->cache_partial_slabs),
4894 B_FALSE);
4895 callback->kmm_to_buf = to_buf;
4896 avl_insert(&cp->cache_defrag->kmd_moves_pending, callback, index);
4897
4898 mutex_exit(&cp->cache_lock);
4899
4900 if (taskq_dispatch(kmem_move_taskq, (task_func_t *)kmem_move_buffer,
4901 callback, TQ_NOSLEEP) == TASKQID_INVALID) {
4902 mutex_enter(&cp->cache_lock);
4903 avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
4904 mutex_exit(&cp->cache_lock);
4905 kmem_slab_free(cp, to_buf);
4906 kmem_cache_free(kmem_move_cache, callback);
4907 return (B_FALSE);
4908 }
4909
4910 return (B_TRUE);
4911 }
4912
4913 static void
4914 kmem_move_end(kmem_cache_t *cp, kmem_move_t *callback)
4915 {
4916 avl_index_t index;
4917
4918 ASSERT(cp->cache_defrag != NULL);
4919 ASSERT(taskq_member(kmem_move_taskq, curthread));
4920 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4921
4922 mutex_enter(&cp->cache_lock);
4923 VERIFY(avl_find(&cp->cache_defrag->kmd_moves_pending,
4924 callback->kmm_from_buf, &index) != NULL);
4925 avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
4926 if (avl_is_empty(&cp->cache_defrag->kmd_moves_pending)) {
4927 list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
4928 kmem_slab_t *sp;
4929
4930 /*
4931 * The last pending move completed. Release all slabs from the
		 * front of the deadlist except for any slab at the tail that
4933 * needs to be released from the context of kmem_move_buffers().
4934 * kmem deferred unmapping the buffers on these slabs in order
4935 * to guarantee that buffers passed to the move callback have
4936 * been touched only by kmem or by the client itself.
4937 */
4938 while ((sp = list_remove_head(deadlist)) != NULL) {
4939 if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
4940 list_insert_tail(deadlist, sp);
4941 break;
4942 }
4943 cp->cache_defrag->kmd_deadcount--;
4944 cp->cache_slab_destroy++;
4945 mutex_exit(&cp->cache_lock);
4946 kmem_slab_destroy(cp, sp);
4947 mutex_enter(&cp->cache_lock);
4948 }
4949 }
4950 mutex_exit(&cp->cache_lock);
4951 kmem_cache_free(kmem_move_cache, callback);
4952 }
4953
4954 /*
4955 * Move buffers from least used slabs first by scanning backwards from the end
4956 * of the partial slab list. Scan at most max_scan candidate slabs and move
4957 * buffers from at most max_slabs slabs (0 for all partial slabs in both cases).
4958 * If desperate to reclaim memory, move buffers from any partial slab, otherwise
4959 * skip slabs with a ratio of allocated buffers at or above the current
4960 * threshold. Return the number of unskipped slabs (at most max_slabs, -1 if the
4961 * scan is aborted) so that the caller can adjust the reclaimability threshold
4962 * depending on how many reclaimable slabs it finds.
4963 *
4964 * kmem_move_buffers() drops and reacquires cache_lock every time it issues a
4965 * move request, since it is not valid for kmem_move_begin() to call
4966 * kmem_cache_alloc() or taskq_dispatch() with cache_lock held.
4967 */
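/*
 * To illustrate the scan direction (the slab sizes and buffer counts below
 * are made up), the partial slab list is kept roughly ordered by usage, so
 * a fragmented cache might look like this:
 *
 *   avl_first()                                              avl_last()
 *   [ 63/64 ] [ 60/64 ] [ 41/64 ] ... [ 9/64 ] [ 3/64 ] [ 1/64 ]
 *       ^                                                      ^
 *       destination buffers are               the scan starts here and
 *       allocated from here                   walks backwards (AVL_PREV)
 *
 * kmem_move_begin() takes each destination buffer from the first (most
 * heavily used) partial slab, while the loop below walks backwards from the
 * least used slab, so buffers migrate out of nearly empty slabs and into
 * nearly full ones, allowing the emptied slabs to be freed.
 */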
4968 static int
4969 kmem_move_buffers(kmem_cache_t *cp, size_t max_scan, size_t max_slabs,
4970 int flags)
4971 {
4972 kmem_slab_t *sp;
4973 void *buf;
4974 int i, j; /* slab index, buffer index */
4975 int s; /* reclaimable slabs */
4976 int b; /* allocated (movable) buffers on reclaimable slab */
4977 boolean_t success;
4978 int refcnt;
4979 int nomove;
4980
4981 ASSERT(taskq_member(kmem_taskq, curthread));
4982 ASSERT(MUTEX_HELD(&cp->cache_lock));
4983 ASSERT(kmem_move_cache != NULL);
4984 ASSERT(cp->cache_move != NULL && cp->cache_defrag != NULL);
4985 ASSERT((flags & KMM_DEBUG) ? !avl_is_empty(&cp->cache_partial_slabs) :
4986 avl_numnodes(&cp->cache_partial_slabs) > 1);
4987
4988 if (kmem_move_blocked) {
4989 return (0);
4990 }
4991
4992 if (kmem_move_fulltilt) {
4993 flags |= KMM_DESPERATE;
4994 }
4995
4996 if (max_scan == 0 || (flags & KMM_DESPERATE)) {
4997 /*
4998 * Scan as many slabs as needed to find the desired number of
4999 * candidate slabs.
5000 */
5001 max_scan = (size_t)-1;
5002 }
5003
5004 if (max_slabs == 0 || (flags & KMM_DESPERATE)) {
5005 /* Find as many candidate slabs as possible. */
5006 max_slabs = (size_t)-1;
5007 }
5008
5009 sp = avl_last(&cp->cache_partial_slabs);
5010 ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
5011 for (i = 0, s = 0; (i < max_scan) && (s < max_slabs) && (sp != NULL) &&
5012 ((sp != avl_first(&cp->cache_partial_slabs)) ||
5013 (flags & KMM_DEBUG));
5014 sp = AVL_PREV(&cp->cache_partial_slabs, sp), i++) {
5015
5016 if (!kmem_slab_is_reclaimable(cp, sp, flags)) {
5017 continue;
5018 }
5019 s++;
5020
5021 /* Look for allocated buffers to move. */
5022 for (j = 0, b = 0, buf = sp->slab_base;
5023 (j < sp->slab_chunks) && (b < sp->slab_refcnt);
5024 buf = (((char *)buf) + cp->cache_chunksize), j++) {
5025
5026 if (kmem_slab_allocated(cp, sp, buf) == NULL) {
5027 continue;
5028 }
5029
5030 b++;
5031
5032 /*
5033 * Prevent the slab from being destroyed while we drop
5034 * cache_lock and while the pending move is not yet
5035 * registered. Flag the pending move while
5036 * kmd_moves_pending may still be empty, since we can't
5037 * yet rely on a non-zero pending move count to prevent
5038 * the slab from being destroyed.
5039 */
5040 ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
5041 sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
5042 /*
			 * Remember the current slab_refcnt and NOMOVE state
			 * so that we can recheck them after reacquiring the
			 * lock; these control the order of partial slabs, and
			 * we want to know whether we can pick up the scan
			 * where we left off.
5047 */
5048 refcnt = sp->slab_refcnt;
5049 nomove = (sp->slab_flags & KMEM_SLAB_NOMOVE);
5050 mutex_exit(&cp->cache_lock);
5051
5052 success = kmem_move_begin(cp, sp, buf, flags);
5053
5054 /*
5055 * Now, before the lock is reacquired, kmem could
5056 * process all pending move requests and purge the
			 * deadlist, so that sp could already have been
			 * destroyed by the time we reacquire the lock. Or,
			 * the client may free all the objects on the slab
			 * while the pending moves are still
5060 * on the taskq. Therefore, the KMEM_SLAB_MOVE_PENDING
5061 * flag causes the slab to be put at the end of the
5062 * deadlist and prevents it from being destroyed, since
5063 * we plan to destroy it here after reacquiring the
5064 * lock.
5065 */
5066 mutex_enter(&cp->cache_lock);
5067 ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
5068 sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;
5069
5070 if (sp->slab_refcnt == 0) {
5071 list_t *deadlist =
5072 &cp->cache_defrag->kmd_deadlist;
5073 list_remove(deadlist, sp);
5074
5075 if (!avl_is_empty(
5076 &cp->cache_defrag->kmd_moves_pending)) {
5077 /*
5078 * A pending move makes it unsafe to
5079 * destroy the slab, because even though
5080 * the move is no longer needed, the
5081 * context where that is determined
5082 * requires the slab to exist.
5083 * Fortunately, a pending move also
5084 * means we don't need to destroy the
5085 * slab here, since it will get
5086 * destroyed along with any other slabs
5087 * on the deadlist after the last
5088 * pending move completes.
5089 */
5090 list_insert_head(deadlist, sp);
5091 return (-1);
5092 }
5093
5094 /*
5095 * Destroy the slab now if it was completely
5096 * freed while we dropped cache_lock and there
5097 * are no pending moves. Since slab_refcnt
5098 * cannot change once it reaches zero, no new
5099 * pending moves from that slab are possible.
5100 */
5101 cp->cache_defrag->kmd_deadcount--;
5102 cp->cache_slab_destroy++;
5103 mutex_exit(&cp->cache_lock);
5104 kmem_slab_destroy(cp, sp);
5105 mutex_enter(&cp->cache_lock);
5106 /*
5107 * Since we can't pick up the scan where we left
5108 * off, abort the scan and say nothing about the
5109 * number of reclaimable slabs.
5110 */
5111 return (-1);
5112 }
5113
5114 if (!success) {
5115 /*
5116 * Abort the scan if there is not enough memory
5117 * for the request and say nothing about the
5118 * number of reclaimable slabs.
5119 */
5120 return (-1);
5121 }
5122
5123 /*
			 * If the slab's position in the partial slab order
			 * changed while the lock was dropped, we no longer
			 * know where we are in the scan sequence.
5127 */
5128 if (sp->slab_refcnt != refcnt) {
5129 /*
5130 * If this is a KMM_DEBUG move, the slab_refcnt
5131 * may have changed because we allocated a
5132 * destination buffer on the same slab. In that
5133 * case, we're not interested in counting it.
5134 */
5135 return (-1);
5136 }
5137 if ((sp->slab_flags & KMEM_SLAB_NOMOVE) != nomove)
5138 return (-1);
5139
5140 /*
5141 * Generating a move request allocates a destination
5142 * buffer from the slab layer, bumping the first partial
5143 * slab if it is completely allocated. If the current
5144 * slab becomes the first partial slab as a result, we
5145 * can't continue to scan backwards.
5146 *
5147 * If this is a KMM_DEBUG move and we allocated the
5148 * destination buffer from the last partial slab, then
5149 * the buffer we're moving is on the same slab and our
5150 * slab_refcnt has changed, causing us to return before
5151 * reaching here if there are no partial slabs left.
5152 */
5153 ASSERT(!avl_is_empty(&cp->cache_partial_slabs));
5154 if (sp == avl_first(&cp->cache_partial_slabs)) {
5155 /*
5156 * We're not interested in a second KMM_DEBUG
5157 * move.
5158 */
5159 goto end_scan;
5160 }
5161 }
5162 }
5163 end_scan:
5164
5165 return (s);
5166 }
5167
5168 typedef struct kmem_move_notify_args {
5169 kmem_cache_t *kmna_cache;
5170 void *kmna_buf;
5171 } kmem_move_notify_args_t;
5172
5173 static void
5174 kmem_cache_move_notify_task(void *arg)
5175 {
5176 kmem_move_notify_args_t *args = arg;
5177 kmem_cache_t *cp = args->kmna_cache;
5178 void *buf = args->kmna_buf;
5179 kmem_slab_t *sp;
5180
5181 ASSERT(taskq_member(kmem_taskq, curthread));
5182 ASSERT(list_link_active(&cp->cache_link));
5183
5184 kmem_free(args, sizeof (kmem_move_notify_args_t));
5185 mutex_enter(&cp->cache_lock);
5186 sp = kmem_slab_allocated(cp, NULL, buf);
5187
5188 /* Ignore the notification if the buffer is no longer allocated. */
5189 if (sp == NULL) {
5190 mutex_exit(&cp->cache_lock);
5191 return;
5192 }
5193
5194 /* Ignore the notification if there's no reason to move the buffer. */
5195 if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
5196 /*
		 * The notification has not been ruled out yet. Ignore it
		 * unless the slab was marked by an earlier refusal to move
		 * one of its buffers.
5200 */
5201 if (!(sp->slab_flags & KMEM_SLAB_NOMOVE) &&
5202 (sp->slab_later_count == 0)) {
5203 mutex_exit(&cp->cache_lock);
5204 return;
5205 }
5206
5207 kmem_slab_move_yes(cp, sp, buf);
5208 ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
5209 sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
5210 mutex_exit(&cp->cache_lock);
5211 /* see kmem_move_buffers() about dropping the lock */
5212 (void) kmem_move_begin(cp, sp, buf, KMM_NOTIFY);
5213 mutex_enter(&cp->cache_lock);
5214 ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
5215 sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;
5216 if (sp->slab_refcnt == 0) {
5217 list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
5218 list_remove(deadlist, sp);
5219
5220 if (!avl_is_empty(
5221 &cp->cache_defrag->kmd_moves_pending)) {
5222 list_insert_head(deadlist, sp);
5223 mutex_exit(&cp->cache_lock);
5224 return;
5225 }
5226
5227 cp->cache_defrag->kmd_deadcount--;
5228 cp->cache_slab_destroy++;
5229 mutex_exit(&cp->cache_lock);
5230 kmem_slab_destroy(cp, sp);
5231 return;
5232 }
5233 } else {
5234 kmem_slab_move_yes(cp, sp, buf);
5235 }
5236 mutex_exit(&cp->cache_lock);
5237 }
5238
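/*
 * Clients call kmem_cache_move_notify() for a buffer that they previously
 * declined to move (by answering NO or LATER) once the obstacle has gone
 * away, for example (hypothetical client code; object_cache, op, and
 * o_pinned are invented for this sketch):
 *
 *	mutex_enter(&op->o_lock);
 *	op->o_pinned = B_FALSE;
 *	mutex_exit(&op->o_lock);
 *	kmem_cache_move_notify(object_cache, op);
 *
 * The notification is only a hint: it is handed off to kmem_taskq (and
 * dropped if that dispatch fails), and the task ignores it if the buffer
 * has since been freed, if its slab was never marked by an earlier refusal,
 * or if there is currently no reason to move buffers off that slab.
 */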
5239 void
5240 kmem_cache_move_notify(kmem_cache_t *cp, void *buf)
5241 {
5242 kmem_move_notify_args_t *args;
5243
5244 args = kmem_alloc(sizeof (kmem_move_notify_args_t), KM_NOSLEEP);
5245 if (args != NULL) {
5246 args->kmna_cache = cp;
5247 args->kmna_buf = buf;
5248 if (taskq_dispatch(kmem_taskq,
5249 (task_func_t *)kmem_cache_move_notify_task, args,
5250 TQ_NOSLEEP) == TASKQID_INVALID)
5251 kmem_free(args, sizeof (kmem_move_notify_args_t));
5252 }
5253 }
5254
5255 static void
5256 kmem_cache_defrag(kmem_cache_t *cp)
5257 {
5258 size_t n;
5259
5260 ASSERT(cp->cache_defrag != NULL);
5261
5262 mutex_enter(&cp->cache_lock);
5263 n = avl_numnodes(&cp->cache_partial_slabs);
5264 if (n > 1) {
5265 /* kmem_move_buffers() drops and reacquires cache_lock */
5266 cp->cache_defrag->kmd_defrags++;
5267 (void) kmem_move_buffers(cp, n, 0, KMM_DESPERATE);
5268 }
5269 mutex_exit(&cp->cache_lock);
5270 }
5271
5272 /* Is this cache above the fragmentation threshold? */
5273 static boolean_t
5274 kmem_cache_frag_threshold(kmem_cache_t *cp, uint64_t nfree)
5275 {
5276 /*
	 *	       nfree              kmem_frag_numer
	 *	------------------   >    ---------------
	 *	cp->cache_buftotal        kmem_frag_denom
5280 */
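	/*
	 * For example, with hypothetical settings kmem_frag_numer = 1 and
	 * kmem_frag_denom = 8, a cache with cache_buftotal = 8000 is over
	 * the threshold once more than 1000 of its buffers are free in the
	 * slab layer, since (1001 * 8) > (8000 * 1).
	 */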
5281 return ((nfree * kmem_frag_denom) >
5282 (cp->cache_buftotal * kmem_frag_numer));
5283 }
5284
5285 static boolean_t
5286 kmem_cache_is_fragmented(kmem_cache_t *cp, boolean_t *doreap)
5287 {
5288 boolean_t fragmented;
5289 uint64_t nfree;
5290
5291 ASSERT(MUTEX_HELD(&cp->cache_lock));
5292 *doreap = B_FALSE;
5293
5294 if (kmem_move_fulltilt) {
5295 if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
5296 return (B_TRUE);
5297 }
5298 } else {
5299 if ((cp->cache_complete_slab_count + avl_numnodes(
5300 &cp->cache_partial_slabs)) < kmem_frag_minslabs) {
5301 return (B_FALSE);
5302 }
5303 }
5304
5305 nfree = cp->cache_bufslab;
5306 fragmented = ((avl_numnodes(&cp->cache_partial_slabs) > 1) &&
5307 kmem_cache_frag_threshold(cp, nfree));
5308
5309 /*
5310 * Free buffers in the magazine layer appear allocated from the point of
5311 * view of the slab layer. We want to know if the slab layer would
5312 * appear fragmented if we included free buffers from magazines that
5313 * have fallen out of the working set.
5314 */
5315 if (!fragmented) {
5316 long reap;
5317
5318 mutex_enter(&cp->cache_depot_lock);
5319 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
5320 reap = MIN(reap, cp->cache_full.ml_total);
5321 mutex_exit(&cp->cache_depot_lock);
5322
5323 nfree += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
5324 if (kmem_cache_frag_threshold(cp, nfree)) {
5325 *doreap = B_TRUE;
5326 }
5327 }
5328
5329 return (fragmented);
5330 }
5331
5332 /* Called periodically from kmem_taskq */
5333 static void
5334 kmem_cache_scan(kmem_cache_t *cp)
5335 {
5336 boolean_t reap = B_FALSE;
5337 kmem_defrag_t *kmd;
5338
5339 ASSERT(taskq_member(kmem_taskq, curthread));
5340
5341 mutex_enter(&cp->cache_lock);
5342
5343 kmd = cp->cache_defrag;
5344 if (kmd->kmd_consolidate > 0) {
5345 kmd->kmd_consolidate--;
5346 mutex_exit(&cp->cache_lock);
5347 kmem_cache_reap(cp);
5348 return;
5349 }
5350
5351 if (kmem_cache_is_fragmented(cp, &reap)) {
		int slabs_found;	/* kmem_move_buffers() can return -1 */
5353
5354 /*
5355 * Consolidate reclaimable slabs from the end of the partial
5356 * slab list (scan at most kmem_reclaim_scan_range slabs to find
5357 * reclaimable slabs). Keep track of how many candidate slabs we
5358 * looked for and how many we actually found so we can adjust
5359 * the definition of a candidate slab if we're having trouble
5360 * finding them.
5361 *
5362 * kmem_move_buffers() drops and reacquires cache_lock.
5363 */
5364 kmd->kmd_scans++;
5365 slabs_found = kmem_move_buffers(cp, kmem_reclaim_scan_range,
5366 kmem_reclaim_max_slabs, 0);
5367 if (slabs_found >= 0) {
5368 kmd->kmd_slabs_sought += kmem_reclaim_max_slabs;
5369 kmd->kmd_slabs_found += slabs_found;
5370 }
5371
5372 if (++kmd->kmd_tries >= kmem_reclaim_scan_range) {
5373 kmd->kmd_tries = 0;
5374
5375 /*
			 * Adjust the threshold based on how readily we found
			 * candidate slabs in previous scans: make candidates
			 * harder to qualify if we found every slab we sought,
			 * and easier if we found fewer than half of them.
5379 */
5380 if (kmd->kmd_slabs_found == kmd->kmd_slabs_sought) {
5381 kmem_adjust_reclaim_threshold(kmd, -1);
5382 } else if ((kmd->kmd_slabs_found * 2) <
5383 kmd->kmd_slabs_sought) {
5384 kmem_adjust_reclaim_threshold(kmd, 1);
5385 }
5386 kmd->kmd_slabs_sought = 0;
5387 kmd->kmd_slabs_found = 0;
5388 }
5389 } else {
5390 kmem_reset_reclaim_threshold(cp->cache_defrag);
5391 #ifdef DEBUG
5392 if (!avl_is_empty(&cp->cache_partial_slabs)) {
5393 /*
5394 * In a debug kernel we want the consolidator to
5395 * run occasionally even when there is plenty of
5396 * memory.
5397 */
5398 uint16_t debug_rand;
5399
5400 (void) random_get_bytes((uint8_t *)&debug_rand, 2);
5401 if (!kmem_move_noreap &&
5402 ((debug_rand % kmem_mtb_reap) == 0)) {
5403 mutex_exit(&cp->cache_lock);
5404 kmem_cache_reap(cp);
5405 return;
5406 } else if ((debug_rand % kmem_mtb_move) == 0) {
5407 kmd->kmd_scans++;
5408 (void) kmem_move_buffers(cp,
5409 kmem_reclaim_scan_range, 1, KMM_DEBUG);
5410 }
5411 }
5412 #endif /* DEBUG */
5413 }
5414
5415 mutex_exit(&cp->cache_lock);
5416
5417 if (reap)
5418 kmem_depot_ws_reap(cp);
5419 }