1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
24 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
25 */
26
27 /*
28 * Kernel memory allocator, as described in the following two papers and a
29 * statement about the consolidator:
30 *
31 * Jeff Bonwick,
32 * The Slab Allocator: An Object-Caching Kernel Memory Allocator.
33 * Proceedings of the Summer 1994 Usenix Conference.
34 * Available as /shared/sac/PSARC/1994/028/materials/kmem.pdf.
35 *
36 * Jeff Bonwick and Jonathan Adams,
37 * Magazines and vmem: Extending the Slab Allocator to Many CPUs and
38 * Arbitrary Resources.
39 * Proceedings of the 2001 Usenix Conference.
40 * Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf.
41 *
42 * kmem Slab Consolidator Big Theory Statement:
43 *
44 * 1. Motivation
45 *
46 * As stated in Bonwick94, slabs provide the following advantages over other
47 * allocation structures in terms of memory fragmentation:
48 *
49 * - Internal fragmentation (per-buffer wasted space) is minimal.
50 * - Severe external fragmentation (unused buffers on the free list) is
51 * unlikely.
52 *
53 * Segregating objects by size eliminates one source of external fragmentation,
54 * and according to Bonwick:
55 *
56 * The other reason that slabs reduce external fragmentation is that all
57 * objects in a slab are of the same type, so they have the same lifetime
58 * distribution. The resulting segregation of short-lived and long-lived
59 * objects at slab granularity reduces the likelihood of an entire page being
60 * held hostage due to a single long-lived allocation [Barrett93, Hanson90].
61 *
62 * While unlikely, severe external fragmentation remains possible. Clients that
63 * allocate both short- and long-lived objects from the same cache cannot
64 * anticipate the distribution of long-lived objects within the allocator's slab
65 * implementation. Even a small percentage of long-lived objects distributed
66 * randomly across many slabs can lead to a worst case scenario where the client
67 * frees the majority of its objects and the system gets back almost none of the
68 * slabs. Despite the client doing what it reasonably can to help the system
69 * reclaim memory, the allocator cannot shake free enough slabs because of
70 * lonely allocations stubbornly hanging on. Although the allocator is in a
71 * position to diagnose the fragmentation, there is nothing that the allocator
72 * by itself can do about it. It only takes a single allocated object to prevent
73 * an entire slab from being reclaimed, and any object handed out by
74 * kmem_cache_alloc() is by definition in the client's control. Conversely,
75 * although the client is in a position to move a long-lived object, it has no
76 * way of knowing if the object is causing fragmentation, and if so, where to
77 * move it. A solution necessarily requires further cooperation between the
78 * allocator and the client.
79 *
80 * 2. Move Callback
81 *
82 * The kmem slab consolidator therefore adds a move callback to the
83 * allocator/client interface, improving worst-case external fragmentation in
84 * kmem caches that supply a function to move objects from one memory location
85 * to another. In a situation of low memory kmem attempts to consolidate all of
86 * a cache's slabs at once; otherwise it works slowly to bring external
87 * fragmentation within the 1/8 limit guaranteed for internal fragmentation,
88 * thereby helping to avoid a low memory situation in the future.
89 *
90 * The callback has the following signature:
91 *
92 * kmem_cbrc_t move(void *old, void *new, size_t size, void *user_arg)
93 *
94 * It supplies the kmem client with two addresses: the allocated object that
95 * kmem wants to move and a buffer selected by kmem for the client to use as the
96 * copy destination. The callback is kmem's way of saying "Please get off of
97 * this buffer and use this one instead." kmem knows where it wants to move the
98 * object in order to best reduce fragmentation. All the client needs to know
99 * about the second argument (void *new) is that it is an allocated, constructed
100 * object ready to take the contents of the old object. When the move function
101 * is called, the system is likely to be low on memory, and the new object
102 * spares the client from having to worry about allocating memory for the
103 * requested move. The third argument supplies the size of the object, in case a
104 * single move function handles multiple caches whose objects differ only in
105 * size (such as zio_buf_512, zio_buf_1024, etc). Finally, the same optional
106 * user argument passed to the constructor, destructor, and reclaim functions is
107 * also passed to the move callback.
108 *
109 * 2.1 Setting the Move Callback
110 *
111 * The client sets the move callback after creating the cache and before
112 * allocating from it:
113 *
114 * object_cache = kmem_cache_create(...);
115 * kmem_cache_set_move(object_cache, object_move);
116 *
117 * 2.2 Move Callback Return Values
118 *
119  * Only the client knows about its own data and when it is a good time to move it.
120 * The client is cooperating with kmem to return unused memory to the system,
121 * and kmem respectfully accepts this help at the client's convenience. When
122 * asked to move an object, the client can respond with any of the following:
123 *
124 * typedef enum kmem_cbrc {
125 * KMEM_CBRC_YES,
126 * KMEM_CBRC_NO,
127 * KMEM_CBRC_LATER,
128 * KMEM_CBRC_DONT_NEED,
129 * KMEM_CBRC_DONT_KNOW
130 * } kmem_cbrc_t;
131 *
132 * The client must not explicitly kmem_cache_free() either of the objects passed
133 * to the callback, since kmem wants to free them directly to the slab layer
134 * (bypassing the per-CPU magazine layer). The response tells kmem which of the
135 * objects to free:
136 *
137 * YES: (Did it) The client moved the object, so kmem frees the old one.
138 * NO: (Never) The client refused, so kmem frees the new object (the
139 * unused copy destination). kmem also marks the slab of the old
140 * object so as not to bother the client with further callbacks for
141 * that object as long as the slab remains on the partial slab list.
142 * (The system won't be getting the slab back as long as the
143 * immovable object holds it hostage, so there's no point in moving
144 * any of its objects.)
145 * LATER: The client is using the object and cannot move it now, so kmem
146 * frees the new object (the unused copy destination). kmem still
147 * attempts to move other objects off the slab, since it expects to
148 * succeed in clearing the slab in a later callback. The client
149 * should use LATER instead of NO if the object is likely to become
150 * movable very soon.
151 * DONT_NEED: The client no longer needs the object, so kmem frees the old along
152 * with the new object (the unused copy destination). This response
153 * is the client's opportunity to be a model citizen and give back as
154 * much as it can.
155 * DONT_KNOW: The client does not know about the object because
156 * a) the client has just allocated the object and not yet put it
157  *               wherever it expects to find known objects,
158 * b) the client has removed the object from wherever it expects to
159 * find known objects and is about to free it, or
160 * c) the client has freed the object.
161 * In all these cases (a, b, and c) kmem frees the new object (the
162 * unused copy destination) and searches for the old object in the
163 * magazine layer. If found, the object is removed from the magazine
164 * layer and freed to the slab layer so it will no longer hold the
165 * slab hostage.
166 *
167 * 2.3 Object States
168 *
169 * Neither kmem nor the client can be assumed to know the object's whereabouts
170 * at the time of the callback. An object belonging to a kmem cache may be in
171 * any of the following states:
172 *
173 * 1. Uninitialized on the slab
174 * 2. Allocated from the slab but not constructed (still uninitialized)
175 * 3. Allocated from the slab, constructed, but not yet ready for business
176 * (not in a valid state for the move callback)
177 * 4. In use (valid and known to the client)
178 * 5. About to be freed (no longer in a valid state for the move callback)
179 * 6. Freed to a magazine (still constructed)
180 * 7. Allocated from a magazine, not yet ready for business (not in a valid
181 * state for the move callback), and about to return to state #4
182 * 8. Deconstructed on a magazine that is about to be freed
183 * 9. Freed to the slab
184 *
185 * Since the move callback may be called at any time while the object is in any
186 * of the above states (except state #1), the client needs a safe way to
187 * determine whether or not it knows about the object. Specifically, the client
188 * needs to know whether or not the object is in state #4, the only state in
189 * which a move is valid. If the object is in any other state, the client should
190 * immediately return KMEM_CBRC_DONT_KNOW, since it is unsafe to access any of
191 * the object's fields.
192 *
193 * Note that although an object may be in state #4 when kmem initiates the move
194 * request, the object may no longer be in that state by the time kmem actually
195 * calls the move function. Not only does the client free objects
196  * asynchronously, kmem itself puts move requests on a queue where they are
197 * pending until kmem processes them from another context. Also, objects freed
198 * to a magazine appear allocated from the point of view of the slab layer, so
199 * kmem may even initiate requests for objects in a state other than state #4.
200 *
201 * 2.3.1 Magazine Layer
202 *
203 * An important insight revealed by the states listed above is that the magazine
204 * layer is populated only by kmem_cache_free(). Magazines of constructed
205 * objects are never populated directly from the slab layer (which contains raw,
206 * unconstructed objects). Whenever an allocation request cannot be satisfied
207 * from the magazine layer, the magazines are bypassed and the request is
208 * satisfied from the slab layer (creating a new slab if necessary). kmem calls
209 * the object constructor only when allocating from the slab layer, and only in
210 * response to kmem_cache_alloc() or to prepare the destination buffer passed in
211 * the move callback. kmem does not preconstruct objects in anticipation of
212 * kmem_cache_alloc().
213 *
214 * 2.3.2 Object Constructor and Destructor
215 *
216 * If the client supplies a destructor, it must be valid to call the destructor
217 * on a newly created object (immediately after the constructor).
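 *
 * As an illustration, consider the hypothetical object_t used throughout the
 * examples below, with its designated o_container pointer (the low order bit
 * convention is explained in section 2.4). A minimal sketch:
 *
 *	static int
 *	object_constructor(void *buf, void *arg, int kmflags)
 *	{
 *		object_t *object = buf;
 *
 *		object->o_container = NULL;	// safe to destroy at once
 *		return (0);
 *	}
 *
 *	static void
 *	object_destructor(void *buf, void *arg)
 *	{
 *		object_t *object = buf;
 *
 *		// Valid immediately after the constructor (o_container
 *		// still NULL) as well as after a free (low order bit set).
 *		ASSERT(object->o_container == NULL ||
 *		    ((uintptr_t)object->o_container & 0x1));
 *	}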
218 *
219 * 2.4 Recognizing Known Objects
220 *
221 * There is a simple test to determine safely whether or not the client knows
222 * about a given object in the move callback. It relies on the fact that kmem
223 * guarantees that the object of the move callback has only been touched by the
224 * client itself or else by kmem. kmem does this by ensuring that none of the
225 * cache's slabs are freed to the virtual memory (VM) subsystem while a move
226 * callback is pending. When the last object on a slab is freed, if there is a
227 * pending move, kmem puts the slab on a per-cache dead list and defers freeing
228 * slabs on that list until all pending callbacks are completed. That way,
229 * clients can be certain that the object of a move callback is in one of the
230 * states listed above, making it possible to distinguish known objects (in
231 * state #4) using the two low order bits of any pointer member (with the
232 * exception of 'char *' or 'short *' which may not be 4-byte aligned on some
233 * platforms).
234 *
235 * The test works as long as the client always transitions objects from state #4
236 * (known, in use) to state #5 (about to be freed, invalid) by setting the low
237 * order bit of the client-designated pointer member. Since kmem only writes
238 * invalid memory patterns, such as 0xbaddcafe to uninitialized memory and
239 * 0xdeadbeef to freed memory, any scribbling on the object done by kmem is
240 * guaranteed to set at least one of the two low order bits. Therefore, given an
241 * object with a back pointer to a 'container_t *o_container', the client can
242 * test
243 *
244 * container_t *container = object->o_container;
245 * if ((uintptr_t)container & 0x3) {
246 * return (KMEM_CBRC_DONT_KNOW);
247 * }
248 *
249 * Typically, an object will have a pointer to some structure with a list or
250 * hash where objects from the cache are kept while in use. Assuming that the
251 * client has some way of knowing that the container structure is valid and will
252 * not go away during the move, and assuming that the structure includes a lock
253 * to protect whatever collection is used, then the client would continue as
254 * follows:
255 *
256 * // Ensure that the container structure does not go away.
257 * if (container_hold(container) == 0) {
258 * return (KMEM_CBRC_DONT_KNOW);
259 * }
260 * mutex_enter(&container->c_objects_lock);
261 * if (container != object->o_container) {
262 * mutex_exit(&container->c_objects_lock);
263 * container_rele(container);
264 * return (KMEM_CBRC_DONT_KNOW);
265 * }
266 *
267 * At this point the client knows that the object cannot be freed as long as
268 * c_objects_lock is held. Note that after acquiring the lock, the client must
269 * recheck the o_container pointer in case the object was removed just before
270 * acquiring the lock.
271 *
272 * When the client is about to free an object, it must first remove that object
273 * from the list, hash, or other structure where it is kept. At that time, to
274 * mark the object so it can be distinguished from the remaining, known objects,
275 * the client sets the designated low order bit:
276 *
277 * mutex_enter(&container->c_objects_lock);
278 * object->o_container = (void *)((uintptr_t)object->o_container | 0x1);
279 * list_remove(&container->c_objects, object);
280 * mutex_exit(&container->c_objects_lock);
281 *
282 * In the common case, the object is freed to the magazine layer, where it may
283 * be reused on a subsequent allocation without the overhead of calling the
284 * constructor. While in the magazine it appears allocated from the point of
285 * view of the slab layer, making it a candidate for the move callback. Most
286 * objects unrecognized by the client in the move callback fall into this
287 * category and are cheaply distinguished from known objects by the test
288 * described earlier. Since recognition is cheap for the client, and searching
289 * magazines is expensive for kmem, kmem defers searching until the client first
290 * returns KMEM_CBRC_DONT_KNOW. As long as the needed effort is reasonable, kmem
291 * elsewhere does what it can to avoid bothering the client unnecessarily.
292 *
293 * Invalidating the designated pointer member before freeing the object marks
294 * the object to be avoided in the callback, and conversely, assigning a valid
295 * value to the designated pointer member after allocating the object makes the
296 * object fair game for the callback:
297 *
298 * ... allocate object ...
299 * ... set any initial state not set by the constructor ...
300 *
301 * mutex_enter(&container->c_objects_lock);
302 * list_insert_tail(&container->c_objects, object);
303 * membar_producer();
304 * object->o_container = container;
305 * mutex_exit(&container->c_objects_lock);
306 *
307 * Note that everything else must be valid before setting o_container makes the
308 * object fair game for the move callback. The membar_producer() call ensures
309 * that all the object's state is written to memory before setting the pointer
310 * that transitions the object from state #3 or #7 (allocated, constructed, not
311 * yet in use) to state #4 (in use, valid). That's important because the move
312 * function has to check the validity of the pointer before it can safely
313 * acquire the lock protecting the collection where it expects to find known
314 * objects.
315 *
316 * This method of distinguishing known objects observes the usual symmetry:
317 * invalidating the designated pointer is the first thing the client does before
318 * freeing the object, and setting the designated pointer is the last thing the
319 * client does after allocating the object. Of course, the client is not
320 * required to use this method. Fundamentally, how the client recognizes known
321 * objects is completely up to the client, but this method is recommended as an
322 * efficient and safe way to take advantage of the guarantees made by kmem. If
323 * the entire object is arbitrary data without any markable bits from a suitable
324 * pointer member, then the client must find some other method, such as
325 * searching a hash table of known objects.
326 *
327 * 2.5 Preventing Objects From Moving
328 *
329 * Besides a way to distinguish known objects, the other thing that the client
330 * needs is a strategy to ensure that an object will not move while the client
331 * is actively using it. The details of satisfying this requirement tend to be
332 * highly cache-specific. It might seem that the same rules that let a client
333 * remove an object safely should also decide when an object can be moved
334 * safely. However, any object state that makes a removal attempt invalid is
335 * likely to be long-lasting for objects that the client does not expect to
336 * remove. kmem knows nothing about the object state and is equally likely (from
337 * the client's point of view) to request a move for any object in the cache,
338 * whether prepared for removal or not. Even a low percentage of objects stuck
339 * in place by unremovability will defeat the consolidator if the stuck objects
340 * are the same long-lived allocations likely to hold slabs hostage.
341 * Fundamentally, the consolidator is not aimed at common cases. Severe external
342 * fragmentation is a worst case scenario manifested as sparsely allocated
343 * slabs, by definition a low percentage of the cache's objects. When deciding
344 * what makes an object movable, keep in mind the goal of the consolidator: to
345 * bring worst-case external fragmentation within the limits guaranteed for
346 * internal fragmentation. Removability is a poor criterion if it is likely to
347 * exclude more than an insignificant percentage of objects for long periods of
348 * time.
349 *
350 * A tricky general solution exists, and it has the advantage of letting you
351 * move any object at almost any moment, practically eliminating the likelihood
352 * that an object can hold a slab hostage. However, if there is a cache-specific
353 * way to ensure that an object is not actively in use in the vast majority of
354 * cases, a simpler solution that leverages this cache-specific knowledge is
355 * preferred.
356 *
357 * 2.5.1 Cache-Specific Solution
358 *
359 * As an example of a cache-specific solution, the ZFS znode cache takes
360 * advantage of the fact that the vast majority of znodes are only being
361 * referenced from the DNLC. (A typical case might be a few hundred in active
362 * use and a hundred thousand in the DNLC.) In the move callback, after the ZFS
363 * client has established that it recognizes the znode and can access its fields
364 * safely (using the method described earlier), it then tests whether the znode
365 * is referenced by anything other than the DNLC. If so, it assumes that the
366 * znode may be in active use and is unsafe to move, so it drops its locks and
367 * returns KMEM_CBRC_LATER. The advantage of this strategy is that everywhere
368 * else znodes are used, no change is needed to protect against the possibility
369 * of the znode moving. The disadvantage is that it remains possible for an
370 * application to hold a znode slab hostage with an open file descriptor.
371 * However, this case ought to be rare and the consolidator has a way to deal
372 * with it: If the client responds KMEM_CBRC_LATER repeatedly for the same
373 * object, kmem eventually stops believing it and treats the slab as if the
374 * client had responded KMEM_CBRC_NO. Having marked the hostage slab, kmem can
375 * then focus on getting it off of the partial slab list by allocating rather
376 * than freeing all of its objects. (Either way of getting a slab off the
377 * free list reduces fragmentation.)
378 *
379 * 2.5.2 General Solution
380 *
381 * The general solution, on the other hand, requires an explicit hold everywhere
382 * the object is used to prevent it from moving. To keep the client locking
383 * strategy as uncomplicated as possible, kmem guarantees the simplifying
384 * assumption that move callbacks are sequential, even across multiple caches.
385 * Internally, a global queue processed by a single thread supports all caches
386 * implementing the callback function. No matter how many caches supply a move
387 * function, the consolidator never moves more than one object at a time, so the
388 * client does not have to worry about tricky lock ordering involving several
389 * related objects from different kmem caches.
390 *
391 * The general solution implements the explicit hold as a read-write lock, which
392 * allows multiple readers to access an object from the cache simultaneously
393 * while a single writer is excluded from moving it. A single rwlock for the
394 * entire cache would lock out all threads from using any of the cache's objects
395 * even though only a single object is being moved, so to reduce contention,
396 * the client can fan out the single rwlock into an array of rwlocks hashed by
397 * the object address, making it probable that moving one object will not
398 * prevent other threads from using a different object. The rwlock cannot be a
399 * member of the object itself, because the possibility of the object moving
400 * makes it unsafe to access any of the object's fields until the lock is
401 * acquired.
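 *
 * For illustration, the OBJECT_RWLOCK macro used in the examples below could
 * be implemented along these lines (a sketch only; the array size and hash
 * shift are arbitrary choices, not values dictated by kmem):
 *
 *	#define	OBJECT_RWLOCK_COUNT	64	// power of two
 *	static krwlock_t object_rwlock[OBJECT_RWLOCK_COUNT];
 *
 *	// Hash the object address, discarding low bits that are identical
 *	// for all objects of the cache's alignment.
 *	#define	OBJECT_RWLOCK(op)	(&object_rwlock[		\
 *	    ((uintptr_t)(op) >> 6) & (OBJECT_RWLOCK_COUNT - 1)])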
402 *
403 * Assuming a small, fixed number of locks, it's possible that multiple objects
404 * will hash to the same lock. A thread that needs to use multiple objects in
405 * the same function may acquire the same lock multiple times. Since rwlocks are
406 * reentrant for readers, and since there is never more than a single writer at
407 * a time (assuming that the client acquires the lock as a writer only when
408 * moving an object inside the callback), there would seem to be no problem.
409 * However, a client locking multiple objects in the same function must handle
410 * one case of potential deadlock: Assume that thread A needs to prevent both
411 * object 1 and object 2 from moving, and thread B, the callback, meanwhile
412 * tries to move object 3. It's possible, if objects 1, 2, and 3 all hash to the
413 * same lock, that thread A will acquire the lock for object 1 as a reader
414 * before thread B sets the lock's write-wanted bit, preventing thread A from
415 * reacquiring the lock for object 2 as a reader. Unable to make forward
416 * progress, thread A will never release the lock for object 1, resulting in
417 * deadlock.
418 *
419 * There are two ways of avoiding the deadlock just described. The first is to
420 * use rw_tryenter() rather than rw_enter() in the callback function when
421 * attempting to acquire the lock as a writer. If tryenter discovers that the
422 * same object (or another object hashed to the same lock) is already in use, it
423 * aborts the callback and returns KMEM_CBRC_LATER. The second way is to use
424 * rprwlock_t (declared in common/fs/zfs/sys/rprwlock.h) instead of rwlock_t,
425 * since it allows a thread to acquire the lock as a reader in spite of a
426 * waiting writer. This second approach insists on moving the object now, no
427 * matter how many readers the move function must wait for in order to do so,
428 * and could delay the completion of the callback indefinitely (blocking
429 * callbacks to other clients). In practice, a less insistent callback using
430 * rw_tryenter() returns KMEM_CBRC_LATER infrequently enough that there seems
431 * little reason to use anything else.
432 *
433 * Avoiding deadlock is not the only problem that an implementation using an
434 * explicit hold needs to solve. Locking the object in the first place (to
435 * prevent it from moving) remains a problem, since the object could move
436 * between the time you obtain a pointer to the object and the time you acquire
437 * the rwlock hashed to that pointer value. Therefore the client needs to
438 * recheck the value of the pointer after acquiring the lock, drop the lock if
439 * the value has changed, and try again. This requires a level of indirection:
440 * something that points to the object rather than the object itself, that the
441 * client can access safely while attempting to acquire the lock. (The object
442 * itself cannot be referenced safely because it can move at any time.)
443 * The following lock-acquisition function takes whatever is safe to reference
444 * (arg), follows its pointer to the object (using function f), and tries as
445 * often as necessary to acquire the hashed lock and verify that the object
446 * still has not moved:
447 *
448 * object_t *
449 * object_hold(object_f f, void *arg)
450 * {
451 * object_t *op;
452 *
453 * op = f(arg);
454 * if (op == NULL) {
455 * return (NULL);
456 * }
457 *
458 * rw_enter(OBJECT_RWLOCK(op), RW_READER);
459 * while (op != f(arg)) {
460 * rw_exit(OBJECT_RWLOCK(op));
461 * op = f(arg);
462 * if (op == NULL) {
463 * break;
464 * }
465 * rw_enter(OBJECT_RWLOCK(op), RW_READER);
466 * }
467 *
468 * return (op);
469 * }
470 *
471 * The OBJECT_RWLOCK macro hashes the object address to obtain the rwlock. The
472 * lock reacquisition loop, while necessary, almost never executes. The function
473 * pointer f (used to obtain the object pointer from arg) has the following type
474 * definition:
475 *
476 * typedef object_t *(*object_f)(void *arg);
477 *
478 * An object_f implementation is likely to be as simple as accessing a structure
479 * member:
480 *
481 * object_t *
482 * s_object(void *arg)
483 * {
484 * something_t *sp = arg;
485 * return (sp->s_object);
486 * }
487 *
488 * The flexibility of a function pointer allows the path to the object to be
489 * arbitrarily complex and also supports the notion that depending on where you
490 * are using the object, you may need to get it from someplace different.
491 *
492 * The function that releases the explicit hold is simpler because it does not
493 * have to worry about the object moving:
494 *
495 * void
496 * object_rele(object_t *op)
497 * {
498 * rw_exit(OBJECT_RWLOCK(op));
499 * }
500 *
501 * The caller is spared these details so that obtaining and releasing an
502 * explicit hold feels like a simple mutex_enter()/mutex_exit() pair. The caller
503 * of object_hold() only needs to know that the returned object pointer is valid
504 * if not NULL and that the object will not move until released.
505 *
506 * Although object_hold() prevents an object from moving, it does not prevent it
507 * from being freed. The caller must take measures before calling object_hold()
508 * (afterwards is too late) to ensure that the held object cannot be freed. The
509 * caller must do so without accessing the unsafe object reference, so any lock
510 * or reference count used to ensure the continued existence of the object must
511 * live outside the object itself.
512 *
513 * Obtaining a new object is a special case where an explicit hold is impossible
514 * for the caller. Any function that returns a newly allocated object (either as
515  * a return value, or as an in-out parameter) must return it already held; after
516 * the caller gets it is too late, since the object cannot be safely accessed
517 * without the level of indirection described earlier. The following
518 * object_alloc() example uses the same code shown earlier to transition a new
519 * object into the state of being recognized (by the client) as a known object.
520 * The function must acquire the hold (rw_enter) before that state transition
521 * makes the object movable:
522 *
523 * static object_t *
524 * object_alloc(container_t *container)
525 * {
526 * object_t *object = kmem_cache_alloc(object_cache, 0);
527 * ... set any initial state not set by the constructor ...
528 * rw_enter(OBJECT_RWLOCK(object), RW_READER);
529 * mutex_enter(&container->c_objects_lock);
530 * list_insert_tail(&container->c_objects, object);
531 * membar_producer();
532 * object->o_container = container;
533 * mutex_exit(&container->c_objects_lock);
534 * return (object);
535 * }
536 *
537 * Functions that implicitly acquire an object hold (any function that calls
538 * object_alloc() to supply an object for the caller) need to be carefully noted
539 * so that the matching object_rele() is not neglected. Otherwise, leaked holds
540 * prevent all objects hashed to the affected rwlocks from ever being moved.
541 *
542 * The pointer to a held object can be hashed to the holding rwlock even after
543 * the object has been freed. Although it is possible to release the hold
544 * after freeing the object, you may decide to release the hold implicitly in
545 * whatever function frees the object, so as to release the hold as soon as
546 * possible, and for the sake of symmetry with the function that implicitly
547 * acquires the hold when it allocates the object. Here, object_free() releases
548 * the hold acquired by object_alloc(). Its implicit object_rele() forms a
549 * matching pair with object_hold():
550 *
551 * void
552 * object_free(object_t *object)
553 * {
554 * container_t *container;
555 *
556 * ASSERT(object_held(object));
557 * container = object->o_container;
558 * mutex_enter(&container->c_objects_lock);
559 * object->o_container =
560 * (void *)((uintptr_t)object->o_container | 0x1);
561 * list_remove(&container->c_objects, object);
562 * mutex_exit(&container->c_objects_lock);
563 * object_rele(object);
564 * kmem_cache_free(object_cache, object);
565 * }
566 *
567 * Note that object_free() cannot safely accept an object pointer as an argument
568 * unless the object is already held. Any function that calls object_free()
569 * needs to be carefully noted since it similarly forms a matching pair with
570 * object_hold().
571 *
572 * To complete the picture, the following callback function implements the
573 * general solution by moving objects only if they are currently unheld:
574 *
575 * static kmem_cbrc_t
576 * object_move(void *buf, void *newbuf, size_t size, void *arg)
577 * {
578 * object_t *op = buf, *np = newbuf;
579 * container_t *container;
580 *
581 * container = op->o_container;
582 * if ((uintptr_t)container & 0x3) {
583 * return (KMEM_CBRC_DONT_KNOW);
584 * }
585 *
586 * // Ensure that the container structure does not go away.
587 * if (container_hold(container) == 0) {
588 * return (KMEM_CBRC_DONT_KNOW);
589 * }
590 *
591 * mutex_enter(&container->c_objects_lock);
592 * if (container != op->o_container) {
593 * mutex_exit(&container->c_objects_lock);
594 * container_rele(container);
595 * return (KMEM_CBRC_DONT_KNOW);
596 * }
597 *
598 * if (rw_tryenter(OBJECT_RWLOCK(op), RW_WRITER) == 0) {
599 * mutex_exit(&container->c_objects_lock);
600 * container_rele(container);
601 * return (KMEM_CBRC_LATER);
602 * }
603 *
604 * object_move_impl(op, np); // critical section
605 * rw_exit(OBJECT_RWLOCK(op));
606 *
607 * op->o_container = (void *)((uintptr_t)op->o_container | 0x1);
608 * list_link_replace(&op->o_link_node, &np->o_link_node);
609 * mutex_exit(&container->c_objects_lock);
610 * container_rele(container);
611 * return (KMEM_CBRC_YES);
612 * }
613 *
614 * Note that object_move() must invalidate the designated o_container pointer of
615 * the old object in the same way that object_free() does, since kmem will free
616 * the object in response to the KMEM_CBRC_YES return value.
617 *
618 * The lock order in object_move() differs from object_alloc(), which locks
619 * OBJECT_RWLOCK first and &container->c_objects_lock second, but as long as the
620 * callback uses rw_tryenter() (preventing the deadlock described earlier), it's
621 * not a problem. Holding the lock on the object list in the example above
622 * through the entire callback not only prevents the object from going away, it
623 * also allows you to lock the list elsewhere and know that none of its elements
624 * will move during iteration.
625 *
626 * Adding an explicit hold everywhere an object from the cache is used is tricky
627 * and involves much more change to client code than a cache-specific solution
628 * that leverages existing state to decide whether or not an object is
629 * movable. However, this approach has the advantage that no object remains
630 * immovable for any significant length of time, making it extremely unlikely
631 * that long-lived allocations can continue holding slabs hostage; and it works
632 * for any cache.
633 *
634 * 3. Consolidator Implementation
635 *
636 * Once the client supplies a move function that a) recognizes known objects and
637 * b) avoids moving objects that are actively in use, the remaining work is up
638 * to the consolidator to decide which objects to move and when to issue
639 * callbacks.
640 *
641 * The consolidator relies on the fact that a cache's slabs are ordered by
642 * usage. Each slab has a fixed number of objects. Depending on the slab's
643 * "color" (the offset of the first object from the beginning of the slab;
644 * offsets are staggered to mitigate false sharing of cache lines) it is either
645 * the maximum number of objects per slab determined at cache creation time or
646 * else the number closest to the maximum that fits within the space remaining
647 * after the initial offset. A completely allocated slab may contribute some
648 * internal fragmentation (per-slab overhead) but no external fragmentation, so
649 * it is of no interest to the consolidator. At the other extreme, slabs whose
650 * objects have all been freed to the slab are released to the virtual memory
651 * (VM) subsystem (objects freed to magazines are still allocated as far as the
652 * slab is concerned). External fragmentation exists when there are slabs
653 * somewhere between these extremes. A partial slab has at least one but not all
654 * of its objects allocated. The more partial slabs, and the fewer allocated
655 * objects on each of them, the higher the fragmentation. Hence the
656 * consolidator's overall strategy is to reduce the number of partial slabs by
657 * moving allocated objects from the least allocated slabs to the most allocated
658 * slabs.
659 *
660 * Partial slabs are kept in an AVL tree ordered by usage. Completely allocated
661 * slabs are kept separately in an unordered list. Since the majority of slabs
662 * tend to be completely allocated (a typical unfragmented cache may have
663 * thousands of complete slabs and only a single partial slab), separating
664 * complete slabs improves the efficiency of partial slab ordering, since the
665 * complete slabs do not affect the depth or balance of the AVL tree. This
666 * ordered sequence of partial slabs acts as a "free list" supplying objects for
667 * allocation requests.
668 *
669 * Objects are always allocated from the first partial slab in the free list,
670 * where the allocation is most likely to eliminate a partial slab (by
671 * completely allocating it). Conversely, when a single object from a completely
672 * allocated slab is freed to the slab, that slab is added to the front of the
673 * free list. Since most free list activity involves highly allocated slabs
674 * coming and going at the front of the list, slabs tend naturally toward the
675 * ideal order: highly allocated at the front, sparsely allocated at the back.
676 * Slabs with few allocated objects are likely to become completely free if they
677 * keep a safe distance away from the front of the free list. Slab misorders
678 * interfere with the natural tendency of slabs to become completely free or
679 * completely allocated. For example, a slab with a single allocated object
680 * needs only a single free to escape the cache; its natural desire is
681 * frustrated when it finds itself at the front of the list where a second
682 * allocation happens just before the free could have released it. Another slab
683 * with all but one object allocated might have supplied the buffer instead, so
684 * that both (as opposed to neither) of the slabs would have been taken off the
685 * free list.
686 *
687 * Although slabs tend naturally toward the ideal order, misorders allowed by a
688 * simple list implementation defeat the consolidator's strategy of merging
689 * least- and most-allocated slabs. Without an AVL tree to guarantee order, kmem
690 * needs another way to fix misorders to optimize its callback strategy. One
691 * approach is to periodically scan a limited number of slabs, advancing a
692 * marker to hold the current scan position, and to move extreme misorders to
693 * the front or back of the free list and to the front or back of the current
694 * scan range. By making consecutive scan ranges overlap by one slab, the least
695 * allocated slab in the current range can be carried along from the end of one
696 * scan to the start of the next.
697 *
698 * Maintaining partial slabs in an AVL tree relieves kmem of this additional
699 * task, however. Since most of the cache's activity is in the magazine layer,
700 * and allocations from the slab layer represent only a startup cost, the
701 * overhead of maintaining a balanced tree is not a significant concern compared
702 * to the opportunity of reducing complexity by eliminating the partial slab
703 * scanner just described. The overhead of an AVL tree is minimized by
704 * maintaining only partial slabs in the tree and keeping completely allocated
705 * slabs separately in a list. To avoid increasing the size of the slab
706 * structure the AVL linkage pointers are reused for the slab's list linkage,
707 * since the slab will always be either partial or complete, never stored both
708 * ways at the same time. To further minimize the overhead of the AVL tree the
709 * compare function that orders partial slabs by usage divides the range of
710 * allocated object counts into bins such that counts within the same bin are
711 * considered equal. Binning partial slabs makes it less likely that allocating
712 * or freeing a single object will change the slab's order, requiring a tree
713 * reinsertion (an avl_remove() followed by an avl_add(), both potentially
714 * requiring some rebalancing of the tree). Allocation counts closest to
715 * completely free and completely allocated are left unbinned (finely sorted) to
716 * better support the consolidator's strategy of merging slabs at either
717 * extreme.
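 *
 * A sketch of such a binned comparison follows. It is simplified: the fixed
 * bin width here ignores the finely sorted extremes described above, and
 * slab_refcnt stands for the slab's count of allocated objects:
 *
 *	static int
 *	partial_slab_cmp(const void *l, const void *r)
 *	{
 *		const kmem_slab_t *s0 = l;
 *		const kmem_slab_t *s1 = r;
 *		size_t w0 = s0->slab_refcnt >> 3;	// bins of 8
 *		size_t w1 = s1->slab_refcnt >> 3;
 *
 *		if (w0 > w1)		// more allocated sorts earlier
 *			return (-1);
 *		if (w0 < w1)
 *			return (1);
 *		// Tie-break on address so the AVL ordering is total.
 *		if ((uintptr_t)s0 < (uintptr_t)s1)
 *			return (-1);
 *		if ((uintptr_t)s0 > (uintptr_t)s1)
 *			return (1);
 *		return (0);
 *	}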
718 *
719 * 3.1 Assessing Fragmentation and Selecting Candidate Slabs
720 *
721 * The consolidator piggybacks on the kmem maintenance thread and is called on
722 * the same interval as kmem_cache_update(), once per cache every fifteen
723 * seconds. kmem maintains a running count of unallocated objects in the slab
724 * layer (cache_bufslab). The consolidator checks whether that number exceeds
725 * 12.5% (1/8) of the total objects in the cache (cache_buftotal), and whether
726 * there is a significant number of slabs in the cache (arbitrarily a minimum
727 * 101 total slabs). Unused objects that have fallen out of the magazine layer's
728 * working set are included in the assessment, and magazines in the depot are
729 * reaped if those objects would lift cache_bufslab above the fragmentation
730 * threshold. Once the consolidator decides that a cache is fragmented, it looks
731 * for a candidate slab to reclaim, starting at the end of the partial slab free
732 * list and scanning backwards. At first the consolidator is choosy: only a slab
733 * with fewer than 12.5% (1/8) of its objects allocated qualifies (or else a
734 * single allocated object, regardless of percentage). If there is difficulty
735 * finding a candidate slab, kmem raises the allocation threshold incrementally,
736 * up to a maximum 87.5% (7/8), so that eventually the consolidator will reduce
737 * external fragmentation (unused objects on the free list) below 12.5% (1/8),
738 * even in the worst case of every slab in the cache being almost 7/8 allocated.
739 * The threshold can also be lowered incrementally when candidate slabs are easy
740 * to find, and the threshold is reset to the minimum 1/8 as soon as the cache
741 * is no longer fragmented.
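 *
 * In outline, the initial threshold test looks like this (a sketch: the
 * kmem_frag_* tunables are defined later in this file, and total_slabs
 * stands in for however the cache's slab count is obtained):
 *
 *	static boolean_t
 *	cache_is_fragmented(kmem_cache_t *cp, size_t total_slabs)
 *	{
 *		if (total_slabs < kmem_frag_minslabs)
 *			return (B_FALSE);
 *		// free buffers in the slab layer exceed 1/8 of the total
 *		return (cp->cache_bufslab * kmem_frag_denom >
 *		    cp->cache_buftotal * kmem_frag_numer);
 *	}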
742 *
743 * 3.2 Generating Callbacks
744 *
745 * Once an eligible slab is chosen, a callback is generated for every allocated
746 * object on the slab, in the hope that the client will move everything off the
747 * slab and make it reclaimable. Objects selected as move destinations are
748 * chosen from slabs at the front of the free list. Assuming slabs in the ideal
749 * order (most allocated at the front, least allocated at the back) and a
750 * cooperative client, the consolidator will succeed in removing slabs from both
751 * ends of the free list, completely allocating on the one hand and completely
752 * freeing on the other. Objects selected as move destinations are allocated in
753 * the kmem maintenance thread where move requests are enqueued. A separate
754 * callback thread removes pending callbacks from the queue and calls the
755 * client. The separate thread ensures that client code (the move function) does
756 * not interfere with internal kmem maintenance tasks. A map of pending
757 * callbacks keyed by object address (the object to be moved) is checked to
758 * ensure that duplicate callbacks are not generated for the same object.
759 * Allocating the move destination (the object to move to) prevents subsequent
760 * callbacks from selecting the same destination as an earlier pending callback.
761 *
762 * Move requests can also be generated by kmem_cache_reap() when the system is
763 * desperate for memory and by kmem_cache_move_notify(), called by the client to
764 * notify kmem that a move refused earlier with KMEM_CBRC_LATER is now possible.
765 * The map of pending callbacks is protected by the same lock that protects the
766 * slab layer.
767 *
768 * When the system is desperate for memory, kmem does not bother to determine
769 * whether or not the cache exceeds the fragmentation threshold, but tries to
770 * consolidate as many slabs as possible. Normally, the consolidator chews
771 * slowly, one sparsely allocated slab at a time during each maintenance
772 * interval that the cache is fragmented. When desperate, the consolidator
773 * starts at the last partial slab and enqueues callbacks for every allocated
774 * object on every partial slab, working backwards until it reaches the first
775 * partial slab. The first partial slab, meanwhile, advances in pace with the
776 * consolidator as allocations to supply move destinations for the enqueued
777 * callbacks use up the highly allocated slabs at the front of the free list.
778 * Ideally, the overgrown free list collapses like an accordion, starting at
779 * both ends and ending at the center with a single partial slab.
780 *
781 * 3.3 Client Responses
782 *
783 * When the client returns KMEM_CBRC_NO in response to the move callback, kmem
784 * marks the slab that supplied the stuck object non-reclaimable and moves it to
785  * the front of the free list. The slab remains marked as long as it remains on the
786 * free list, and it appears more allocated to the partial slab compare function
787 * than any unmarked slab, no matter how many of its objects are allocated.
788 * Since even one immovable object ties up the entire slab, the goal is to
789 * completely allocate any slab that cannot be completely freed. kmem does not
790 * bother generating callbacks to move objects from a marked slab unless the
791 * system is desperate.
792 *
793 * When the client responds KMEM_CBRC_LATER, kmem increments a count for the
794 * slab. If the client responds LATER too many times, kmem disbelieves and
795 * treats the response as a NO. The count is cleared when the slab is taken off
796 * the partial slab list or when the client moves one of the slab's objects.
797 *
798 * 4. Observability
799 *
800 * A kmem cache's external fragmentation is best observed with 'mdb -k' using
801 * the ::kmem_slabs dcmd. For a complete description of the command, enter
802 * '::help kmem_slabs' at the mdb prompt.
803 */
804
805 #include <sys/kmem_impl.h>
806 #include <sys/vmem_impl.h>
807 #include <sys/param.h>
808 #include <sys/sysmacros.h>
809 #include <sys/vm.h>
810 #include <sys/proc.h>
811 #include <sys/tuneable.h>
812 #include <sys/systm.h>
813 #include <sys/cmn_err.h>
814 #include <sys/debug.h>
815 #include <sys/sdt.h>
816 #include <sys/mutex.h>
817 #include <sys/bitmap.h>
818 #include <sys/atomic.h>
819 #include <sys/kobj.h>
820 #include <sys/disp.h>
821 #include <vm/seg_kmem.h>
822 #include <sys/log.h>
823 #include <sys/callb.h>
824 #include <sys/taskq.h>
825 #include <sys/modctl.h>
826 #include <sys/reboot.h>
827 #include <sys/id32.h>
828 #include <sys/zone.h>
829 #include <sys/netstack.h>
830 #ifdef DEBUG
831 #include <sys/random.h>
832 #endif
833
834 extern void streams_msg_init(void);
835 extern int segkp_fromheap;
836 extern void segkp_cache_free(void);
837 extern int callout_init_done;
838
839 struct kmem_cache_kstat {
840 kstat_named_t kmc_buf_size;
841 kstat_named_t kmc_align;
842 kstat_named_t kmc_chunk_size;
843 kstat_named_t kmc_slab_size;
844 kstat_named_t kmc_alloc;
845 kstat_named_t kmc_alloc_fail;
846 kstat_named_t kmc_free;
847 kstat_named_t kmc_depot_alloc;
848 kstat_named_t kmc_depot_free;
849 kstat_named_t kmc_depot_contention;
850 kstat_named_t kmc_slab_alloc;
851 kstat_named_t kmc_slab_free;
852 kstat_named_t kmc_buf_constructed;
853 kstat_named_t kmc_buf_avail;
854 kstat_named_t kmc_buf_inuse;
855 kstat_named_t kmc_buf_total;
856 kstat_named_t kmc_buf_max;
857 kstat_named_t kmc_slab_create;
858 kstat_named_t kmc_slab_destroy;
859 kstat_named_t kmc_vmem_source;
860 kstat_named_t kmc_hash_size;
861 kstat_named_t kmc_hash_lookup_depth;
862 kstat_named_t kmc_hash_rescale;
863 kstat_named_t kmc_full_magazines;
864 kstat_named_t kmc_empty_magazines;
865 kstat_named_t kmc_magazine_size;
866 kstat_named_t kmc_reap; /* number of kmem_cache_reap() calls */
867 kstat_named_t kmc_defrag; /* attempts to defrag all partial slabs */
868 kstat_named_t kmc_scan; /* attempts to defrag one partial slab */
869 kstat_named_t kmc_move_callbacks; /* sum of yes, no, later, dn, dk */
870 kstat_named_t kmc_move_yes;
871 kstat_named_t kmc_move_no;
872 kstat_named_t kmc_move_later;
873 kstat_named_t kmc_move_dont_need;
874 kstat_named_t kmc_move_dont_know; /* obj unrecognized by client ... */
875 kstat_named_t kmc_move_hunt_found; /* ... but found in mag layer */
876 kstat_named_t kmc_move_slabs_freed; /* slabs freed by consolidator */
877 kstat_named_t kmc_move_reclaimable; /* buffers, if consolidator ran */
878 } kmem_cache_kstat = {
879 { "buf_size", KSTAT_DATA_UINT64 },
880 { "align", KSTAT_DATA_UINT64 },
881 { "chunk_size", KSTAT_DATA_UINT64 },
882 { "slab_size", KSTAT_DATA_UINT64 },
883 { "alloc", KSTAT_DATA_UINT64 },
884 { "alloc_fail", KSTAT_DATA_UINT64 },
885 { "free", KSTAT_DATA_UINT64 },
886 { "depot_alloc", KSTAT_DATA_UINT64 },
887 { "depot_free", KSTAT_DATA_UINT64 },
888 { "depot_contention", KSTAT_DATA_UINT64 },
889 { "slab_alloc", KSTAT_DATA_UINT64 },
890 { "slab_free", KSTAT_DATA_UINT64 },
891 { "buf_constructed", KSTAT_DATA_UINT64 },
892 { "buf_avail", KSTAT_DATA_UINT64 },
893 { "buf_inuse", KSTAT_DATA_UINT64 },
894 { "buf_total", KSTAT_DATA_UINT64 },
895 { "buf_max", KSTAT_DATA_UINT64 },
896 { "slab_create", KSTAT_DATA_UINT64 },
897 { "slab_destroy", KSTAT_DATA_UINT64 },
898 { "vmem_source", KSTAT_DATA_UINT64 },
899 { "hash_size", KSTAT_DATA_UINT64 },
900 { "hash_lookup_depth", KSTAT_DATA_UINT64 },
901 { "hash_rescale", KSTAT_DATA_UINT64 },
902 { "full_magazines", KSTAT_DATA_UINT64 },
903 { "empty_magazines", KSTAT_DATA_UINT64 },
904 { "magazine_size", KSTAT_DATA_UINT64 },
905 { "reap", KSTAT_DATA_UINT64 },
906 { "defrag", KSTAT_DATA_UINT64 },
907 { "scan", KSTAT_DATA_UINT64 },
908 { "move_callbacks", KSTAT_DATA_UINT64 },
909 { "move_yes", KSTAT_DATA_UINT64 },
910 { "move_no", KSTAT_DATA_UINT64 },
911 { "move_later", KSTAT_DATA_UINT64 },
912 { "move_dont_need", KSTAT_DATA_UINT64 },
913 { "move_dont_know", KSTAT_DATA_UINT64 },
914 { "move_hunt_found", KSTAT_DATA_UINT64 },
915 { "move_slabs_freed", KSTAT_DATA_UINT64 },
916 { "move_reclaimable", KSTAT_DATA_UINT64 },
917 };
918
919 static kmutex_t kmem_cache_kstat_lock;
920
921 /*
922 * The default set of caches to back kmem_alloc().
923 * These sizes should be reevaluated periodically.
924 *
925 * We want allocations that are multiples of the coherency granularity
926  * (64 bytes) to be satisfied from a cache whose buffer size is a multiple of
927  * 64 bytes, so that the allocation will be 64-byte aligned. For all multiples of 64,
928 * the next kmem_cache_size greater than or equal to it must be a
929 * multiple of 64.
930 *
931 * We split the table into two sections: size <= 4k and size > 4k. This
932 * saves a lot of space and cache footprint in our cache tables.
933 */
934 static const int kmem_alloc_sizes[] = {
935 1 * 8,
936 2 * 8,
937 3 * 8,
938 4 * 8, 5 * 8, 6 * 8, 7 * 8,
939 4 * 16, 5 * 16, 6 * 16, 7 * 16,
940 4 * 32, 5 * 32, 6 * 32, 7 * 32,
941 4 * 64, 5 * 64, 6 * 64, 7 * 64,
942 4 * 128, 5 * 128, 6 * 128, 7 * 128,
943 P2ALIGN(8192 / 7, 64),
944 P2ALIGN(8192 / 6, 64),
945 P2ALIGN(8192 / 5, 64),
946 P2ALIGN(8192 / 4, 64),
947 P2ALIGN(8192 / 3, 64),
948 P2ALIGN(8192 / 2, 64),
949 };
950
951 static const int kmem_big_alloc_sizes[] = {
952 2 * 4096, 3 * 4096,
953 2 * 8192, 3 * 8192,
954 4 * 8192, 5 * 8192, 6 * 8192, 7 * 8192,
955 8 * 8192, 9 * 8192, 10 * 8192, 11 * 8192,
956 12 * 8192, 13 * 8192, 14 * 8192, 15 * 8192,
957 16 * 8192
958 };
959
960 #define KMEM_MAXBUF 4096
961 #define KMEM_BIG_MAXBUF_32BIT 32768
962 #define KMEM_BIG_MAXBUF 131072
963
964 #define KMEM_BIG_MULTIPLE 4096 /* big_alloc_sizes must be a multiple */
965 #define KMEM_BIG_SHIFT 12 /* lg(KMEM_BIG_MULTIPLE) */
966
967 static kmem_cache_t *kmem_alloc_table[KMEM_MAXBUF >> KMEM_ALIGN_SHIFT];
968 static kmem_cache_t *kmem_big_alloc_table[KMEM_BIG_MAXBUF >> KMEM_BIG_SHIFT];
969
970 #define KMEM_ALLOC_TABLE_MAX (KMEM_MAXBUF >> KMEM_ALIGN_SHIFT)
971 static size_t kmem_big_alloc_table_max = 0; /* # of filled elements */
972
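/*
 * Each entry is { mt_magsize, mt_align, mt_minbuf, mt_maxbuf } (field names
 * per kmem_impl.h): rounds per magazine, magazine alignment, and the
 * buffer-size bounds selecting the magazine type (all smaller buffers
 * qualify; no larger buffers qualify).
 */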
973 static kmem_magtype_t kmem_magtype[] = {
974 { 1, 8, 3200, 65536 },
975 { 3, 16, 256, 32768 },
976 { 7, 32, 64, 16384 },
977 { 15, 64, 0, 8192 },
978 { 31, 64, 0, 4096 },
979 { 47, 64, 0, 2048 },
980 { 63, 64, 0, 1024 },
981 { 95, 64, 0, 512 },
982 { 143, 64, 0, 0 },
983 };
984
985 static uint32_t kmem_reaping;
986 static uint32_t kmem_reaping_idspace;
987
988 /*
989 * kmem tunables
990 */
991 clock_t kmem_reap_interval; /* cache reaping rate [15 * HZ ticks] */
992 int kmem_depot_contention = 3; /* max failed tryenters per real interval */
993 pgcnt_t kmem_reapahead = 0; /* start reaping N pages before pageout */
994 int kmem_panic = 1; /* whether to panic on error */
995 int kmem_logging = 1; /* kmem_log_enter() override */
996 uint32_t kmem_mtbf = 0; /* mean time between failures [default: off] */
997 size_t kmem_transaction_log_size; /* transaction log size [2% of memory] */
998 size_t kmem_content_log_size; /* content log size [2% of memory] */
999 size_t kmem_failure_log_size; /* failure log [4 pages per CPU] */
1000 size_t kmem_slab_log_size; /* slab create log [4 pages per CPU] */
1001 size_t kmem_content_maxsave = 256; /* KMF_CONTENTS max bytes to log */
1002 size_t kmem_lite_minsize = 0; /* minimum buffer size for KMF_LITE */
1003 size_t kmem_lite_maxalign = 1024; /* maximum buffer alignment for KMF_LITE */
1004 int kmem_lite_pcs = 4; /* number of PCs to store in KMF_LITE mode */
1005 size_t kmem_maxverify; /* maximum bytes to inspect in debug routines */
1006 size_t kmem_minfirewall; /* hardware-enforced redzone threshold */
1007
1008 #ifdef _LP64
1009 size_t kmem_max_cached = KMEM_BIG_MAXBUF; /* maximum kmem_alloc cache */
1010 #else
1011 size_t kmem_max_cached = KMEM_BIG_MAXBUF_32BIT; /* maximum kmem_alloc cache */
1012 #endif
1013
1014 #ifdef DEBUG
1015 int kmem_flags = KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE | KMF_CONTENTS;
1016 #else
1017 int kmem_flags = 0;
1018 #endif
1019 int kmem_ready;
1020
1021 static kmem_cache_t *kmem_slab_cache;
1022 static kmem_cache_t *kmem_bufctl_cache;
1023 static kmem_cache_t *kmem_bufctl_audit_cache;
1024
1025 static kmutex_t kmem_cache_lock; /* inter-cache linkage only */
1026 static list_t kmem_caches;
1027
1028 static taskq_t *kmem_taskq;
1029 static kmutex_t kmem_flags_lock;
1030 static vmem_t *kmem_metadata_arena;
1031 static vmem_t *kmem_msb_arena; /* arena for metadata caches */
1032 static vmem_t *kmem_cache_arena;
1033 static vmem_t *kmem_hash_arena;
1034 static vmem_t *kmem_log_arena;
1035 static vmem_t *kmem_oversize_arena;
1036 static vmem_t *kmem_va_arena;
1037 static vmem_t *kmem_default_arena;
1038 static vmem_t *kmem_firewall_va_arena;
1039 static vmem_t *kmem_firewall_arena;
1040
1041 /*
1042 * Define KMEM_STATS to turn on statistic gathering. By default, it is only
1043 * turned on when DEBUG is also defined.
1044 */
1045 #ifdef DEBUG
1046 #define KMEM_STATS
1047 #endif /* DEBUG */
1048
1049 #ifdef KMEM_STATS
1050 #define KMEM_STAT_ADD(stat) ((stat)++)
1051 #define KMEM_STAT_COND_ADD(cond, stat) ((void) (!(cond) || (stat)++))
1052 #else
1053 #define KMEM_STAT_ADD(stat) /* nothing */
1054 #define KMEM_STAT_COND_ADD(cond, stat) /* nothing */
1055 #endif /* KMEM_STATS */
1056
1057 /*
1058 * kmem slab consolidator thresholds (tunables)
1059 */
1060 size_t kmem_frag_minslabs = 101; /* minimum total slabs */
1061 size_t kmem_frag_numer = 1; /* free buffers (numerator) */
1062 size_t kmem_frag_denom = KMEM_VOID_FRACTION; /* buffers (denominator) */
1063 /*
1064 * Maximum number of slabs from which to move buffers during a single
1065 * maintenance interval while the system is not low on memory.
1066 */
1067 size_t kmem_reclaim_max_slabs = 1;
1068 /*
1069 * Number of slabs to scan backwards from the end of the partial slab list
1070 * when searching for buffers to relocate.
1071 */
1072 size_t kmem_reclaim_scan_range = 12;
1073
1074 #ifdef KMEM_STATS
1075 static struct {
1076 uint64_t kms_callbacks;
1077 uint64_t kms_yes;
1078 uint64_t kms_no;
1079 uint64_t kms_later;
1080 uint64_t kms_dont_need;
1081 uint64_t kms_dont_know;
1082 uint64_t kms_hunt_found_mag;
1083 uint64_t kms_hunt_found_slab;
1084 uint64_t kms_hunt_alloc_fail;
1085 uint64_t kms_hunt_lucky;
1086 uint64_t kms_notify;
1087 uint64_t kms_notify_callbacks;
1088 uint64_t kms_disbelief;
1089 uint64_t kms_already_pending;
1090 uint64_t kms_callback_alloc_fail;
1091 uint64_t kms_callback_taskq_fail;
1092 uint64_t kms_endscan_slab_dead;
1093 uint64_t kms_endscan_slab_destroyed;
1094 uint64_t kms_endscan_nomem;
1095 uint64_t kms_endscan_refcnt_changed;
1096 uint64_t kms_endscan_nomove_changed;
1097 uint64_t kms_endscan_freelist;
1098 uint64_t kms_avl_update;
1099 uint64_t kms_avl_noupdate;
1100 uint64_t kms_no_longer_reclaimable;
1101 uint64_t kms_notify_no_longer_reclaimable;
1102 uint64_t kms_notify_slab_dead;
1103 uint64_t kms_notify_slab_destroyed;
1104 uint64_t kms_alloc_fail;
1105 uint64_t kms_constructor_fail;
1106 uint64_t kms_dead_slabs_freed;
1107 uint64_t kms_defrags;
1108 uint64_t kms_scans;
1109 uint64_t kms_scan_depot_ws_reaps;
1110 uint64_t kms_debug_reaps;
1111 uint64_t kms_debug_scans;
1112 } kmem_move_stats;
1113 #endif /* KMEM_STATS */
1114
1115 /* consolidator knobs */
1116 static boolean_t kmem_move_noreap;
1117 static boolean_t kmem_move_blocked;
1118 static boolean_t kmem_move_fulltilt;
1119 static boolean_t kmem_move_any_partial;
1120
1121 #ifdef DEBUG
1122 /*
1123 * kmem consolidator debug tunables:
1124 * Ensure code coverage by occasionally running the consolidator even when the
1125 * caches are not fragmented (they may never be). These intervals are mean
1126 * times expressed in cache maintenance intervals (kmem_cache_update).
1127 */
1128 uint32_t kmem_mtb_move = 60; /* defrag 1 slab (~15min) */
1129 uint32_t kmem_mtb_reap = 1800; /* defrag all slabs (~7.5hrs) */
1130 #endif /* DEBUG */
1131
1132 static kmem_cache_t *kmem_defrag_cache;
1133 static kmem_cache_t *kmem_move_cache;
1134 static taskq_t *kmem_move_taskq;
1135
1136 static void kmem_cache_scan(kmem_cache_t *);
1137 static void kmem_cache_defrag(kmem_cache_t *);
1138 static void kmem_slab_prefill(kmem_cache_t *, kmem_slab_t *);
1139
1140
1141 kmem_log_header_t *kmem_transaction_log;
1142 kmem_log_header_t *kmem_content_log;
1143 kmem_log_header_t *kmem_failure_log;
1144 kmem_log_header_t *kmem_slab_log;
1145
1146 static int kmem_lite_count; /* # of PCs in kmem_buftag_lite_t */
1147
1148 #define KMEM_BUFTAG_LITE_ENTER(bt, count, caller) \
1149 if ((count) > 0) { \
1150 pc_t *_s = ((kmem_buftag_lite_t *)(bt))->bt_history; \
1151 pc_t *_e; \
1152 /* memmove() the old entries down one notch */ \
1153 for (_e = &_s[(count) - 1]; _e > _s; _e--) \
1154 *_e = *(_e - 1); \
1155 *_s = (uintptr_t)(caller); \
1156 }
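
/*
 * Example (kmem_lite_count is a tunable; 4 is only an illustration):
 * with count == 4, each transaction shifts the four saved program
 * counters in bt_history down one slot and records the new caller in
 * bt_history[0], leaving a short most-recent-first trace of the
 * buffer's alloc/free history.
 */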
1157
1158 #define KMERR_MODIFIED 0 /* buffer modified while on freelist */
1159 #define KMERR_REDZONE 1 /* redzone violation (write past end of buf) */
1160 #define KMERR_DUPFREE 2 /* freed a buffer twice */
1161 #define KMERR_BADADDR 3 /* freed a bad (unallocated) address */
1162 #define KMERR_BADBUFTAG 4 /* buftag corrupted */
1163 #define KMERR_BADBUFCTL 5 /* bufctl corrupted */
1164 #define KMERR_BADCACHE 6 /* freed a buffer to the wrong cache */
1165 #define KMERR_BADSIZE 7 /* alloc size != free size */
1166 #define KMERR_BADBASE 8 /* buffer base address wrong */
1167
1168 struct {
1169 hrtime_t kmp_timestamp; /* timestamp of panic */
1170 int kmp_error; /* type of kmem error */
1171 void *kmp_buffer; /* buffer that induced panic */
1172 void *kmp_realbuf; /* real start address for buffer */
1173 kmem_cache_t *kmp_cache; /* buffer's cache according to client */
1174 kmem_cache_t *kmp_realcache; /* actual cache containing buffer */
1175 kmem_slab_t *kmp_slab; /* slab according to kmem_findslab() */
1176 kmem_bufctl_t *kmp_bufctl; /* bufctl */
1177 } kmem_panic_info;
1178
1179
1180 static void
1181 copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
1182 {
1183 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1184 uint64_t *buf = buf_arg;
1185
1186 while (buf < bufend)
1187 *buf++ = pattern;
1188 }
1189
1190 static void *
1191 verify_pattern(uint64_t pattern, void *buf_arg, size_t size)
1192 {
1193 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1194 uint64_t *buf;
1195
1196 for (buf = buf_arg; buf < bufend; buf++)
1197 if (*buf != pattern)
1198 return (buf);
1199 return (NULL);
1200 }
1201
1202 static void *
1203 verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size)
1204 {
1205 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1206 uint64_t *buf;
1207
1208 for (buf = buf_arg; buf < bufend; buf++) {
1209 if (*buf != old) {
1210 copy_pattern(old, buf_arg,
1211 (char *)buf - (char *)buf_arg);
1212 return (buf);
1213 }
1214 *buf = new;
1215 }
1216
1217 return (NULL);
1218 }
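
/*
 * Example of how the pattern helpers above cooperate with KMF_DEADBEEF
 * (see kmem_cache_alloc_debug() below): freed buffers are filled with
 * KMEM_FREE_PATTERN, and a later allocation verifies that pattern word
 * by word while replacing it with KMEM_UNINITIALIZED_PATTERN:
 *
 *    if (verify_and_copy_pattern(KMEM_FREE_PATTERN,
 *        KMEM_UNINITIALIZED_PATTERN, buf, cp->cache_verify) != NULL)
 *            kmem_error(KMERR_MODIFIED, cp, buf);
 */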
1219
1220 static void
1221 kmem_cache_applyall(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
1222 {
1223 kmem_cache_t *cp;
1224
1225 mutex_enter(&kmem_cache_lock);
1226 for (cp = list_head(&kmem_caches); cp != NULL;
1227 cp = list_next(&kmem_caches, cp))
1228 if (tq != NULL)
1229 (void) taskq_dispatch(tq, (task_func_t *)func, cp,
1230 tqflag);
1231 else
1232 func(cp);
1233 mutex_exit(&kmem_cache_lock);
1234 }
1235
1236 static void
1237 kmem_cache_applyall_id(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
1238 {
1239 kmem_cache_t *cp;
1240
1241 mutex_enter(&kmem_cache_lock);
1242 for (cp = list_head(&kmem_caches); cp != NULL;
1243 cp = list_next(&kmem_caches, cp)) {
1244 if (!(cp->cache_cflags & KMC_IDENTIFIER))
1245 continue;
1246 if (tq != NULL)
1247 (void) taskq_dispatch(tq, (task_func_t *)func, cp,
1248 tqflag);
1249 else
1250 func(cp);
1251 }
1252 mutex_exit(&kmem_cache_lock);
1253 }
1254
1255 /*
1256 * Debugging support. Given a buffer address, find its slab.
1257 */
1258 static kmem_slab_t *
1259 kmem_findslab(kmem_cache_t *cp, void *buf)
1260 {
1261 kmem_slab_t *sp;
1262
1263 mutex_enter(&cp->cache_lock);
1264 for (sp = list_head(&cp->cache_complete_slabs); sp != NULL;
1265 sp = list_next(&cp->cache_complete_slabs, sp)) {
1266 if (KMEM_SLAB_MEMBER(sp, buf)) {
1267 mutex_exit(&cp->cache_lock);
1268 return (sp);
1269 }
1270 }
1271 for (sp = avl_first(&cp->cache_partial_slabs); sp != NULL;
1272 sp = AVL_NEXT(&cp->cache_partial_slabs, sp)) {
1273 if (KMEM_SLAB_MEMBER(sp, buf)) {
1274 mutex_exit(&cp->cache_lock);
1275 return (sp);
1276 }
1277 }
1278 mutex_exit(&cp->cache_lock);
1279
1280 return (NULL);
1281 }
1282
1283 static void
1284 kmem_error(int error, kmem_cache_t *cparg, void *bufarg)
1285 {
1286 kmem_buftag_t *btp = NULL;
1287 kmem_bufctl_t *bcp = NULL;
1288 kmem_cache_t *cp = cparg;
1289 kmem_slab_t *sp;
1290 uint64_t *off;
1291 void *buf = bufarg;
1292
1293 kmem_logging = 0; /* stop logging when a bad thing happens */
1294
1295 kmem_panic_info.kmp_timestamp = gethrtime();
1296
1297 sp = kmem_findslab(cp, buf);
1298 if (sp == NULL) {
1299 for (cp = list_tail(&kmem_caches); cp != NULL;
1300 cp = list_prev(&kmem_caches, cp)) {
1301 if ((sp = kmem_findslab(cp, buf)) != NULL)
1302 break;
1303 }
1304 }
1305
1306 if (sp == NULL) {
1307 cp = NULL;
1308 error = KMERR_BADADDR;
1309 } else {
1310 if (cp != cparg)
1311 error = KMERR_BADCACHE;
1312 else
1313 buf = (char *)bufarg - ((uintptr_t)bufarg -
1314 (uintptr_t)sp->slab_base) % cp->cache_chunksize;
1315 if (buf != bufarg)
1316 error = KMERR_BADBASE;
1317 if (cp->cache_flags & KMF_BUFTAG)
1318 btp = KMEM_BUFTAG(cp, buf);
1319 if (cp->cache_flags & KMF_HASH) {
1320 mutex_enter(&cp->cache_lock);
1321 for (bcp = *KMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next)
1322 if (bcp->bc_addr == buf)
1323 break;
1324 mutex_exit(&cp->cache_lock);
1325 if (bcp == NULL && btp != NULL)
1326 bcp = btp->bt_bufctl;
1327 if (kmem_findslab(cp->cache_bufctl_cache, bcp) ==
1328 NULL || P2PHASE((uintptr_t)bcp, KMEM_ALIGN) ||
1329 bcp->bc_addr != buf) {
1330 error = KMERR_BADBUFCTL;
1331 bcp = NULL;
1332 }
1333 }
1334 }
1335
1336 kmem_panic_info.kmp_error = error;
1337 kmem_panic_info.kmp_buffer = bufarg;
1338 kmem_panic_info.kmp_realbuf = buf;
1339 kmem_panic_info.kmp_cache = cparg;
1340 kmem_panic_info.kmp_realcache = cp;
1341 kmem_panic_info.kmp_slab = sp;
1342 kmem_panic_info.kmp_bufctl = bcp;
1343
1344 printf("kernel memory allocator: ");
1345
1346 switch (error) {
1347
1348 case KMERR_MODIFIED:
1349 printf("buffer modified after being freed\n");
1350 off = verify_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1351 if (off == NULL) /* shouldn't happen */
1352 off = buf;
1353 printf("modification occurred at offset 0x%lx "
1354 "(0x%llx replaced by 0x%llx)\n",
1355 (uintptr_t)off - (uintptr_t)buf,
1356 (longlong_t)KMEM_FREE_PATTERN, (longlong_t)*off);
1357 break;
1358
1359 case KMERR_REDZONE:
1360 printf("redzone violation: write past end of buffer\n");
1361 break;
1362
1363 case KMERR_BADADDR:
1364 printf("invalid free: buffer not in cache\n");
1365 break;
1366
1367 case KMERR_DUPFREE:
1368 printf("duplicate free: buffer freed twice\n");
1369 break;
1370
1371 case KMERR_BADBUFTAG:
1372 printf("boundary tag corrupted\n");
1373 printf("bcp ^ bxstat = %lx, should be %lx\n",
1374 (intptr_t)btp->bt_bufctl ^ btp->bt_bxstat,
1375 KMEM_BUFTAG_FREE);
1376 break;
1377
1378 case KMERR_BADBUFCTL:
1379 printf("bufctl corrupted\n");
1380 break;
1381
1382 case KMERR_BADCACHE:
1383 printf("buffer freed to wrong cache\n");
1384 printf("buffer was allocated from %s,\n", cp->cache_name);
1385 printf("caller attempting free to %s.\n", cparg->cache_name);
1386 break;
1387
1388 case KMERR_BADSIZE:
1389 printf("bad free: free size (%u) != alloc size (%u)\n",
1390 KMEM_SIZE_DECODE(((uint32_t *)btp)[0]),
1391 KMEM_SIZE_DECODE(((uint32_t *)btp)[1]));
1392 break;
1393
1394 case KMERR_BADBASE:
1395 printf("bad free: free address (%p) != alloc address (%p)\n",
1396 bufarg, buf);
1397 break;
1398 }
1399
1400 printf("buffer=%p bufctl=%p cache: %s\n",
1401 bufarg, (void *)bcp, cparg->cache_name);
1402
1403 if (bcp != NULL && (cp->cache_flags & KMF_AUDIT) &&
1404 error != KMERR_BADBUFCTL) {
1405 int d;
1406 timestruc_t ts;
1407 kmem_bufctl_audit_t *bcap = (kmem_bufctl_audit_t *)bcp;
1408
1409 hrt2ts(kmem_panic_info.kmp_timestamp - bcap->bc_timestamp, &ts);
1410 printf("previous transaction on buffer %p:\n", buf);
1411 printf("thread=%p time=T-%ld.%09ld slab=%p cache: %s\n",
1412 (void *)bcap->bc_thread, ts.tv_sec, ts.tv_nsec,
1413 (void *)sp, cp->cache_name);
1414 for (d = 0; d < MIN(bcap->bc_depth, KMEM_STACK_DEPTH); d++) {
1415 ulong_t off;
1416 char *sym = kobj_getsymname(bcap->bc_stack[d], &off);
1417 printf("%s+%lx\n", sym ? sym : "?", off);
1418 }
1419 }
1420 if (kmem_panic > 0)
1421 panic("kernel heap corruption detected");
1422 if (kmem_panic == 0)
1423 debug_enter(NULL);
1424 kmem_logging = 1; /* resume logging */
1425 }
1426
1427 static kmem_log_header_t *
1428 kmem_log_init(size_t logsize)
1429 {
1430 kmem_log_header_t *lhp;
1431 int nchunks = 4 * max_ncpus;
1432 size_t lhsize = (size_t)&((kmem_log_header_t *)0)->lh_cpu[max_ncpus];
1433 int i;
1434
1435 /*
1436 * Make sure that lhp->lh_cpu[] is nicely aligned
1437 * to prevent false sharing of cache lines.
1438 */
1439 lhsize = P2ROUNDUP(lhsize, KMEM_ALIGN);
1440 lhp = vmem_xalloc(kmem_log_arena, lhsize, 64, P2NPHASE(lhsize, 64), 0,
1441 NULL, NULL, VM_SLEEP);
1442 bzero(lhp, lhsize);
1443
1444 mutex_init(&lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL);
1445 lhp->lh_nchunks = nchunks;
1446 lhp->lh_chunksize = P2ROUNDUP(logsize / nchunks + 1, PAGESIZE);
1447 lhp->lh_base = vmem_alloc(kmem_log_arena,
1448 lhp->lh_chunksize * nchunks, VM_SLEEP);
1449 lhp->lh_free = vmem_alloc(kmem_log_arena,
1450 nchunks * sizeof (int), VM_SLEEP);
1451 bzero(lhp->lh_base, lhp->lh_chunksize * nchunks);
1452
1453 for (i = 0; i < max_ncpus; i++) {
1454 kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[i];
1455 mutex_init(&clhp->clh_lock, NULL, MUTEX_DEFAULT, NULL);
1456 clhp->clh_chunk = i;
1457 }
1458
1459 for (i = max_ncpus; i < nchunks; i++)
1460 lhp->lh_free[i] = i;
1461
1462 lhp->lh_head = max_ncpus;
1463 lhp->lh_tail = 0;
1464
1465 return (lhp);
1466 }
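
/*
 * Worked example of the log geometry (numbers are illustrative): with
 * max_ncpus == 4 and logsize == 64K, kmem_log_init() creates 16 chunks
 * of P2ROUNDUP(64K / 16 + 1, PAGESIZE) == 8K each (assuming 4K pages).
 * Each CPU starts out owning one chunk; the other twelve sit on the
 * lh_free ring, from which kmem_log_enter() recycles the oldest chunk
 * whenever a CPU's current chunk fills up.
 */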
1467
1468 static void *
1469 kmem_log_enter(kmem_log_header_t *lhp, void *data, size_t size)
1470 {
1471 void *logspace;
1472 kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[CPU->cpu_seqid];
1473
1474 if (lhp == NULL || kmem_logging == 0 || panicstr)
1475 return (NULL);
1476
1477 mutex_enter(&clhp->clh_lock);
1478 clhp->clh_hits++;
1479 if (size > clhp->clh_avail) {
1480 mutex_enter(&lhp->lh_lock);
1481 lhp->lh_hits++;
1482 lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk;
1483 lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks;
1484 clhp->clh_chunk = lhp->lh_free[lhp->lh_head];
1485 lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks;
1486 clhp->clh_current = lhp->lh_base +
1487 clhp->clh_chunk * lhp->lh_chunksize;
1488 clhp->clh_avail = lhp->lh_chunksize;
1489 if (size > lhp->lh_chunksize)
1490 size = lhp->lh_chunksize;
1491 mutex_exit(&lhp->lh_lock);
1492 }
1493 logspace = clhp->clh_current;
1494 clhp->clh_current += size;
1495 clhp->clh_avail -= size;
1496 bcopy(data, logspace, size);
1497 mutex_exit(&clhp->clh_lock);
1498 return (logspace);
1499 }
1500
1501 #define KMEM_AUDIT(lp, cp, bcp) \
1502 { \
1503 kmem_bufctl_audit_t *_bcp = (kmem_bufctl_audit_t *)(bcp); \
1504 _bcp->bc_timestamp = gethrtime(); \
1505 _bcp->bc_thread = curthread; \
1506 _bcp->bc_depth = getpcstack(_bcp->bc_stack, KMEM_STACK_DEPTH); \
1507 _bcp->bc_lastlog = kmem_log_enter((lp), _bcp, sizeof (*_bcp)); \
1508 }
1509
1510 static void
1511 kmem_log_event(kmem_log_header_t *lp, kmem_cache_t *cp,
1512 kmem_slab_t *sp, void *addr)
1513 {
1514 kmem_bufctl_audit_t bca;
1515
1516 bzero(&bca, sizeof (kmem_bufctl_audit_t));
1517 bca.bc_addr = addr;
1518 bca.bc_slab = sp;
1519 bca.bc_cache = cp;
1520 KMEM_AUDIT(lp, cp, &bca);
1521 }
1522
1523 /*
1524 * Create a new slab for cache cp.
1525 */
1526 static kmem_slab_t *
1527 kmem_slab_create(kmem_cache_t *cp, int kmflag)
1528 {
1529 size_t slabsize = cp->cache_slabsize;
1530 size_t chunksize = cp->cache_chunksize;
1531 int cache_flags = cp->cache_flags;
1532 size_t color, chunks;
1533 char *buf, *slab;
1534 kmem_slab_t *sp;
1535 kmem_bufctl_t *bcp;
1536 vmem_t *vmp = cp->cache_arena;
1537
1538 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1539
1540 color = cp->cache_color + cp->cache_align;
1541 if (color > cp->cache_maxcolor)
1542 color = cp->cache_mincolor;
1543 cp->cache_color = color;
1544
1545 slab = vmem_alloc(vmp, slabsize, kmflag & KM_VMFLAGS);
1546
1547 if (slab == NULL)
1548 goto vmem_alloc_failure;
1549
1550 ASSERT(P2PHASE((uintptr_t)slab, vmp->vm_quantum) == 0);
1551
1552 /*
1553 * Reverify what was already checked in kmem_cache_set_move(), since the
1554 * consolidator depends (for correctness) on slabs being initialized
1555 * with the 0xbaddcafe memory pattern (setting a low order bit usable by
1556 * clients to distinguish uninitialized memory from known objects).
1557 */
1558 ASSERT((cp->cache_move == NULL) || !(cp->cache_cflags & KMC_NOTOUCH));
1559 if (!(cp->cache_cflags & KMC_NOTOUCH))
1560 copy_pattern(KMEM_UNINITIALIZED_PATTERN, slab, slabsize);
1561
1562 if (cache_flags & KMF_HASH) {
1563 if ((sp = kmem_cache_alloc(kmem_slab_cache, kmflag)) == NULL)
1564 goto slab_alloc_failure;
1565 chunks = (slabsize - color) / chunksize;
1566 } else {
1567 sp = KMEM_SLAB(cp, slab);
1568 chunks = (slabsize - sizeof (kmem_slab_t) - color) / chunksize;
1569 }
1570
1571 sp->slab_cache = cp;
1572 sp->slab_head = NULL;
1573 sp->slab_refcnt = 0;
1574 sp->slab_base = buf = slab + color;
1575 sp->slab_chunks = chunks;
1576 sp->slab_stuck_offset = (uint32_t)-1;
1577 sp->slab_later_count = 0;
1578 sp->slab_flags = 0;
1579
1580 ASSERT(chunks > 0);
1581 while (chunks-- != 0) {
1582 if (cache_flags & KMF_HASH) {
1583 bcp = kmem_cache_alloc(cp->cache_bufctl_cache, kmflag);
1584 if (bcp == NULL)
1585 goto bufctl_alloc_failure;
1586 if (cache_flags & KMF_AUDIT) {
1587 kmem_bufctl_audit_t *bcap =
1588 (kmem_bufctl_audit_t *)bcp;
1589 bzero(bcap, sizeof (kmem_bufctl_audit_t));
1590 bcap->bc_cache = cp;
1591 }
1592 bcp->bc_addr = buf;
1593 bcp->bc_slab = sp;
1594 } else {
1595 bcp = KMEM_BUFCTL(cp, buf);
1596 }
1597 if (cache_flags & KMF_BUFTAG) {
1598 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1599 btp->bt_redzone = KMEM_REDZONE_PATTERN;
1600 btp->bt_bufctl = bcp;
1601 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
1602 if (cache_flags & KMF_DEADBEEF) {
1603 copy_pattern(KMEM_FREE_PATTERN, buf,
1604 cp->cache_verify);
1605 }
1606 }
1607 bcp->bc_next = sp->slab_head;
1608 sp->slab_head = bcp;
1609 buf += chunksize;
1610 }
1611
1612 kmem_log_event(kmem_slab_log, cp, sp, slab);
1613
1614 return (sp);
1615
1616 bufctl_alloc_failure:
1617
1618 while ((bcp = sp->slab_head) != NULL) {
1619 sp->slab_head = bcp->bc_next;
1620 kmem_cache_free(cp->cache_bufctl_cache, bcp);
1621 }
1622 kmem_cache_free(kmem_slab_cache, sp);
1623
1624 slab_alloc_failure:
1625
1626 vmem_free(vmp, slab, slabsize);
1627
1628 vmem_alloc_failure:
1629
1630 kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1631 atomic_inc_64(&cp->cache_alloc_fail);
1632
1633 return (NULL);
1634 }
1635
1636 /*
1637 * Destroy a slab.
1638 */
1639 static void
1640 kmem_slab_destroy(kmem_cache_t *cp, kmem_slab_t *sp)
1641 {
1642 vmem_t *vmp = cp->cache_arena;
1643 void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum);
1644
1645 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1646 ASSERT(sp->slab_refcnt == 0);
1647
1648 if (cp->cache_flags & KMF_HASH) {
1649 kmem_bufctl_t *bcp;
1650 while ((bcp = sp->slab_head) != NULL) {
1651 sp->slab_head = bcp->bc_next;
1652 kmem_cache_free(cp->cache_bufctl_cache, bcp);
1653 }
1654 kmem_cache_free(kmem_slab_cache, sp);
1655 }
1656 vmem_free(vmp, slab, cp->cache_slabsize);
1657 }
1658
1659 static void *
1660 kmem_slab_alloc_impl(kmem_cache_t *cp, kmem_slab_t *sp, boolean_t prefill)
1661 {
1662 kmem_bufctl_t *bcp, **hash_bucket;
1663 void *buf;
1664 boolean_t new_slab = (sp->slab_refcnt == 0);
1665
1666 ASSERT(MUTEX_HELD(&cp->cache_lock));
1667 /*
1668 * kmem_slab_alloc() drops cache_lock when it creates a new slab, so we
1669 * can't ASSERT(avl_is_empty(&cp->cache_partial_slabs)) here when the
1670 * slab is newly created.
1671 */
1672 ASSERT(new_slab || (KMEM_SLAB_IS_PARTIAL(sp) &&
1673 (sp == avl_first(&cp->cache_partial_slabs))));
1674 ASSERT(sp->slab_cache == cp);
1675
1676 cp->cache_slab_alloc++;
1677 cp->cache_bufslab--;
1678 sp->slab_refcnt++;
1679
1680 bcp = sp->slab_head;
1681 sp->slab_head = bcp->bc_next;
1682
1683 if (cp->cache_flags & KMF_HASH) {
1684 /*
1685 * Add buffer to allocated-address hash table.
1686 */
1687 buf = bcp->bc_addr;
1688 hash_bucket = KMEM_HASH(cp, buf);
1689 bcp->bc_next = *hash_bucket;
1690 *hash_bucket = bcp;
1691 if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1692 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1693 }
1694 } else {
1695 buf = KMEM_BUF(cp, bcp);
1696 }
1697
1698 ASSERT(KMEM_SLAB_MEMBER(sp, buf));
1699
1700 if (sp->slab_head == NULL) {
1701 ASSERT(KMEM_SLAB_IS_ALL_USED(sp));
1702 if (new_slab) {
1703 ASSERT(sp->slab_chunks == 1);
1704 } else {
1705 ASSERT(sp->slab_chunks > 1); /* the slab was partial */
1706 avl_remove(&cp->cache_partial_slabs, sp);
1707 sp->slab_later_count = 0; /* clear history */
1708 sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
1709 sp->slab_stuck_offset = (uint32_t)-1;
1710 }
1711 list_insert_head(&cp->cache_complete_slabs, sp);
1712 cp->cache_complete_slab_count++;
1713 return (buf);
1714 }
1715
1716 ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
1717 /*
1718 * Peek to see if the magazine layer is enabled before
1719 * we prefill. We're not holding the cpu cache lock,
1720 * so the peek could be wrong, but there's no harm in it.
1721 */
1722 if (new_slab && prefill && (cp->cache_flags & KMF_PREFILL) &&
1723 (KMEM_CPU_CACHE(cp)->cc_magsize != 0)) {
1724 kmem_slab_prefill(cp, sp);
1725 return (buf);
1726 }
1727
1728 if (new_slab) {
1729 avl_add(&cp->cache_partial_slabs, sp);
1730 return (buf);
1731 }
1732
1733 /*
1734 * The slab is now more allocated than it was, so the
1735 * order remains unchanged.
1736 */
1737 ASSERT(!avl_update(&cp->cache_partial_slabs, sp));
1738 return (buf);
1739 }
1740
1741 /*
1742 * Allocate a raw (unconstructed) buffer from cp's slab layer.
1743 */
1744 static void *
1745 kmem_slab_alloc(kmem_cache_t *cp, int kmflag)
1746 {
1747 kmem_slab_t *sp;
1748 void *buf;
1749 boolean_t test_destructor;
1750
1751 mutex_enter(&cp->cache_lock);
1752 test_destructor = (cp->cache_slab_alloc == 0);
1753 sp = avl_first(&cp->cache_partial_slabs);
1754 if (sp == NULL) {
1755 ASSERT(cp->cache_bufslab == 0);
1756
1757 /*
1758 * The freelist is empty. Create a new slab.
1759 */
1760 mutex_exit(&cp->cache_lock);
1761 if ((sp = kmem_slab_create(cp, kmflag)) == NULL) {
1762 return (NULL);
1763 }
1764 mutex_enter(&cp->cache_lock);
1765 cp->cache_slab_create++;
1766 if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax)
1767 cp->cache_bufmax = cp->cache_buftotal;
1768 cp->cache_bufslab += sp->slab_chunks;
1769 }
1770
1771 buf = kmem_slab_alloc_impl(cp, sp, B_TRUE);
1772 ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1773 (cp->cache_complete_slab_count +
1774 avl_numnodes(&cp->cache_partial_slabs) +
1775 (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1776 mutex_exit(&cp->cache_lock);
1777
1778 if (test_destructor && cp->cache_destructor != NULL) {
1779 /*
1780 * On the first kmem_slab_alloc(), assert that it is valid to
1781 * call the destructor on a newly constructed object without any
1782 * client involvement.
1783 */
1784 if ((cp->cache_constructor == NULL) ||
1785 cp->cache_constructor(buf, cp->cache_private,
1786 kmflag) == 0) {
1787 cp->cache_destructor(buf, cp->cache_private);
1788 }
1789 copy_pattern(KMEM_UNINITIALIZED_PATTERN, buf,
1790 cp->cache_bufsize);
1791 if (cp->cache_flags & KMF_DEADBEEF) {
1792 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1793 }
1794 }
1795
1796 return (buf);
1797 }
1798
1799 static void kmem_slab_move_yes(kmem_cache_t *, kmem_slab_t *, void *);
1800
1801 /*
1802 * Free a raw (unconstructed) buffer to cp's slab layer.
1803 */
1804 static void
1805 kmem_slab_free(kmem_cache_t *cp, void *buf)
1806 {
1807 kmem_slab_t *sp;
1808 kmem_bufctl_t *bcp, **prev_bcpp;
1809
1810 ASSERT(buf != NULL);
1811
1812 mutex_enter(&cp->cache_lock);
1813 cp->cache_slab_free++;
1814
1815 if (cp->cache_flags & KMF_HASH) {
1816 /*
1817 * Look up buffer in allocated-address hash table.
1818 */
1819 prev_bcpp = KMEM_HASH(cp, buf);
1820 while ((bcp = *prev_bcpp) != NULL) {
1821 if (bcp->bc_addr == buf) {
1822 *prev_bcpp = bcp->bc_next;
1823 sp = bcp->bc_slab;
1824 break;
1825 }
1826 cp->cache_lookup_depth++;
1827 prev_bcpp = &bcp->bc_next;
1828 }
1829 } else {
1830 bcp = KMEM_BUFCTL(cp, buf);
1831 sp = KMEM_SLAB(cp, buf);
1832 }
1833
1834 if (bcp == NULL || sp->slab_cache != cp || !KMEM_SLAB_MEMBER(sp, buf)) {
1835 mutex_exit(&cp->cache_lock);
1836 kmem_error(KMERR_BADADDR, cp, buf);
1837 return;
1838 }
1839
1840 if (KMEM_SLAB_OFFSET(sp, buf) == sp->slab_stuck_offset) {
1841 /*
1842 * If this is the buffer that prevented the consolidator from
1843 * clearing the slab, we can reset the slab flags now that the
1844 * buffer is freed. (It makes sense to do this in
1845 * kmem_cache_free(), where the client gives up ownership of the
1846 * buffer, but on the hot path the test is too expensive.)
1847 */
1848 kmem_slab_move_yes(cp, sp, buf);
1849 }
1850
1851 if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1852 if (cp->cache_flags & KMF_CONTENTS)
1853 ((kmem_bufctl_audit_t *)bcp)->bc_contents =
1854 kmem_log_enter(kmem_content_log, buf,
1855 cp->cache_contents);
1856 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1857 }
1858
1859 bcp->bc_next = sp->slab_head;
1860 sp->slab_head = bcp;
1861
1862 cp->cache_bufslab++;
1863 ASSERT(sp->slab_refcnt >= 1);
1864
1865 if (--sp->slab_refcnt == 0) {
1866 /*
1867 * There are no outstanding allocations from this slab,
1868 * so we can reclaim the memory.
1869 */
1870 if (sp->slab_chunks == 1) {
1871 list_remove(&cp->cache_complete_slabs, sp);
1872 cp->cache_complete_slab_count--;
1873 } else {
1874 avl_remove(&cp->cache_partial_slabs, sp);
1875 }
1876
1877 cp->cache_buftotal -= sp->slab_chunks;
1878 cp->cache_bufslab -= sp->slab_chunks;
1879 /*
1880 * Defer releasing the slab to the virtual memory subsystem
1881 * while there is a pending move callback, since we guarantee
1882 * that buffers passed to the move callback have only been
1883 * touched by kmem or by the client itself. Since the memory
1884 * patterns baddcafe (uninitialized) and deadbeef (freed) both
1885 * set at least one of the two lowest order bits, the client can
1886 * test those bits in the move callback to determine whether or
1887 * not it knows about the buffer (assuming that the client also
1888 * sets one of those low order bits whenever it frees a buffer).
1889 */
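/*
 * Illustrative client-side counterpart (the field name below is
 * hypothetical): because any valid pointer is at least word
 * aligned, a move callback can recognize memory it never
 * initialized by testing those low order bits:
 *
 *    if ((uintptr_t)obj->o_ptr_field & 1)
 *            return (KMEM_CBRC_DONT_KNOW);
 */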
1890 if (cp->cache_defrag == NULL ||
1891 (avl_is_empty(&cp->cache_defrag->kmd_moves_pending) &&
1892 !(sp->slab_flags & KMEM_SLAB_MOVE_PENDING))) {
1893 cp->cache_slab_destroy++;
1894 mutex_exit(&cp->cache_lock);
1895 kmem_slab_destroy(cp, sp);
1896 } else {
1897 list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
1898 /*
1899 * Slabs are inserted at both ends of the deadlist to
1900 * distinguish between slabs freed while move callbacks
1901 * are pending (list head) and a slab freed while the
1902 * lock is dropped in kmem_move_buffers() (list tail) so
1903 * that in both cases slab_destroy() is called from the
1904 * right context.
1905 */
1906 if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
1907 list_insert_tail(deadlist, sp);
1908 } else {
1909 list_insert_head(deadlist, sp);
1910 }
1911 cp->cache_defrag->kmd_deadcount++;
1912 mutex_exit(&cp->cache_lock);
1913 }
1914 return;
1915 }
1916
1917 if (bcp->bc_next == NULL) {
1918 /* Transition the slab from completely allocated to partial. */
1919 ASSERT(sp->slab_refcnt == (sp->slab_chunks - 1));
1920 ASSERT(sp->slab_chunks > 1);
1921 list_remove(&cp->cache_complete_slabs, sp);
1922 cp->cache_complete_slab_count--;
1923 avl_add(&cp->cache_partial_slabs, sp);
1924 } else {
1925 #ifdef DEBUG
1926 if (avl_update_gt(&cp->cache_partial_slabs, sp)) {
1927 KMEM_STAT_ADD(kmem_move_stats.kms_avl_update);
1928 } else {
1929 KMEM_STAT_ADD(kmem_move_stats.kms_avl_noupdate);
1930 }
1931 #else
1932 (void) avl_update_gt(&cp->cache_partial_slabs, sp);
1933 #endif
1934 }
1935
1936 ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1937 (cp->cache_complete_slab_count +
1938 avl_numnodes(&cp->cache_partial_slabs) +
1939 (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1940 mutex_exit(&cp->cache_lock);
1941 }
1942
1943 /*
1944 * Return -1 if kmem_error() was called, 1 if the constructor fails, 0 if successful.
1945 */
1946 static int
1947 kmem_cache_alloc_debug(kmem_cache_t *cp, void *buf, int kmflag, int construct,
1948 caddr_t caller)
1949 {
1950 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1951 kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
1952 uint32_t mtbf;
1953
1954 if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
1955 kmem_error(KMERR_BADBUFTAG, cp, buf);
1956 return (-1);
1957 }
1958
1959 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_ALLOC;
1960
1961 if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
1962 kmem_error(KMERR_BADBUFCTL, cp, buf);
1963 return (-1);
1964 }
1965
1966 if (cp->cache_flags & KMF_DEADBEEF) {
1967 if (!construct && (cp->cache_flags & KMF_LITE)) {
1968 if (*(uint64_t *)buf != KMEM_FREE_PATTERN) {
1969 kmem_error(KMERR_MODIFIED, cp, buf);
1970 return (-1);
1971 }
1972 if (cp->cache_constructor != NULL)
1973 *(uint64_t *)buf = btp->bt_redzone;
1974 else
1975 *(uint64_t *)buf = KMEM_UNINITIALIZED_PATTERN;
1976 } else {
1977 construct = 1;
1978 if (verify_and_copy_pattern(KMEM_FREE_PATTERN,
1979 KMEM_UNINITIALIZED_PATTERN, buf,
1980 cp->cache_verify)) {
1981 kmem_error(KMERR_MODIFIED, cp, buf);
1982 return (-1);
1983 }
1984 }
1985 }
1986 btp->bt_redzone = KMEM_REDZONE_PATTERN;
1987
1988 if ((mtbf = kmem_mtbf | cp->cache_mtbf) != 0 &&
1989 gethrtime() % mtbf == 0 &&
1990 (kmflag & (KM_NOSLEEP | KM_PANIC)) == KM_NOSLEEP) {
1991 kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1992 if (!construct && cp->cache_destructor != NULL)
1993 cp->cache_destructor(buf, cp->cache_private);
1994 } else {
1995 mtbf = 0;
1996 }
1997
1998 if (mtbf || (construct && cp->cache_constructor != NULL &&
1999 cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) {
2000 atomic_inc_64(&cp->cache_alloc_fail);
2001 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
2002 if (cp->cache_flags & KMF_DEADBEEF)
2003 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
2004 kmem_slab_free(cp, buf);
2005 return (1);
2006 }
2007
2008 if (cp->cache_flags & KMF_AUDIT) {
2009 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
2010 }
2011
2012 if ((cp->cache_flags & KMF_LITE) &&
2013 !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
2014 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
2015 }
2016
2017 return (0);
2018 }
2019
2020 static int
2021 kmem_cache_free_debug(kmem_cache_t *cp, void *buf, caddr_t caller)
2022 {
2023 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2024 kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
2025 kmem_slab_t *sp;
2026
2027 if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_ALLOC)) {
2028 if (btp->bt_bxstat == ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
2029 kmem_error(KMERR_DUPFREE, cp, buf);
2030 return (-1);
2031 }
2032 sp = kmem_findslab(cp, buf);
2033 if (sp == NULL || sp->slab_cache != cp)
2034 kmem_error(KMERR_BADADDR, cp, buf);
2035 else
2036 kmem_error(KMERR_REDZONE, cp, buf);
2037 return (-1);
2038 }
2039
2040 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
2041
2042 if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
2043 kmem_error(KMERR_BADBUFCTL, cp, buf);
2044 return (-1);
2045 }
2046
2047 if (btp->bt_redzone != KMEM_REDZONE_PATTERN) {
2048 kmem_error(KMERR_REDZONE, cp, buf);
2049 return (-1);
2050 }
2051
2052 if (cp->cache_flags & KMF_AUDIT) {
2053 if (cp->cache_flags & KMF_CONTENTS)
2054 bcp->bc_contents = kmem_log_enter(kmem_content_log,
2055 buf, cp->cache_contents);
2056 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
2057 }
2058
2059 if ((cp->cache_flags & KMF_LITE) &&
2060 !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
2061 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
2062 }
2063
2064 if (cp->cache_flags & KMF_DEADBEEF) {
2065 if (cp->cache_flags & KMF_LITE)
2066 btp->bt_redzone = *(uint64_t *)buf;
2067 else if (cp->cache_destructor != NULL)
2068 cp->cache_destructor(buf, cp->cache_private);
2069
2070 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
2071 }
2072
2073 return (0);
2074 }
2075
2076 /*
2077 * Free each object in magazine mp to cp's slab layer, and free mp itself.
2078 */
2079 static void
2080 kmem_magazine_destroy(kmem_cache_t *cp, kmem_magazine_t *mp, int nrounds)
2081 {
2082 int round;
2083
2084 ASSERT(!list_link_active(&cp->cache_link) ||
2085 taskq_member(kmem_taskq, curthread));
2086
2087 for (round = 0; round < nrounds; round++) {
2088 void *buf = mp->mag_round[round];
2089
2090 if (cp->cache_flags & KMF_DEADBEEF) {
2091 if (verify_pattern(KMEM_FREE_PATTERN, buf,
2092 cp->cache_verify) != NULL) {
2093 kmem_error(KMERR_MODIFIED, cp, buf);
2094 continue;
2095 }
2096 if ((cp->cache_flags & KMF_LITE) &&
2097 cp->cache_destructor != NULL) {
2098 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2099 *(uint64_t *)buf = btp->bt_redzone;
2100 cp->cache_destructor(buf, cp->cache_private);
2101 *(uint64_t *)buf = KMEM_FREE_PATTERN;
2102 }
2103 } else if (cp->cache_destructor != NULL) {
2104 cp->cache_destructor(buf, cp->cache_private);
2105 }
2106
2107 kmem_slab_free(cp, buf);
2108 }
2109 ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2110 kmem_cache_free(cp->cache_magtype->mt_cache, mp);
2111 }
2112
2113 /*
2114 * Allocate a magazine from the depot.
2115 */
2116 static kmem_magazine_t *
2117 kmem_depot_alloc(kmem_cache_t *cp, kmem_maglist_t *mlp)
2118 {
2119 kmem_magazine_t *mp;
2120
2121 /*
2122 * If we can't get the depot lock without contention,
2123 * update our contention count. We use the depot
2124 * contention rate to determine whether we need to
2125 * increase the magazine size for better scalability.
2126 */
2127 if (!mutex_tryenter(&cp->cache_depot_lock)) {
2128 mutex_enter(&cp->cache_depot_lock);
2129 cp->cache_depot_contention++;
2130 }
2131
2132 if ((mp = mlp->ml_list) != NULL) {
2133 ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2134 mlp->ml_list = mp->mag_next;
2135 if (--mlp->ml_total < mlp->ml_min)
2136 mlp->ml_min = mlp->ml_total;
2137 mlp->ml_alloc++;
2138 }
2139
2140 mutex_exit(&cp->cache_depot_lock);
2141
2142 return (mp);
2143 }
2144
2145 /*
2146 * Free a magazine to the depot.
2147 */
2148 static void
2149 kmem_depot_free(kmem_cache_t *cp, kmem_maglist_t *mlp, kmem_magazine_t *mp)
2150 {
2151 mutex_enter(&cp->cache_depot_lock);
2152 ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2153 mp->mag_next = mlp->ml_list;
2154 mlp->ml_list = mp;
2155 mlp->ml_total++;
2156 mutex_exit(&cp->cache_depot_lock);
2157 }
2158
2159 /*
2160 * Update the working set statistics for cp's depot.
2161 */
2162 static void
2163 kmem_depot_ws_update(kmem_cache_t *cp)
2164 {
2165 mutex_enter(&cp->cache_depot_lock);
2166 cp->cache_full.ml_reaplimit = cp->cache_full.ml_min;
2167 cp->cache_full.ml_min = cp->cache_full.ml_total;
2168 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min;
2169 cp->cache_empty.ml_min = cp->cache_empty.ml_total;
2170 mutex_exit(&cp->cache_depot_lock);
2171 }
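
/*
 * Worked example (illustrative): if cp->cache_full had ml_total == 10
 * magazines and dipped to ml_min == 4 since the last update, then
 * ml_reaplimit becomes 4: four full magazines went unused for an entire
 * interval and are eligible for reaping, while ml_min restarts at the
 * current ml_total and decays again as magazines leave the depot.
 */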
2172
2173 /*
2174 * Set the working set statistics for cp's depot to zero. (Everything is
2175 * eligible for reaping.)
2176 */
2177 static void
2178 kmem_depot_ws_zero(kmem_cache_t *cp)
2179 {
2180 mutex_enter(&cp->cache_depot_lock);
2181 cp->cache_full.ml_reaplimit = cp->cache_full.ml_total;
2182 cp->cache_full.ml_min = cp->cache_full.ml_total;
2183 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_total;
2184 cp->cache_empty.ml_min = cp->cache_empty.ml_total;
2185 mutex_exit(&cp->cache_depot_lock);
2186 }
2187
2188 /*
2189 * The number of bytes to reap before we call kpreempt(). The default (1MB)
2190 * causes us to preempt reaping up to hundreds of times per second. Using a
2191 * larger value (1GB) causes this to have virtually no effect.
2192 */
2193 size_t kmem_reap_preempt_bytes = 1024 * 1024;
2194
2195 /*
2196 * Reap all magazines that have fallen out of the depot's working set.
2197 */
2198 static void
2199 kmem_depot_ws_reap(kmem_cache_t *cp)
2200 {
2201 size_t bytes = 0;
2202 long reap;
2203 kmem_magazine_t *mp;
2204
2205 ASSERT(!list_link_active(&cp->cache_link) ||
2206 taskq_member(kmem_taskq, curthread));
2207
2208 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
2209 while (reap-- &&
2210 (mp = kmem_depot_alloc(cp, &cp->cache_full)) != NULL) {
2211 kmem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize);
2212 bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize;
2213 if (bytes > kmem_reap_preempt_bytes) {
2214 kpreempt(KPREEMPT_SYNC);
2215 bytes = 0;
2216 }
2217 }
2218
2219 reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min);
2220 while (reap-- &&
2221 (mp = kmem_depot_alloc(cp, &cp->cache_empty)) != NULL) {
2222 kmem_magazine_destroy(cp, mp, 0);
2223 bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize;
2224 if (bytes > kmem_reap_preempt_bytes) {
2225 kpreempt(KPREEMPT_SYNC);
2226 bytes = 0;
2227 }
2228 }
2229 }
2230
2231 static void
2232 kmem_cpu_reload(kmem_cpu_cache_t *ccp, kmem_magazine_t *mp, int rounds)
2233 {
2234 ASSERT((ccp->cc_loaded == NULL && ccp->cc_rounds == -1) ||
2235 (ccp->cc_loaded && ccp->cc_rounds + rounds == ccp->cc_magsize));
2236 ASSERT(ccp->cc_magsize > 0);
2237
2238 ccp->cc_ploaded = ccp->cc_loaded;
2239 ccp->cc_prounds = ccp->cc_rounds;
2240 ccp->cc_loaded = mp;
2241 ccp->cc_rounds = rounds;
2242 }
2243
2244 /*
2245 * Intercept kmem alloc/free calls during crash dump in order to avoid
2246 * changing kmem state while memory is being saved to the dump device.
2247 * Otherwise, ::kmem_verify will report "corrupt buffers". Note that
2248 * there are no locks because only one CPU calls kmem during a crash
2249 * dump. To enable this feature, first create the associated vmem
2250 * arena with VMC_DUMPSAFE.
2251 */
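/*
 * Illustrative setup (all "foo" names are hypothetical): a subsystem
 * opts in by backing its caches with an arena created with
 * VMC_DUMPSAFE, e.g.:
 *
 *    arena = vmem_create("foo_dumpsafe", NULL, 0, PAGESIZE,
 *        segkmem_alloc, segkmem_free, heap_arena, 0,
 *        VMC_DUMPSAFE | VM_SLEEP);
 *    cp = kmem_cache_create("foo_cache", sizeof (foo_t), 0,
 *        NULL, NULL, NULL, NULL, arena, 0);
 */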
2252 static void *kmem_dump_start; /* start of pre-reserved heap */
2253 static void *kmem_dump_end; /* end of heap area */
2254 static void *kmem_dump_curr; /* current free heap pointer */
2255 static size_t kmem_dump_size; /* size of heap area */
2256
2257 /* appended to each buf created in the pre-reserved heap */
2258 typedef struct kmem_dumpctl {
2259 void *kdc_next; /* cache dump free list linkage */
2260 } kmem_dumpctl_t;
2261
2262 #define KMEM_DUMPCTL(cp, buf) \
2263 ((kmem_dumpctl_t *)P2ROUNDUP((uintptr_t)(buf) + (cp)->cache_bufsize, \
2264 sizeof (void *)))
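
/*
 * Layout example (illustrative): for a cache with cache_bufsize == 24
 * on a 64-bit kernel, KMEM_DUMPCTL(cp, buf) resolves to buf + 24
 * (already pointer-aligned), so each object carved from the dump heap
 * is 24 bytes of client data followed by an 8-byte kdc_next link.
 */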
2265
2266 /* Keep some simple stats. */
2267 #define KMEM_DUMP_LOGS (100)
2268
2269 typedef struct kmem_dump_log {
2270 kmem_cache_t *kdl_cache;
2271 uint_t kdl_allocs; /* # of dump allocations */
2272 uint_t kdl_frees; /* # of dump frees */
2273 uint_t kdl_alloc_fails; /* # of allocation failures */
2274 uint_t kdl_free_nondump; /* # of non-dump frees */
2275 uint_t kdl_unsafe; /* cache was used, but unsafe */
2276 } kmem_dump_log_t;
2277
2278 static kmem_dump_log_t *kmem_dump_log;
2279 static int kmem_dump_log_idx;
2280
2281 #define KDI_LOG(cp, stat) { \
2282 kmem_dump_log_t *kdl; \
2283 if ((kdl = (kmem_dump_log_t *)((cp)->cache_dumplog)) != NULL) { \
2284 kdl->stat++; \
2285 } else if (kmem_dump_log_idx < KMEM_DUMP_LOGS) { \
2286 kdl = &kmem_dump_log[kmem_dump_log_idx++]; \
2287 kdl->stat++; \
2288 kdl->kdl_cache = (cp); \
2289 (cp)->cache_dumplog = kdl; \
2290 } \
2291 }
2292
2293 /* set nonzero for a full report */
2294 uint_t kmem_dump_verbose = 0;
2295
2296 /* stats for oversize heap */
2297 uint_t kmem_dump_oversize_allocs = 0;
2298 uint_t kmem_dump_oversize_max = 0;
2299
2300 static void
2301 kmem_dumppr(char **pp, char *e, const char *format, ...)
2302 {
2303 char *p = *pp;
2304
2305 if (p < e) {
2306 int n;
2307 va_list ap;
2308
2309 va_start(ap, format);
2310 n = vsnprintf(p, e - p, format, ap);
2311 va_end(ap);
2312 *pp = p + n;
2313 }
2314 }
2315
2316 /*
2317 * Called when dumpadm(1M) configures dump parameters.
2318 */
2319 void
2320 kmem_dump_init(size_t size)
2321 {
2322 if (kmem_dump_start != NULL)
2323 kmem_free(kmem_dump_start, kmem_dump_size);
2324
2325 if (kmem_dump_log == NULL)
2326 kmem_dump_log = (kmem_dump_log_t *)kmem_zalloc(KMEM_DUMP_LOGS *
2327 sizeof (kmem_dump_log_t), KM_SLEEP);
2328
2329 kmem_dump_start = kmem_alloc(size, KM_SLEEP);
2330
2331 if (kmem_dump_start != NULL) {
2332 kmem_dump_size = size;
2333 kmem_dump_curr = kmem_dump_start;
2334 kmem_dump_end = (void *)((char *)kmem_dump_start + size);
2335 copy_pattern(KMEM_UNINITIALIZED_PATTERN, kmem_dump_start, size);
2336 } else {
2337 kmem_dump_size = 0;
2338 kmem_dump_curr = NULL;
2339 kmem_dump_end = NULL;
2340 }
2341 }
2342
2343 /*
2344 * Set a flag for each kmem_cache_t indicating whether it is safe to use
2345 * alternate dump memory. Called just before the panic crash dump starts.
2346 * Sets the flags for the calling CPU.
2347 */
2348 void
2349 kmem_dump_begin(void)
2350 {
2351 ASSERT(panicstr != NULL);
2352 if (kmem_dump_start != NULL) {
2353 kmem_cache_t *cp;
2354
2355 for (cp = list_head(&kmem_caches); cp != NULL;
2356 cp = list_next(&kmem_caches, cp)) {
2357 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2358
2359 if (cp->cache_arena->vm_cflags & VMC_DUMPSAFE) {
2360 cp->cache_flags |= KMF_DUMPDIVERT;
2361 ccp->cc_flags |= KMF_DUMPDIVERT;
2362 ccp->cc_dump_rounds = ccp->cc_rounds;
2363 ccp->cc_dump_prounds = ccp->cc_prounds;
2364 ccp->cc_rounds = ccp->cc_prounds = -1;
2365 } else {
2366 cp->cache_flags |= KMF_DUMPUNSAFE;
2367 ccp->cc_flags |= KMF_DUMPUNSAFE;
2368 }
2369 }
2370 }
2371 }
2372
2373 /*
2374 * Finish the dump intercept: print any warnings on the console and
2375 * return verbose information to dumpsys() in the given buffer.
2377 */
2378 size_t
2379 kmem_dump_finish(char *buf, size_t size)
2380 {
2381 int kdi_idx;
2382 int kdi_end = kmem_dump_log_idx;
2383 int percent = 0;
2384 int header = 0;
2385 int warn = 0;
2386 size_t used;
2387 kmem_cache_t *cp;
2388 kmem_dump_log_t *kdl;
2389 char *e = buf + size;
2390 char *p = buf;
2391
2392 if (kmem_dump_size == 0 || kmem_dump_verbose == 0)
2393 return (0);
2394
2395 used = (char *)kmem_dump_curr - (char *)kmem_dump_start;
2396 percent = (used * 100) / kmem_dump_size;
2397
2398 kmem_dumppr(&p, e, "%% heap used,%d\n", percent);
2399 kmem_dumppr(&p, e, "used bytes,%ld\n", used);
2400 kmem_dumppr(&p, e, "heap size,%ld\n", kmem_dump_size);
2401 kmem_dumppr(&p, e, "Oversize allocs,%d\n",
2402 kmem_dump_oversize_allocs);
2403 kmem_dumppr(&p, e, "Oversize max size,%ld\n",
2404 kmem_dump_oversize_max);
2405
2406 for (kdi_idx = 0; kdi_idx < kdi_end; kdi_idx++) {
2407 kdl = &kmem_dump_log[kdi_idx];
2408 cp = kdl->kdl_cache;
2409 if (cp == NULL)
2410 break;
2411 if (kdl->kdl_alloc_fails)
2412 ++warn;
2413 if (header == 0) {
2414 kmem_dumppr(&p, e,
2415 "Cache Name,Allocs,Frees,Alloc Fails,"
2416 "Nondump Frees,Unsafe Allocs/Frees\n");
2417 header = 1;
2418 }
2419 kmem_dumppr(&p, e, "%s,%d,%d,%d,%d,%d\n",
2420 cp->cache_name, kdl->kdl_allocs, kdl->kdl_frees,
2421 kdl->kdl_alloc_fails, kdl->kdl_free_nondump,
2422 kdl->kdl_unsafe);
2423 }
2424
2425 /* return buffer size used */
2426 if (p < e)
2427 bzero(p, e - p);
2428 return (p - buf);
2429 }
2430
2431 /*
2432 * Allocate a constructed object from alternate dump memory.
2433 */
2434 void *
2435 kmem_cache_alloc_dump(kmem_cache_t *cp, int kmflag)
2436 {
2437 void *buf;
2438 void *curr;
2439 char *bufend;
2440
2441 /* return a constructed object */
2442 if ((buf = cp->cache_dumpfreelist) != NULL) {
2443 cp->cache_dumpfreelist = KMEM_DUMPCTL(cp, buf)->kdc_next;
2444 KDI_LOG(cp, kdl_allocs);
2445 return (buf);
2446 }
2447
2448 /* create a new constructed object */
2449 curr = kmem_dump_curr;
2450 buf = (void *)P2ROUNDUP((uintptr_t)curr, cp->cache_align);
2451 bufend = (char *)KMEM_DUMPCTL(cp, buf) + sizeof (kmem_dumpctl_t);
2452
2453 /* hat layer objects cannot cross a page boundary */
2454 if (cp->cache_align < PAGESIZE) {
2455 char *page = (char *)P2ROUNDUP((uintptr_t)buf, PAGESIZE);
2456 if (bufend > page) {
2457 bufend += page - (char *)buf;
2458 buf = (void *)page;
2459 }
2460 }
2461
2462 /* fall back to normal alloc if reserved area is used up */
2463 if (bufend > (char *)kmem_dump_end) {
2464 kmem_dump_curr = kmem_dump_end;
2465 KDI_LOG(cp, kdl_alloc_fails);
2466 return (NULL);
2467 }
2468
2469 /*
2470 * Must advance curr pointer before calling a constructor that
2471 * may also allocate memory.
2472 */
2473 kmem_dump_curr = bufend;
2474
2475 /* run constructor */
2476 if (cp->cache_constructor != NULL &&
2477 cp->cache_constructor(buf, cp->cache_private, kmflag)
2478 != 0) {
2479 #ifdef DEBUG
2480 printf("name='%s' cache=0x%p: kmem cache constructor failed\n",
2481 cp->cache_name, (void *)cp);
2482 #endif
2483 /* reset curr pointer iff no allocs were done */
2484 if (kmem_dump_curr == bufend)
2485 kmem_dump_curr = curr;
2486
2487 /* fall back to normal alloc if the constructor fails */
2488 KDI_LOG(cp, kdl_alloc_fails);
2489 return (NULL);
2490 }
2491
2492 KDI_LOG(cp, kdl_allocs);
2493 return (buf);
2494 }
2495
2496 /*
2497 * Free a constructed object in alternate dump memory.
2498 */
2499 int
2500 kmem_cache_free_dump(kmem_cache_t *cp, void *buf)
2501 {
2502 /* save constructed buffers for next time */
2503 if ((char *)buf >= (char *)kmem_dump_start &&
2504 (char *)buf < (char *)kmem_dump_end) {
2505 KMEM_DUMPCTL(cp, buf)->kdc_next = cp->cache_dumpfreelist;
2506 cp->cache_dumpfreelist = buf;
2507 KDI_LOG(cp, kdl_frees);
2508 return (0);
2509 }
2510
2511 /* count all non-dump buf frees */
2512 KDI_LOG(cp, kdl_free_nondump);
2513
2514 /* just drop buffers that were allocated before dump started */
2515 if (kmem_dump_curr < kmem_dump_end)
2516 return (0);
2517
2518 /* fall back to normal free if reserved area is used up */
2519 return (1);
2520 }
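
/*
 * Typical client usage of the object-cache API implemented below (the
 * "foo" names are hypothetical):
 *
 *    cp = kmem_cache_create("foo_cache", sizeof (foo_t), 0,
 *        foo_constructor, foo_destructor, NULL, NULL, NULL, 0);
 *    foo = kmem_cache_alloc(cp, KM_SLEEP);
 *    ...
 *    kmem_cache_free(cp, foo);
 *    kmem_cache_destroy(cp);
 */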
2521
2522 /*
2523 * Allocate a constructed object from cache cp.
2524 */
2525 void *
2526 kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
2527 {
2528 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2529 kmem_magazine_t *fmp;
2530 void *buf;
2531
2532 mutex_enter(&ccp->cc_lock);
2533 for (;;) {
2534 /*
2535 * If there's an object available in the current CPU's
2536 * loaded magazine, just take it and return.
2537 */
2538 if (ccp->cc_rounds > 0) {
2539 buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds];
2540 ccp->cc_alloc++;
2541 mutex_exit(&ccp->cc_lock);
2542 if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPUNSAFE)) {
2543 if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2544 ASSERT(!(ccp->cc_flags &
2545 KMF_DUMPDIVERT));
2546 KDI_LOG(cp, kdl_unsafe);
2547 }
2548 if ((ccp->cc_flags & KMF_BUFTAG) &&
2549 kmem_cache_alloc_debug(cp, buf, kmflag, 0,
2550 caller()) != 0) {
2551 if (kmflag & KM_NOSLEEP)
2552 return (NULL);
2553 mutex_enter(&ccp->cc_lock);
2554 continue;
2555 }
2556 }
2557 return (buf);
2558 }
2559
2560 /*
2561 * The loaded magazine is empty. If the previously loaded
2562 * magazine was full, exchange them and try again.
2563 */
2564 if (ccp->cc_prounds > 0) {
2565 kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2566 continue;
2567 }
2568
2569 /*
2570 * Return an alternate buffer at dump time to preserve
2571 * the heap.
2572 */
2573 if (ccp->cc_flags & (KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2574 if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2575 ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2576 /* log it so that we can warn about it */
2577 KDI_LOG(cp, kdl_unsafe);
2578 } else {
2579 if ((buf = kmem_cache_alloc_dump(cp, kmflag)) !=
2580 NULL) {
2581 mutex_exit(&ccp->cc_lock);
2582 return (buf);
2583 }
2584 break; /* fall back to slab layer */
2585 }
2586 }
2587
2588 /*
2589 * If the magazine layer is disabled, break out now.
2590 */
2591 if (ccp->cc_magsize == 0)
2592 break;
2593
2594 /*
2595 * Try to get a full magazine from the depot.
2596 */
2597 fmp = kmem_depot_alloc(cp, &cp->cache_full);
2598 if (fmp != NULL) {
2599 if (ccp->cc_ploaded != NULL)
2600 kmem_depot_free(cp, &cp->cache_empty,
2601 ccp->cc_ploaded);
2602 kmem_cpu_reload(ccp, fmp, ccp->cc_magsize);
2603 continue;
2604 }
2605
2606 /*
2607 * There are no full magazines in the depot,
2608 * so fall through to the slab layer.
2609 */
2610 break;
2611 }
2612 mutex_exit(&ccp->cc_lock);
2613
2614 /*
2615 * We couldn't allocate a constructed object from the magazine layer,
2616 * so get a raw buffer from the slab layer and apply its constructor.
2617 */
2618 buf = kmem_slab_alloc(cp, kmflag);
2619
2620 if (buf == NULL)
2621 return (NULL);
2622
2623 if (cp->cache_flags & KMF_BUFTAG) {
2624 /*
2625 * Make kmem_cache_alloc_debug() apply the constructor for us.
2626 */
2627 int rc = kmem_cache_alloc_debug(cp, buf, kmflag, 1, caller());
2628 if (rc != 0) {
2629 if (kmflag & KM_NOSLEEP)
2630 return (NULL);
2631 /*
2632 * kmem_cache_alloc_debug() detected corruption
2633 * but didn't panic (kmem_panic <= 0). We should not be
2634 * here because the constructor failed (indicated by a
2635 * return code of 1). Try again.
2636 */
2637 ASSERT(rc == -1);
2638 return (kmem_cache_alloc(cp, kmflag));
2639 }
2640 return (buf);
2641 }
2642
2643 if (cp->cache_constructor != NULL &&
2644 cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) {
2645 atomic_inc_64(&cp->cache_alloc_fail);
2646 kmem_slab_free(cp, buf);
2647 return (NULL);
2648 }
2649
2650 return (buf);
2651 }
2652
2653 /*
2654 * The freed argument tells whether or not kmem_cache_free_debug() has already
2655 * been called so that we can avoid the duplicate free error. For example, a
2656 * buffer on a magazine has already been freed by the client but is still
2657 * constructed.
2658 */
2659 static void
2660 kmem_slab_free_constructed(kmem_cache_t *cp, void *buf, boolean_t freed)
2661 {
2662 if (!freed && (cp->cache_flags & KMF_BUFTAG))
2663 if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2664 return;
2665
2666 /*
2667 * Note that if KMF_DEADBEEF is in effect and KMF_LITE is not,
2668 * kmem_cache_free_debug() will have already applied the destructor.
2669 */
2670 if ((cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) != KMF_DEADBEEF &&
2671 cp->cache_destructor != NULL) {
2672 if (cp->cache_flags & KMF_DEADBEEF) { /* KMF_LITE implied */
2673 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2674 *(uint64_t *)buf = btp->bt_redzone;
2675 cp->cache_destructor(buf, cp->cache_private);
2676 *(uint64_t *)buf = KMEM_FREE_PATTERN;
2677 } else {
2678 cp->cache_destructor(buf, cp->cache_private);
2679 }
2680 }
2681
2682 kmem_slab_free(cp, buf);
2683 }
2684
2685 /*
2686 * Used when there's no room to free a buffer to the per-CPU cache.
2687 * Drops and re-acquires &ccp->cc_lock, and returns non-zero if the
2688 * caller should try freeing to the per-CPU cache again.
2689 * Note that we don't directly install the magazine in the cpu cache,
2690 * since its state may have changed wildly while the lock was dropped.
2691 */
2692 static int
2693 kmem_cpucache_magazine_alloc(kmem_cpu_cache_t *ccp, kmem_cache_t *cp)
2694 {
2695 kmem_magazine_t *emp;
2696 kmem_magtype_t *mtp;
2697
2698 ASSERT(MUTEX_HELD(&ccp->cc_lock));
2699 ASSERT(((uint_t)ccp->cc_rounds == ccp->cc_magsize ||
2700 ((uint_t)ccp->cc_rounds == -1)) &&
2701 ((uint_t)ccp->cc_prounds == ccp->cc_magsize ||
2702 ((uint_t)ccp->cc_prounds == -1)));
2703
2704 emp = kmem_depot_alloc(cp, &cp->cache_empty);
2705 if (emp != NULL) {
2706 if (ccp->cc_ploaded != NULL)
2707 kmem_depot_free(cp, &cp->cache_full,
2708 ccp->cc_ploaded);
2709 kmem_cpu_reload(ccp, emp, 0);
2710 return (1);
2711 }
2712 /*
2713 * There are no empty magazines in the depot,
2714 * so try to allocate a new one. We must drop all locks
2715 * across kmem_cache_alloc() because lower layers may
2716 * attempt to allocate from this cache.
2717 */
2718 mtp = cp->cache_magtype;
2719 mutex_exit(&ccp->cc_lock);
2720 emp = kmem_cache_alloc(mtp->mt_cache, KM_NOSLEEP);
2721 mutex_enter(&ccp->cc_lock);
2722
2723 if (emp != NULL) {
2724 /*
2725 * We successfully allocated an empty magazine.
2726 * However, we had to drop ccp->cc_lock to do it,
2727 * so the cache's magazine size may have changed.
2728 * If so, free the magazine and try again.
2729 */
2730 if (ccp->cc_magsize != mtp->mt_magsize) {
2731 mutex_exit(&ccp->cc_lock);
2732 kmem_cache_free(mtp->mt_cache, emp);
2733 mutex_enter(&ccp->cc_lock);
2734 return (1);
2735 }
2736
2737 /*
2738 * We got a magazine of the right size. Add it to
2739 * the depot and try the whole dance again.
2740 */
2741 kmem_depot_free(cp, &cp->cache_empty, emp);
2742 return (1);
2743 }
2744
2745 /*
2746 * We couldn't allocate an empty magazine,
2747 * so fall through to the slab layer.
2748 */
2749 return (0);
2750 }
2751
2752 /*
2753 * Free a constructed object to cache cp.
2754 */
2755 void
2756 kmem_cache_free(kmem_cache_t *cp, void *buf)
2757 {
2758 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2759
2760 /*
2761 * The client must not free either of the buffers passed to the move
2762 * callback function.
2763 */
2764 ASSERT(cp->cache_defrag == NULL ||
2765 cp->cache_defrag->kmd_thread != curthread ||
2766 (buf != cp->cache_defrag->kmd_from_buf &&
2767 buf != cp->cache_defrag->kmd_to_buf));
2768
2769 if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2770 if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2771 ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2772 /* log it so that we can warn about it */
2773 KDI_LOG(cp, kdl_unsafe);
2774 } else if (KMEM_DUMPCC(ccp) && !kmem_cache_free_dump(cp, buf)) {
2775 return;
2776 }
2777 if (ccp->cc_flags & KMF_BUFTAG) {
2778 if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2779 return;
2780 }
2781 }
2782
2783 mutex_enter(&ccp->cc_lock);
2784 /*
2785 * Any changes to this logic should be reflected in kmem_slab_prefill()
2786 */
2787 for (;;) {
2788 /*
2789 * If there's a slot available in the current CPU's
2790 * loaded magazine, just put the object there and return.
2791 */
2792 if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2793 ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf;
2794 ccp->cc_free++;
2795 mutex_exit(&ccp->cc_lock);
2796 return;
2797 }
2798
2799 /*
2800 * The loaded magazine is full. If the previously loaded
2801 * magazine was empty, exchange them and try again.
2802 */
2803 if (ccp->cc_prounds == 0) {
2804 kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2805 continue;
2806 }
2807
2808 /*
2809 * If the magazine layer is disabled, break out now.
2810 */
2811 if (ccp->cc_magsize == 0)
2812 break;
2813
2814 if (!kmem_cpucache_magazine_alloc(ccp, cp)) {
2815 /*
2816 * We couldn't free our constructed object to the
2817 * magazine layer, so apply its destructor and free it
2818 * to the slab layer.
2819 */
2820 break;
2821 }
2822 }
2823 mutex_exit(&ccp->cc_lock);
2824 kmem_slab_free_constructed(cp, buf, B_TRUE);
2825 }
2826
2827 static void
2828 kmem_slab_prefill(kmem_cache_t *cp, kmem_slab_t *sp)
2829 {
2830 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2831 int cache_flags = cp->cache_flags;
2832
2833 kmem_bufctl_t *next, *head;
2834 size_t nbufs;
2835
2836 /*
2837 * Completely allocate the newly created slab and put the pre-allocated
2838 * buffers in magazines. Any of the buffers that cannot be put in
2839 * magazines must be returned to the slab.
2840 */
2841 ASSERT(MUTEX_HELD(&cp->cache_lock));
2842 ASSERT((cache_flags & (KMF_PREFILL|KMF_BUFTAG)) == KMF_PREFILL);
2843 ASSERT(cp->cache_constructor == NULL);
2844 ASSERT(sp->slab_cache == cp);
2845 ASSERT(sp->slab_refcnt == 1);
2846 ASSERT(sp->slab_head != NULL && sp->slab_chunks > sp->slab_refcnt);
2847 ASSERT(avl_find(&cp->cache_partial_slabs, sp, NULL) == NULL);
2848
2849 head = sp->slab_head;
2850 nbufs = (sp->slab_chunks - sp->slab_refcnt);
2851 sp->slab_head = NULL;
2852 sp->slab_refcnt += nbufs;
2853 cp->cache_bufslab -= nbufs;
2854 cp->cache_slab_alloc += nbufs;
2855 list_insert_head(&cp->cache_complete_slabs, sp);
2856 cp->cache_complete_slab_count++;
2857 mutex_exit(&cp->cache_lock);
2858 mutex_enter(&ccp->cc_lock);
2859
2860 while (head != NULL) {
2861 void *buf = KMEM_BUF(cp, head);
2862 /*
2863 * If there's a slot available in the current CPU's
2864 * loaded magazine, just put the object there and
2865 * continue.
2866 */
2867 if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2868 ccp->cc_loaded->mag_round[ccp->cc_rounds++] =
2869 buf;
2870 ccp->cc_free++;
2871 nbufs--;
2872 head = head->bc_next;
2873 continue;
2874 }
2875
2876 /*
2877 * The loaded magazine is full. If the previously
2878 * loaded magazine was empty, exchange them and try
2879 * again.
2880 */
2881 if (ccp->cc_prounds == 0) {
2882 kmem_cpu_reload(ccp, ccp->cc_ploaded,
2883 ccp->cc_prounds);
2884 continue;
2885 }
2886
2887 /*
2888 * If the magazine layer is disabled, break out now.
2889 */
2891 if (ccp->cc_magsize == 0) {
2892 break;
2893 }
2894
2895 if (!kmem_cpucache_magazine_alloc(ccp, cp))
2896 break;
2897 }
2898 mutex_exit(&ccp->cc_lock);
2899 if (nbufs != 0) {
2900 ASSERT(head != NULL);
2901
2902 /*
2903 * If there was a failure, return the remaining objects to
2904 * the slab.
2905 */
2906 while (head != NULL) {
2907 ASSERT(nbufs != 0);
2908 next = head->bc_next;
2909 head->bc_next = NULL;
2910 kmem_slab_free(cp, KMEM_BUF(cp, head));
2911 head = next;
2912 nbufs--;
2913 }
2914 }
2915 ASSERT(head == NULL);
2916 ASSERT(nbufs == 0);
2917 mutex_enter(&cp->cache_lock);
2918 }
2919
2920 void *
2921 kmem_zalloc(size_t size, int kmflag)
2922 {
2923 size_t index;
2924 void *buf;
2925
2926 if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) {
2927 kmem_cache_t *cp = kmem_alloc_table[index];
2928 buf = kmem_cache_alloc(cp, kmflag);
2929 if (buf != NULL) {
2930 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
2931 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2932 ((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
2933 ((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
2934
2935 if (cp->cache_flags & KMF_LITE) {
2936 KMEM_BUFTAG_LITE_ENTER(btp,
2937 kmem_lite_count, caller());
2938 }
2939 }
2940 bzero(buf, size);
2941 }
2942 } else {
2943 buf = kmem_alloc(size, kmflag);
2944 if (buf != NULL)
2945 bzero(buf, size);
2946 }
2947 return (buf);
2948 }
2949
2950 void *
2951 kmem_alloc(size_t size, int kmflag)
2952 {
2953 size_t index;
2954 kmem_cache_t *cp;
2955 void *buf;
2956
2957 if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) {
2958 cp = kmem_alloc_table[index];
2959 /* fall through to kmem_cache_alloc() */
2960
2961 } else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
2962 kmem_big_alloc_table_max) {
2963 cp = kmem_big_alloc_table[index];
2964 /* fall through to kmem_cache_alloc() */
2965
2966 } else {
2967 if (size == 0)
2968 return (NULL);
2969
2970 buf = vmem_alloc(kmem_oversize_arena, size,
2971 kmflag & KM_VMFLAGS);
2972 if (buf == NULL)
2973 kmem_log_event(kmem_failure_log, NULL, NULL,
2974 (void *)size);
2975 else if (KMEM_DUMP(kmem_slab_cache)) {
2976 /* stats for dump intercept */
2977 kmem_dump_oversize_allocs++;
2978 if (size > kmem_dump_oversize_max)
2979 kmem_dump_oversize_max = size;
2980 }
2981 return (buf);
2982 }
2983
2984 buf = kmem_cache_alloc(cp, kmflag);
2985 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp) && buf != NULL) {
2986 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2987 ((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
2988 ((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
2989
2990 if (cp->cache_flags & KMF_LITE) {
2991 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller());
2992 }
2993 }
2994 return (buf);
2995 }
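
/*
 * Worked example of the table lookup above (the resulting cache is
 * illustrative): with KMEM_ALIGN_SHIFT == 3, a 100-byte request maps to
 * index (100 - 1) >> 3 == 12, and kmem_alloc_table[12] points at the
 * smallest fixed-size kmem_alloc cache whose buffers hold at least
 * 104 bytes.
 */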
2996
2997 void
2998 kmem_free(void *buf, size_t size)
2999 {
3000 size_t index;
3001 kmem_cache_t *cp;
3002
3003 if ((index = (size - 1) >> KMEM_ALIGN_SHIFT) < KMEM_ALLOC_TABLE_MAX) {
3004 cp = kmem_alloc_table[index];
3005 /* fall through to kmem_cache_free() */
3006
3007 } else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
3008 kmem_big_alloc_table_max) {
3009 cp = kmem_big_alloc_table[index];
3010 /* fall through to kmem_cache_free() */
3011
3012 } else {
3013 EQUIV(buf == NULL, size == 0);
3014 if (buf == NULL && size == 0)
3015 return;
3016 vmem_free(kmem_oversize_arena, buf, size);
3017 return;
3018 }
3019
3020 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
3021 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
3022 uint32_t *ip = (uint32_t *)btp;
3023 if (ip[1] != KMEM_SIZE_ENCODE(size)) {
3024 if (*(uint64_t *)buf == KMEM_FREE_PATTERN) {
3025 kmem_error(KMERR_DUPFREE, cp, buf);
3026 return;
3027 }
3028 if (KMEM_SIZE_VALID(ip[1])) {
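				/*
				 * The encoded size is valid but does not
				 * match the size being freed; record the
				 * bad size so the error handler can
				 * report it.
				 */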
3029 ip[0] = KMEM_SIZE_ENCODE(size);
3030 kmem_error(KMERR_BADSIZE, cp, buf);
3031 } else {
3032 kmem_error(KMERR_REDZONE, cp, buf);
3033 }
3034 return;
3035 }
3036 if (((uint8_t *)buf)[size] != KMEM_REDZONE_BYTE) {
3037 kmem_error(KMERR_REDZONE, cp, buf);
3038 return;
3039 }
3040 btp->bt_redzone = KMEM_REDZONE_PATTERN;
3041 if (cp->cache_flags & KMF_LITE) {
3042 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count,
3043 caller());
3044 }
3045 }
3046 kmem_cache_free(cp, buf);
3047 }
3048
3049 void *
3050 kmem_firewall_va_alloc(vmem_t *vmp, size_t size, int vmflag)
3051 {
3052 size_t realsize = size + vmp->vm_quantum;
3053 void *addr;
3054
3055 /*
3056 * Annoying edge case: if 'size' is just shy of ULONG_MAX, adding
3057 * vm_quantum will cause integer wraparound. Check for this, and
3058 * blow off the firewall page in this case. Note that such a
3059 * giant allocation (the entire kernel address space) can never
3060 * be satisfied, so it will either fail immediately (VM_NOSLEEP)
3061 * or sleep forever (VM_SLEEP). Thus, there is no need for a
3062 * corresponding check in kmem_firewall_va_free().
3063 */
3064 if (realsize < size)
3065 realsize = size;
3066
3067 /*
3068 * While boot still owns resource management, make sure that this
3069 * redzone virtual address allocation is properly accounted for in
	 * OBP's "virtual-memory" "available" lists, because we're effectively
	 * claiming those addresses for a red zone. If we don't do this, the
	 * available lists become too fragmented and too large for the
3073 * current boot/kernel memory list interface.
3074 */
3075 addr = vmem_alloc(vmp, realsize, vmflag | VM_NEXTFIT);
3076
3077 if (addr != NULL && kvseg.s_base == NULL && realsize != size)
3078 (void) boot_virt_alloc((char *)addr + size, vmp->vm_quantum);
3079
3080 return (addr);
3081 }
3082
3083 void
3084 kmem_firewall_va_free(vmem_t *vmp, void *addr, size_t size)
3085 {
3086 ASSERT((kvseg.s_base == NULL ?
3087 va_to_pfn((char *)addr + size) :
3088 hat_getpfnum(kas.a_hat, (caddr_t)addr + size)) == PFN_INVALID);
3089
3090 vmem_free(vmp, addr, size + vmp->vm_quantum);
3091 }
3092
3093 /*
3094 * Try to allocate at least `size' bytes of memory without sleeping or
3095 * panicking. Return actual allocated size in `asize'. If allocation failed,
3096 * try final allocation with sleep or panic allowed.
3097 */
3098 void *
3099 kmem_alloc_tryhard(size_t size, size_t *asize, int kmflag)
3100 {
3101 void *p;
3102
3103 *asize = P2ROUNDUP(size, KMEM_ALIGN);
3104 do {
3105 p = kmem_alloc(*asize, (kmflag | KM_NOSLEEP) & ~KM_PANIC);
3106 if (p != NULL)
3107 return (p);
3108 *asize += KMEM_ALIGN;
3109 } while (*asize <= PAGESIZE);
3110
3111 *asize = P2ROUNDUP(size, KMEM_ALIGN);
3112 return (kmem_alloc(*asize, kmflag));
3113 }
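
/*
 * Illustrative use (hypothetical caller): since the allocation may be
 * rounded up, the buffer must be freed with the returned size, not the
 * requested one:
 *
 *	size_t asize;
 *	void *p = kmem_alloc_tryhard(len, &asize, KM_SLEEP);
 *	...
 *	kmem_free(p, asize);
 */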
3114
3115 /*
3116 * Reclaim all unused memory from a cache.
3117 */
3118 static void
3119 kmem_cache_reap(kmem_cache_t *cp)
3120 {
3121 ASSERT(taskq_member(kmem_taskq, curthread));
3122 cp->cache_reap++;
3123
3124 /*
3125 * Ask the cache's owner to free some memory if possible.
3126 * The idea is to handle things like the inode cache, which
3127 * typically sits on a bunch of memory that it doesn't truly
3128 * *need*. Reclaim policy is entirely up to the owner; this
3129 * callback is just an advisory plea for help.
3130 */
3131 if (cp->cache_reclaim != NULL) {
3132 long delta;
3133
3134 /*
3135 * Reclaimed memory should be reapable (not included in the
3136 * depot's working set).
3137 */
3138 delta = cp->cache_full.ml_total;
3139 cp->cache_reclaim(cp->cache_private);
3140 delta = cp->cache_full.ml_total - delta;
3141 if (delta > 0) {
3142 mutex_enter(&cp->cache_depot_lock);
3143 cp->cache_full.ml_reaplimit += delta;
3144 cp->cache_full.ml_min += delta;
3145 mutex_exit(&cp->cache_depot_lock);
3146 }
3147 }
3148
3149 kmem_depot_ws_reap(cp);
3150
3151 if (cp->cache_defrag != NULL && !kmem_move_noreap) {
3152 kmem_cache_defrag(cp);
3153 }
3154 }
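
/*
 * A minimal sketch of a client reclaim callback (the foo_* names are
 * hypothetical): the owner trims a private freelist, returning buffers
 * to the cache so that the depot reap which follows can recover slabs:
 *
 *	static void
 *	foo_reclaim(void *private)
 *	{
 *		foo_state_t *fsp = private;	(the cache_private arg)
 *		foo_t *fp;
 *
 *		while ((fp = foo_freelist_remove(fsp)) != NULL)
 *			kmem_cache_free(fsp->fs_cache, fp);
 *	}
 */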
3155
3156 static void
3157 kmem_reap_timeout(void *flag_arg)
3158 {
3159 uint32_t *flag = (uint32_t *)flag_arg;
3160
3161 ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
3162 *flag = 0;
3163 }
3164
3165 static void
3166 kmem_reap_done(void *flag)
3167 {
3168 if (!callout_init_done) {
3169 /* can't schedule a timeout at this point */
3170 kmem_reap_timeout(flag);
3171 } else {
3172 (void) timeout(kmem_reap_timeout, flag, kmem_reap_interval);
3173 }
3174 }
3175
3176 static void
3177 kmem_reap_start(void *flag)
3178 {
3179 ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
3180
3181 if (flag == &kmem_reaping) {
3182 kmem_cache_applyall(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);
3183 /*
3184 * if we have segkp under heap, reap segkp cache.
3185 */
3186 if (segkp_fromheap)
3187 segkp_cache_free();
	} else {
		kmem_cache_applyall_id(kmem_cache_reap, kmem_taskq,
		    TQ_NOSLEEP);
	}
3191
3192 /*
3193 * We use taskq_dispatch() to schedule a timeout to clear
3194 * the flag so that kmem_reap() becomes self-throttling:
3195 * we won't reap again until the current reap completes *and*
3196 * at least kmem_reap_interval ticks have elapsed.
3197 */
3198 if (!taskq_dispatch(kmem_taskq, kmem_reap_done, flag, TQ_NOSLEEP))
3199 kmem_reap_done(flag);
3200 }
3201
3202 static void
3203 kmem_reap_common(void *flag_arg)
3204 {
3205 uint32_t *flag = (uint32_t *)flag_arg;
3206
3207 if (MUTEX_HELD(&kmem_cache_lock) || kmem_taskq == NULL ||
3208 atomic_cas_32(flag, 0, 1) != 0)
3209 return;
3210
3211 /*
3212 * It may not be kosher to do memory allocation when a reap is called
3213 * (for example, if vmem_populate() is in the call chain). So we
3214 * start the reap going with a TQ_NOALLOC dispatch. If the dispatch
3215 * fails, we reset the flag, and the next reap will try again.
3216 */
3217 if (!taskq_dispatch(kmem_taskq, kmem_reap_start, flag, TQ_NOALLOC))
3218 *flag = 0;
3219 }
3220
3221 /*
3222 * Reclaim all unused memory from all caches. Called from the VM system
3223 * when memory gets tight.
3224 */
3225 void
3226 kmem_reap(void)
3227 {
3228 kmem_reap_common(&kmem_reaping);
3229 }
3230
3231 /*
3232 * Reclaim all unused memory from identifier arenas, called when a vmem
 * arena not backed by memory is exhausted. Since reaping memory-backed caches
3234 * cannot help with identifier exhaustion, we avoid both a large amount of
3235 * work and unwanted side-effects from reclaim callbacks.
3236 */
3237 void
3238 kmem_reap_idspace(void)
3239 {
3240 kmem_reap_common(&kmem_reaping_idspace);
3241 }
3242
3243 /*
3244 * Purge all magazines from a cache and set its magazine limit to zero.
3245 * All calls are serialized by the kmem_taskq lock, except for the final
3246 * call from kmem_cache_destroy().
3247 */
3248 static void
3249 kmem_cache_magazine_purge(kmem_cache_t *cp)
3250 {
3251 kmem_cpu_cache_t *ccp;
3252 kmem_magazine_t *mp, *pmp;
3253 int rounds, prounds, cpu_seqid;
3254
3255 ASSERT(!list_link_active(&cp->cache_link) ||
3256 taskq_member(kmem_taskq, curthread));
3257 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
3258
3259 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3260 ccp = &cp->cache_cpu[cpu_seqid];
3261
3262 mutex_enter(&ccp->cc_lock);
3263 mp = ccp->cc_loaded;
3264 pmp = ccp->cc_ploaded;
3265 rounds = ccp->cc_rounds;
3266 prounds = ccp->cc_prounds;
3267 ccp->cc_loaded = NULL;
3268 ccp->cc_ploaded = NULL;
3269 ccp->cc_rounds = -1;
3270 ccp->cc_prounds = -1;
3271 ccp->cc_magsize = 0;
3272 mutex_exit(&ccp->cc_lock);
3273
3274 if (mp)
3275 kmem_magazine_destroy(cp, mp, rounds);
3276 if (pmp)
3277 kmem_magazine_destroy(cp, pmp, prounds);
3278 }
3279
3280 kmem_depot_ws_zero(cp);
3281 kmem_depot_ws_reap(cp);
3282 }
3283
3284 /*
3285 * Enable per-cpu magazines on a cache.
3286 */
3287 static void
3288 kmem_cache_magazine_enable(kmem_cache_t *cp)
3289 {
3290 int cpu_seqid;
3291
3292 if (cp->cache_flags & KMF_NOMAGAZINE)
3293 return;
3294
3295 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3296 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3297 mutex_enter(&ccp->cc_lock);
3298 ccp->cc_magsize = cp->cache_magtype->mt_magsize;
3299 mutex_exit(&ccp->cc_lock);
	}
}
3303
3304 /*
3305 * Reap (almost) everything right now.
3306 */
3307 void
3308 kmem_cache_reap_now(kmem_cache_t *cp)
3309 {
3310 ASSERT(list_link_active(&cp->cache_link));
3311
3312 kmem_depot_ws_zero(cp);
3313
3314 (void) taskq_dispatch(kmem_taskq,
3315 (task_func_t *)kmem_depot_ws_reap, cp, TQ_SLEEP);
3316 taskq_wait(kmem_taskq);
3317 }
3318
3319 /*
3320 * Recompute a cache's magazine size. The trade-off is that larger magazines
3321 * provide a higher transfer rate with the depot, while smaller magazines
3322 * reduce memory consumption. Magazine resizing is an expensive operation;
3323 * it should not be done frequently.
3324 *
3325 * Changes to the magazine size are serialized by the kmem_taskq lock.
3326 *
3327 * Note: at present this only grows the magazine size. It might be useful
3328 * to allow shrinkage too.
3329 */
3330 static void
3331 kmem_cache_magazine_resize(kmem_cache_t *cp)
3332 {
3333 kmem_magtype_t *mtp = cp->cache_magtype;
3334
3335 ASSERT(taskq_member(kmem_taskq, curthread));
3336
3337 if (cp->cache_chunksize < mtp->mt_maxbuf) {
3338 kmem_cache_magazine_purge(cp);
3339 mutex_enter(&cp->cache_depot_lock);
3340 cp->cache_magtype = ++mtp;
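		/*
		 * Offsetting the snapshot by INT_MAX makes the signed
		 * contention delta computed by kmem_cache_update() large
		 * and negative, so another resize cannot trigger before
		 * the next update resets the snapshot.
		 */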
3341 cp->cache_depot_contention_prev =
3342 cp->cache_depot_contention + INT_MAX;
3343 mutex_exit(&cp->cache_depot_lock);
3344 kmem_cache_magazine_enable(cp);
3345 }
3346 }
3347
3348 /*
3349 * Rescale a cache's hash table, so that the table size is roughly the
3350 * cache size. We want the average lookup time to be extremely small.
3351 */
3352 static void
3353 kmem_hash_rescale(kmem_cache_t *cp)
3354 {
3355 kmem_bufctl_t **old_table, **new_table, *bcp;
3356 size_t old_size, new_size, h;
3357
3358 ASSERT(taskq_member(kmem_taskq, curthread));
3359
3360 new_size = MAX(KMEM_HASH_INITIAL,
3361 1 << (highbit(3 * cp->cache_buftotal + 4) - 2));
3362 old_size = cp->cache_hash_mask + 1;
3363
3364 if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
3365 return;
3366
3367 new_table = vmem_alloc(kmem_hash_arena, new_size * sizeof (void *),
3368 VM_NOSLEEP);
3369 if (new_table == NULL)
3370 return;
3371 bzero(new_table, new_size * sizeof (void *));
3372
3373 mutex_enter(&cp->cache_lock);
3374
3375 old_size = cp->cache_hash_mask + 1;
3376 old_table = cp->cache_hash_table;
3377
3378 cp->cache_hash_mask = new_size - 1;
3379 cp->cache_hash_table = new_table;
3380 cp->cache_rescale++;
3381
3382 for (h = 0; h < old_size; h++) {
3383 bcp = old_table[h];
3384 while (bcp != NULL) {
3385 void *addr = bcp->bc_addr;
3386 kmem_bufctl_t *next_bcp = bcp->bc_next;
3387 kmem_bufctl_t **hash_bucket = KMEM_HASH(cp, addr);
3388 bcp->bc_next = *hash_bucket;
3389 *hash_bucket = bcp;
3390 bcp = next_bcp;
3391 }
3392 }
3393
3394 mutex_exit(&cp->cache_lock);
3395
3396 vmem_free(kmem_hash_arena, old_table, old_size * sizeof (void *));
3397 }
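
/*
 * To illustrate the sizing formula above: with cache_buftotal == 5000,
 * highbit(3 * 5000 + 4) == 14, so new_size == 1 << 12 == 4096 buckets,
 * i.e. roughly one bucket per buffer (the formula always lands between
 * about 0.75x and 1.5x the buffer count, rounded to a power of two).
 */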
3398
3399 /*
3400 * Perform periodic maintenance on a cache: hash rescaling, depot working-set
3401 * update, magazine resizing, and slab consolidation.
3402 */
3403 static void
3404 kmem_cache_update(kmem_cache_t *cp)
3405 {
3406 int need_hash_rescale = 0;
3407 int need_magazine_resize = 0;
3408
3409 ASSERT(MUTEX_HELD(&kmem_cache_lock));
3410
3411 /*
3412 * If the cache has become much larger or smaller than its hash table,
3413 * fire off a request to rescale the hash table.
3414 */
3415 mutex_enter(&cp->cache_lock);
3416
3417 if ((cp->cache_flags & KMF_HASH) &&
3418 (cp->cache_buftotal > (cp->cache_hash_mask << 1) ||
3419 (cp->cache_buftotal < (cp->cache_hash_mask >> 1) &&
3420 cp->cache_hash_mask > KMEM_HASH_INITIAL)))
3421 need_hash_rescale = 1;
3422
3423 mutex_exit(&cp->cache_lock);
3424
3425 /*
3426 * Update the depot working set statistics.
3427 */
3428 kmem_depot_ws_update(cp);
3429
3430 /*
3431 * If there's a lot of contention in the depot,
3432 * increase the magazine size.
3433 */
3434 mutex_enter(&cp->cache_depot_lock);
3435
3436 if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf &&
3437 (int)(cp->cache_depot_contention -
3438 cp->cache_depot_contention_prev) > kmem_depot_contention)
3439 need_magazine_resize = 1;
3440
3441 cp->cache_depot_contention_prev = cp->cache_depot_contention;
3442
3443 mutex_exit(&cp->cache_depot_lock);
3444
3445 if (need_hash_rescale)
3446 (void) taskq_dispatch(kmem_taskq,
3447 (task_func_t *)kmem_hash_rescale, cp, TQ_NOSLEEP);
3448
3449 if (need_magazine_resize)
3450 (void) taskq_dispatch(kmem_taskq,
3451 (task_func_t *)kmem_cache_magazine_resize, cp, TQ_NOSLEEP);
3452
3453 if (cp->cache_defrag != NULL)
3454 (void) taskq_dispatch(kmem_taskq,
3455 (task_func_t *)kmem_cache_scan, cp, TQ_NOSLEEP);
3456 }
3457
3458 static void kmem_update(void *);
3459
3460 static void
3461 kmem_update_timeout(void *dummy)
3462 {
3463 (void) timeout(kmem_update, dummy, kmem_reap_interval);
3464 }
3465
3466 static void
3467 kmem_update(void *dummy)
3468 {
3469 kmem_cache_applyall(kmem_cache_update, NULL, TQ_NOSLEEP);
3470
3471 /*
3472 * We use taskq_dispatch() to reschedule the timeout so that
3473 * kmem_update() becomes self-throttling: it won't schedule
3474 * new tasks until all previous tasks have completed.
3475 */
3476 if (!taskq_dispatch(kmem_taskq, kmem_update_timeout, dummy, TQ_NOSLEEP))
3477 kmem_update_timeout(NULL);
3478 }
3479
3480 static int
3481 kmem_cache_kstat_update(kstat_t *ksp, int rw)
3482 {
3483 struct kmem_cache_kstat *kmcp = &kmem_cache_kstat;
3484 kmem_cache_t *cp = ksp->ks_private;
3485 uint64_t cpu_buf_avail;
3486 uint64_t buf_avail = 0;
3487 int cpu_seqid;
3488 long reap;
3489
3490 ASSERT(MUTEX_HELD(&kmem_cache_kstat_lock));
3491
3492 if (rw == KSTAT_WRITE)
3493 return (EACCES);
3494
3495 mutex_enter(&cp->cache_lock);
3496
3497 kmcp->kmc_alloc_fail.value.ui64 = cp->cache_alloc_fail;
3498 kmcp->kmc_alloc.value.ui64 = cp->cache_slab_alloc;
3499 kmcp->kmc_free.value.ui64 = cp->cache_slab_free;
3500 kmcp->kmc_slab_alloc.value.ui64 = cp->cache_slab_alloc;
3501 kmcp->kmc_slab_free.value.ui64 = cp->cache_slab_free;
3502
3503 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3504 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3505
3506 mutex_enter(&ccp->cc_lock);
3507
3508 cpu_buf_avail = 0;
3509 if (ccp->cc_rounds > 0)
3510 cpu_buf_avail += ccp->cc_rounds;
3511 if (ccp->cc_prounds > 0)
3512 cpu_buf_avail += ccp->cc_prounds;
3513
3514 kmcp->kmc_alloc.value.ui64 += ccp->cc_alloc;
3515 kmcp->kmc_free.value.ui64 += ccp->cc_free;
3516 buf_avail += cpu_buf_avail;
3517
3518 mutex_exit(&ccp->cc_lock);
3519 }
3520
3521 mutex_enter(&cp->cache_depot_lock);
3522
3523 kmcp->kmc_depot_alloc.value.ui64 = cp->cache_full.ml_alloc;
3524 kmcp->kmc_depot_free.value.ui64 = cp->cache_empty.ml_alloc;
3525 kmcp->kmc_depot_contention.value.ui64 = cp->cache_depot_contention;
3526 kmcp->kmc_full_magazines.value.ui64 = cp->cache_full.ml_total;
3527 kmcp->kmc_empty_magazines.value.ui64 = cp->cache_empty.ml_total;
3528 kmcp->kmc_magazine_size.value.ui64 =
3529 (cp->cache_flags & KMF_NOMAGAZINE) ?
3530 0 : cp->cache_magtype->mt_magsize;
3531
3532 kmcp->kmc_alloc.value.ui64 += cp->cache_full.ml_alloc;
3533 kmcp->kmc_free.value.ui64 += cp->cache_empty.ml_alloc;
3534 buf_avail += cp->cache_full.ml_total * cp->cache_magtype->mt_magsize;
3535
3536 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
3537 reap = MIN(reap, cp->cache_full.ml_total);
3538
3539 mutex_exit(&cp->cache_depot_lock);
3540
3541 kmcp->kmc_buf_size.value.ui64 = cp->cache_bufsize;
3542 kmcp->kmc_align.value.ui64 = cp->cache_align;
3543 kmcp->kmc_chunk_size.value.ui64 = cp->cache_chunksize;
3544 kmcp->kmc_slab_size.value.ui64 = cp->cache_slabsize;
3545 kmcp->kmc_buf_constructed.value.ui64 = buf_avail;
3546 buf_avail += cp->cache_bufslab;
3547 kmcp->kmc_buf_avail.value.ui64 = buf_avail;
3548 kmcp->kmc_buf_inuse.value.ui64 = cp->cache_buftotal - buf_avail;
3549 kmcp->kmc_buf_total.value.ui64 = cp->cache_buftotal;
3550 kmcp->kmc_buf_max.value.ui64 = cp->cache_bufmax;
3551 kmcp->kmc_slab_create.value.ui64 = cp->cache_slab_create;
3552 kmcp->kmc_slab_destroy.value.ui64 = cp->cache_slab_destroy;
3553 kmcp->kmc_hash_size.value.ui64 = (cp->cache_flags & KMF_HASH) ?
3554 cp->cache_hash_mask + 1 : 0;
3555 kmcp->kmc_hash_lookup_depth.value.ui64 = cp->cache_lookup_depth;
3556 kmcp->kmc_hash_rescale.value.ui64 = cp->cache_rescale;
3557 kmcp->kmc_vmem_source.value.ui64 = cp->cache_arena->vm_id;
3558 kmcp->kmc_reap.value.ui64 = cp->cache_reap;
3559
3560 if (cp->cache_defrag == NULL) {
3561 kmcp->kmc_move_callbacks.value.ui64 = 0;
3562 kmcp->kmc_move_yes.value.ui64 = 0;
3563 kmcp->kmc_move_no.value.ui64 = 0;
3564 kmcp->kmc_move_later.value.ui64 = 0;
3565 kmcp->kmc_move_dont_need.value.ui64 = 0;
3566 kmcp->kmc_move_dont_know.value.ui64 = 0;
3567 kmcp->kmc_move_hunt_found.value.ui64 = 0;
3568 kmcp->kmc_move_slabs_freed.value.ui64 = 0;
3569 kmcp->kmc_defrag.value.ui64 = 0;
3570 kmcp->kmc_scan.value.ui64 = 0;
3571 kmcp->kmc_move_reclaimable.value.ui64 = 0;
3572 } else {
3573 int64_t reclaimable;
3574
3575 kmem_defrag_t *kd = cp->cache_defrag;
3576 kmcp->kmc_move_callbacks.value.ui64 = kd->kmd_callbacks;
3577 kmcp->kmc_move_yes.value.ui64 = kd->kmd_yes;
3578 kmcp->kmc_move_no.value.ui64 = kd->kmd_no;
3579 kmcp->kmc_move_later.value.ui64 = kd->kmd_later;
3580 kmcp->kmc_move_dont_need.value.ui64 = kd->kmd_dont_need;
3581 kmcp->kmc_move_dont_know.value.ui64 = kd->kmd_dont_know;
3582 kmcp->kmc_move_hunt_found.value.ui64 = kd->kmd_hunt_found;
3583 kmcp->kmc_move_slabs_freed.value.ui64 = kd->kmd_slabs_freed;
3584 kmcp->kmc_defrag.value.ui64 = kd->kmd_defrags;
3585 kmcp->kmc_scan.value.ui64 = kd->kmd_scans;
3586
3587 reclaimable = cp->cache_bufslab - (cp->cache_maxchunks - 1);
3588 reclaimable = MAX(reclaimable, 0);
3589 reclaimable += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
3590 kmcp->kmc_move_reclaimable.value.ui64 = reclaimable;
3591 }
3592
3593 mutex_exit(&cp->cache_lock);
3594 return (0);
3595 }
3596
3597 /*
3598 * Return a named statistic about a particular cache.
3599 * This shouldn't be called very often, so it's currently designed for
3600 * simplicity (leverages existing kstat support) rather than efficiency.
3601 */
3602 uint64_t
3603 kmem_cache_stat(kmem_cache_t *cp, char *name)
3604 {
3605 int i;
3606 kstat_t *ksp = cp->cache_kstat;
3607 kstat_named_t *knp = (kstat_named_t *)&kmem_cache_kstat;
3608 uint64_t value = 0;
3609
3610 if (ksp != NULL) {
3611 mutex_enter(&kmem_cache_kstat_lock);
3612 (void) kmem_cache_kstat_update(ksp, KSTAT_READ);
3613 for (i = 0; i < ksp->ks_ndata; i++) {
3614 if (strcmp(knp[i].name, name) == 0) {
3615 value = knp[i].value.ui64;
3616 break;
3617 }
3618 }
3619 mutex_exit(&kmem_cache_kstat_lock);
3620 }
3621 return (value);
3622 }
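
/*
 * For example, kmem_cache_stat(cp, "buf_inuse") returns the cache's
 * current count of allocated buffers; the statistic names are those of
 * the kstat_named_t fields filled in by kmem_cache_kstat_update().
 */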
3623
3624 /*
3625 * Return an estimate of currently available kernel heap memory.
 * On 32-bit systems, where physical memory may exceed virtual memory,
 * we simply truncate the result at 1GB.
3628 */
3629 size_t
3630 kmem_avail(void)
3631 {
3632 spgcnt_t rmem = availrmem - tune.t_minarmem;
3633 spgcnt_t fmem = freemem - minfree;
3634
3635 return ((size_t)ptob(MIN(MAX(MIN(rmem, fmem), 0),
3636 1 << (30 - PAGESHIFT))));
3637 }
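
/*
 * The clamp above is expressed in pages: assuming 4K pages
 * (PAGESHIFT == 12), 1 << (30 - PAGESHIFT) is 262144 pages, or
 * exactly 1GB.
 */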
3638
3639 /*
3640 * Return the maximum amount of memory that is (in theory) allocatable
3641 * from the heap. This may be used as an estimate only since there
 * is no guarantee this space will still be available when an allocation
 * request is made, nor that the space can be allocated in one big
 * request, due to kernel heap fragmentation.
3645 */
3646 size_t
3647 kmem_maxavail(void)
3648 {
3649 spgcnt_t pmem = availrmem - tune.t_minarmem;
3650 spgcnt_t vmem = btop(vmem_size(heap_arena, VMEM_FREE));
3651
3652 return ((size_t)ptob(MAX(MIN(pmem, vmem), 0)));
3653 }
3654
3655 /*
3656 * Indicate whether memory-intensive kmem debugging is enabled.
3657 */
3658 int
3659 kmem_debugging(void)
3660 {
3661 return (kmem_flags & (KMF_AUDIT | KMF_REDZONE));
3662 }
3663
3664 /* binning function, sorts finely at the two extremes */
3665 #define KMEM_PARTIAL_SLAB_WEIGHT(sp, binshift) \
3666 ((((sp)->slab_refcnt <= (binshift)) || \
3667 (((sp)->slab_chunks - (sp)->slab_refcnt) <= (binshift))) \
3668 ? -(sp)->slab_refcnt \
3669 : -((binshift) + ((sp)->slab_refcnt >> (binshift))))
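
/*
 * To illustrate the binning: with 32 chunks per slab, binshift is 3
 * (see cache_partial_binshift below), so a slab with slab_refcnt 30
 * weighs -30, one with refcnt 16 weighs -(3 + (16 >> 3)) == -5, and
 * one with refcnt 2 weighs -2. Sorting by ascending weight thus puts
 * the nearly full slab first and the nearly empty slab last, with the
 * middle of the range binned coarsely.
 */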
3670
3671 /*
3672 * Minimizing the number of partial slabs on the freelist minimizes
3673 * fragmentation (the ratio of unused buffers held by the slab layer). There are
3674 * two ways to get a slab off of the freelist: 1) free all the buffers on the
3675 * slab, and 2) allocate all the buffers on the slab. It follows that we want
3676 * the most-used slabs at the front of the list where they have the best chance
3677 * of being completely allocated, and the least-used slabs at a safe distance
3678 * from the front to improve the odds that the few remaining buffers will all be
3679 * freed before another allocation can tie up the slab. For that reason a slab
 * with a higher slab_refcnt sorts less than a slab with a lower
3681 * slab_refcnt.
3682 *
3683 * However, if a slab has at least one buffer that is deemed unfreeable, we
3684 * would rather have that slab at the front of the list regardless of
3685 * slab_refcnt, since even one unfreeable buffer makes the entire slab
3686 * unfreeable. If the client returns KMEM_CBRC_NO in response to a cache_move()
3687 * callback, the slab is marked unfreeable for as long as it remains on the
3688 * freelist.
3689 */
3690 static int
3691 kmem_partial_slab_cmp(const void *p0, const void *p1)
3692 {
3693 const kmem_cache_t *cp;
3694 const kmem_slab_t *s0 = p0;
3695 const kmem_slab_t *s1 = p1;
3696 int w0, w1;
3697 size_t binshift;
3698
3699 ASSERT(KMEM_SLAB_IS_PARTIAL(s0));
3700 ASSERT(KMEM_SLAB_IS_PARTIAL(s1));
3701 ASSERT(s0->slab_cache == s1->slab_cache);
3702 cp = s1->slab_cache;
3703 ASSERT(MUTEX_HELD(&cp->cache_lock));
3704 binshift = cp->cache_partial_binshift;
3705
3706 /* weight of first slab */
3707 w0 = KMEM_PARTIAL_SLAB_WEIGHT(s0, binshift);
3708 if (s0->slab_flags & KMEM_SLAB_NOMOVE) {
3709 w0 -= cp->cache_maxchunks;
3710 }
3711
3712 /* weight of second slab */
3713 w1 = KMEM_PARTIAL_SLAB_WEIGHT(s1, binshift);
3714 if (s1->slab_flags & KMEM_SLAB_NOMOVE) {
3715 w1 -= cp->cache_maxchunks;
3716 }
3717
3718 if (w0 < w1)
3719 return (-1);
3720 if (w0 > w1)
3721 return (1);
3722
3723 /* compare pointer values */
3724 if ((uintptr_t)s0 < (uintptr_t)s1)
3725 return (-1);
3726 if ((uintptr_t)s0 > (uintptr_t)s1)
3727 return (1);
3728
3729 return (0);
3730 }
3731
3732 /*
3733 * It must be valid to call the destructor (if any) on a newly created object.
3734 * That is, the constructor (if any) must leave the object in a valid state for
3735 * the destructor.
3736 */
3737 kmem_cache_t *
3738 kmem_cache_create(
3739 char *name, /* descriptive name for this cache */
3740 size_t bufsize, /* size of the objects it manages */
3741 size_t align, /* required object alignment */
3742 int (*constructor)(void *, void *, int), /* object constructor */
3743 void (*destructor)(void *, void *), /* object destructor */
3744 void (*reclaim)(void *), /* memory reclaim callback */
3745 void *private, /* pass-thru arg for constr/destr/reclaim */
3746 vmem_t *vmp, /* vmem source for slab allocation */
3747 int cflags) /* cache creation flags */
3748 {
3749 int cpu_seqid;
3750 size_t chunksize;
3751 kmem_cache_t *cp;
3752 kmem_magtype_t *mtp;
3753 size_t csize = KMEM_CACHE_SIZE(max_ncpus);
3754
3755 #ifdef DEBUG
3756 /*
3757 * Cache names should conform to the rules for valid C identifiers
3758 */
3759 if (!strident_valid(name)) {
3760 cmn_err(CE_CONT,
3761 "kmem_cache_create: '%s' is an invalid cache name\n"
3762 "cache names must conform to the rules for "
3763 "C identifiers\n", name);
3764 }
3765 #endif /* DEBUG */
3766
3767 if (vmp == NULL)
3768 vmp = kmem_default_arena;
3769
3770 /*
3771 * If this kmem cache has an identifier vmem arena as its source, mark
3772 * it such to allow kmem_reap_idspace().
3773 */
3774 ASSERT(!(cflags & KMC_IDENTIFIER)); /* consumer should not set this */
3775 if (vmp->vm_cflags & VMC_IDENTIFIER)
3776 cflags |= KMC_IDENTIFIER;
3777
3778 /*
3779 * Get a kmem_cache structure. We arrange that cp->cache_cpu[]
3780 * is aligned on a KMEM_CPU_CACHE_SIZE boundary to prevent
3781 * false sharing of per-CPU data.
3782 */
3783 cp = vmem_xalloc(kmem_cache_arena, csize, KMEM_CPU_CACHE_SIZE,
3784 P2NPHASE(csize, KMEM_CPU_CACHE_SIZE), 0, NULL, NULL, VM_SLEEP);
3785 bzero(cp, csize);
3786 list_link_init(&cp->cache_link);
3787
3788 if (align == 0)
3789 align = KMEM_ALIGN;
3790
3791 /*
3792 * If we're not at least KMEM_ALIGN aligned, we can't use free
3793 * memory to hold bufctl information (because we can't safely
3794 * perform word loads and stores on it).
3795 */
3796 if (align < KMEM_ALIGN)
3797 cflags |= KMC_NOTOUCH;
3798
3799 if (!ISP2(align) || align > vmp->vm_quantum)
3800 panic("kmem_cache_create: bad alignment %lu", align);
3801
3802 mutex_enter(&kmem_flags_lock);
3803 if (kmem_flags & KMF_RANDOMIZE)
3804 kmem_flags = (((kmem_flags | ~KMF_RANDOM) + 1) & KMF_RANDOM) |
3805 KMF_RANDOMIZE;
3806 cp->cache_flags = (kmem_flags | cflags) & KMF_DEBUG;
3807 mutex_exit(&kmem_flags_lock);
3808
3809 /*
3810 * Make sure all the various flags are reasonable.
3811 */
3812 ASSERT(!(cflags & KMC_NOHASH) || !(cflags & KMC_NOTOUCH));
3813
3814 if (cp->cache_flags & KMF_LITE) {
3815 if (bufsize >= kmem_lite_minsize &&
3816 align <= kmem_lite_maxalign &&
3817 P2PHASE(bufsize, kmem_lite_maxalign) != 0) {
3818 cp->cache_flags |= KMF_BUFTAG;
3819 cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3820 } else {
3821 cp->cache_flags &= ~KMF_DEBUG;
3822 }
3823 }
3824
3825 if (cp->cache_flags & KMF_DEADBEEF)
3826 cp->cache_flags |= KMF_REDZONE;
3827
3828 if ((cflags & KMC_QCACHE) && (cp->cache_flags & KMF_AUDIT))
3829 cp->cache_flags |= KMF_NOMAGAZINE;
3830
3831 if (cflags & KMC_NODEBUG)
3832 cp->cache_flags &= ~KMF_DEBUG;
3833
3834 if (cflags & KMC_NOTOUCH)
3835 cp->cache_flags &= ~KMF_TOUCH;
3836
3837 if (cflags & KMC_PREFILL)
3838 cp->cache_flags |= KMF_PREFILL;
3839
3840 if (cflags & KMC_NOHASH)
3841 cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3842
3843 if (cflags & KMC_NOMAGAZINE)
3844 cp->cache_flags |= KMF_NOMAGAZINE;
3845
3846 if ((cp->cache_flags & KMF_AUDIT) && !(cflags & KMC_NOTOUCH))
3847 cp->cache_flags |= KMF_REDZONE;
3848
3849 if (!(cp->cache_flags & KMF_AUDIT))
3850 cp->cache_flags &= ~KMF_CONTENTS;
3851
3852 if ((cp->cache_flags & KMF_BUFTAG) && bufsize >= kmem_minfirewall &&
3853 !(cp->cache_flags & KMF_LITE) && !(cflags & KMC_NOHASH))
3854 cp->cache_flags |= KMF_FIREWALL;
3855
3856 if (vmp != kmem_default_arena || kmem_firewall_arena == NULL)
3857 cp->cache_flags &= ~KMF_FIREWALL;
3858
3859 if (cp->cache_flags & KMF_FIREWALL) {
3860 cp->cache_flags &= ~KMF_BUFTAG;
3861 cp->cache_flags |= KMF_NOMAGAZINE;
3862 ASSERT(vmp == kmem_default_arena);
3863 vmp = kmem_firewall_arena;
3864 }
3865
3866 /*
3867 * Set cache properties.
3868 */
3869 (void) strncpy(cp->cache_name, name, KMEM_CACHE_NAMELEN);
3870 strident_canon(cp->cache_name, KMEM_CACHE_NAMELEN + 1);
3871 cp->cache_bufsize = bufsize;
3872 cp->cache_align = align;
3873 cp->cache_constructor = constructor;
3874 cp->cache_destructor = destructor;
3875 cp->cache_reclaim = reclaim;
3876 cp->cache_private = private;
3877 cp->cache_arena = vmp;
3878 cp->cache_cflags = cflags;
3879
3880 /*
3881 * Determine the chunk size.
3882 */
3883 chunksize = bufsize;
3884
3885 if (align >= KMEM_ALIGN) {
3886 chunksize = P2ROUNDUP(chunksize, KMEM_ALIGN);
3887 cp->cache_bufctl = chunksize - KMEM_ALIGN;
3888 }
3889
3890 if (cp->cache_flags & KMF_BUFTAG) {
3891 cp->cache_bufctl = chunksize;
3892 cp->cache_buftag = chunksize;
3893 if (cp->cache_flags & KMF_LITE)
3894 chunksize += KMEM_BUFTAG_LITE_SIZE(kmem_lite_count);
3895 else
3896 chunksize += sizeof (kmem_buftag_t);
3897 }
3898
3899 if (cp->cache_flags & KMF_DEADBEEF) {
3900 cp->cache_verify = MIN(cp->cache_buftag, kmem_maxverify);
3901 if (cp->cache_flags & KMF_LITE)
3902 cp->cache_verify = sizeof (uint64_t);
3903 }
3904
3905 cp->cache_contents = MIN(cp->cache_bufctl, kmem_content_maxsave);
3906
3907 cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align);
3908
3909 /*
3910 * Now that we know the chunk size, determine the optimal slab size.
3911 */
3912 if (vmp == kmem_firewall_arena) {
3913 cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum);
3914 cp->cache_mincolor = cp->cache_slabsize - chunksize;
3915 cp->cache_maxcolor = cp->cache_mincolor;
3916 cp->cache_flags |= KMF_HASH;
3917 ASSERT(!(cp->cache_flags & KMF_BUFTAG));
3918 } else if ((cflags & KMC_NOHASH) || (!(cflags & KMC_NOTOUCH) &&
3919 !(cp->cache_flags & KMF_AUDIT) &&
3920 chunksize < vmp->vm_quantum / KMEM_VOID_FRACTION)) {
3921 cp->cache_slabsize = vmp->vm_quantum;
3922 cp->cache_mincolor = 0;
3923 cp->cache_maxcolor =
3924 (cp->cache_slabsize - sizeof (kmem_slab_t)) % chunksize;
3925 ASSERT(chunksize + sizeof (kmem_slab_t) <= cp->cache_slabsize);
3926 ASSERT(!(cp->cache_flags & KMF_AUDIT));
3927 } else {
3928 size_t chunks, bestfit, waste, slabsize;
3929 size_t minwaste = LONG_MAX;
3930
3931 for (chunks = 1; chunks <= KMEM_VOID_FRACTION; chunks++) {
3932 slabsize = P2ROUNDUP(chunksize * chunks,
3933 vmp->vm_quantum);
3934 chunks = slabsize / chunksize;
3935 waste = (slabsize % chunksize) / chunks;
3936 if (waste < minwaste) {
3937 minwaste = waste;
3938 bestfit = slabsize;
3939 }
3940 }
3941 if (cflags & KMC_QCACHE)
3942 bestfit = VMEM_QCACHE_SLABSIZE(vmp->vm_qcache_max);
3943 cp->cache_slabsize = bestfit;
3944 cp->cache_mincolor = 0;
3945 cp->cache_maxcolor = bestfit % chunksize;
3946 cp->cache_flags |= KMF_HASH;
3947 }
3948
3949 cp->cache_maxchunks = (cp->cache_slabsize / cp->cache_chunksize);
3950 cp->cache_partial_binshift = highbit(cp->cache_maxchunks / 16) + 1;
3951
3952 /*
3953 * Disallowing prefill when either the DEBUG or HASH flag is set or when
3954 * there is a constructor avoids some tricky issues with debug setup
3955 * that may be revisited later. We cannot allow prefill in a
3956 * metadata cache because of potential recursion.
3957 */
3958 if (vmp == kmem_msb_arena ||
3959 cp->cache_flags & (KMF_HASH | KMF_BUFTAG) ||
3960 cp->cache_constructor != NULL)
3961 cp->cache_flags &= ~KMF_PREFILL;
3962
3963 if (cp->cache_flags & KMF_HASH) {
3964 ASSERT(!(cflags & KMC_NOHASH));
3965 cp->cache_bufctl_cache = (cp->cache_flags & KMF_AUDIT) ?
3966 kmem_bufctl_audit_cache : kmem_bufctl_cache;
3967 }
3968
3969 if (cp->cache_maxcolor >= vmp->vm_quantum)
3970 cp->cache_maxcolor = vmp->vm_quantum - 1;
3971
3972 cp->cache_color = cp->cache_mincolor;
3973
3974 /*
3975 * Initialize the rest of the slab layer.
3976 */
3977 mutex_init(&cp->cache_lock, NULL, MUTEX_DEFAULT, NULL);
3978
3979 avl_create(&cp->cache_partial_slabs, kmem_partial_slab_cmp,
3980 sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
3981 /* LINTED: E_TRUE_LOGICAL_EXPR */
3982 ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
3983 /* reuse partial slab AVL linkage for complete slab list linkage */
3984 list_create(&cp->cache_complete_slabs,
3985 sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
3986
3987 if (cp->cache_flags & KMF_HASH) {
3988 cp->cache_hash_table = vmem_alloc(kmem_hash_arena,
3989 KMEM_HASH_INITIAL * sizeof (void *), VM_SLEEP);
3990 bzero(cp->cache_hash_table,
3991 KMEM_HASH_INITIAL * sizeof (void *));
3992 cp->cache_hash_mask = KMEM_HASH_INITIAL - 1;
3993 cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1;
3994 }
3995
3996 /*
3997 * Initialize the depot.
3998 */
3999 mutex_init(&cp->cache_depot_lock, NULL, MUTEX_DEFAULT, NULL);
4000
4001 for (mtp = kmem_magtype; chunksize <= mtp->mt_minbuf; mtp++)
4002 continue;
4003
4004 cp->cache_magtype = mtp;
4005
4006 /*
4007 * Initialize the CPU layer.
4008 */
4009 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
4010 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
4011 mutex_init(&ccp->cc_lock, NULL, MUTEX_DEFAULT, NULL);
4012 ccp->cc_flags = cp->cache_flags;
4013 ccp->cc_rounds = -1;
4014 ccp->cc_prounds = -1;
4015 }
4016
4017 /*
4018 * Create the cache's kstats.
4019 */
4020 if ((cp->cache_kstat = kstat_create("unix", 0, cp->cache_name,
4021 "kmem_cache", KSTAT_TYPE_NAMED,
4022 sizeof (kmem_cache_kstat) / sizeof (kstat_named_t),
4023 KSTAT_FLAG_VIRTUAL)) != NULL) {
4024 cp->cache_kstat->ks_data = &kmem_cache_kstat;
4025 cp->cache_kstat->ks_update = kmem_cache_kstat_update;
4026 cp->cache_kstat->ks_private = cp;
4027 cp->cache_kstat->ks_lock = &kmem_cache_kstat_lock;
4028 kstat_install(cp->cache_kstat);
4029 }
4030
4031 /*
4032 * Add the cache to the global list. This makes it visible
4033 * to kmem_update(), so the cache must be ready for business.
4034 */
4035 mutex_enter(&kmem_cache_lock);
4036 list_insert_tail(&kmem_caches, cp);
4037 mutex_exit(&kmem_cache_lock);
4038
4039 if (kmem_ready)
4040 kmem_cache_magazine_enable(cp);
4041
4042 return (cp);
4043 }
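
/*
 * A minimal sketch of a typical client (the foo_* names are
 * hypothetical), illustrating the constructor/destructor contract stated
 * above: the constructor leaves the object in a state the destructor
 * can accept.
 *
 *	typedef struct foo {
 *		kmutex_t	foo_lock;
 *		...
 *	} foo_t;
 *
 *	static int
 *	foo_constructor(void *buf, void *private, int kmflags)
 *	{
 *		foo_t *fp = buf;
 *		mutex_init(&fp->foo_lock, NULL, MUTEX_DEFAULT, NULL);
 *		return (0);
 *	}
 *
 *	static void
 *	foo_destructor(void *buf, void *private)
 *	{
 *		foo_t *fp = buf;
 *		mutex_destroy(&fp->foo_lock);
 *	}
 *
 *	foo_cache = kmem_cache_create("foo_cache", sizeof (foo_t), 0,
 *	    foo_constructor, foo_destructor, NULL, NULL, NULL, 0);
 */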
4044
4045 static int
4046 kmem_move_cmp(const void *buf, const void *p)
4047 {
4048 const kmem_move_t *kmm = p;
4049 uintptr_t v1 = (uintptr_t)buf;
4050 uintptr_t v2 = (uintptr_t)kmm->kmm_from_buf;
4051 return (v1 < v2 ? -1 : (v1 > v2 ? 1 : 0));
4052 }
4053
4054 static void
4055 kmem_reset_reclaim_threshold(kmem_defrag_t *kmd)
4056 {
4057 kmd->kmd_reclaim_numer = 1;
4058 }
4059
4060 /*
4061 * Initially, when choosing candidate slabs for buffers to move, we want to be
4062 * very selective and take only slabs that are less than
4063 * (1 / KMEM_VOID_FRACTION) allocated. If we have difficulty finding candidate
4064 * slabs, then we raise the allocation ceiling incrementally. The reclaim
4065 * threshold is reset to (1 / KMEM_VOID_FRACTION) as soon as the cache is no
4066 * longer fragmented.
4067 */
4068 static void
4069 kmem_adjust_reclaim_threshold(kmem_defrag_t *kmd, int direction)
4070 {
4071 if (direction > 0) {
4072 /* make it easier to find a candidate slab */
4073 if (kmd->kmd_reclaim_numer < (KMEM_VOID_FRACTION - 1)) {
4074 kmd->kmd_reclaim_numer++;
4075 }
4076 } else {
4077 /* be more selective */
4078 if (kmd->kmd_reclaim_numer > 1) {
4079 kmd->kmd_reclaim_numer--;
4080 }
4081 }
4082 }
4083
4084 void
4085 kmem_cache_set_move(kmem_cache_t *cp,
4086 kmem_cbrc_t (*move)(void *, void *, size_t, void *))
4087 {
4088 kmem_defrag_t *defrag;
4089
4090 ASSERT(move != NULL);
4091 /*
4092 * The consolidator does not support NOTOUCH caches because kmem cannot
4093 * initialize their slabs with the 0xbaddcafe memory pattern, which sets
4094 * a low order bit usable by clients to distinguish uninitialized memory
4095 * from known objects (see kmem_slab_create).
4096 */
4097 ASSERT(!(cp->cache_cflags & KMC_NOTOUCH));
4098 ASSERT(!(cp->cache_cflags & KMC_IDENTIFIER));
4099
4100 /*
4101 * We should not be holding anyone's cache lock when calling
4102 * kmem_cache_alloc(), so allocate in all cases before acquiring the
4103 * lock.
4104 */
4105 defrag = kmem_cache_alloc(kmem_defrag_cache, KM_SLEEP);
4106
4107 mutex_enter(&cp->cache_lock);
4108
4109 if (KMEM_IS_MOVABLE(cp)) {
4110 if (cp->cache_move == NULL) {
4111 ASSERT(cp->cache_slab_alloc == 0);
4112
4113 cp->cache_defrag = defrag;
4114 defrag = NULL; /* nothing to free */
4115 bzero(cp->cache_defrag, sizeof (kmem_defrag_t));
4116 avl_create(&cp->cache_defrag->kmd_moves_pending,
4117 kmem_move_cmp, sizeof (kmem_move_t),
4118 offsetof(kmem_move_t, kmm_entry));
4119 /* LINTED: E_TRUE_LOGICAL_EXPR */
4120 ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
4121 /* reuse the slab's AVL linkage for deadlist linkage */
4122 list_create(&cp->cache_defrag->kmd_deadlist,
4123 sizeof (kmem_slab_t),
4124 offsetof(kmem_slab_t, slab_link));
4125 kmem_reset_reclaim_threshold(cp->cache_defrag);
4126 }
4127 cp->cache_move = move;
4128 }
4129
4130 mutex_exit(&cp->cache_lock);
4131
4132 if (defrag != NULL) {
4133 kmem_cache_free(kmem_defrag_cache, defrag); /* unused */
4134 }
4135 }
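
/*
 * A sketch of a client move callback, continuing the hypothetical foo_t
 * example and glossing over the handoff of any state embedded in the
 * object: the client returns one of the kmem_cbrc_t responses described
 * in the big comment before kmem_move_buffer() below.
 *
 *	static kmem_cbrc_t
 *	foo_move(void *old, void *new, size_t size, void *private)
 *	{
 *		foo_t *fp = old;
 *
 *		if (!foo_is_known_object(fp))
 *			return (KMEM_CBRC_DONT_KNOW);
 *		if (!mutex_tryenter(&fp->foo_lock))
 *			return (KMEM_CBRC_LATER);
 *		bcopy(old, new, size);
 *		mutex_exit(&fp->foo_lock);
 *		return (KMEM_CBRC_YES);
 *	}
 *
 *	kmem_cache_set_move(foo_cache, foo_move);
 */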
4136
4137 void
4138 kmem_cache_destroy(kmem_cache_t *cp)
4139 {
4140 int cpu_seqid;
4141
4142 /*
4143 * Remove the cache from the global cache list so that no one else
4144 * can schedule tasks on its behalf, wait for any pending tasks to
4145 * complete, purge the cache, and then destroy it.
4146 */
4147 mutex_enter(&kmem_cache_lock);
4148 list_remove(&kmem_caches, cp);
4149 mutex_exit(&kmem_cache_lock);
4150
4151 if (kmem_taskq != NULL)
4152 taskq_wait(kmem_taskq);
4153 if (kmem_move_taskq != NULL)
4154 taskq_wait(kmem_move_taskq);
4155
4156 kmem_cache_magazine_purge(cp);
4157
4158 mutex_enter(&cp->cache_lock);
4159 if (cp->cache_buftotal != 0)
4160 cmn_err(CE_WARN, "kmem_cache_destroy: '%s' (%p) not empty",
4161 cp->cache_name, (void *)cp);
4162 if (cp->cache_defrag != NULL) {
4163 avl_destroy(&cp->cache_defrag->kmd_moves_pending);
4164 list_destroy(&cp->cache_defrag->kmd_deadlist);
4165 kmem_cache_free(kmem_defrag_cache, cp->cache_defrag);
4166 cp->cache_defrag = NULL;
4167 }
4168 /*
4169 * The cache is now dead. There should be no further activity. We
4170 * enforce this by setting land mines in the constructor, destructor,
4171 * reclaim, and move routines that induce a kernel text fault if
4172 * invoked.
4173 */
4174 cp->cache_constructor = (int (*)(void *, void *, int))1;
4175 cp->cache_destructor = (void (*)(void *, void *))2;
4176 cp->cache_reclaim = (void (*)(void *))3;
4177 cp->cache_move = (kmem_cbrc_t (*)(void *, void *, size_t, void *))4;
4178 mutex_exit(&cp->cache_lock);
4179
4180 kstat_delete(cp->cache_kstat);
4181
4182 if (cp->cache_hash_table != NULL)
4183 vmem_free(kmem_hash_arena, cp->cache_hash_table,
4184 (cp->cache_hash_mask + 1) * sizeof (void *));
4185
4186 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++)
4187 mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock);
4188
4189 mutex_destroy(&cp->cache_depot_lock);
4190 mutex_destroy(&cp->cache_lock);
4191
4192 vmem_free(kmem_cache_arena, cp, KMEM_CACHE_SIZE(max_ncpus));
4193 }
4194
4195 /*ARGSUSED*/
4196 static int
4197 kmem_cpu_setup(cpu_setup_t what, int id, void *arg)
4198 {
4199 ASSERT(MUTEX_HELD(&cpu_lock));
4200 if (what == CPU_UNCONFIG) {
4201 kmem_cache_applyall(kmem_cache_magazine_purge,
4202 kmem_taskq, TQ_SLEEP);
4203 kmem_cache_applyall(kmem_cache_magazine_enable,
4204 kmem_taskq, TQ_SLEEP);
4205 }
4206 return (0);
4207 }
4208
4209 static void
4210 kmem_alloc_caches_create(const int *array, size_t count,
4211 kmem_cache_t **alloc_table, size_t maxbuf, uint_t shift)
4212 {
4213 char name[KMEM_CACHE_NAMELEN + 1];
4214 size_t table_unit = (1 << shift); /* range of one alloc_table entry */
4215 size_t size = table_unit;
4216 int i;
4217
4218 for (i = 0; i < count; i++) {
4219 size_t cache_size = array[i];
4220 size_t align = KMEM_ALIGN;
4221 kmem_cache_t *cp;
4222
4223 /* if the table has an entry for maxbuf, we're done */
4224 if (size > maxbuf)
4225 break;
4226
4227 /* cache size must be a multiple of the table unit */
4228 ASSERT(P2PHASE(cache_size, table_unit) == 0);
4229
4230 /*
4231 * If they allocate a multiple of the coherency granularity,
4232 * they get a coherency-granularity-aligned address.
4233 */
4234 if (IS_P2ALIGNED(cache_size, 64))
4235 align = 64;
4236 if (IS_P2ALIGNED(cache_size, PAGESIZE))
4237 align = PAGESIZE;
4238 (void) snprintf(name, sizeof (name),
4239 "kmem_alloc_%lu", cache_size);
4240 cp = kmem_cache_create(name, cache_size, align,
4241 NULL, NULL, NULL, NULL, NULL, KMC_KMEM_ALLOC);
4242
4243 while (size <= cache_size) {
4244 alloc_table[(size - 1) >> shift] = cp;
4245 size += table_unit;
4246 }
4247 }
4248
4249 ASSERT(size > maxbuf); /* i.e. maxbuf <= max(cache_size) */
4250 }
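
/*
 * For example (hypothetical sizes): with shift == KMEM_ALIGN_SHIFT (3)
 * and consecutive cache sizes 96 and 112 in the array, the 112-byte
 * cache is stored at table entries (104 - 1) >> 3 == 12 and
 * (112 - 1) >> 3 == 13, so any request of 97..112 bytes maps to it.
 */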
4251
4252 static void
4253 kmem_cache_init(int pass, int use_large_pages)
4254 {
4255 int i;
4256 size_t maxbuf;
4257 kmem_magtype_t *mtp;
4258
4259 for (i = 0; i < sizeof (kmem_magtype) / sizeof (*mtp); i++) {
4260 char name[KMEM_CACHE_NAMELEN + 1];
4261
4262 mtp = &kmem_magtype[i];
4263 (void) sprintf(name, "kmem_magazine_%d", mtp->mt_magsize);
4264 mtp->mt_cache = kmem_cache_create(name,
4265 (mtp->mt_magsize + 1) * sizeof (void *),
4266 mtp->mt_align, NULL, NULL, NULL, NULL,
4267 kmem_msb_arena, KMC_NOHASH);
4268 }
4269
4270 kmem_slab_cache = kmem_cache_create("kmem_slab_cache",
4271 sizeof (kmem_slab_t), 0, NULL, NULL, NULL, NULL,
4272 kmem_msb_arena, KMC_NOHASH);
4273
4274 kmem_bufctl_cache = kmem_cache_create("kmem_bufctl_cache",
4275 sizeof (kmem_bufctl_t), 0, NULL, NULL, NULL, NULL,
4276 kmem_msb_arena, KMC_NOHASH);
4277
4278 kmem_bufctl_audit_cache = kmem_cache_create("kmem_bufctl_audit_cache",
4279 sizeof (kmem_bufctl_audit_t), 0, NULL, NULL, NULL, NULL,
4280 kmem_msb_arena, KMC_NOHASH);
4281
4282 if (pass == 2) {
4283 kmem_va_arena = vmem_create("kmem_va",
4284 NULL, 0, PAGESIZE,
4285 vmem_alloc, vmem_free, heap_arena,
4286 8 * PAGESIZE, VM_SLEEP);
4287
4288 if (use_large_pages) {
4289 kmem_default_arena = vmem_xcreate("kmem_default",
4290 NULL, 0, PAGESIZE,
4291 segkmem_alloc_lp, segkmem_free_lp, kmem_va_arena,
4292 0, VMC_DUMPSAFE | VM_SLEEP);
4293 } else {
4294 kmem_default_arena = vmem_create("kmem_default",
4295 NULL, 0, PAGESIZE,
4296 segkmem_alloc, segkmem_free, kmem_va_arena,
4297 0, VMC_DUMPSAFE | VM_SLEEP);
4298 }
4299
4300 /* Figure out what our maximum cache size is */
4301 maxbuf = kmem_max_cached;
4302 if (maxbuf <= KMEM_MAXBUF) {
4303 maxbuf = 0;
4304 kmem_max_cached = KMEM_MAXBUF;
4305 } else {
4306 size_t size = 0;
4307 size_t max =
4308 sizeof (kmem_big_alloc_sizes) / sizeof (int);
4309 /*
4310 * Round maxbuf up to an existing cache size. If maxbuf
4311 * is larger than the largest cache, we truncate it to
4312 * the largest cache's size.
4313 */
4314 for (i = 0; i < max; i++) {
4315 size = kmem_big_alloc_sizes[i];
4316 if (maxbuf <= size)
4317 break;
4318 }
4319 kmem_max_cached = maxbuf = size;
4320 }
4321
4322 /*
4323 * The big alloc table may not be completely overwritten, so
4324 * we clear out any stale cache pointers from the first pass.
4325 */
4326 bzero(kmem_big_alloc_table, sizeof (kmem_big_alloc_table));
4327 } else {
4328 /*
4329 * During the first pass, the kmem_alloc_* caches
4330 * are treated as metadata.
4331 */
4332 kmem_default_arena = kmem_msb_arena;
4333 maxbuf = KMEM_BIG_MAXBUF_32BIT;
4334 }
4335
4336 /*
4337 * Set up the default caches to back kmem_alloc()
4338 */
4339 kmem_alloc_caches_create(
4340 kmem_alloc_sizes, sizeof (kmem_alloc_sizes) / sizeof (int),
4341 kmem_alloc_table, KMEM_MAXBUF, KMEM_ALIGN_SHIFT);
4342
4343 kmem_alloc_caches_create(
4344 kmem_big_alloc_sizes, sizeof (kmem_big_alloc_sizes) / sizeof (int),
4345 kmem_big_alloc_table, maxbuf, KMEM_BIG_SHIFT);
4346
4347 kmem_big_alloc_table_max = maxbuf >> KMEM_BIG_SHIFT;
4348 }
4349
4350 void
4351 kmem_init(void)
4352 {
4353 kmem_cache_t *cp;
4354 int old_kmem_flags = kmem_flags;
4355 int use_large_pages = 0;
4356 size_t maxverify, minfirewall;
4357
4358 kstat_init();
4359
4360 /*
4361 * Don't do firewalled allocations if the heap is less than 1TB
	 * (i.e. on a 32-bit kernel).
4363 * The resulting VM_NEXTFIT allocations would create too much
4364 * fragmentation in a small heap.
4365 */
4366 #if defined(_LP64)
4367 maxverify = minfirewall = PAGESIZE / 2;
4368 #else
4369 maxverify = minfirewall = ULONG_MAX;
4370 #endif
4371
4372 /* LINTED */
4373 ASSERT(sizeof (kmem_cpu_cache_t) == KMEM_CPU_CACHE_SIZE);
4374
4375 list_create(&kmem_caches, sizeof (kmem_cache_t),
4376 offsetof(kmem_cache_t, cache_link));
4377
4378 kmem_metadata_arena = vmem_create("kmem_metadata", NULL, 0, PAGESIZE,
4379 vmem_alloc, vmem_free, heap_arena, 8 * PAGESIZE,
4380 VM_SLEEP | VMC_NO_QCACHE);
4381
4382 kmem_msb_arena = vmem_create("kmem_msb", NULL, 0,
4383 PAGESIZE, segkmem_alloc, segkmem_free, kmem_metadata_arena, 0,
4384 VMC_DUMPSAFE | VM_SLEEP);
4385
4386 kmem_cache_arena = vmem_create("kmem_cache", NULL, 0, KMEM_ALIGN,
4387 segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
4388
4389 kmem_hash_arena = vmem_create("kmem_hash", NULL, 0, KMEM_ALIGN,
4390 segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
4391
4392 kmem_log_arena = vmem_create("kmem_log", NULL, 0, KMEM_ALIGN,
4393 segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
4394
4395 kmem_firewall_va_arena = vmem_create("kmem_firewall_va",
4396 NULL, 0, PAGESIZE,
4397 kmem_firewall_va_alloc, kmem_firewall_va_free, heap_arena,
4398 0, VM_SLEEP);
4399
4400 kmem_firewall_arena = vmem_create("kmem_firewall", NULL, 0, PAGESIZE,
4401 segkmem_alloc, segkmem_free, kmem_firewall_va_arena, 0,
4402 VMC_DUMPSAFE | VM_SLEEP);
4403
4404 /* temporary oversize arena for mod_read_system_file */
4405 kmem_oversize_arena = vmem_create("kmem_oversize", NULL, 0, PAGESIZE,
4406 segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
4407
4408 kmem_reap_interval = 15 * hz;
4409
4410 /*
4411 * Read /etc/system. This is a chicken-and-egg problem because
4412 * kmem_flags may be set in /etc/system, but mod_read_system_file()
4413 * needs to use the allocator. The simplest solution is to create
4414 * all the standard kmem caches, read /etc/system, destroy all the
4415 * caches we just created, and then create them all again in light
4416 * of the (possibly) new kmem_flags and other kmem tunables.
4417 */
4418 kmem_cache_init(1, 0);
4419
4420 mod_read_system_file(boothowto & RB_ASKNAME);
4421
4422 while ((cp = list_tail(&kmem_caches)) != NULL)
4423 kmem_cache_destroy(cp);
4424
4425 vmem_destroy(kmem_oversize_arena);
4426
4427 if (old_kmem_flags & KMF_STICKY)
4428 kmem_flags = old_kmem_flags;
4429
4430 if (!(kmem_flags & KMF_AUDIT))
4431 vmem_seg_size = offsetof(vmem_seg_t, vs_thread);
4432
4433 if (kmem_maxverify == 0)
4434 kmem_maxverify = maxverify;
4435
4436 if (kmem_minfirewall == 0)
4437 kmem_minfirewall = minfirewall;
4438
4439 /*
4440 * give segkmem a chance to figure out if we are using large pages
4441 * for the kernel heap
4442 */
4443 use_large_pages = segkmem_lpsetup();
4444
4445 /*
4446 * To protect against corruption, we keep the actual number of callers
	 * KMF_LITE records separate from the tunable. We arbitrarily clamp
4448 * to 16, since the overhead for small buffers quickly gets out of
4449 * hand.
4450 *
4451 * The real limit would depend on the needs of the largest KMC_NOHASH
4452 * cache.
4453 */
4454 kmem_lite_count = MIN(MAX(0, kmem_lite_pcs), 16);
4455 kmem_lite_pcs = kmem_lite_count;
4456
4457 /*
4458 * Normally, we firewall oversized allocations when possible, but
4459 * if we are using large pages for kernel memory, and we don't have
4460 * any non-LITE debugging flags set, we want to allocate oversized
4461 * buffers from large pages, and so skip the firewalling.
4462 */
4463 if (use_large_pages &&
4464 ((kmem_flags & KMF_LITE) || !(kmem_flags & KMF_DEBUG))) {
4465 kmem_oversize_arena = vmem_xcreate("kmem_oversize", NULL, 0,
4466 PAGESIZE, segkmem_alloc_lp, segkmem_free_lp, heap_arena,
4467 0, VMC_DUMPSAFE | VM_SLEEP);
4468 } else {
4469 kmem_oversize_arena = vmem_create("kmem_oversize",
4470 NULL, 0, PAGESIZE,
4471 segkmem_alloc, segkmem_free, kmem_minfirewall < ULONG_MAX?
4472 kmem_firewall_va_arena : heap_arena, 0, VMC_DUMPSAFE |
4473 VM_SLEEP);
4474 }
4475
4476 kmem_cache_init(2, use_large_pages);
4477
4478 if (kmem_flags & (KMF_AUDIT | KMF_RANDOMIZE)) {
4479 if (kmem_transaction_log_size == 0)
4480 kmem_transaction_log_size = kmem_maxavail() / 50;
4481 kmem_transaction_log = kmem_log_init(kmem_transaction_log_size);
4482 }
4483
4484 if (kmem_flags & (KMF_CONTENTS | KMF_RANDOMIZE)) {
4485 if (kmem_content_log_size == 0)
4486 kmem_content_log_size = kmem_maxavail() / 50;
4487 kmem_content_log = kmem_log_init(kmem_content_log_size);
4488 }
4489
4490 kmem_failure_log = kmem_log_init(kmem_failure_log_size);
4491
4492 kmem_slab_log = kmem_log_init(kmem_slab_log_size);
4493
4494 /*
4495 * Initialize STREAMS message caches so allocb() is available.
4496 * This allows us to initialize the logging framework (cmn_err(9F),
4497 * strlog(9F), etc) so we can start recording messages.
4498 */
4499 streams_msg_init();
4500
4501 /*
4502 * Initialize the ZSD framework in Zones so modules loaded henceforth
4503 * can register their callbacks.
4504 */
4505 zone_zsd_init();
4506
4507 log_init();
4508 taskq_init();
4509
4510 /*
4511 * Warn about invalid or dangerous values of kmem_flags.
4512 * Always warn about unsupported values.
4513 */
4514 if (((kmem_flags & ~(KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE |
4515 KMF_CONTENTS | KMF_LITE)) != 0) ||
4516 ((kmem_flags & KMF_LITE) && kmem_flags != KMF_LITE))
4517 cmn_err(CE_WARN, "kmem_flags set to unsupported value 0x%x. "
4518 "See the Solaris Tunable Parameters Reference Manual.",
4519 kmem_flags);
4520
4521 #ifdef DEBUG
4522 if ((kmem_flags & KMF_DEBUG) == 0)
4523 cmn_err(CE_NOTE, "kmem debugging disabled.");
4524 #else
4525 /*
4526 * For non-debug kernels, the only "normal" flags are 0, KMF_LITE,
4527 * KMF_REDZONE, and KMF_CONTENTS (the last because it is only enabled
4528 * if KMF_AUDIT is set). We should warn the user about the performance
4529 * penalty of KMF_AUDIT or KMF_DEADBEEF if they are set and KMF_LITE
4530 * isn't set (since that disables AUDIT).
4531 */
4532 if (!(kmem_flags & KMF_LITE) &&
4533 (kmem_flags & (KMF_AUDIT | KMF_DEADBEEF)) != 0)
4534 cmn_err(CE_WARN, "High-overhead kmem debugging features "
4535 "enabled (kmem_flags = 0x%x). Performance degradation "
4536 "and large memory overhead possible. See the Solaris "
4537 "Tunable Parameters Reference Manual.", kmem_flags);
4538 #endif /* not DEBUG */
4539
4540 kmem_cache_applyall(kmem_cache_magazine_enable, NULL, TQ_SLEEP);
4541
4542 kmem_ready = 1;
4543
4544 /*
4545 * Initialize the platform-specific aligned/DMA memory allocator.
4546 */
4547 ka_init();
4548
4549 /*
4550 * Initialize 32-bit ID cache.
4551 */
4552 id32_init();
4553
4554 /*
4555 * Initialize the networking stack so modules loaded can
4556 * register their callbacks.
4557 */
4558 netstack_init();
4559 }
4560
4561 static void
4562 kmem_move_init(void)
4563 {
4564 kmem_defrag_cache = kmem_cache_create("kmem_defrag_cache",
4565 sizeof (kmem_defrag_t), 0, NULL, NULL, NULL, NULL,
4566 kmem_msb_arena, KMC_NOHASH);
4567 kmem_move_cache = kmem_cache_create("kmem_move_cache",
4568 sizeof (kmem_move_t), 0, NULL, NULL, NULL, NULL,
4569 kmem_msb_arena, KMC_NOHASH);
4570
4571 /*
4572 * kmem guarantees that move callbacks are sequential and that even
4573 * across multiple caches no two moves ever execute simultaneously.
4574 * Move callbacks are processed on a separate taskq so that client code
4575 * does not interfere with internal maintenance tasks.
4576 */
4577 kmem_move_taskq = taskq_create_instance("kmem_move_taskq", 0, 1,
4578 minclsyspri, 100, INT_MAX, TASKQ_PREPOPULATE);
4579 }
4580
4581 void
4582 kmem_thread_init(void)
4583 {
4584 kmem_move_init();
4585 kmem_taskq = taskq_create_instance("kmem_taskq", 0, 1, minclsyspri,
4586 300, INT_MAX, TASKQ_PREPOPULATE);
4587 }
4588
4589 void
4590 kmem_mp_init(void)
4591 {
4592 mutex_enter(&cpu_lock);
4593 register_cpu_setup_func(kmem_cpu_setup, NULL);
4594 mutex_exit(&cpu_lock);
4595
4596 kmem_update_timeout(NULL);
4597
4598 taskq_mp_init();
4599 }
4600
4601 /*
4602 * Return the slab of the allocated buffer, or NULL if the buffer is not
4603 * allocated. This function may be called with a known slab address to determine
4604 * whether or not the buffer is allocated, or with a NULL slab address to obtain
4605 * an allocated buffer's slab.
4606 */
4607 static kmem_slab_t *
4608 kmem_slab_allocated(kmem_cache_t *cp, kmem_slab_t *sp, void *buf)
4609 {
4610 kmem_bufctl_t *bcp, *bufbcp;
4611
4612 ASSERT(MUTEX_HELD(&cp->cache_lock));
4613 ASSERT(sp == NULL || KMEM_SLAB_MEMBER(sp, buf));
4614
4615 if (cp->cache_flags & KMF_HASH) {
4616 for (bcp = *KMEM_HASH(cp, buf);
4617 (bcp != NULL) && (bcp->bc_addr != buf);
4618 bcp = bcp->bc_next) {
4619 continue;
4620 }
4621 ASSERT(sp != NULL && bcp != NULL ? sp == bcp->bc_slab : 1);
4622 return (bcp == NULL ? NULL : bcp->bc_slab);
4623 }
4624
4625 if (sp == NULL) {
4626 sp = KMEM_SLAB(cp, buf);
4627 }
4628 bufbcp = KMEM_BUFCTL(cp, buf);
4629 for (bcp = sp->slab_head;
4630 (bcp != NULL) && (bcp != bufbcp);
4631 bcp = bcp->bc_next) {
4632 continue;
4633 }
4634 return (bcp == NULL ? sp : NULL);
4635 }
4636
4637 static boolean_t
4638 kmem_slab_is_reclaimable(kmem_cache_t *cp, kmem_slab_t *sp, int flags)
4639 {
4640 long refcnt = sp->slab_refcnt;
4641
4642 ASSERT(cp->cache_defrag != NULL);
4643
4644 /*
4645 * For code coverage we want to be able to move an object within the
4646 * same slab (the only partial slab) even if allocating the destination
4647 * buffer resulted in a completely allocated slab.
4648 */
4649 if (flags & KMM_DEBUG) {
4650 return ((flags & KMM_DESPERATE) ||
4651 ((sp->slab_flags & KMEM_SLAB_NOMOVE) == 0));
4652 }
4653
4654 /* If we're desperate, we don't care if the client said NO. */
4655 if (flags & KMM_DESPERATE) {
4656 return (refcnt < sp->slab_chunks); /* any partial */
4657 }
4658
4659 if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
4660 return (B_FALSE);
4661 }
4662
4663 if ((refcnt == 1) || kmem_move_any_partial) {
4664 return (refcnt < sp->slab_chunks);
4665 }
4666
4667 /*
4668 * The reclaim threshold is adjusted at each kmem_cache_scan() so that
4669 * slabs with a progressively higher percentage of used buffers can be
4670 * reclaimed until the cache as a whole is no longer fragmented.
4671 *
4672 * sp->slab_refcnt kmd_reclaim_numer
4673 * --------------- < ------------------
4674 * sp->slab_chunks KMEM_VOID_FRACTION
4675 */
4676 return ((refcnt * KMEM_VOID_FRACTION) <
4677 (sp->slab_chunks * cp->cache_defrag->kmd_reclaim_numer));
4678 }
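
/*
 * Worked example of the threshold above, assuming KMEM_VOID_FRACTION is
 * 8: for a slab with 32 chunks, kmd_reclaim_numer == 1 accepts only
 * slabs with fewer than 4 allocated buffers (under 1/8 full), while
 * raising the numerator to 3 accepts slabs with up to 11 allocated
 * buffers (under 3/8 full).
 */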
4679
4680 static void *
4681 kmem_hunt_mag(kmem_cache_t *cp, kmem_magazine_t *m, int n, void *buf,
4682 void *tbuf)
4683 {
4684 int i; /* magazine round index */
4685
4686 for (i = 0; i < n; i++) {
4687 if (buf == m->mag_round[i]) {
4688 if (cp->cache_flags & KMF_BUFTAG) {
4689 (void) kmem_cache_free_debug(cp, tbuf,
4690 caller());
4691 }
4692 m->mag_round[i] = tbuf;
4693 return (buf);
4694 }
4695 }
4696
4697 return (NULL);
4698 }
4699
4700 /*
4701 * Hunt the magazine layer for the given buffer. If found, the buffer is
4702 * removed from the magazine layer and returned, otherwise NULL is returned.
 * The returned buffer is in the freed, constructed state.
4704 */
4705 static void *
4706 kmem_hunt_mags(kmem_cache_t *cp, void *buf)
4707 {
4708 kmem_cpu_cache_t *ccp;
4709 kmem_magazine_t *m;
4710 int cpu_seqid;
4711 int n; /* magazine rounds */
4712 void *tbuf; /* temporary swap buffer */
4713
4714 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4715
4716 /*
	 * Allocate a buffer to swap with the one we hope to pull out of a
4718 * magazine when found.
4719 */
4720 tbuf = kmem_cache_alloc(cp, KM_NOSLEEP);
4721 if (tbuf == NULL) {
4722 KMEM_STAT_ADD(kmem_move_stats.kms_hunt_alloc_fail);
4723 return (NULL);
4724 }
4725 if (tbuf == buf) {
4726 KMEM_STAT_ADD(kmem_move_stats.kms_hunt_lucky);
4727 if (cp->cache_flags & KMF_BUFTAG) {
4728 (void) kmem_cache_free_debug(cp, buf, caller());
4729 }
4730 return (buf);
4731 }
4732
4733 /* Hunt the depot. */
4734 mutex_enter(&cp->cache_depot_lock);
4735 n = cp->cache_magtype->mt_magsize;
4736 for (m = cp->cache_full.ml_list; m != NULL; m = m->mag_next) {
4737 if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) {
4738 mutex_exit(&cp->cache_depot_lock);
4739 return (buf);
4740 }
4741 }
4742 mutex_exit(&cp->cache_depot_lock);
4743
4744 /* Hunt the per-CPU magazines. */
4745 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
4746 ccp = &cp->cache_cpu[cpu_seqid];
4747
4748 mutex_enter(&ccp->cc_lock);
4749 m = ccp->cc_loaded;
4750 n = ccp->cc_rounds;
4751 if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) {
4752 mutex_exit(&ccp->cc_lock);
4753 return (buf);
4754 }
4755 m = ccp->cc_ploaded;
4756 n = ccp->cc_prounds;
4757 if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) {
4758 mutex_exit(&ccp->cc_lock);
4759 return (buf);
4760 }
4761 mutex_exit(&ccp->cc_lock);
4762 }
4763
4764 kmem_cache_free(cp, tbuf);
4765 return (NULL);
4766 }
4767
4768 /*
4769 * May be called from the kmem_move_taskq, from kmem_cache_move_notify_task(),
4770 * or when the buffer is freed.
4771 */
4772 static void
4773 kmem_slab_move_yes(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4774 {
4775 ASSERT(MUTEX_HELD(&cp->cache_lock));
4776 ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));
4777
4778 if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4779 return;
4780 }
4781
4782 if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
4783 if (KMEM_SLAB_OFFSET(sp, from_buf) == sp->slab_stuck_offset) {
4784 avl_remove(&cp->cache_partial_slabs, sp);
4785 sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
4786 sp->slab_stuck_offset = (uint32_t)-1;
4787 avl_add(&cp->cache_partial_slabs, sp);
4788 }
4789 } else {
4790 sp->slab_later_count = 0;
4791 sp->slab_stuck_offset = (uint32_t)-1;
4792 }
4793 }
4794
4795 static void
4796 kmem_slab_move_no(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4797 {
4798 ASSERT(taskq_member(kmem_move_taskq, curthread));
4799 ASSERT(MUTEX_HELD(&cp->cache_lock));
4800 ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));
4801
4802 if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4803 return;
4804 }
4805
4806 avl_remove(&cp->cache_partial_slabs, sp);
4807 sp->slab_later_count = 0;
4808 sp->slab_flags |= KMEM_SLAB_NOMOVE;
4809 sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp, from_buf);
4810 avl_add(&cp->cache_partial_slabs, sp);
4811 }
4812
4813 static void kmem_move_end(kmem_cache_t *, kmem_move_t *);
4814
4815 /*
4816 * The move callback takes two buffer addresses, the buffer to be moved, and a
4817 * newly allocated and constructed buffer selected by kmem as the destination.
4818 * It also takes the size of the buffer and an optional user argument specified
4819 * at cache creation time. kmem guarantees that the buffer to be moved has not
4820 * been unmapped by the virtual memory subsystem. Beyond that, it cannot
4821 * guarantee the present whereabouts of the buffer to be moved, so it is up to
4822 * the client to safely determine whether or not it is still using the buffer.
4823 * The client must not free either of the buffers passed to the move callback,
4824 * since kmem wants to free them directly to the slab layer. The client response
4825 * tells kmem which of the two buffers to free:
4826 *
4827 * YES kmem frees the old buffer (the move was successful)
4828 * NO kmem frees the new buffer, marks the slab of the old buffer
4829 * non-reclaimable to avoid bothering the client again
4830 * LATER kmem frees the new buffer, increments slab_later_count
4831 * DONT_KNOW kmem frees the new buffer, searches mags for the old buffer
4832 * DONT_NEED kmem frees both the old buffer and the new buffer
4833 *
4834 * The pending callback argument now being processed contains both of the
4835 * buffers (old and new) passed to the move callback function, the slab of the
4836 * old buffer, and flags related to the move request, such as whether or not the
4837 * system was desperate for memory.
4838 *
4839 * Slabs are not freed while there is a pending callback, but instead are kept
4840 * on a deadlist, which is drained after the last callback completes. This means
4841 * that slabs are safe to access until kmem_move_end(), no matter how many of
4842 * their buffers have been freed. Once slab_refcnt reaches zero, it stays at
4843 * zero for as long as the slab remains on the deadlist and until the slab is
4844 * freed.
4845 */
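/*
 * As a hedged illustration of the contract above (object_t, object_lock,
 * object_is_known(), and object_reparent() are hypothetical client
 * constructs, not part of kmem), a minimal callback might look like:
 *
 *	static kmem_cbrc_t
 *	object_move(void *old, void *new, size_t size, void *arg)
 *	{
 *		object_t *op = old, *np = new;
 *
 *		if (!mutex_tryenter(&object_lock))
 *			return (KMEM_CBRC_LATER);
 *		if (!object_is_known(op)) {
 *			mutex_exit(&object_lock);
 *			return (KMEM_CBRC_DONT_KNOW);
 *		}
 *		bcopy(op, np, size);
 *		object_reparent(op, np);
 *		mutex_exit(&object_lock);
 *		return (KMEM_CBRC_YES);
 *	}
 *
 * Note that the callback frees neither op nor np; the response alone tells
 * kmem which buffer to free.
 */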
4846 static void
4847 kmem_move_buffer(kmem_move_t *callback)
4848 {
4849 kmem_cbrc_t response;
4850 kmem_slab_t *sp = callback->kmm_from_slab;
4851 kmem_cache_t *cp = sp->slab_cache;
4852 boolean_t free_on_slab;
4853
4854 ASSERT(taskq_member(kmem_move_taskq, curthread));
4855 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4856 ASSERT(KMEM_SLAB_MEMBER(sp, callback->kmm_from_buf));
4857
4858 /*
4859 * The number of allocated buffers on the slab may have changed since we
4860 * last checked the slab's reclaimability (when the pending move was
4861 * enqueued), or the client may have responded NO when asked to move
4862 * another buffer on the same slab.
4863 */
4864 if (!kmem_slab_is_reclaimable(cp, sp, callback->kmm_flags)) {
4865 KMEM_STAT_ADD(kmem_move_stats.kms_no_longer_reclaimable);
4866 KMEM_STAT_COND_ADD((callback->kmm_flags & KMM_NOTIFY),
4867 kmem_move_stats.kms_notify_no_longer_reclaimable);
4868 kmem_slab_free(cp, callback->kmm_to_buf);
4869 kmem_move_end(cp, callback);
4870 return;
4871 }
4872
4873 /*
4874 * Hunting magazines is expensive, so we'll wait to do that until the
4875 * client responds KMEM_CBRC_DONT_KNOW. However, checking the slab layer
4876 * is cheap, so we might as well do that here in case we can avoid
4877 * bothering the client.
4878 */
4879 mutex_enter(&cp->cache_lock);
4880 free_on_slab = (kmem_slab_allocated(cp, sp,
4881 callback->kmm_from_buf) == NULL);
4882 mutex_exit(&cp->cache_lock);
4883
4884 if (free_on_slab) {
4885 KMEM_STAT_ADD(kmem_move_stats.kms_hunt_found_slab);
4886 kmem_slab_free(cp, callback->kmm_to_buf);
4887 kmem_move_end(cp, callback);
4888 return;
4889 }
4890
4891 if (cp->cache_flags & KMF_BUFTAG) {
4892 /*
4893 * Make kmem_cache_alloc_debug() apply the constructor for us.
4894 */
4895 if (kmem_cache_alloc_debug(cp, callback->kmm_to_buf,
4896 KM_NOSLEEP, 1, caller()) != 0) {
4897 KMEM_STAT_ADD(kmem_move_stats.kms_alloc_fail);
4898 kmem_move_end(cp, callback);
4899 return;
4900 }
4901 } else if (cp->cache_constructor != NULL &&
4902 cp->cache_constructor(callback->kmm_to_buf, cp->cache_private,
4903 KM_NOSLEEP) != 0) {
4904 atomic_inc_64(&cp->cache_alloc_fail);
4905 KMEM_STAT_ADD(kmem_move_stats.kms_constructor_fail);
4906 kmem_slab_free(cp, callback->kmm_to_buf);
4907 kmem_move_end(cp, callback);
4908 return;
4909 }
4910
4911 KMEM_STAT_ADD(kmem_move_stats.kms_callbacks);
4912 KMEM_STAT_COND_ADD((callback->kmm_flags & KMM_NOTIFY),
4913 kmem_move_stats.kms_notify_callbacks);
4914 cp->cache_defrag->kmd_callbacks++;
4915 cp->cache_defrag->kmd_thread = curthread;
4916 cp->cache_defrag->kmd_from_buf = callback->kmm_from_buf;
4917 cp->cache_defrag->kmd_to_buf = callback->kmm_to_buf;
4918 DTRACE_PROBE2(kmem__move__start, kmem_cache_t *, cp, kmem_move_t *,
4919 callback);
4920
4921 response = cp->cache_move(callback->kmm_from_buf,
4922 callback->kmm_to_buf, cp->cache_bufsize, cp->cache_private);
4923
4924 DTRACE_PROBE3(kmem__move__end, kmem_cache_t *, cp, kmem_move_t *,
4925 callback, kmem_cbrc_t, response);
4926 cp->cache_defrag->kmd_thread = NULL;
4927 cp->cache_defrag->kmd_from_buf = NULL;
4928 cp->cache_defrag->kmd_to_buf = NULL;
4929
4930 if (response == KMEM_CBRC_YES) {
4931 KMEM_STAT_ADD(kmem_move_stats.kms_yes);
4932 cp->cache_defrag->kmd_yes++;
4933 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4934 /* slab safe to access until kmem_move_end() */
4935 if (sp->slab_refcnt == 0)
4936 cp->cache_defrag->kmd_slabs_freed++;
4937 mutex_enter(&cp->cache_lock);
4938 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4939 mutex_exit(&cp->cache_lock);
4940 kmem_move_end(cp, callback);
4941 return;
4942 }
4943
4944 switch (response) {
4945 case KMEM_CBRC_NO:
4946 KMEM_STAT_ADD(kmem_move_stats.kms_no);
4947 cp->cache_defrag->kmd_no++;
4948 mutex_enter(&cp->cache_lock);
4949 kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4950 mutex_exit(&cp->cache_lock);
4951 break;
4952 case KMEM_CBRC_LATER:
4953 KMEM_STAT_ADD(kmem_move_stats.kms_later);
4954 cp->cache_defrag->kmd_later++;
4955 mutex_enter(&cp->cache_lock);
4956 if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4957 mutex_exit(&cp->cache_lock);
4958 break;
4959 }
4960
4961 if (++sp->slab_later_count >= KMEM_DISBELIEF) {
4962 KMEM_STAT_ADD(kmem_move_stats.kms_disbelief);
4963 kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4964 } else if (!(sp->slab_flags & KMEM_SLAB_NOMOVE)) {
4965 sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp,
4966 callback->kmm_from_buf);
4967 }
4968 mutex_exit(&cp->cache_lock);
4969 break;
4970 case KMEM_CBRC_DONT_NEED:
4971 KMEM_STAT_ADD(kmem_move_stats.kms_dont_need);
4972 cp->cache_defrag->kmd_dont_need++;
4973 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4974 if (sp->slab_refcnt == 0)
4975 cp->cache_defrag->kmd_slabs_freed++;
4976 mutex_enter(&cp->cache_lock);
4977 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4978 mutex_exit(&cp->cache_lock);
4979 break;
4980 case KMEM_CBRC_DONT_KNOW:
4981 KMEM_STAT_ADD(kmem_move_stats.kms_dont_know);
4982 cp->cache_defrag->kmd_dont_know++;
4983 if (kmem_hunt_mags(cp, callback->kmm_from_buf) != NULL) {
4984 KMEM_STAT_ADD(kmem_move_stats.kms_hunt_found_mag);
4985 cp->cache_defrag->kmd_hunt_found++;
4986 kmem_slab_free_constructed(cp, callback->kmm_from_buf,
4987 B_TRUE);
4988 if (sp->slab_refcnt == 0)
4989 cp->cache_defrag->kmd_slabs_freed++;
4990 mutex_enter(&cp->cache_lock);
4991 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4992 mutex_exit(&cp->cache_lock);
4993 }
4994 break;
4995 default:
4996 panic("'%s' (%p) unexpected move callback response %d\n",
4997 cp->cache_name, (void *)cp, response);
4998 }
4999
5000 kmem_slab_free_constructed(cp, callback->kmm_to_buf, B_FALSE);
5001 kmem_move_end(cp, callback);
5002 }
5003
5004 /* Return B_FALSE if there is insufficient memory for the move request. */
5005 static boolean_t
5006 kmem_move_begin(kmem_cache_t *cp, kmem_slab_t *sp, void *buf, int flags)
5007 {
5008 void *to_buf;
5009 avl_index_t index;
5010 kmem_move_t *callback, *pending;
5011 ulong_t n;
5012
5013 ASSERT(taskq_member(kmem_taskq, curthread));
5014 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
5015 ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
5016
5017 callback = kmem_cache_alloc(kmem_move_cache, KM_NOSLEEP);
5018 if (callback == NULL) {
5019 KMEM_STAT_ADD(kmem_move_stats.kms_callback_alloc_fail);
5020 return (B_FALSE);
5021 }
5022
5023 callback->kmm_from_slab = sp;
5024 callback->kmm_from_buf = buf;
5025 callback->kmm_flags = flags;
5026
5027 mutex_enter(&cp->cache_lock);
5028
5029 n = avl_numnodes(&cp->cache_partial_slabs);
5030 if ((n == 0) || ((n == 1) && !(flags & KMM_DEBUG))) {
5031 mutex_exit(&cp->cache_lock);
5032 kmem_cache_free(kmem_move_cache, callback);
5033 return (B_TRUE); /* there is no need for the move request */
5034 }
5035
5036 pending = avl_find(&cp->cache_defrag->kmd_moves_pending, buf, &index);
5037 if (pending != NULL) {
5038 /*
5039 * If the move is already pending and we're desperate now,
5040 * update the move flags.
5041 */
5042 if (flags & KMM_DESPERATE) {
5043 pending->kmm_flags |= KMM_DESPERATE;
5044 }
5045 mutex_exit(&cp->cache_lock);
5046 KMEM_STAT_ADD(kmem_move_stats.kms_already_pending);
5047 kmem_cache_free(kmem_move_cache, callback);
5048 return (B_TRUE);
5049 }
5050
5051 to_buf = kmem_slab_alloc_impl(cp, avl_first(&cp->cache_partial_slabs),
5052 B_FALSE);
5053 callback->kmm_to_buf = to_buf;
5054 avl_insert(&cp->cache_defrag->kmd_moves_pending, callback, index);
5055
5056 mutex_exit(&cp->cache_lock);
5057
5058 if (!taskq_dispatch(kmem_move_taskq, (task_func_t *)kmem_move_buffer,
5059 callback, TQ_NOSLEEP)) {
5060 KMEM_STAT_ADD(kmem_move_stats.kms_callback_taskq_fail);
5061 mutex_enter(&cp->cache_lock);
5062 avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
5063 mutex_exit(&cp->cache_lock);
5064 kmem_slab_free(cp, to_buf);
5065 kmem_cache_free(kmem_move_cache, callback);
5066 return (B_FALSE);
5067 }
5068
5069 return (B_TRUE);
5070 }
5071
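/*
 * Retire a completed move request: remove it from kmd_moves_pending and, if
 * it was the last one pending, drain the deadlist of slabs whose destruction
 * was deferred while callbacks were outstanding.
 */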
5072 static void
5073 kmem_move_end(kmem_cache_t *cp, kmem_move_t *callback)
5074 {
5075 avl_index_t index;
5076
5077 ASSERT(cp->cache_defrag != NULL);
5078 ASSERT(taskq_member(kmem_move_taskq, curthread));
5079 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
5080
5081 mutex_enter(&cp->cache_lock);
5082 VERIFY(avl_find(&cp->cache_defrag->kmd_moves_pending,
5083 callback->kmm_from_buf, &index) != NULL);
5084 avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
5085 if (avl_is_empty(&cp->cache_defrag->kmd_moves_pending)) {
5086 list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
5087 kmem_slab_t *sp;
5088
5089 /*
5090 * The last pending move completed. Release all slabs from the
5091 * front of the dead list except for any slab at the tail that
5092 * needs to be released from the context of kmem_move_buffers().
5093 * kmem deferred unmapping the buffers on these slabs in order
5094 * to guarantee that buffers passed to the move callback have
5095 * been touched only by kmem or by the client itself.
5096 */
5097 while ((sp = list_remove_head(deadlist)) != NULL) {
5098 if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
5099 list_insert_tail(deadlist, sp);
5100 break;
5101 }
5102 cp->cache_defrag->kmd_deadcount--;
5103 cp->cache_slab_destroy++;
5104 mutex_exit(&cp->cache_lock);
5105 kmem_slab_destroy(cp, sp);
5106 KMEM_STAT_ADD(kmem_move_stats.kms_dead_slabs_freed);
5107 mutex_enter(&cp->cache_lock);
5108 }
5109 }
5110 mutex_exit(&cp->cache_lock);
5111 kmem_cache_free(kmem_move_cache, callback);
5112 }
5113
5114 /*
5115 * Move buffers from least used slabs first by scanning backwards from the end
5116 * of the partial slab list. Scan at most max_scan candidate slabs and move
5117 * buffers from at most max_slabs slabs (0 for all partial slabs in both cases).
5118 * If desperate to reclaim memory, move buffers from any partial slab, otherwise
5119 * skip slabs with a ratio of allocated buffers at or above the current
5120 * threshold. Return the number of unskipped slabs (at most max_slabs, -1 if the
5121 * scan is aborted) so that the caller can adjust the reclaimability threshold
5122 * depending on how many reclaimable slabs it finds.
5123 *
5124 * kmem_move_buffers() drops and reacquires cache_lock every time it issues a
5125 * move request, since it is not valid for kmem_move_begin() to call
5126 * kmem_cache_alloc() or taskq_dispatch() with cache_lock held.
5127 */
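/*
 * For reference, the periodic scan in kmem_cache_scan() issues
 *
 *	kmem_move_buffers(cp, kmem_reclaim_scan_range,
 *	    kmem_reclaim_max_slabs, 0);
 *
 * while kmem_cache_defrag() passes max_slabs == 0 with KMM_DESPERATE so that
 * buffers are moved from all partial slabs; both callers appear below.
 */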
5128 static int
5129 kmem_move_buffers(kmem_cache_t *cp, size_t max_scan, size_t max_slabs,
5130 int flags)
5131 {
5132 kmem_slab_t *sp;
5133 void *buf;
5134 int i, j; /* slab index, buffer index */
5135 int s; /* reclaimable slabs */
5136 int b; /* allocated (movable) buffers on reclaimable slab */
5137 boolean_t success;
5138 int refcnt;
5139 int nomove;
5140
5141 ASSERT(taskq_member(kmem_taskq, curthread));
5142 ASSERT(MUTEX_HELD(&cp->cache_lock));
5143 ASSERT(kmem_move_cache != NULL);
5144 ASSERT(cp->cache_move != NULL && cp->cache_defrag != NULL);
5145 ASSERT((flags & KMM_DEBUG) ? !avl_is_empty(&cp->cache_partial_slabs) :
5146 avl_numnodes(&cp->cache_partial_slabs) > 1);
5147
5148 if (kmem_move_blocked) {
5149 return (0);
5150 }
5151
5152 if (kmem_move_fulltilt) {
5153 flags |= KMM_DESPERATE;
5154 }
5155
5156 if (max_scan == 0 || (flags & KMM_DESPERATE)) {
5157 /*
5158 * Scan as many slabs as needed to find the desired number of
5159 * candidate slabs.
5160 */
5161 max_scan = (size_t)-1;
5162 }
5163
5164 if (max_slabs == 0 || (flags & KMM_DESPERATE)) {
5165 /* Find as many candidate slabs as possible. */
5166 max_slabs = (size_t)-1;
5167 }
5168
5169 sp = avl_last(&cp->cache_partial_slabs);
5170 ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
5171 for (i = 0, s = 0; (i < max_scan) && (s < max_slabs) && (sp != NULL) &&
5172 ((sp != avl_first(&cp->cache_partial_slabs)) ||
5173 (flags & KMM_DEBUG));
5174 sp = AVL_PREV(&cp->cache_partial_slabs, sp), i++) {
5175
5176 if (!kmem_slab_is_reclaimable(cp, sp, flags)) {
5177 continue;
5178 }
5179 s++;
5180
5181 /* Look for allocated buffers to move. */
5182 for (j = 0, b = 0, buf = sp->slab_base;
5183 (j < sp->slab_chunks) && (b < sp->slab_refcnt);
5184 buf = (((char *)buf) + cp->cache_chunksize), j++) {
5185
5186 if (kmem_slab_allocated(cp, sp, buf) == NULL) {
5187 continue;
5188 }
5189
5190 b++;
5191
5192 /*
5193 * Prevent the slab from being destroyed while we drop
5194 * cache_lock and while the pending move is not yet
5195 * registered. Flag the pending move while
5196 * kmd_moves_pending may still be empty, since we can't
5197 * yet rely on a non-zero pending move count to prevent
5198 * the slab from being destroyed.
5199 */
5200 ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
5201 sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
5202 /*
			 * Save refcnt and nomove so that we can recheck them
			 * after reacquiring the lock. They control the order
			 * of the partial slab list, and we want to know
			 * whether we can pick up the scan where we left off.
5207 */
5208 refcnt = sp->slab_refcnt;
5209 nomove = (sp->slab_flags & KMEM_SLAB_NOMOVE);
5210 mutex_exit(&cp->cache_lock);
5211
5212 success = kmem_move_begin(cp, sp, buf, flags);
5213
5214 /*
5215 * Now, before the lock is reacquired, kmem could
5216 * process all pending move requests and purge the
5217 * deadlist, so that upon reacquiring the lock, sp has
5218 * been remapped. Or, the client may free all the
5219 * objects on the slab while the pending moves are still
5220 * on the taskq. Therefore, the KMEM_SLAB_MOVE_PENDING
5221 * flag causes the slab to be put at the end of the
5222 * deadlist and prevents it from being destroyed, since
5223 * we plan to destroy it here after reacquiring the
5224 * lock.
5225 */
5226 mutex_enter(&cp->cache_lock);
5227 ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
5228 sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;
5229
5230 if (sp->slab_refcnt == 0) {
5231 list_t *deadlist =
5232 &cp->cache_defrag->kmd_deadlist;
5233 list_remove(deadlist, sp);
5234
5235 if (!avl_is_empty(
5236 &cp->cache_defrag->kmd_moves_pending)) {
5237 /*
5238 * A pending move makes it unsafe to
5239 * destroy the slab, because even though
5240 * the move is no longer needed, the
5241 * context where that is determined
5242 * requires the slab to exist.
5243 * Fortunately, a pending move also
5244 * means we don't need to destroy the
5245 * slab here, since it will get
5246 * destroyed along with any other slabs
5247 * on the deadlist after the last
5248 * pending move completes.
5249 */
5250 list_insert_head(deadlist, sp);
5251 KMEM_STAT_ADD(kmem_move_stats.
5252 kms_endscan_slab_dead);
5253 return (-1);
5254 }
5255
5256 /*
5257 * Destroy the slab now if it was completely
5258 * freed while we dropped cache_lock and there
5259 * are no pending moves. Since slab_refcnt
5260 * cannot change once it reaches zero, no new
5261 * pending moves from that slab are possible.
5262 */
5263 cp->cache_defrag->kmd_deadcount--;
5264 cp->cache_slab_destroy++;
5265 mutex_exit(&cp->cache_lock);
5266 kmem_slab_destroy(cp, sp);
5267 KMEM_STAT_ADD(kmem_move_stats.
5268 kms_dead_slabs_freed);
5269 KMEM_STAT_ADD(kmem_move_stats.
5270 kms_endscan_slab_destroyed);
5271 mutex_enter(&cp->cache_lock);
5272 /*
5273 * Since we can't pick up the scan where we left
5274 * off, abort the scan and say nothing about the
5275 * number of reclaimable slabs.
5276 */
5277 return (-1);
5278 }
5279
5280 if (!success) {
5281 /*
5282 * Abort the scan if there is not enough memory
5283 * for the request and say nothing about the
5284 * number of reclaimable slabs.
5285 */
5286 KMEM_STAT_COND_ADD(s < max_slabs,
5287 kmem_move_stats.kms_endscan_nomem);
5288 return (-1);
5289 }
5290
5291 /*
			 * If the slab's position changed while the lock was
			 * dropped, then we no longer know where we are in the
			 * sequence and must abort the scan.
5295 */
5296 if (sp->slab_refcnt != refcnt) {
5297 /*
5298 * If this is a KMM_DEBUG move, the slab_refcnt
5299 * may have changed because we allocated a
5300 * destination buffer on the same slab. In that
5301 * case, we're not interested in counting it.
5302 */
5303 KMEM_STAT_COND_ADD(!(flags & KMM_DEBUG) &&
5304 (s < max_slabs),
5305 kmem_move_stats.kms_endscan_refcnt_changed);
5306 return (-1);
5307 }
5308 if ((sp->slab_flags & KMEM_SLAB_NOMOVE) != nomove) {
5309 KMEM_STAT_COND_ADD(s < max_slabs,
5310 kmem_move_stats.kms_endscan_nomove_changed);
5311 return (-1);
5312 }
5313
5314 /*
5315 * Generating a move request allocates a destination
5316 * buffer from the slab layer, bumping the first partial
5317 * slab if it is completely allocated. If the current
5318 * slab becomes the first partial slab as a result, we
5319 * can't continue to scan backwards.
5320 *
5321 * If this is a KMM_DEBUG move and we allocated the
5322 * destination buffer from the last partial slab, then
5323 * the buffer we're moving is on the same slab and our
5324 * slab_refcnt has changed, causing us to return before
5325 * reaching here if there are no partial slabs left.
5326 */
5327 ASSERT(!avl_is_empty(&cp->cache_partial_slabs));
5328 if (sp == avl_first(&cp->cache_partial_slabs)) {
5329 /*
5330 * We're not interested in a second KMM_DEBUG
5331 * move.
5332 */
5333 goto end_scan;
5334 }
5335 }
5336 }
5337 end_scan:
5338
5339 KMEM_STAT_COND_ADD(!(flags & KMM_DEBUG) &&
5340 (s < max_slabs) &&
5341 (sp == avl_first(&cp->cache_partial_slabs)),
5342 kmem_move_stats.kms_endscan_freelist);
5343
5344 return (s);
5345 }
5346
5347 typedef struct kmem_move_notify_args {
5348 kmem_cache_t *kmna_cache;
5349 void *kmna_buf;
5350 } kmem_move_notify_args_t;
5351
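/*
 * Process a move notification (see kmem_cache_move_notify() below) from
 * kmem_taskq context: if the named buffer is still allocated and its slab
 * was marked by an earlier refusal to move, reset that state and issue a
 * move request for the buffer.
 */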
5352 static void
5353 kmem_cache_move_notify_task(void *arg)
5354 {
5355 kmem_move_notify_args_t *args = arg;
5356 kmem_cache_t *cp = args->kmna_cache;
5357 void *buf = args->kmna_buf;
5358 kmem_slab_t *sp;
5359
5360 ASSERT(taskq_member(kmem_taskq, curthread));
5361 ASSERT(list_link_active(&cp->cache_link));
5362
5363 kmem_free(args, sizeof (kmem_move_notify_args_t));
5364 mutex_enter(&cp->cache_lock);
5365 sp = kmem_slab_allocated(cp, NULL, buf);
5366
5367 /* Ignore the notification if the buffer is no longer allocated. */
5368 if (sp == NULL) {
5369 mutex_exit(&cp->cache_lock);
5370 return;
5371 }
5372
5373 /* Ignore the notification if there's no reason to move the buffer. */
5374 if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
5375 /*
		 * So far the notification is not ignored. Ignore it unless
		 * the slab was marked by an earlier refusal to move a buffer
		 * (the NOMOVE flag or a nonzero slab_later_count); only then
		 * is there any state to reset.
5379 */
5380 if (!(sp->slab_flags & KMEM_SLAB_NOMOVE) &&
5381 (sp->slab_later_count == 0)) {
5382 mutex_exit(&cp->cache_lock);
5383 return;
5384 }
5385
5386 kmem_slab_move_yes(cp, sp, buf);
5387 ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
5388 sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
5389 mutex_exit(&cp->cache_lock);
5390 /* see kmem_move_buffers() about dropping the lock */
5391 (void) kmem_move_begin(cp, sp, buf, KMM_NOTIFY);
5392 mutex_enter(&cp->cache_lock);
5393 ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
5394 sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;
5395 if (sp->slab_refcnt == 0) {
5396 list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
5397 list_remove(deadlist, sp);
5398
5399 if (!avl_is_empty(
5400 &cp->cache_defrag->kmd_moves_pending)) {
5401 list_insert_head(deadlist, sp);
5402 mutex_exit(&cp->cache_lock);
5403 KMEM_STAT_ADD(kmem_move_stats.
5404 kms_notify_slab_dead);
5405 return;
5406 }
5407
5408 cp->cache_defrag->kmd_deadcount--;
5409 cp->cache_slab_destroy++;
5410 mutex_exit(&cp->cache_lock);
5411 kmem_slab_destroy(cp, sp);
5412 KMEM_STAT_ADD(kmem_move_stats.kms_dead_slabs_freed);
5413 KMEM_STAT_ADD(kmem_move_stats.
5414 kms_notify_slab_destroyed);
5415 return;
5416 }
5417 } else {
5418 kmem_slab_move_yes(cp, sp, buf);
5419 }
5420 mutex_exit(&cp->cache_lock);
5421 }
5422
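/*
 * Entry point for a client to announce that a buffer it previously declined
 * to move (NO or LATER) has become movable. The work is handed off to
 * kmem_taskq; since both the argument allocation and the dispatch are
 * NOSLEEP, the notification may be dropped under memory pressure, which is
 * acceptable because a notification is only a hint. A hedged usage sketch
 * (object_t, o_pinned, and object_cache are hypothetical client constructs):
 *
 *	op->o_pinned = B_FALSE;
 *	kmem_cache_move_notify(object_cache, op);
 */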
5423 void
5424 kmem_cache_move_notify(kmem_cache_t *cp, void *buf)
5425 {
5426 kmem_move_notify_args_t *args;
5427
5428 KMEM_STAT_ADD(kmem_move_stats.kms_notify);
5429 args = kmem_alloc(sizeof (kmem_move_notify_args_t), KM_NOSLEEP);
5430 if (args != NULL) {
5431 args->kmna_cache = cp;
5432 args->kmna_buf = buf;
5433 if (!taskq_dispatch(kmem_taskq,
5434 (task_func_t *)kmem_cache_move_notify_task, args,
5435 TQ_NOSLEEP))
5436 kmem_free(args, sizeof (kmem_move_notify_args_t));
5437 }
5438 }
5439
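/*
 * Unconditionally consolidate: move buffers from every partial slab
 * (KMM_DESPERATE skips the reclaimability threshold test), provided that the
 * cache has more than one partial slab.
 */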
5440 static void
5441 kmem_cache_defrag(kmem_cache_t *cp)
5442 {
5443 size_t n;
5444
5445 ASSERT(cp->cache_defrag != NULL);
5446
5447 mutex_enter(&cp->cache_lock);
5448 n = avl_numnodes(&cp->cache_partial_slabs);
5449 if (n > 1) {
5450 /* kmem_move_buffers() drops and reacquires cache_lock */
5451 KMEM_STAT_ADD(kmem_move_stats.kms_defrags);
5452 cp->cache_defrag->kmd_defrags++;
5453 (void) kmem_move_buffers(cp, n, 0, KMM_DESPERATE);
5454 }
5455 mutex_exit(&cp->cache_lock);
5456 }
5457
5458 /* Is this cache above the fragmentation threshold? */
5459 static boolean_t
5460 kmem_cache_frag_threshold(kmem_cache_t *cp, uint64_t nfree)
5461 {
5462 /*
5463 * nfree kmem_frag_numer
5464 * ------------------ > ---------------
5465 * cp->cache_buftotal kmem_frag_denom
5466 */
5467 return ((nfree * kmem_frag_denom) >
5468 (cp->cache_buftotal * kmem_frag_numer));
5469 }
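
/*
 * For example, if kmem_frag_numer / kmem_frag_denom were 1 / 8 (illustrative
 * values only, not necessarily the defaults), a cache with
 * cache_buftotal == 800 would cross the threshold once nfree exceeded 100:
 * 101 * 8 > 800 * 1.
 */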
5470
5471 static boolean_t
5472 kmem_cache_is_fragmented(kmem_cache_t *cp, boolean_t *doreap)
5473 {
5474 boolean_t fragmented;
5475 uint64_t nfree;
5476
5477 ASSERT(MUTEX_HELD(&cp->cache_lock));
5478 *doreap = B_FALSE;
5479
5480 if (kmem_move_fulltilt) {
5481 if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
5482 return (B_TRUE);
5483 }
5484 } else {
5485 if ((cp->cache_complete_slab_count + avl_numnodes(
5486 &cp->cache_partial_slabs)) < kmem_frag_minslabs) {
5487 return (B_FALSE);
5488 }
5489 }
5490
5491 nfree = cp->cache_bufslab;
5492 fragmented = ((avl_numnodes(&cp->cache_partial_slabs) > 1) &&
5493 kmem_cache_frag_threshold(cp, nfree));
5494
5495 /*
5496 * Free buffers in the magazine layer appear allocated from the point of
5497 * view of the slab layer. We want to know if the slab layer would
5498 * appear fragmented if we included free buffers from magazines that
5499 * have fallen out of the working set.
5500 */
5501 if (!fragmented) {
5502 long reap;
5503
5504 mutex_enter(&cp->cache_depot_lock);
5505 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
5506 reap = MIN(reap, cp->cache_full.ml_total);
5507 mutex_exit(&cp->cache_depot_lock);
5508
5509 nfree += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
5510 if (kmem_cache_frag_threshold(cp, nfree)) {
5511 *doreap = B_TRUE;
5512 }
5513 }
5514
5515 return (fragmented);
5516 }
5517
5518 /* Called periodically from kmem_taskq */
5519 static void
5520 kmem_cache_scan(kmem_cache_t *cp)
5521 {
5522 boolean_t reap = B_FALSE;
5523 kmem_defrag_t *kmd;
5524
5525 ASSERT(taskq_member(kmem_taskq, curthread));
5526
5527 mutex_enter(&cp->cache_lock);
5528
5529 kmd = cp->cache_defrag;
5530 if (kmd->kmd_consolidate > 0) {
5531 kmd->kmd_consolidate--;
5532 mutex_exit(&cp->cache_lock);
5533 kmem_cache_reap(cp);
5534 return;
5535 }
5536
5537 if (kmem_cache_is_fragmented(cp, &reap)) {
		int slabs_found;	/* may be -1; see kmem_move_buffers() */
5539
5540 /*
5541 * Consolidate reclaimable slabs from the end of the partial
5542 * slab list (scan at most kmem_reclaim_scan_range slabs to find
5543 * reclaimable slabs). Keep track of how many candidate slabs we
5544 * looked for and how many we actually found so we can adjust
5545 * the definition of a candidate slab if we're having trouble
5546 * finding them.
5547 *
5548 * kmem_move_buffers() drops and reacquires cache_lock.
5549 */
5550 KMEM_STAT_ADD(kmem_move_stats.kms_scans);
5551 kmd->kmd_scans++;
5552 slabs_found = kmem_move_buffers(cp, kmem_reclaim_scan_range,
5553 kmem_reclaim_max_slabs, 0);
5554 if (slabs_found >= 0) {
5555 kmd->kmd_slabs_sought += kmem_reclaim_max_slabs;
5556 kmd->kmd_slabs_found += slabs_found;
5557 }
5558
5559 if (++kmd->kmd_tries >= kmem_reclaim_scan_range) {
5560 kmd->kmd_tries = 0;
5561
5562 /*
5563 * If we had difficulty finding candidate slabs in
5564 * previous scans, adjust the threshold so that
5565 * candidates are easier to find.
5566 */
5567 if (kmd->kmd_slabs_found == kmd->kmd_slabs_sought) {
5568 kmem_adjust_reclaim_threshold(kmd, -1);
5569 } else if ((kmd->kmd_slabs_found * 2) <
5570 kmd->kmd_slabs_sought) {
5571 kmem_adjust_reclaim_threshold(kmd, 1);
5572 }
5573 kmd->kmd_slabs_sought = 0;
5574 kmd->kmd_slabs_found = 0;
5575 }
5576 } else {
5577 kmem_reset_reclaim_threshold(cp->cache_defrag);
5578 #ifdef DEBUG
5579 if (!avl_is_empty(&cp->cache_partial_slabs)) {
5580 /*
5581 * In a debug kernel we want the consolidator to
5582 * run occasionally even when there is plenty of
5583 * memory.
5584 */
5585 uint16_t debug_rand;
5586
5587 (void) random_get_bytes((uint8_t *)&debug_rand, 2);
5588 if (!kmem_move_noreap &&
5589 ((debug_rand % kmem_mtb_reap) == 0)) {
5590 mutex_exit(&cp->cache_lock);
5591 KMEM_STAT_ADD(kmem_move_stats.kms_debug_reaps);
5592 kmem_cache_reap(cp);
5593 return;
5594 } else if ((debug_rand % kmem_mtb_move) == 0) {
5595 KMEM_STAT_ADD(kmem_move_stats.kms_scans);
5596 KMEM_STAT_ADD(kmem_move_stats.kms_debug_scans);
5597 kmd->kmd_scans++;
5598 (void) kmem_move_buffers(cp,
5599 kmem_reclaim_scan_range, 1, KMM_DEBUG);
5600 }
5601 }
5602 #endif /* DEBUG */
5603 }
5604
5605 mutex_exit(&cp->cache_lock);
5606
5607 if (reap) {
5608 KMEM_STAT_ADD(kmem_move_stats.kms_scan_depot_ws_reaps);
5609 kmem_depot_ws_reap(cp);
5610 }
5611 }