9525 kmem_dump_size is a corrupting influence
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Richard Lowe <richlowe@richlowe.net>
Approved by: Dan McDonald <danmcd@joyent.com>
    
      
    
          --- old/usr/src/uts/common/sys/kmem_impl.h
          +++ new/usr/src/uts/common/sys/kmem_impl.h
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  
  22   22  /*
  23   23   * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
       24 + * Copyright 2018 Joyent, Inc.
  24   25   */
  25   26  
  26   27  #ifndef _SYS_KMEM_IMPL_H
  27   28  #define _SYS_KMEM_IMPL_H
  28   29  
  29   30  #include <sys/kmem.h>
  30   31  #include <sys/vmem.h>
  31   32  #include <sys/thread.h>
  32   33  #include <sys/t_lock.h>
  33   34  #include <sys/time.h>
  34   35  #include <sys/kstat.h>
  35   36  #include <sys/cpuvar.h>
  36   37  #include <sys/systm.h>
  37   38  #include <vm/page.h>
  38   39  #include <sys/avl.h>
  39   40  #include <sys/list.h>
  40   41  
  41   42  #ifdef  __cplusplus
  42   43  extern "C" {
  43   44  #endif
  44   45  
  45   46  /*
  46   47   * kernel memory allocator: implementation-private data structures
  47   48   *
  48   49   * Lock order:
  49   50   * 1. cache_lock
  50   51   * 2. cc_lock in order by CPU ID
  51   52   * 3. cache_depot_lock
  52   53   *
  53   54   * Do not call kmem_cache_alloc() or taskq_dispatch() while holding any of the
  54   55   * above locks.
  55   56   */
  56   57  
  57   58  #define KMF_AUDIT       0x00000001      /* transaction auditing */
  58   59  #define KMF_DEADBEEF    0x00000002      /* deadbeef checking */
  59   60  #define KMF_REDZONE     0x00000004      /* redzone checking */
  60   61  #define KMF_CONTENTS    0x00000008      /* freed-buffer content logging */
  61   62  #define KMF_STICKY      0x00000010      /* if set, override /etc/system */
  62   63  #define KMF_NOMAGAZINE  0x00000020      /* disable per-cpu magazines */
  63   64  #define KMF_FIREWALL    0x00000040      /* put all bufs before unmapped pages */
  64   65  #define KMF_LITE        0x00000100      /* lightweight debugging */
  65   66  
  66   67  #define KMF_HASH        0x00000200      /* cache has hash table */
  67   68  #define KMF_RANDOMIZE   0x00000400      /* randomize other kmem_flags */
  68   69  
  69   70  #define KMF_DUMPDIVERT  0x00001000      /* use alternate memory at dump time */
  70   71  #define KMF_DUMPUNSAFE  0x00002000      /* flag caches used at dump time */
  71   72  #define KMF_PREFILL     0x00004000      /* Prefill the slab when created. */
  72   73  
  73   74  #define KMF_BUFTAG      (KMF_DEADBEEF | KMF_REDZONE)
  74   75  #define KMF_TOUCH       (KMF_BUFTAG | KMF_LITE | KMF_CONTENTS)
  75   76  #define KMF_RANDOM      (KMF_TOUCH | KMF_AUDIT | KMF_NOMAGAZINE)
  76   77  #define KMF_DEBUG       (KMF_RANDOM | KMF_FIREWALL)
  77   78  
  78   79  #define KMEM_STACK_DEPTH        15
  79   80  
  80   81  #define KMEM_FREE_PATTERN               0xdeadbeefdeadbeefULL
  81   82  #define KMEM_UNINITIALIZED_PATTERN      0xbaddcafebaddcafeULL
  82   83  #define KMEM_REDZONE_PATTERN            0xfeedfacefeedfaceULL
  83   84  #define KMEM_REDZONE_BYTE               0xbb
  84   85  
  85   86  /*
  86   87   * Redzone size encodings for kmem_alloc() / kmem_free().  We encode the
  87   88   * allocation size, rather than storing it directly, so that kmem_free()
  88   89   * can distinguish frees of the wrong size from redzone violations.
  89   90   *
  90   91   * A size of zero is never valid.
  91   92   */
  92   93  #define KMEM_SIZE_ENCODE(x)     (251 * (x) + 1)
  93   94  #define KMEM_SIZE_DECODE(x)     ((x) / 251)
  94   95  #define KMEM_SIZE_VALID(x)      ((x) % 251 == 1 && (x) != 1)
  95   96  
  96   97  
  97   98  #define KMEM_ALIGN              8       /* min guaranteed alignment */
  98   99  #define KMEM_ALIGN_SHIFT        3       /* log2(KMEM_ALIGN) */
  99  100  #define KMEM_VOID_FRACTION      8       /* never waste more than 1/8 of slab */
 100  101  
 101  102  #define KMEM_SLAB_IS_PARTIAL(sp)                \
 102  103          ((sp)->slab_refcnt > 0 && (sp)->slab_refcnt < (sp)->slab_chunks)
 103  104  #define KMEM_SLAB_IS_ALL_USED(sp)               \
 104  105          ((sp)->slab_refcnt == (sp)->slab_chunks)
 105  106  
 106  107  /*
 107  108   * The bufctl (buffer control) structure keeps some minimal information
 108  109   * about each buffer: its address, its slab, and its current linkage,
 109  110   * which is either on the slab's freelist (if the buffer is free), or
 110  111   * on the cache's buf-to-bufctl hash table (if the buffer is allocated).
 111  112   * In the case of non-hashed, or "raw", caches (the common case), only
 112  113   * the freelist linkage is necessary: the buffer address is at a fixed
 113  114   * offset from the bufctl address, and the slab is at the end of the page.
 114  115   *
 115  116   * NOTE: bc_next must be the first field; raw buffers have linkage only.
 116  117   */
 117  118  typedef struct kmem_bufctl {
 118  119          struct kmem_bufctl      *bc_next;       /* next bufctl struct */
 119  120          void                    *bc_addr;       /* address of buffer */
 120  121          struct kmem_slab        *bc_slab;       /* controlling slab */
 121  122  } kmem_bufctl_t;
 122  123  
 123  124  /*
 124  125   * The KMF_AUDIT version of the bufctl structure.  The beginning of this
 125  126   * structure must be identical to the normal bufctl structure so that
 126  127   * pointers are interchangeable.
 127  128   */
 128  129  typedef struct kmem_bufctl_audit {
 129  130          struct kmem_bufctl      *bc_next;       /* next bufctl struct */
 130  131          void                    *bc_addr;       /* address of buffer */
 131  132          struct kmem_slab        *bc_slab;       /* controlling slab */
 132  133          kmem_cache_t            *bc_cache;      /* controlling cache */
 133  134          hrtime_t                bc_timestamp;   /* transaction time */
 134  135          kthread_t               *bc_thread;     /* thread doing transaction */
 135  136          struct kmem_bufctl      *bc_lastlog;    /* last log entry */
 136  137          void                    *bc_contents;   /* contents at last free */
 137  138          int                     bc_depth;       /* stack depth */
 138  139          pc_t                    bc_stack[KMEM_STACK_DEPTH];     /* pc stack */
 139  140  } kmem_bufctl_audit_t;
 140  141  
 141  142  /*
 142  143   * A kmem_buftag structure is appended to each buffer whenever any of the
 143  144   * KMF_BUFTAG flags (KMF_DEADBEEF, KMF_REDZONE, KMF_VERIFY) are set.
 144  145   */
 145  146  typedef struct kmem_buftag {
 146  147          uint64_t                bt_redzone;     /* 64-bit redzone pattern */
 147  148          kmem_bufctl_t           *bt_bufctl;     /* bufctl */
 148  149          intptr_t                bt_bxstat;      /* bufctl ^ (alloc/free) */
 149  150  } kmem_buftag_t;
 150  151  
 151  152  /*
 152  153   * A variant of the kmem_buftag structure used for KMF_LITE caches.
 153  154   * Previous callers are stored in reverse chronological order. (i.e. most
 154  155   * recent first)
 155  156   */
 156  157  typedef struct kmem_buftag_lite {
 157  158          kmem_buftag_t           bt_buftag;      /* a normal buftag */
 158  159          pc_t                    bt_history[1];  /* zero or more callers */
 159  160  } kmem_buftag_lite_t;
 160  161  
 161  162  #define KMEM_BUFTAG_LITE_SIZE(f)        \
 162  163          (offsetof(kmem_buftag_lite_t, bt_history[f]))
 163  164  
 164  165  #define KMEM_BUFTAG(cp, buf)            \
 165  166          ((kmem_buftag_t *)((char *)(buf) + (cp)->cache_buftag))
 166  167  
 167  168  #define KMEM_BUFCTL(cp, buf)            \
 168  169          ((kmem_bufctl_t *)((char *)(buf) + (cp)->cache_bufctl))
 169  170  
 170  171  #define KMEM_BUF(cp, bcp)               \
 171  172          ((void *)((char *)(bcp) - (cp)->cache_bufctl))
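For a raw cache these two conversions are exact inverses (the bufctl lives at a
fixed offset from its buffer), which is what lets the freelist link bufctls
while clients only ever see buffer addresses.  A minimal sanity sketch, purely
illustrative:

    kmem_bufctl_t *bcp = KMEM_BUFCTL(cp, buf);  /* buf + cache_bufctl */
    ASSERT(KMEM_BUF(cp, bcp) == buf);           /* bcp - cache_bufctl */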
 172  173  
 173  174  #define KMEM_SLAB(cp, buf)              \
 174  175          ((kmem_slab_t *)P2END((uintptr_t)(buf), (cp)->cache_slabsize) - 1)
 175  176  
 176  177  /*
 177  178   * Test for using alternate memory at dump time.
 178  179   */
 179  180  #define KMEM_DUMP(cp)           ((cp)->cache_flags & KMF_DUMPDIVERT)
 180  181  #define KMEM_DUMPCC(ccp)        ((ccp)->cc_flags & KMF_DUMPDIVERT)
 181  182  
 182  183  /*
 183  184   * The "CPU" macro loads a cpu_t that refers to the cpu that the current
 184  185   * thread is running on at the time the macro is executed.  A context switch
 185  186   * may occur immediately after loading this data structure, leaving this
 186  187   * thread pointing at the cpu_t for the previous cpu.  This is not a problem;
 187  188   * we'd just end up checking the previous cpu's per-cpu cache, and then check
 188  189   * the other layers of the kmem cache if need be.
 189  190   *
 190  191   * It's not even a problem if the old cpu gets DR'ed out during the context
 191  192   * switch.  The cpu-remove DR operation bzero()s the cpu_t, but doesn't free
 192  193   * it.  So the cpu_t's cpu_cache_offset would read as 0, causing us to use
 193  194   * cpu 0's per-cpu cache.
 194  195   *
 195  196   * So, there is no need to disable kernel preemption while using the CPU macro
 196  197   * below since if we have been context switched, there will not be any
 197  198   * correctness problem, just a momentary use of a different per-cpu cache.
 198  199   */
 199  200  
 200  201  #define KMEM_CPU_CACHE(cp)                                              \
 201  202          ((kmem_cpu_cache_t *)((char *)(&cp->cache_cpu) + CPU->cpu_cache_offset))
 202  203  
 203  204  #define KMEM_MAGAZINE_VALID(cp, mp)     \
 204  205          (((kmem_slab_t *)P2END((uintptr_t)(mp), PAGESIZE) - 1)->slab_cache == \
 205  206              (cp)->cache_magtype->mt_cache)
 206  207  
 207  208  #define KMEM_SLAB_OFFSET(sp, buf)       \
 208  209          ((size_t)((uintptr_t)(buf) - (uintptr_t)((sp)->slab_base)))
 209  210  
 210  211  #define KMEM_SLAB_MEMBER(sp, buf)       \
 211  212          (KMEM_SLAB_OFFSET(sp, buf) < (sp)->slab_cache->cache_slabsize)
 212  213  
 213  214  #define KMEM_BUFTAG_ALLOC       0xa110c8edUL
 214  215  #define KMEM_BUFTAG_FREE        0xf4eef4eeUL
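These patterns pair with bt_bxstat ("bufctl ^ (alloc/free)" above).  A
simplified sketch of the alloc-side check, loosely following
kmem_cache_alloc_debug() in kmem.c:

    kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
    kmem_bufctl_t *bcp = btp->bt_bufctl;

    /* a buffer coming off the freelist must still carry the free stamp */
    if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_FREE))
            kmem_error(KMERR_BADBUFTAG, cp, buf);

    /* restamp it as allocated before handing it out */
    btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_ALLOC;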
 215  216  
 216  217  /* slab_later_count thresholds */
 217  218  #define KMEM_DISBELIEF          3
 218  219  
 219  220  /* slab_flags */
 220  221  #define KMEM_SLAB_NOMOVE        0x1
 221  222  #define KMEM_SLAB_MOVE_PENDING  0x2
 222  223  
 223  224  typedef struct kmem_slab {
 224  225          struct kmem_cache       *slab_cache;    /* controlling cache */
 225  226          void                    *slab_base;     /* base of allocated memory */
 226  227          avl_node_t              slab_link;      /* slab linkage */
 227  228          struct kmem_bufctl      *slab_head;     /* first free buffer */
 228  229          long                    slab_refcnt;    /* outstanding allocations */
 229  230          long                    slab_chunks;    /* chunks (bufs) in this slab */
 230  231          uint32_t                slab_stuck_offset; /* unmoved buffer offset */
 231  232          uint16_t                slab_later_count; /* cf KMEM_CBRC_LATER */
 232  233          uint16_t                slab_flags;     /* bits to mark the slab */
 233  234  } kmem_slab_t;
 234  235  
 235  236  #define KMEM_HASH_INITIAL       64
 236  237  
 237  238  #define KMEM_HASH(cp, buf)      \
 238  239          ((cp)->cache_hash_table +       \
 239  240          (((uintptr_t)(buf) >> (cp)->cache_hash_shift) & (cp)->cache_hash_mask))
 240  241  
 241  242  typedef struct kmem_magazine {
 242  243          void    *mag_next;
 243  244          void    *mag_round[1];          /* one or more rounds */
 244  245  } kmem_magazine_t;
 245  246  
 246  247  /*
 247  248   * The magazine types for fast per-cpu allocation
 248  249   */
 249  250  typedef struct kmem_magtype {
 250  251          short           mt_magsize;     /* magazine size (number of rounds) */
 251  252          int             mt_align;       /* magazine alignment */
 252  253          size_t          mt_minbuf;      /* all smaller buffers qualify */
 253  254          size_t          mt_maxbuf;      /* no larger buffers qualify */
 254  255          kmem_cache_t    *mt_cache;      /* magazine cache */
 255  256  } kmem_magtype_t;
 256  257  
 257  258  #define KMEM_CPU_CACHE_SIZE     64      /* must be power of 2 */
 258  259  #define KMEM_CPU_PAD            (KMEM_CPU_CACHE_SIZE - sizeof (kmutex_t) - \
 259  260          2 * sizeof (uint64_t) - 2 * sizeof (void *) - sizeof (int) - \
 260  261          5 * sizeof (short))
 261  262  #define KMEM_CACHE_SIZE(ncpus)  \
 262  263          ((size_t)(&((kmem_cache_t *)0)->cache_cpu[ncpus]))
 263  264  
 264  265  /* Offset from kmem_cache->cache_cpu for per cpu caches */
 265  266  #define KMEM_CPU_CACHE_OFFSET(cpuid)                                    \
 266  267          ((size_t)(&((kmem_cache_t *)0)->cache_cpu[cpuid]) -             \
 267  268          (size_t)(&((kmem_cache_t *)0)->cache_cpu))
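Both macros are offsetof-style arithmetic over the trailing cache_cpu[] array:
KMEM_CACHE_SIZE(n) is the byte offset of cache_cpu[n], i.e. the allocation size
for a cache with n per-CPU slots, and KMEM_CPU_CACHE_OFFSET(cpuid) reduces to
cpuid * sizeof (kmem_cpu_cache_t).  That per-CPU byte offset is what
cpu_cache_offset holds, so the KMEM_CPU_CACHE() macro above can reach a CPU's
slot with a single add.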
 268  269  
 269  270  typedef struct kmem_cpu_cache {
 270  271          kmutex_t        cc_lock;        /* protects this cpu's local cache */
 271  272          uint64_t        cc_alloc;       /* allocations from this cpu */
 272  273          uint64_t        cc_free;        /* frees to this cpu */
 273  274          kmem_magazine_t *cc_loaded;     /* the currently loaded magazine */
 274  275          kmem_magazine_t *cc_ploaded;    /* the previously loaded magazine */
 275  276          int             cc_flags;       /* CPU-local copy of cache_flags */
 276  277          short           cc_rounds;      /* number of objects in loaded mag */
 277  278          short           cc_prounds;     /* number of objects in previous mag */
 278  279          short           cc_magsize;     /* number of rounds in a full mag */
 279  280          short           cc_dump_rounds; /* dump time copy of cc_rounds */
 280  281          short           cc_dump_prounds; /* dump time copy of cc_prounds */
 281  282          char            cc_pad[KMEM_CPU_PAD]; /* for nice alignment */
 282  283  } kmem_cpu_cache_t;
 283  284  
 284  285  /*
 285  286   * The magazine lists used in the depot.
 286  287   */
 287  288  typedef struct kmem_maglist {
 288  289          kmem_magazine_t *ml_list;       /* magazine list */
 289  290          long            ml_total;       /* number of magazines */
 290  291          long            ml_min;         /* min since last update */
 291  292          long            ml_reaplimit;   /* max reapable magazines */
 292  293          uint64_t        ml_alloc;       /* allocations from this list */
 293  294  } kmem_maglist_t;
 294  295  
 295  296  typedef struct kmem_defrag {
 296  297          /*
 297  298           * Statistics
 298  299           */
 299  300          uint64_t        kmd_callbacks;          /* move callbacks */
 300  301          uint64_t        kmd_yes;                /* KMEM_CBRC_YES responses */
 301  302          uint64_t        kmd_no;                 /* NO responses */
 302  303          uint64_t        kmd_later;              /* LATER responses */
 303  304          uint64_t        kmd_dont_need;          /* DONT_NEED responses */
 304  305          uint64_t        kmd_dont_know;          /* DONT_KNOW responses */
 305  306          uint64_t        kmd_slabs_freed;        /* slabs freed by moves */
 306  307          uint64_t        kmd_defrags;            /* kmem_cache_defrag() */
 307  308          uint64_t        kmd_scans;              /* kmem_cache_scan() */
 308  309  
 309  310          /*
 310  311           * Consolidator fields
 311  312           */
 312  313          avl_tree_t      kmd_moves_pending;      /* buffer moves pending */
 313  314          list_t          kmd_deadlist;           /* deferred slab frees */
 314  315          size_t          kmd_deadcount;          /* # of slabs in kmd_deadlist */
 315  316          uint8_t         kmd_reclaim_numer;      /* slab usage threshold */
 316  317          uint8_t         kmd_pad1;               /* compiler padding */
 317  318          uint16_t        kmd_consolidate;        /* triggers consolidator */
 318  319          uint32_t        kmd_pad2;               /* compiler padding */
 319  320          size_t          kmd_slabs_sought;       /* reclaimable slabs sought */
 320  321          size_t          kmd_slabs_found;        /* reclaimable slabs found */
  
 321  322          size_t          kmd_tries;              /* nth scan interval counter */
 322  323          /*
 323  324           * Fields used to ASSERT that the client does not kmem_cache_free()
 324  325           * objects passed to the move callback.
 325  326           */
 326  327          void            *kmd_from_buf;          /* object to move */
 327  328          void            *kmd_to_buf;            /* move destination */
 328  329          kthread_t       *kmd_thread;            /* thread calling move */
 329  330  } kmem_defrag_t;
 330  331  
      332 +typedef struct kmem_dump {
      333 +        void            *kd_freelist;           /* heap during crash dump */
      334 +        uint_t          kd_alloc_fails;         /* # of allocation failures */
      335 +        uint_t          kd_unsafe;              /* cache was used, but unsafe */
      336 +} kmem_dump_t;
      337 +
 331  338  #define KMEM_CACHE_NAMELEN      31
 332  339  
 333  340  struct kmem_cache {
 334  341          /*
 335  342           * Statistics
 336  343           */
 337  344          uint64_t        cache_slab_create;      /* slab creates */
 338  345          uint64_t        cache_slab_destroy;     /* slab destroys */
 339  346          uint64_t        cache_slab_alloc;       /* slab layer allocations */
 340  347          uint64_t        cache_slab_free;        /* slab layer frees */
 341  348          uint64_t        cache_alloc_fail;       /* total failed allocations */
 342  349          uint64_t        cache_buftotal;         /* total buffers */
 343  350          uint64_t        cache_bufmax;           /* max buffers ever */
 344  351          uint64_t        cache_bufslab;          /* buffers free in slab layer */
 345  352          uint64_t        cache_reap;             /* cache reaps */
 346  353          uint64_t        cache_rescale;          /* hash table rescales */
 347  354          uint64_t        cache_lookup_depth;     /* hash lookup depth */
 348  355          uint64_t        cache_depot_contention; /* mutex contention count */
 349  356          uint64_t        cache_depot_contention_prev; /* previous snapshot */
 350  357  
 351  358          /*
 352  359           * Cache properties
 353  360           */
 354  361          char            cache_name[KMEM_CACHE_NAMELEN + 1];
 355  362          size_t          cache_bufsize;          /* object size */
 356  363          size_t          cache_align;            /* object alignment */
 357  364          int             (*cache_constructor)(void *, void *, int);
 358  365          void            (*cache_destructor)(void *, void *);
 359  366          void            (*cache_reclaim)(void *);
 360  367          kmem_cbrc_t     (*cache_move)(void *, void *, size_t, void *);
 361  368          void            *cache_private;         /* opaque arg to callbacks */
 362  369          vmem_t          *cache_arena;           /* vmem source for slabs */
 363  370          int             cache_cflags;           /* cache creation flags */
 364  371          int             cache_flags;            /* various cache state info */
 365  372          uint32_t        cache_mtbf;             /* induced alloc failure rate */
 366  373          uint32_t        cache_pad1;             /* compiler padding */
 367  374          kstat_t         *cache_kstat;           /* exported statistics */
 368  375          list_node_t     cache_link;             /* cache linkage */
 369  376  
 370  377          /*
 371  378           * Slab layer
 372  379           */
 373  380          kmutex_t        cache_lock;             /* protects slab layer */
 374  381          size_t          cache_chunksize;        /* buf + alignment [+ debug] */
 375  382          size_t          cache_slabsize;         /* size of a slab */
 376  383          size_t          cache_maxchunks;        /* max buffers per slab */
 377  384          size_t          cache_bufctl;           /* buf-to-bufctl distance */
 378  385          size_t          cache_buftag;           /* buf-to-buftag distance */
 379  386          size_t          cache_verify;           /* bytes to verify */
 380  387          size_t          cache_contents;         /* bytes of saved content */
 381  388          size_t          cache_color;            /* next slab color */
  382  389          size_t          cache_mincolor;         /* minimum slab color */
 383  390          size_t          cache_maxcolor;         /* maximum slab color */
 384  391          size_t          cache_hash_shift;       /* get to interesting bits */
 385  392          size_t          cache_hash_mask;        /* hash table mask */
 386  393          list_t          cache_complete_slabs;   /* completely allocated slabs */
 387  394          size_t          cache_complete_slab_count;
 388  395          avl_tree_t      cache_partial_slabs;    /* partial slab freelist */
 389  396          size_t          cache_partial_binshift; /* for AVL sort bins */
 390  397          kmem_cache_t    *cache_bufctl_cache;    /* source of bufctls */
  
 391  398          kmem_bufctl_t   **cache_hash_table;     /* hash table base */
 392  399          kmem_defrag_t   *cache_defrag;          /* slab consolidator fields */
 393  400  
 394  401          /*
 395  402           * Depot layer
 396  403           */
 397  404          kmutex_t        cache_depot_lock;       /* protects depot */
 398  405          kmem_magtype_t  *cache_magtype;         /* magazine type */
 399  406          kmem_maglist_t  cache_full;             /* full magazines */
 400  407          kmem_maglist_t  cache_empty;            /* empty magazines */
 401      -        void            *cache_dumpfreelist;    /* heap during crash dump */
 402      -        void            *cache_dumplog;         /* log entry during dump */
      408 +        kmem_dump_t     cache_dump;             /* used during crash dump */
 403  409  
 404  410          /*
 405  411           * Per-CPU layer
 406  412           */
 407  413          kmem_cpu_cache_t cache_cpu[1];          /* max_ncpus actual elements */
 408  414  };
 409  415  
 410  416  typedef struct kmem_cpu_log_header {
 411  417          kmutex_t        clh_lock;
 412  418          char            *clh_current;
 413  419          size_t          clh_avail;
 414  420          int             clh_chunk;
 415  421          int             clh_hits;
 416  422          char            clh_pad[64 - sizeof (kmutex_t) - sizeof (char *) -
 417  423                                  sizeof (size_t) - 2 * sizeof (int)];
 418  424  } kmem_cpu_log_header_t;
 419  425  
 420  426  typedef struct kmem_log_header {
 421  427          kmutex_t        lh_lock;
 422  428          char            *lh_base;
 423  429          int             *lh_free;
 424  430          size_t          lh_chunksize;
 425  431          int             lh_nchunks;
 426  432          int             lh_head;
 427  433          int             lh_tail;
 428  434          int             lh_hits;
 429  435          kmem_cpu_log_header_t lh_cpu[1];        /* ncpus actually allocated */
 430  436  } kmem_log_header_t;
 431  437  
 432  438  /* kmem_move kmm_flags */
 433  439  #define KMM_DESPERATE           0x1
 434  440  #define KMM_NOTIFY              0x2
 435  441  #define KMM_DEBUG               0x4
 436  442  
 437  443  typedef struct kmem_move {
 438  444          kmem_slab_t     *kmm_from_slab;
 439  445          void            *kmm_from_buf;
 440  446          void            *kmm_to_buf;
 441  447          avl_node_t      kmm_entry;
 442  448          int             kmm_flags;
 443  449  } kmem_move_t;
 444  450  
 445  451  /*
 446  452   * In order to consolidate partial slabs, it must be possible for the cache to
 447  453   * have partial slabs.
 448  454   */
 449  455  #define KMEM_IS_MOVABLE(cp)                                             \
 450  456          (((cp)->cache_chunksize * 2) <= (cp)->cache_slabsize)
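For example, with an 8K slab and a 5K chunk there is room for only one buffer
per slab (2 * 5K > 8K), so every slab is either empty or full; no partial slabs
can ever exist, and KMEM_IS_MOVABLE() correctly reports such a cache as not
movable.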
 451  457  
 452  458  #ifdef  __cplusplus
 453  459  }
 454  460  #endif
 455  461  
 456  462  #endif  /* _SYS_KMEM_IMPL_H */
  