1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  23  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
  24  * Copyright (c) 2012 by Delphix. All rights reserved.
  25  */
  26 
  27 /*
  28  * DVA-based Adjustable Replacement Cache
  29  *
  30  * While much of the theory of operation used here is
  31  * based on the self-tuning, low overhead replacement cache
  32  * presented by Megiddo and Modha at FAST 2003, there are some
  33  * significant differences:
  34  *
  35  * 1. The Megiddo and Modha model assumes any page is evictable.
  36  * Pages in its cache cannot be "locked" into memory.  This makes
  37  * the eviction algorithm simple: evict the last page in the list.
  38  * This also makes the performance characteristics easy to reason
  39  * about.  Our cache is not so simple.  At any given moment, some
  40  * subset of the blocks in the cache are un-evictable because we
  41  * have handed out a reference to them.  Blocks are only evictable
  42  * when there are no external references active.  This makes
  43  * eviction far more problematic:  we choose to evict the evictable
  44  * blocks that are the "lowest" in the list.
  45  *
  46  * There are times when it is not possible to evict the requested
  47  * space.  In these circumstances we are unable to adjust the cache
  48  * size.  To prevent the cache growing unbounded at these times we
  49  * implement a "cache throttle" that slows the flow of new data
  50  * into the cache until we can make space available.
  51  *
  52  * 2. The Megiddo and Modha model assumes a fixed cache size.
  53  * Pages are evicted when the cache is full and there is a cache
  54  * miss.  Our model has a variable sized cache.  It grows with
  55  * high use, but also tries to react to memory pressure from the
  56  * operating system: decreasing its size when system memory is
  57  * tight.
  58  *
  59  * 3. The Megiddo and Modha model assumes a fixed page size. All
  60  * elements of the cache are therefore exactly the same size.  So
  61  * when adjusting the cache size following a cache miss, it's simply
  62  * a matter of choosing a single page to evict.  In our model, we
  63  * have variable-sized cache blocks (ranging from 512 bytes to
  64  * 128K bytes).  We therefore choose a set of blocks to evict to make
  65  * space for a cache miss that approximates as closely as possible
  66  * the space used by the new block.
  67  *
  68  * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
  69  * by N. Megiddo & D. Modha, FAST 2003
  70  */
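
/*
 * A minimal sketch of point 3 above (illustrative only; the real logic
 * lives in arc_evict() later in this file and also honors references,
 * recycling and L2ARC bookkeeping): to make room for a miss we would
 * walk an evictable list and stop once enough bytes are reclaimed:
 *
 *      uint64_t freed = 0;
 *      while (freed < bytes_needed && (ab = next_evictable(list)) != NULL)
 *              freed += evict_block(ab);
 *
 * where next_evictable() and evict_block() are hypothetical helpers,
 * not functions in this file.
 */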
  71 
  72 /*
  73  * The locking model:
  74  *
  75  * A new reference to a cache buffer can be obtained in two
  76  * ways: 1) via a hash table lookup using the DVA as a key,
  77  * or 2) via one of the ARC lists.  The arc_read() interface
  78  * uses method 1, while the internal arc algorithms for
  79  * adjusting the cache use method 2.  We therefore provide two
  80  * types of locks: 1) the hash table lock array, and 2) the
  81  * arc list locks.
  82  *
  83  * Buffers do not have their own mutexes; rather, they rely on the
  84  * hash table mutexes for the bulk of their protection (i.e. most
  85  * fields in the arc_buf_hdr_t are protected by these mutexes).
  86  *
  87  * buf_hash_find() returns the appropriate mutex (held) when it
  88  * locates the requested buffer in the hash table.  It returns
  89  * NULL for the mutex if the buffer was not in the table.
  90  *
  91  * buf_hash_remove() expects the appropriate hash mutex to be
  92  * already held before it is invoked.
  93  *
  94  * Each arc state also has a mutex which is used to protect the
  95  * buffer list associated with the state.  When attempting to
  96  * obtain a hash table lock while holding an arc list lock, you
  97  * must use mutex_tryenter() to avoid deadlock.  Also note that
  98  * the active state mutex must be held before the ghost state mutex.
  99  *
 100  * Arc buffers may have an associated eviction callback function.
 101  * This function will be invoked prior to removing the buffer (e.g.
 102  * in arc_do_user_evicts()).  Note however that the data associated
 103  * with the buffer may be evicted prior to the callback.  The callback
 104  * must be made with *no locks held* (to prevent deadlock).  Additionally,
 105  * the users of callbacks must ensure that their private data is
 106  * protected from simultaneous callbacks from arc_buf_evict()
 107  * and arc_do_user_evicts().
 108  *
 109  * Note that the majority of the performance stats are manipulated
 110  * with atomic operations.
 111  *
 112  * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 113  *
 114  *      - L2ARC buflist creation
 115  *      - L2ARC buflist eviction
 116  *      - L2ARC write completion, which walks L2ARC buflists
 117  *      - ARC header destruction, as it removes from L2ARC buflists
 118  *      - ARC header release, as it removes from L2ARC buflists
 119  */
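
/*
 * A minimal sketch of the lock-ordering rule above: a thread already
 * holding an arc list (state) lock may only *try* for the hash lock,
 * and must skip the buffer if the attempt fails:
 *
 *      hash_lock = HDR_LOCK(ab);
 *      if (mutex_tryenter(hash_lock)) {
 *              ... examine or move the buffer ...
 *              mutex_exit(hash_lock);
 *      } else {
 *              ARCSTAT_BUMP(arcstat_mutex_miss);
 *      }
 *
 * (HDR_LOCK() and ARCSTAT_BUMP() are defined later in this file; the
 * arcstat_mutex_miss counter is how eviction accounts for buffers it
 * had to skip.)
 */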
 120 
 121 #include <sys/spa.h>
 122 #include <sys/zio.h>
 123 #include <sys/zfs_context.h>
 124 #include <sys/arc.h>
 125 #include <sys/refcount.h>
 126 #include <sys/vdev.h>
 127 #include <sys/vdev_impl.h>
 128 #ifdef _KERNEL
 129 #include <sys/vmsystm.h>
 130 #include <vm/anon.h>
 131 #include <sys/fs/swapnode.h>
 132 #include <sys/dnlc.h>
 133 #endif
 134 #include <sys/callb.h>
 135 #include <sys/kstat.h>
 136 #include <zfs_fletcher.h>
 137 
 138 static kmutex_t         arc_reclaim_thr_lock;
 139 static kcondvar_t       arc_reclaim_thr_cv;     /* used to signal reclaim thr */
 140 static uint8_t          arc_thread_exit;
 141 
 142 extern int zfs_write_limit_shift;
 143 extern uint64_t zfs_write_limit_max;
 144 extern kmutex_t zfs_write_limit_lock;
 145 
 146 #define ARC_REDUCE_DNLC_PERCENT 3
 147 uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
 148 
 149 typedef enum arc_reclaim_strategy {
 150         ARC_RECLAIM_AGGR,               /* Aggressive reclaim strategy */
 151         ARC_RECLAIM_CONS                /* Conservative reclaim strategy */
 152 } arc_reclaim_strategy_t;
 153 
 154 /* number of seconds before growing cache again */
 155 static int              arc_grow_retry = 60;
 156 
 157 /* shift of arc_c for calculating both min and max arc_p */
 158 static int              arc_p_min_shift = 4;
 159 
 160 /* log2(fraction of arc to reclaim) */
 161 static int              arc_shrink_shift = 5;
 162 
 163 /*
 164  * minimum lifespan of a prefetch block in clock ticks
 165  * (initialized in arc_init())
 166  */
 167 static int              arc_min_prefetch_lifespan;
 168 
 169 static int arc_dead;
 170 
 171 /*
 172  * The arc has filled available memory and has now warmed up.
 173  */
 174 static boolean_t arc_warm;
 175 
 176 /*
 177  * These tunables are for performance analysis.
 178  */
 179 uint64_t zfs_arc_max;
 180 uint64_t zfs_arc_min;
 181 uint64_t zfs_arc_meta_limit = 0;
 182 int zfs_arc_grow_retry = 0;
 183 int zfs_arc_shrink_shift = 0;
 184 int zfs_arc_p_min_shift = 0;
 185 
 186 /*
 187  * Note that buffers can be in one of 6 states:
 188  *      ARC_anon        - anonymous (discussed below)
 189  *      ARC_mru         - recently used, currently cached
 190  *      ARC_mru_ghost   - recently used, no longer in cache
 191  *      ARC_mfu         - frequently used, currently cached
 192  *      ARC_mfu_ghost   - frequently used, no longer in cache
 193  *      ARC_l2c_only    - exists in L2ARC but not other states
 194  * When there are no active references to a buffer, it is
 195  * linked onto a list in one of these arc states.  These are
 196  * the only buffers that can be evicted or deleted.  Within each
 197  * state there are multiple lists, one for meta-data and one for
 198  * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 199  * etc.) is tracked separately so that it can be managed more
 200  * explicitly: favored over data, limited explicitly.
 201  *
 202  * Anonymous buffers are buffers that are not associated with
 203  * a DVA.  These are buffers that hold dirty block copies
 204  * before they are written to stable storage.  By definition,
 205  * they are "ref'd" and are considered part of arc_mru
 206  * that cannot be freed.  Generally, they will acquire a DVA
 207  * as they are written and migrate onto the arc_mru list.
 208  *
 209  * The ARC_l2c_only state is for buffers that are in the second
 210  * level ARC but no longer in any of the ARC_m* lists.  The second
 211  * level ARC itself may also contain buffers that are in any of
 212  * the ARC_m* states - meaning that a buffer can exist in two
 213  * places.  The reason for the ARC_l2c_only state is to keep the
 214  * buffer header in the hash table, so that reads that hit the
 215  * second level ARC benefit from these fast lookups.
 216  */
 217 
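/*
 * Concretely, each state keeps its evictable buffers on one list per
 * buffer type (see the arcs_list[] array in arc_state_t below), so
 * code that wants to treat metadata specially works on, e.g. (sketch):
 *
 *      list_t *meta = &state->arcs_list[ARC_BUFC_METADATA];
 *      list_t *data = &state->arcs_list[ARC_BUFC_DATA];
 *
 * which is what allows metadata to be favored over (and limited
 * independently of) file data.
 */
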
 218 typedef struct arc_state {
 219         list_t  arcs_list[ARC_BUFC_NUMTYPES];   /* list of evictable buffers */
 220         uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; /* amount of evictable data */
 221         uint64_t arcs_size;     /* total amount of data in this state */
 222         kmutex_t arcs_mtx;
 223 } arc_state_t;
 224 
 225 /* The 6 states: */
 226 static arc_state_t ARC_anon;
 227 static arc_state_t ARC_mru;
 228 static arc_state_t ARC_mru_ghost;
 229 static arc_state_t ARC_mfu;
 230 static arc_state_t ARC_mfu_ghost;
 231 static arc_state_t ARC_l2c_only;
 232 
 233 typedef struct arc_stats {
 234         kstat_named_t arcstat_hits;
 235         kstat_named_t arcstat_misses;
 236         kstat_named_t arcstat_demand_data_hits;
 237         kstat_named_t arcstat_demand_data_misses;
 238         kstat_named_t arcstat_demand_metadata_hits;
 239         kstat_named_t arcstat_demand_metadata_misses;
 240         kstat_named_t arcstat_prefetch_data_hits;
 241         kstat_named_t arcstat_prefetch_data_misses;
 242         kstat_named_t arcstat_prefetch_metadata_hits;
 243         kstat_named_t arcstat_prefetch_metadata_misses;
 244         kstat_named_t arcstat_mru_hits;
 245         kstat_named_t arcstat_mru_ghost_hits;
 246         kstat_named_t arcstat_mfu_hits;
 247         kstat_named_t arcstat_mfu_ghost_hits;
 248         kstat_named_t arcstat_deleted;
 249         kstat_named_t arcstat_recycle_miss;
 250         kstat_named_t arcstat_mutex_miss;
 251         kstat_named_t arcstat_evict_skip;
 252         kstat_named_t arcstat_evict_l2_cached;
 253         kstat_named_t arcstat_evict_l2_eligible;
 254         kstat_named_t arcstat_evict_l2_ineligible;
 255         kstat_named_t arcstat_hash_elements;
 256         kstat_named_t arcstat_hash_elements_max;
 257         kstat_named_t arcstat_hash_collisions;
 258         kstat_named_t arcstat_hash_chains;
 259         kstat_named_t arcstat_hash_chain_max;
 260         kstat_named_t arcstat_p;
 261         kstat_named_t arcstat_c;
 262         kstat_named_t arcstat_c_min;
 263         kstat_named_t arcstat_c_max;
 264         kstat_named_t arcstat_size;
 265         kstat_named_t arcstat_hdr_size;
 266         kstat_named_t arcstat_data_size;
 267         kstat_named_t arcstat_other_size;
 268         kstat_named_t arcstat_l2_hits;
 269         kstat_named_t arcstat_l2_misses;
 270         kstat_named_t arcstat_l2_feeds;
 271         kstat_named_t arcstat_l2_rw_clash;
 272         kstat_named_t arcstat_l2_read_bytes;
 273         kstat_named_t arcstat_l2_write_bytes;
 274         kstat_named_t arcstat_l2_writes_sent;
 275         kstat_named_t arcstat_l2_writes_done;
 276         kstat_named_t arcstat_l2_writes_error;
 277         kstat_named_t arcstat_l2_writes_hdr_miss;
 278         kstat_named_t arcstat_l2_evict_lock_retry;
 279         kstat_named_t arcstat_l2_evict_reading;
 280         kstat_named_t arcstat_l2_free_on_write;
 281         kstat_named_t arcstat_l2_abort_lowmem;
 282         kstat_named_t arcstat_l2_cksum_bad;
 283         kstat_named_t arcstat_l2_io_error;
 284         kstat_named_t arcstat_l2_size;
 285         kstat_named_t arcstat_l2_hdr_size;
 286         kstat_named_t arcstat_memory_throttle_count;
 287 } arc_stats_t;
 288 
 289 static arc_stats_t arc_stats = {
 290         { "hits",                       KSTAT_DATA_UINT64 },
 291         { "misses",                     KSTAT_DATA_UINT64 },
 292         { "demand_data_hits",           KSTAT_DATA_UINT64 },
 293         { "demand_data_misses",         KSTAT_DATA_UINT64 },
 294         { "demand_metadata_hits",       KSTAT_DATA_UINT64 },
 295         { "demand_metadata_misses",     KSTAT_DATA_UINT64 },
 296         { "prefetch_data_hits",         KSTAT_DATA_UINT64 },
 297         { "prefetch_data_misses",       KSTAT_DATA_UINT64 },
 298         { "prefetch_metadata_hits",     KSTAT_DATA_UINT64 },
 299         { "prefetch_metadata_misses",   KSTAT_DATA_UINT64 },
 300         { "mru_hits",                   KSTAT_DATA_UINT64 },
 301         { "mru_ghost_hits",             KSTAT_DATA_UINT64 },
 302         { "mfu_hits",                   KSTAT_DATA_UINT64 },
 303         { "mfu_ghost_hits",             KSTAT_DATA_UINT64 },
 304         { "deleted",                    KSTAT_DATA_UINT64 },
 305         { "recycle_miss",               KSTAT_DATA_UINT64 },
 306         { "mutex_miss",                 KSTAT_DATA_UINT64 },
 307         { "evict_skip",                 KSTAT_DATA_UINT64 },
 308         { "evict_l2_cached",            KSTAT_DATA_UINT64 },
 309         { "evict_l2_eligible",          KSTAT_DATA_UINT64 },
 310         { "evict_l2_ineligible",        KSTAT_DATA_UINT64 },
 311         { "hash_elements",              KSTAT_DATA_UINT64 },
 312         { "hash_elements_max",          KSTAT_DATA_UINT64 },
 313         { "hash_collisions",            KSTAT_DATA_UINT64 },
 314         { "hash_chains",                KSTAT_DATA_UINT64 },
 315         { "hash_chain_max",             KSTAT_DATA_UINT64 },
 316         { "p",                          KSTAT_DATA_UINT64 },
 317         { "c",                          KSTAT_DATA_UINT64 },
 318         { "c_min",                      KSTAT_DATA_UINT64 },
 319         { "c_max",                      KSTAT_DATA_UINT64 },
 320         { "size",                       KSTAT_DATA_UINT64 },
 321         { "hdr_size",                   KSTAT_DATA_UINT64 },
 322         { "data_size",                  KSTAT_DATA_UINT64 },
 323         { "other_size",                 KSTAT_DATA_UINT64 },
 324         { "l2_hits",                    KSTAT_DATA_UINT64 },
 325         { "l2_misses",                  KSTAT_DATA_UINT64 },
 326         { "l2_feeds",                   KSTAT_DATA_UINT64 },
 327         { "l2_rw_clash",                KSTAT_DATA_UINT64 },
 328         { "l2_read_bytes",              KSTAT_DATA_UINT64 },
 329         { "l2_write_bytes",             KSTAT_DATA_UINT64 },
 330         { "l2_writes_sent",             KSTAT_DATA_UINT64 },
 331         { "l2_writes_done",             KSTAT_DATA_UINT64 },
 332         { "l2_writes_error",            KSTAT_DATA_UINT64 },
 333         { "l2_writes_hdr_miss",         KSTAT_DATA_UINT64 },
 334         { "l2_evict_lock_retry",        KSTAT_DATA_UINT64 },
 335         { "l2_evict_reading",           KSTAT_DATA_UINT64 },
 336         { "l2_free_on_write",           KSTAT_DATA_UINT64 },
 337         { "l2_abort_lowmem",            KSTAT_DATA_UINT64 },
 338         { "l2_cksum_bad",               KSTAT_DATA_UINT64 },
 339         { "l2_io_error",                KSTAT_DATA_UINT64 },
 340         { "l2_size",                    KSTAT_DATA_UINT64 },
 341         { "l2_hdr_size",                KSTAT_DATA_UINT64 },
 342         { "memory_throttle_count",      KSTAT_DATA_UINT64 }
 343 };
 344 
 345 #define ARCSTAT(stat)   (arc_stats.stat.value.ui64)
 346 
 347 #define ARCSTAT_INCR(stat, val) \
 348         atomic_add_64(&arc_stats.stat.value.ui64, (val));
 349 
 350 #define ARCSTAT_BUMP(stat)      ARCSTAT_INCR(stat, 1)
 351 #define ARCSTAT_BUMPDOWN(stat)  ARCSTAT_INCR(stat, -1)
 352 
 353 #define ARCSTAT_MAX(stat, val) {                                        \
 354         uint64_t m;                                                     \
 355         while ((val) > (m = arc_stats.stat.value.ui64) &&            \
 356             (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))     \
 357                 continue;                                               \
 358 }
 359 
 360 #define ARCSTAT_MAXSTAT(stat) \
 361         ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
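
/*
 * The retry loop in ARCSTAT_MAX() is a lock-free maximum update: it
 * re-reads the current value and retries the compare-and-swap until
 * either the stored maximum is already >= (val) or the swap succeeds.
 * Typical use (the second argument name here is just illustrative):
 *
 *      ARCSTAT_MAX(arcstat_hash_chain_max, chain_len);
 */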
 362 
 363 /*
 364  * We define a macro to allow ARC hits/misses to be easily broken down by
 365  * two separate conditions, giving a total of four different subtypes for
 366  * each of hits and misses (so eight statistics total).
 367  */
 368 #define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
 369         if (cond1) {                                                    \
 370                 if (cond2) {                                            \
 371                         ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
 372                 } else {                                                \
 373                         ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
 374                 }                                                       \
 375         } else {                                                        \
 376                 if (cond2) {                                            \
 377                         ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
 378                 } else {                                                \
 379                         ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
 380                 }                                                       \
 381         }
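
/*
 * For example, the hit path in arc_buf_add_ref() below classifies each
 * hit by demand vs. prefetch and data vs. metadata in a single call:
 *
 *      ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
 *          demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *          data, metadata, hits);
 *
 * which bumps exactly one of arcstat_demand_data_hits,
 * arcstat_demand_metadata_hits, arcstat_prefetch_data_hits or
 * arcstat_prefetch_metadata_hits.
 */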
 382 
 383 kstat_t                 *arc_ksp;
 384 static arc_state_t      *arc_anon;
 385 static arc_state_t      *arc_mru;
 386 static arc_state_t      *arc_mru_ghost;
 387 static arc_state_t      *arc_mfu;
 388 static arc_state_t      *arc_mfu_ghost;
 389 static arc_state_t      *arc_l2c_only;
 390 
 391 /*
 392  * There are several ARC variables that are critical to export as kstats --
 393  * but we don't want to have to grovel around in the kstat whenever we wish to
 394  * manipulate them.  For these variables, we therefore define them to be in
 395  * terms of the statistic variable.  This assures that we are not introducing
 396  * the possibility of inconsistency by having shadow copies of the variables,
 397  * while still allowing the code to be readable.
 398  */
 399 #define arc_size        ARCSTAT(arcstat_size)   /* actual total arc size */
 400 #define arc_p           ARCSTAT(arcstat_p)      /* target size of MRU */
 401 #define arc_c           ARCSTAT(arcstat_c)      /* target size of cache */
 402 #define arc_c_min       ARCSTAT(arcstat_c_min)  /* min target cache size */
 403 #define arc_c_max       ARCSTAT(arcstat_c_max)  /* max target cache size */
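
/*
 * For example, arc_space_consume() below adjusts the total with
 *
 *      atomic_add_64(&arc_size, space);
 *
 * and because arc_size expands to ARCSTAT(arcstat_size), the exported
 * "size" kstat is updated by the same operation, with no shadow copy
 * to keep in sync.
 */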
 404 
 405 static int              arc_no_grow;    /* Don't try to grow cache size */
 406 static uint64_t         arc_tempreserve;
 407 static uint64_t         arc_loaned_bytes;
 408 static uint64_t         arc_meta_used;
 409 static uint64_t         arc_meta_limit;
 410 static uint64_t         arc_meta_max = 0;
 411 
 412 typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;
 413 
 414 typedef struct arc_callback arc_callback_t;
 415 
 416 struct arc_callback {
 417         void                    *acb_private;
 418         arc_done_func_t         *acb_done;
 419         arc_buf_t               *acb_buf;
 420         zio_t                   *acb_zio_dummy;
 421         arc_callback_t          *acb_next;
 422 };
 423 
 424 typedef struct arc_write_callback arc_write_callback_t;
 425 
 426 struct arc_write_callback {
 427         void            *awcb_private;
 428         arc_done_func_t *awcb_ready;
 429         arc_done_func_t *awcb_done;
 430         arc_buf_t       *awcb_buf;
 431 };
 432 
 433 struct arc_buf_hdr {
 434         /* protected by hash lock */
 435         dva_t                   b_dva;
 436         uint64_t                b_birth;
 437         uint64_t                b_cksum0;
 438 
 439         kmutex_t                b_freeze_lock;
 440         zio_cksum_t             *b_freeze_cksum;
 441         void                    *b_thawed;
 442 
 443         arc_buf_hdr_t           *b_hash_next;
 444         arc_buf_t               *b_buf;
 445         uint32_t                b_flags;
 446         uint32_t                b_datacnt;
 447 
 448         arc_callback_t          *b_acb;
 449         kcondvar_t              b_cv;
 450 
 451         /* immutable */
 452         arc_buf_contents_t      b_type;
 453         uint64_t                b_size;
 454         uint64_t                b_spa;
 455 
 456         /* protected by arc state mutex */
 457         arc_state_t             *b_state;
 458         list_node_t             b_arc_node;
 459 
 460         /* updated atomically */
 461         clock_t                 b_arc_access;
 462 
 463         /* self protecting */
 464         refcount_t              b_refcnt;
 465 
 466         l2arc_buf_hdr_t         *b_l2hdr;
 467         list_node_t             b_l2node;
 468 };
 469 
 470 static arc_buf_t *arc_eviction_list;
 471 static kmutex_t arc_eviction_mtx;
 472 static arc_buf_hdr_t arc_eviction_hdr;
 473 static void arc_get_data_buf(arc_buf_t *buf);
 474 static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
 475 static int arc_evict_needed(arc_buf_contents_t type);
 476 static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);
 477 
 478 static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);
 479 
 480 #define GHOST_STATE(state)      \
 481         ((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||        \
 482         (state) == arc_l2c_only)
 483 
 484 /*
 485  * Private ARC flags.  These are ARC-internal flags that will show up
 486  * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 487  * be passed in as arc_flags in things like arc_read.  However, these flags
 488  * should never be passed and should only be set by ARC code.  When adding new
 489  * public flags, make sure not to smash the private ones.
 490  */
 491 
 492 #define ARC_IN_HASH_TABLE       (1 << 9)  /* this buffer is hashed */
 493 #define ARC_IO_IN_PROGRESS      (1 << 10) /* I/O in progress for buf */
 494 #define ARC_IO_ERROR            (1 << 11) /* I/O failed for buf */
 495 #define ARC_FREED_IN_READ       (1 << 12) /* buf freed while in read */
 496 #define ARC_BUF_AVAILABLE       (1 << 13) /* block not in active use */
 497 #define ARC_INDIRECT            (1 << 14) /* this is an indirect block */
 498 #define ARC_FREE_IN_PROGRESS    (1 << 15) /* hdr about to be freed */
 499 #define ARC_L2_WRITING          (1 << 16) /* L2ARC write in progress */
 500 #define ARC_L2_EVICTED          (1 << 17) /* evicted during I/O */
 501 #define ARC_L2_WRITE_HEAD       (1 << 18) /* head of write list */
 502 
 503 #define HDR_IN_HASH_TABLE(hdr)  ((hdr)->b_flags & ARC_IN_HASH_TABLE)
 504 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS)
 505 #define HDR_IO_ERROR(hdr)       ((hdr)->b_flags & ARC_IO_ERROR)
 506 #define HDR_PREFETCH(hdr)       ((hdr)->b_flags & ARC_PREFETCH)
 507 #define HDR_FREED_IN_READ(hdr)  ((hdr)->b_flags & ARC_FREED_IN_READ)
 508 #define HDR_BUF_AVAILABLE(hdr)  ((hdr)->b_flags & ARC_BUF_AVAILABLE)
 509 #define HDR_FREE_IN_PROGRESS(hdr)       ((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
 510 #define HDR_L2CACHE(hdr)        ((hdr)->b_flags & ARC_L2CACHE)
 511 #define HDR_L2_READING(hdr)     ((hdr)->b_flags & ARC_IO_IN_PROGRESS &&  \
 512                                     (hdr)->b_l2hdr != NULL)
 513 #define HDR_L2_WRITING(hdr)     ((hdr)->b_flags & ARC_L2_WRITING)
 514 #define HDR_L2_EVICTED(hdr)     ((hdr)->b_flags & ARC_L2_EVICTED)
 515 #define HDR_L2_WRITE_HEAD(hdr)  ((hdr)->b_flags & ARC_L2_WRITE_HEAD)
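
/*
 * A brief usage sketch: private flags are set and cleared directly on
 * b_flags while the appropriate lock is held, and tested through the
 * wrappers above, e.g.:
 *
 *      buf->b_flags |= ARC_IN_HASH_TABLE;
 *      ...
 *      if (HDR_IN_HASH_TABLE(hdr))
 *              buf_hash_remove(hdr);
 */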
 516 
 517 /*
 518  * Other sizes
 519  */
 520 
 521 #define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
 522 #define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))
 523 
 524 /*
 525  * Hash table routines
 526  */
 527 
 528 #define HT_LOCK_PAD     64
 529 
 530 struct ht_lock {
 531         kmutex_t        ht_lock;
 532 #ifdef _KERNEL
 533         unsigned char   pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
 534 #endif
 535 };
 536 
 537 #define BUF_LOCKS 256
 538 typedef struct buf_hash_table {
 539         uint64_t ht_mask;
 540         arc_buf_hdr_t **ht_table;
 541         struct ht_lock ht_locks[BUF_LOCKS];
 542 } buf_hash_table_t;
 543 
 544 static buf_hash_table_t buf_hash_table;
 545 
 546 #define BUF_HASH_INDEX(spa, dva, birth) \
 547         (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
 548 #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
 549 #define BUF_HASH_LOCK(idx)      (&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
 550 #define HDR_LOCK(hdr) \
 551         (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
 552 
 553 uint64_t zfs_crc64_table[256];
 554 
 555 /*
 556  * Level 2 ARC
 557  */
 558 
 559 #define L2ARC_WRITE_SIZE        (8 * 1024 * 1024)       /* initial write max */
 560 #define L2ARC_HEADROOM          2               /* num of writes */
 561 #define L2ARC_FEED_SECS         1               /* caching interval secs */
 562 #define L2ARC_FEED_MIN_MS       200             /* min caching interval ms */
 563 
 564 #define l2arc_writes_sent       ARCSTAT(arcstat_l2_writes_sent)
 565 #define l2arc_writes_done       ARCSTAT(arcstat_l2_writes_done)
 566 
 567 /*
 568  * L2ARC Performance Tunables
 569  */
 570 uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;    /* default max write size */
 571 uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;  /* extra write during warmup */
 572 uint64_t l2arc_headroom = L2ARC_HEADROOM;       /* number of dev writes */
 573 uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;     /* interval seconds */
 574 uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */
 575 boolean_t l2arc_noprefetch = B_TRUE;            /* don't cache prefetch bufs */
 576 boolean_t l2arc_feed_again = B_TRUE;            /* turbo warmup */
 577 boolean_t l2arc_norw = B_TRUE;                  /* no reads during writes */
 578 
 579 /*
 580  * L2ARC Internals
 581  */
 582 typedef struct l2arc_dev {
 583         vdev_t                  *l2ad_vdev;     /* vdev */
 584         spa_t                   *l2ad_spa;      /* spa */
 585         uint64_t                l2ad_hand;      /* next write location */
 586         uint64_t                l2ad_write;     /* desired write size, bytes */
 587         uint64_t                l2ad_boost;     /* warmup write boost, bytes */
 588         uint64_t                l2ad_start;     /* first addr on device */
 589         uint64_t                l2ad_end;       /* last addr on device */
 590         uint64_t                l2ad_evict;     /* last addr eviction reached */
 591         boolean_t               l2ad_first;     /* first sweep through */
 592         boolean_t               l2ad_writing;   /* currently writing */
 593         list_t                  *l2ad_buflist;  /* buffer list */
 594         list_node_t             l2ad_node;      /* device list node */
 595 } l2arc_dev_t;
 596 
 597 static list_t L2ARC_dev_list;                   /* device list */
 598 static list_t *l2arc_dev_list;                  /* device list pointer */
 599 static kmutex_t l2arc_dev_mtx;                  /* device list mutex */
 600 static l2arc_dev_t *l2arc_dev_last;             /* last device used */
 601 static kmutex_t l2arc_buflist_mtx;              /* mutex for all buflists */
 602 static list_t L2ARC_free_on_write;              /* free after write buf list */
 603 static list_t *l2arc_free_on_write;             /* free after write list ptr */
 604 static kmutex_t l2arc_free_on_write_mtx;        /* mutex for list */
 605 static uint64_t l2arc_ndev;                     /* number of devices */
 606 
 607 typedef struct l2arc_read_callback {
 608         arc_buf_t       *l2rcb_buf;             /* read buffer */
 609         spa_t           *l2rcb_spa;             /* spa */
 610         blkptr_t        l2rcb_bp;               /* original blkptr */
 611         zbookmark_t     l2rcb_zb;               /* original bookmark */
 612         int             l2rcb_flags;            /* original flags */
 613 } l2arc_read_callback_t;
 614 
 615 typedef struct l2arc_write_callback {
 616         l2arc_dev_t     *l2wcb_dev;             /* device info */
 617         arc_buf_hdr_t   *l2wcb_head;            /* head of write buflist */
 618 } l2arc_write_callback_t;
 619 
 620 struct l2arc_buf_hdr {
 621         /* protected by arc_buf_hdr  mutex */
 622         l2arc_dev_t     *b_dev;                 /* L2ARC device */
 623         uint64_t        b_daddr;                /* disk address, offset byte */
 624 };
 625 
 626 typedef struct l2arc_data_free {
 627         /* protected by l2arc_free_on_write_mtx */
 628         void            *l2df_data;
 629         size_t          l2df_size;
 630         void            (*l2df_func)(void *, size_t);
 631         list_node_t     l2df_list_node;
 632 } l2arc_data_free_t;
 633 
 634 static kmutex_t l2arc_feed_thr_lock;
 635 static kcondvar_t l2arc_feed_thr_cv;
 636 static uint8_t l2arc_thread_exit;
 637 
 638 static void l2arc_read_done(zio_t *zio);
 639 static void l2arc_hdr_stat_add(void);
 640 static void l2arc_hdr_stat_remove(void);
 641 
 642 static uint64_t
 643 buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
 644 {
 645         uint8_t *vdva = (uint8_t *)dva;
 646         uint64_t crc = -1ULL;
 647         int i;
 648 
 649         ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
 650 
 651         for (i = 0; i < sizeof (dva_t); i++)
 652                 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
 653 
 654         crc ^= (spa>>8) ^ birth;
 655 
 656         return (crc);
 657 }
 658 
 659 #define BUF_EMPTY(buf)                                          \
 660         ((buf)->b_dva.dva_word[0] == 0 &&                    \
 661         (buf)->b_dva.dva_word[1] == 0 &&                     \
 662         (buf)->b_birth == 0)
 663 
 664 #define BUF_EQUAL(spa, dva, birth, buf)                         \
 665         ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&       \
 666         ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&       \
 667         ((buf)->b_birth == birth) && ((buf)->b_spa == spa)
 668 
 669 static void
 670 buf_discard_identity(arc_buf_hdr_t *hdr)
 671 {
 672         hdr->b_dva.dva_word[0] = 0;
 673         hdr->b_dva.dva_word[1] = 0;
 674         hdr->b_birth = 0;
 675         hdr->b_cksum0 = 0;
 676 }
 677 
 678 static arc_buf_hdr_t *
 679 buf_hash_find(uint64_t spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp)
 680 {
 681         uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
 682         kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
 683         arc_buf_hdr_t *buf;
 684 
 685         mutex_enter(hash_lock);
 686         for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
 687             buf = buf->b_hash_next) {
 688                 if (BUF_EQUAL(spa, dva, birth, buf)) {
 689                         *lockp = hash_lock;
 690                         return (buf);
 691                 }
 692         }
 693         mutex_exit(hash_lock);
 694         *lockp = NULL;
 695         return (NULL);
 696 }
 697 
 698 /*
 699  * Insert an entry into the hash table.  If there is already an element
 700  * equal to the new entry in the hash table, then the existing element
 701  * will be returned and the new element will not be inserted.
 702  * Otherwise returns NULL.
 703  */
 704 static arc_buf_hdr_t *
 705 buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
 706 {
 707         uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
 708         kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
 709         arc_buf_hdr_t *fbuf;
 710         uint32_t i;
 711 
 712         ASSERT(!HDR_IN_HASH_TABLE(buf));
 713         *lockp = hash_lock;
 714         mutex_enter(hash_lock);
 715         for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
 716             fbuf = fbuf->b_hash_next, i++) {
 717                 if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
 718                         return (fbuf);
 719         }
 720 
 721         buf->b_hash_next = buf_hash_table.ht_table[idx];
 722         buf_hash_table.ht_table[idx] = buf;
 723         buf->b_flags |= ARC_IN_HASH_TABLE;
 724 
 725         /* collect some hash table performance data */
 726         if (i > 0) {
 727                 ARCSTAT_BUMP(arcstat_hash_collisions);
 728                 if (i == 1)
 729                         ARCSTAT_BUMP(arcstat_hash_chains);
 730 
 731                 ARCSTAT_MAX(arcstat_hash_chain_max, i);
 732         }
 733 
 734         ARCSTAT_BUMP(arcstat_hash_elements);
 735         ARCSTAT_MAXSTAT(arcstat_hash_elements);
 736 
 737         return (NULL);
 738 }
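
/*
 * A usage sketch for buf_hash_insert() (hypothetical caller): *lockp is
 * set and the lock is held on return in either case; the caller checks
 * the return value to learn whether an equal header already existed:
 *
 *      exists = buf_hash_insert(hdr, &hash_lock);
 *      if (exists != NULL) {
 *              ... use the existing header instead of hdr ...
 *      }
 *      mutex_exit(hash_lock);
 */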
 739 
 740 static void
 741 buf_hash_remove(arc_buf_hdr_t *buf)
 742 {
 743         arc_buf_hdr_t *fbuf, **bufp;
 744         uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
 745 
 746         ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
 747         ASSERT(HDR_IN_HASH_TABLE(buf));
 748 
 749         bufp = &buf_hash_table.ht_table[idx];
 750         while ((fbuf = *bufp) != buf) {
 751                 ASSERT(fbuf != NULL);
 752                 bufp = &fbuf->b_hash_next;
 753         }
 754         *bufp = buf->b_hash_next;
 755         buf->b_hash_next = NULL;
 756         buf->b_flags &= ~ARC_IN_HASH_TABLE;
 757 
 758         /* collect some hash table performance data */
 759         ARCSTAT_BUMPDOWN(arcstat_hash_elements);
 760 
 761         if (buf_hash_table.ht_table[idx] &&
 762             buf_hash_table.ht_table[idx]->b_hash_next == NULL)
 763                 ARCSTAT_BUMPDOWN(arcstat_hash_chains);
 764 }
 765 
 766 /*
 767  * Global data structures and functions for the buf kmem cache.
 768  */
 769 static kmem_cache_t *hdr_cache;
 770 static kmem_cache_t *buf_cache;
 771 
 772 static void
 773 buf_fini(void)
 774 {
 775         int i;
 776 
 777         kmem_free(buf_hash_table.ht_table,
 778             (buf_hash_table.ht_mask + 1) * sizeof (void *));
 779         for (i = 0; i < BUF_LOCKS; i++)
 780                 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
 781         kmem_cache_destroy(hdr_cache);
 782         kmem_cache_destroy(buf_cache);
 783 }
 784 
 785 /*
 786  * Constructor callback - called when the cache is empty
 787  * and a new buf is requested.
 788  */
 789 /* ARGSUSED */
 790 static int
 791 hdr_cons(void *vbuf, void *unused, int kmflag)
 792 {
 793         arc_buf_hdr_t *buf = vbuf;
 794 
 795         bzero(buf, sizeof (arc_buf_hdr_t));
 796         refcount_create(&buf->b_refcnt);
 797         cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
 798         mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
 799         arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
 800 
 801         return (0);
 802 }
 803 
 804 /* ARGSUSED */
 805 static int
 806 buf_cons(void *vbuf, void *unused, int kmflag)
 807 {
 808         arc_buf_t *buf = vbuf;
 809 
 810         bzero(buf, sizeof (arc_buf_t));
 811         mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
 812         rw_init(&buf->b_data_lock, NULL, RW_DEFAULT, NULL);
 813         arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
 814 
 815         return (0);
 816 }
 817 
 818 /*
 819  * Destructor callback - called when a cached buf is
 820  * no longer required.
 821  */
 822 /* ARGSUSED */
 823 static void
 824 hdr_dest(void *vbuf, void *unused)
 825 {
 826         arc_buf_hdr_t *buf = vbuf;
 827 
 828         ASSERT(BUF_EMPTY(buf));
 829         refcount_destroy(&buf->b_refcnt);
 830         cv_destroy(&buf->b_cv);
 831         mutex_destroy(&buf->b_freeze_lock);
 832         arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
 833 }
 834 
 835 /* ARGSUSED */
 836 static void
 837 buf_dest(void *vbuf, void *unused)
 838 {
 839         arc_buf_t *buf = vbuf;
 840 
 841         mutex_destroy(&buf->b_evict_lock);
 842         rw_destroy(&buf->b_data_lock);
 843         arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
 844 }
 845 
 846 /*
 847  * Reclaim callback -- invoked when memory is low.
 848  */
 849 /* ARGSUSED */
 850 static void
 851 hdr_recl(void *unused)
 852 {
 853         dprintf("hdr_recl called\n");
 854         /*
 855          * umem calls the reclaim func when we destroy the buf cache,
 856          * which is after we do arc_fini().
 857          */
 858         if (!arc_dead)
 859                 cv_signal(&arc_reclaim_thr_cv);
 860 }
 861 
 862 static void
 863 buf_init(void)
 864 {
 865         uint64_t *ct;
 866         uint64_t hsize = 1ULL << 12;
 867         int i, j;
 868 
 869         /*
 870          * The hash table is big enough to fill all of physical memory
 871          * with an average 64K block size.  The table will take up
 872          * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers).
 873          */
 874         while (hsize * 65536 < physmem * PAGESIZE)
 875                 hsize <<= 1;
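        /*
         * Worked example (assuming 8-byte pointers): with 8 GB of
         * physical memory the loop above stops at
         * hsize = 8 GB / 64 KB = 131072 entries, so the table consumes
         * 131072 * 8 bytes = 1 MB, matching the 128KB/GB estimate in
         * the comment above.  Since hsize is a power of two, it can be
         * up to twice that for memory sizes that are not powers of two.
         */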
 876 retry:
 877         buf_hash_table.ht_mask = hsize - 1;
 878         buf_hash_table.ht_table =
 879             kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
 880         if (buf_hash_table.ht_table == NULL) {
 881                 ASSERT(hsize > (1ULL << 8));
 882                 hsize >>= 1;
 883                 goto retry;
 884         }
 885 
 886         hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
 887             0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
 888         buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
 889             0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
 890 
 891         for (i = 0; i < 256; i++)
 892                 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
 893                         *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
 894 
 895         for (i = 0; i < BUF_LOCKS; i++) {
 896                 mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
 897                     NULL, MUTEX_DEFAULT, NULL);
 898         }
 899 }
 900 
 901 #define ARC_MINTIME     (hz>>4) /* 62 ms */
 902 
 903 static void
 904 arc_cksum_verify(arc_buf_t *buf)
 905 {
 906         zio_cksum_t zc;
 907 
 908         if (!(zfs_flags & ZFS_DEBUG_MODIFY))
 909                 return;
 910 
 911         mutex_enter(&buf->b_hdr->b_freeze_lock);
 912         if (buf->b_hdr->b_freeze_cksum == NULL ||
 913             (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
 914                 mutex_exit(&buf->b_hdr->b_freeze_lock);
 915                 return;
 916         }
 917         fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
 918         if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
 919                 panic("buffer modified while frozen!");
 920         mutex_exit(&buf->b_hdr->b_freeze_lock);
 921 }
 922 
 923 static int
 924 arc_cksum_equal(arc_buf_t *buf)
 925 {
 926         zio_cksum_t zc;
 927         int equal;
 928 
 929         mutex_enter(&buf->b_hdr->b_freeze_lock);
 930         fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
 931         equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
 932         mutex_exit(&buf->b_hdr->b_freeze_lock);
 933 
 934         return (equal);
 935 }
 936 
 937 static void
 938 arc_cksum_compute(arc_buf_t *buf, boolean_t force)
 939 {
 940         if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
 941                 return;
 942 
 943         mutex_enter(&buf->b_hdr->b_freeze_lock);
 944         if (buf->b_hdr->b_freeze_cksum != NULL) {
 945                 mutex_exit(&buf->b_hdr->b_freeze_lock);
 946                 return;
 947         }
 948         buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
 949         fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
 950             buf->b_hdr->b_freeze_cksum);
 951         mutex_exit(&buf->b_hdr->b_freeze_lock);
 952 }
 953 
 954 void
 955 arc_buf_thaw(arc_buf_t *buf)
 956 {
 957         if (zfs_flags & ZFS_DEBUG_MODIFY) {
 958                 if (buf->b_hdr->b_state != arc_anon)
 959                         panic("modifying non-anon buffer!");
 960                 if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
 961                         panic("modifying buffer while i/o in progress!");
 962                 arc_cksum_verify(buf);
 963         }
 964 
 965         mutex_enter(&buf->b_hdr->b_freeze_lock);
 966         if (buf->b_hdr->b_freeze_cksum != NULL) {
 967                 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
 968                 buf->b_hdr->b_freeze_cksum = NULL;
 969         }
 970 
 971         if (zfs_flags & ZFS_DEBUG_MODIFY) {
 972                 if (buf->b_hdr->b_thawed)
 973                         kmem_free(buf->b_hdr->b_thawed, 1);
 974                 buf->b_hdr->b_thawed = kmem_alloc(1, KM_SLEEP);
 975         }
 976 
 977         mutex_exit(&buf->b_hdr->b_freeze_lock);
 978 }
 979 
 980 void
 981 arc_buf_freeze(arc_buf_t *buf)
 982 {
 983         kmutex_t *hash_lock;
 984 
 985         if (!(zfs_flags & ZFS_DEBUG_MODIFY))
 986                 return;
 987 
 988         hash_lock = HDR_LOCK(buf->b_hdr);
 989         mutex_enter(hash_lock);
 990 
 991         ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
 992             buf->b_hdr->b_state == arc_anon);
 993         arc_cksum_compute(buf, B_FALSE);
 994         mutex_exit(hash_lock);
 995 }
 996 
 997 static void
 998 add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
 999 {
1000         ASSERT(MUTEX_HELD(hash_lock));
1001 
1002         if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
1003             (ab->b_state != arc_anon)) {
1004                 uint64_t delta = ab->b_size * ab->b_datacnt;
1005                 list_t *list = &ab->b_state->arcs_list[ab->b_type];
1006                 uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];
1007 
1008                 ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
1009                 mutex_enter(&ab->b_state->arcs_mtx);
1010                 ASSERT(list_link_active(&ab->b_arc_node));
1011                 list_remove(list, ab);
1012                 if (GHOST_STATE(ab->b_state)) {
1013                         ASSERT3U(ab->b_datacnt, ==, 0);
1014                         ASSERT3P(ab->b_buf, ==, NULL);
1015                         delta = ab->b_size;
1016                 }
1017                 ASSERT(delta > 0);
1018                 ASSERT3U(*size, >=, delta);
1019                 atomic_add_64(size, -delta);
1020                 mutex_exit(&ab->b_state->arcs_mtx);
1021                 /* remove the prefetch flag if we get a reference */
1022                 if (ab->b_flags & ARC_PREFETCH)
1023                         ab->b_flags &= ~ARC_PREFETCH;
1024         }
1025 }
1026 
1027 static int
1028 remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1029 {
1030         int cnt;
1031         arc_state_t *state = ab->b_state;
1032 
1033         ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
1034         ASSERT(!GHOST_STATE(state));
1035 
1036         if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
1037             (state != arc_anon)) {
1038                 uint64_t *size = &state->arcs_lsize[ab->b_type];
1039 
1040                 ASSERT(!MUTEX_HELD(&state->arcs_mtx));
1041                 mutex_enter(&state->arcs_mtx);
1042                 ASSERT(!list_link_active(&ab->b_arc_node));
1043                 list_insert_head(&state->arcs_list[ab->b_type], ab);
1044                 ASSERT(ab->b_datacnt > 0);
1045                 atomic_add_64(size, ab->b_size * ab->b_datacnt);
1046                 mutex_exit(&state->arcs_mtx);
1047         }
1048         return (cnt);
1049 }
1050 
1051 /*
1052  * Move the supplied buffer to the indicated state.  The mutex
1053  * for the buffer must be held by the caller.
1054  */
1055 static void
1056 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
1057 {
1058         arc_state_t *old_state = ab->b_state;
1059         int64_t refcnt = refcount_count(&ab->b_refcnt);
1060         uint64_t from_delta, to_delta;
1061 
1062         ASSERT(MUTEX_HELD(hash_lock));
1063         ASSERT(new_state != old_state);
1064         ASSERT(refcnt == 0 || ab->b_datacnt > 0);
1065         ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
1066         ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon);
1067 
1068         from_delta = to_delta = ab->b_datacnt * ab->b_size;
1069 
1070         /*
1071          * If this buffer is evictable, transfer it from the
1072          * old state list to the new state list.
1073          */
1074         if (refcnt == 0) {
1075                 if (old_state != arc_anon) {
1076                         int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
1077                         uint64_t *size = &old_state->arcs_lsize[ab->b_type];
1078 
1079                         if (use_mutex)
1080                                 mutex_enter(&old_state->arcs_mtx);
1081 
1082                         ASSERT(list_link_active(&ab->b_arc_node));
1083                         list_remove(&old_state->arcs_list[ab->b_type], ab);
1084 
1085                         /*
1086                          * If prefetching out of the ghost cache,
1087                          * we will have a non-zero datacnt.
1088                          */
1089                         if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
1090                                 /* ghost elements have a ghost size */
1091                                 ASSERT(ab->b_buf == NULL);
1092                                 from_delta = ab->b_size;
1093                         }
1094                         ASSERT3U(*size, >=, from_delta);
1095                         atomic_add_64(size, -from_delta);
1096 
1097                         if (use_mutex)
1098                                 mutex_exit(&old_state->arcs_mtx);
1099                 }
1100                 if (new_state != arc_anon) {
1101                         int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
1102                         uint64_t *size = &new_state->arcs_lsize[ab->b_type];
1103 
1104                         if (use_mutex)
1105                                 mutex_enter(&new_state->arcs_mtx);
1106 
1107                         list_insert_head(&new_state->arcs_list[ab->b_type], ab);
1108 
1109                         /* ghost elements have a ghost size */
1110                         if (GHOST_STATE(new_state)) {
1111                                 ASSERT(ab->b_datacnt == 0);
1112                                 ASSERT(ab->b_buf == NULL);
1113                                 to_delta = ab->b_size;
1114                         }
1115                         atomic_add_64(size, to_delta);
1116 
1117                         if (use_mutex)
1118                                 mutex_exit(&new_state->arcs_mtx);
1119                 }
1120         }
1121 
1122         ASSERT(!BUF_EMPTY(ab));
1123         if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab))
1124                 buf_hash_remove(ab);
1125 
1126         /* adjust state sizes */
1127         if (to_delta)
1128                 atomic_add_64(&new_state->arcs_size, to_delta);
1129         if (from_delta) {
1130                 ASSERT3U(old_state->arcs_size, >=, from_delta);
1131                 atomic_add_64(&old_state->arcs_size, -from_delta);
1132         }
1133         ab->b_state = new_state;
1134 
1135         /* adjust l2arc hdr stats */
1136         if (new_state == arc_l2c_only)
1137                 l2arc_hdr_stat_add();
1138         else if (old_state == arc_l2c_only)
1139                 l2arc_hdr_stat_remove();
1140 }
1141 
1142 void
1143 arc_space_consume(uint64_t space, arc_space_type_t type)
1144 {
1145         ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1146 
1147         switch (type) {
1148         case ARC_SPACE_DATA:
1149                 ARCSTAT_INCR(arcstat_data_size, space);
1150                 break;
1151         case ARC_SPACE_OTHER:
1152                 ARCSTAT_INCR(arcstat_other_size, space);
1153                 break;
1154         case ARC_SPACE_HDRS:
1155                 ARCSTAT_INCR(arcstat_hdr_size, space);
1156                 break;
1157         case ARC_SPACE_L2HDRS:
1158                 ARCSTAT_INCR(arcstat_l2_hdr_size, space);
1159                 break;
1160         }
1161 
1162         atomic_add_64(&arc_meta_used, space);
1163         atomic_add_64(&arc_size, space);
1164 }
1165 
1166 void
1167 arc_space_return(uint64_t space, arc_space_type_t type)
1168 {
1169         ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1170 
1171         switch (type) {
1172         case ARC_SPACE_DATA:
1173                 ARCSTAT_INCR(arcstat_data_size, -space);
1174                 break;
1175         case ARC_SPACE_OTHER:
1176                 ARCSTAT_INCR(arcstat_other_size, -space);
1177                 break;
1178         case ARC_SPACE_HDRS:
1179                 ARCSTAT_INCR(arcstat_hdr_size, -space);
1180                 break;
1181         case ARC_SPACE_L2HDRS:
1182                 ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
1183                 break;
1184         }
1185 
1186         ASSERT(arc_meta_used >= space);
1187         if (arc_meta_max < arc_meta_used)
1188                 arc_meta_max = arc_meta_used;
1189         atomic_add_64(&arc_meta_used, -space);
1190         ASSERT(arc_size >= space);
1191         atomic_add_64(&arc_size, -space);
1192 }
1193 
1194 void *
1195 arc_data_buf_alloc(uint64_t size)
1196 {
1197         if (arc_evict_needed(ARC_BUFC_DATA))
1198                 cv_signal(&arc_reclaim_thr_cv);
1199         atomic_add_64(&arc_size, size);
1200         return (zio_data_buf_alloc(size));
1201 }
1202 
1203 void
1204 arc_data_buf_free(void *buf, uint64_t size)
1205 {
1206         zio_data_buf_free(buf, size);
1207         ASSERT(arc_size >= size);
1208         atomic_add_64(&arc_size, -size);
1209 }
1210 
1211 arc_buf_t *
1212 arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
1213 {
1214         arc_buf_hdr_t *hdr;
1215         arc_buf_t *buf;
1216 
1217         ASSERT3U(size, >, 0);
1218         hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
1219         ASSERT(BUF_EMPTY(hdr));
1220         hdr->b_size = size;
1221         hdr->b_type = type;
1222         hdr->b_spa = spa_load_guid(spa);
1223         hdr->b_state = arc_anon;
1224         hdr->b_arc_access = 0;
1225         buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1226         buf->b_hdr = hdr;
1227         buf->b_data = NULL;
1228         buf->b_efunc = NULL;
1229         buf->b_private = NULL;
1230         buf->b_next = NULL;
1231         hdr->b_buf = buf;
1232         arc_get_data_buf(buf);
1233         hdr->b_datacnt = 1;
1234         hdr->b_flags = 0;
1235         ASSERT(refcount_is_zero(&hdr->b_refcnt));
1236         (void) refcount_add(&hdr->b_refcnt, tag);
1237 
1238         return (buf);
1239 }
1240 
1241 static char *arc_onloan_tag = "onloan";
1242 
1243 /*
1244  * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
1245  * flight data by arc_tempreserve_space() until they are "returned". Loaned
1246  * buffers must be returned to the arc before they can be used by the DMU or
1247  * freed.
1248  */
1249 arc_buf_t *
1250 arc_loan_buf(spa_t *spa, int size)
1251 {
1252         arc_buf_t *buf;
1253 
1254         buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA);
1255 
1256         atomic_add_64(&arc_loaned_bytes, size);
1257         return (buf);
1258 }
1259 
1260 /*
1261  * Return a loaned arc buffer to the arc.
1262  */
1263 void
1264 arc_return_buf(arc_buf_t *buf, void *tag)
1265 {
1266         arc_buf_hdr_t *hdr = buf->b_hdr;
1267 
1268         ASSERT(buf->b_data != NULL);
1269         (void) refcount_add(&hdr->b_refcnt, tag);
1270         (void) refcount_remove(&hdr->b_refcnt, arc_onloan_tag);
1271 
1272         atomic_add_64(&arc_loaned_bytes, -hdr->b_size);
1273 }
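
/*
 * A caller-side sketch of the loan protocol (the tag name here is
 * hypothetical):
 *
 *      abuf = arc_loan_buf(spa, size);
 *      ... fill abuf->b_data ...
 *      arc_return_buf(abuf, my_tag);
 *
 * While on loan the bytes are tracked in arc_loaned_bytes and are not
 * counted as in-flight data by arc_tempreserve_space(); arc_return_buf()
 * swaps the onloan tag for the caller's tag and subtracts the buffer
 * size from arc_loaned_bytes.
 */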
1274 
1275 /* Detach an arc_buf from a dbuf (tag) */
1276 void
1277 arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
1278 {
1279         arc_buf_hdr_t *hdr;
1280 
1281         ASSERT(buf->b_data != NULL);
1282         hdr = buf->b_hdr;
1283         (void) refcount_add(&hdr->b_refcnt, arc_onloan_tag);
1284         (void) refcount_remove(&hdr->b_refcnt, tag);
1285         buf->b_efunc = NULL;
1286         buf->b_private = NULL;
1287 
1288         atomic_add_64(&arc_loaned_bytes, hdr->b_size);
1289 }
1290 
1291 static arc_buf_t *
1292 arc_buf_clone(arc_buf_t *from)
1293 {
1294         arc_buf_t *buf;
1295         arc_buf_hdr_t *hdr = from->b_hdr;
1296         uint64_t size = hdr->b_size;
1297 
1298         ASSERT(hdr->b_state != arc_anon);
1299 
1300         buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1301         buf->b_hdr = hdr;
1302         buf->b_data = NULL;
1303         buf->b_efunc = NULL;
1304         buf->b_private = NULL;
1305         buf->b_next = hdr->b_buf;
1306         hdr->b_buf = buf;
1307         arc_get_data_buf(buf);
1308         bcopy(from->b_data, buf->b_data, size);
1309         hdr->b_datacnt += 1;
1310         return (buf);
1311 }
1312 
1313 void
1314 arc_buf_add_ref(arc_buf_t *buf, void* tag)
1315 {
1316         arc_buf_hdr_t *hdr;
1317         kmutex_t *hash_lock;
1318 
1319         /*
1320          * Check to see if this buffer is evicted.  Callers
1321          * must verify b_data != NULL to know if the add_ref
1322          * was successful.
1323          */
1324         mutex_enter(&buf->b_evict_lock);
1325         if (buf->b_data == NULL) {
1326                 mutex_exit(&buf->b_evict_lock);
1327                 return;
1328         }
1329         hash_lock = HDR_LOCK(buf->b_hdr);
1330         mutex_enter(hash_lock);
1331         hdr = buf->b_hdr;
1332         ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1333         mutex_exit(&buf->b_evict_lock);
1334 
1335         ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
1336         add_reference(hdr, hash_lock, tag);
1337         DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
1338         arc_access(hdr, hash_lock);
1339         mutex_exit(hash_lock);
1340         ARCSTAT_BUMP(arcstat_hits);
1341         ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
1342             demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
1343             data, metadata, hits);
1344 }
1345 
1346 /*
1347  * Free the arc data buffer.  If it is an l2arc write in progress,
1348  * the buffer is placed on l2arc_free_on_write to be freed later.
1349  */
1350 static void
1351 arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t),
1352     void *data, size_t size)
1353 {
1354         if (HDR_L2_WRITING(hdr)) {
1355                 l2arc_data_free_t *df;
1356                 df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
1357                 df->l2df_data = data;
1358                 df->l2df_size = size;
1359                 df->l2df_func = free_func;
1360                 mutex_enter(&l2arc_free_on_write_mtx);
1361                 list_insert_head(l2arc_free_on_write, df);
1362                 mutex_exit(&l2arc_free_on_write_mtx);
1363                 ARCSTAT_BUMP(arcstat_l2_free_on_write);
1364         } else {
1365                 free_func(data, size);
1366         }
1367 }
1368 
1369 static void
1370 arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
1371 {
1372         arc_buf_t **bufp;
1373 
1374         /* free up data associated with the buf */
1375         if (buf->b_data) {
1376                 arc_state_t *state = buf->b_hdr->b_state;
1377                 uint64_t size = buf->b_hdr->b_size;
1378                 arc_buf_contents_t type = buf->b_hdr->b_type;
1379 
1380                 arc_cksum_verify(buf);
1381 
1382                 if (!recycle) {
1383                         if (type == ARC_BUFC_METADATA) {
1384                                 arc_buf_data_free(buf->b_hdr, zio_buf_free,
1385                                     buf->b_data, size);
1386                                 arc_space_return(size, ARC_SPACE_DATA);
1387                         } else {
1388                                 ASSERT(type == ARC_BUFC_DATA);
1389                                 arc_buf_data_free(buf->b_hdr,
1390                                     zio_data_buf_free, buf->b_data, size);
1391                                 ARCSTAT_INCR(arcstat_data_size, -size);
1392                                 atomic_add_64(&arc_size, -size);
1393                         }
1394                 }
1395                 if (list_link_active(&buf->b_hdr->b_arc_node)) {
1396                         uint64_t *cnt = &state->arcs_lsize[type];
1397 
1398                         ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
1399                         ASSERT(state != arc_anon);
1400 
1401                         ASSERT3U(*cnt, >=, size);
1402                         atomic_add_64(cnt, -size);
1403                 }
1404                 ASSERT3U(state->arcs_size, >=, size);
1405                 atomic_add_64(&state->arcs_size, -size);
1406                 buf->b_data = NULL;
1407                 ASSERT(buf->b_hdr->b_datacnt > 0);
1408                 buf->b_hdr->b_datacnt -= 1;
1409         }
1410 
1411         /* only remove the buf if requested */
1412         if (!all)
1413                 return;
1414 
1415         /* remove the buf from the hdr list */
1416         for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
1417                 continue;
1418         *bufp = buf->b_next;
1419         buf->b_next = NULL;
1420 
1421         ASSERT(buf->b_efunc == NULL);
1422 
1423         /* clean up the buf */
1424         buf->b_hdr = NULL;
1425         kmem_cache_free(buf_cache, buf);
1426 }
1427 
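     /*
      * Destroy an anonymous, unreferenced header: tear down its L2ARC
      * header (if any), destroy its remaining arc_bufs (deferring those
      * with eviction callbacks to the arc_eviction_list), free the
      * freeze checksum, and return the header to the kmem cache.
      */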
1428 static void
1429 arc_hdr_destroy(arc_buf_hdr_t *hdr)
1430 {
1431         l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;
1432         ASSERT(refcount_is_zero(&hdr->b_refcnt));
1433         ASSERT3P(hdr->b_state, ==, arc_anon);
1434         ASSERT(!HDR_IO_IN_PROGRESS(hdr));
1435 
1436         if (l2hdr != NULL) {
1437                 boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx);
1438                 /*
1439                  * To prevent arc_free() and l2arc_evict() from
1440                  * attempting to free the same buffer at the same time,
1441                  * a FREE_IN_PROGRESS flag is given to arc_free() to
1442                  * give it priority.  l2arc_evict() can't destroy this
1443                  * header while we are waiting on l2arc_buflist_mtx.
1444                  *
1445                  * The hdr may be removed from l2ad_buflist before we
1446                  * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
1447                  */
1448                 if (!buflist_held) {
1449                         mutex_enter(&l2arc_buflist_mtx);
1450                         l2hdr = hdr->b_l2hdr;
1451                 }
1452 
1453                 if (l2hdr != NULL) {
1454                         list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
1455                         ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
1456                         kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
1457                         if (hdr->b_state == arc_l2c_only)
1458                                 l2arc_hdr_stat_remove();
1459                         hdr->b_l2hdr = NULL;
1460                 }
1461 
1462                 if (!buflist_held)
1463                         mutex_exit(&l2arc_buflist_mtx);
1464         }
1465 
1466         if (!BUF_EMPTY(hdr)) {
1467                 ASSERT(!HDR_IN_HASH_TABLE(hdr));
1468                 buf_discard_identity(hdr);
1469         }
1470         while (hdr->b_buf) {
1471                 arc_buf_t *buf = hdr->b_buf;
1472 
1473                 if (buf->b_efunc) {
1474                         mutex_enter(&arc_eviction_mtx);
1475                         mutex_enter(&buf->b_evict_lock);
1476                         ASSERT(buf->b_hdr != NULL);
1477                         arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
1478                         hdr->b_buf = buf->b_next;
1479                         buf->b_hdr = &arc_eviction_hdr;
1480                         buf->b_next = arc_eviction_list;
1481                         arc_eviction_list = buf;
1482                         mutex_exit(&buf->b_evict_lock);
1483                         mutex_exit(&arc_eviction_mtx);
1484                 } else {
1485                         arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
1486                 }
1487         }
1488         if (hdr->b_freeze_cksum != NULL) {
1489                 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1490                 hdr->b_freeze_cksum = NULL;
1491         }
1492         if (hdr->b_thawed) {
1493                 kmem_free(hdr->b_thawed, 1);
1494                 hdr->b_thawed = NULL;
1495         }
1496 
1497         ASSERT(!list_link_active(&hdr->b_arc_node));
1498         ASSERT3P(hdr->b_hash_next, ==, NULL);
1499         ASSERT3P(hdr->b_acb, ==, NULL);
1500         kmem_cache_free(hdr_cache, hdr);
1501 }
1502 
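     /*
      * Free a buffer that has no eviction callback.  The tag's reference
      * is dropped and the buffer is destroyed, or simply marked available
      * when it holds the header's last copy of the data.  An anonymous
      * header is destroyed as well, unless an in-flight write still needs
      * it.
      */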
1503 void
1504 arc_buf_free(arc_buf_t *buf, void *tag)
1505 {
1506         arc_buf_hdr_t *hdr = buf->b_hdr;
1507         int hashed = hdr->b_state != arc_anon;
1508 
1509         ASSERT(buf->b_efunc == NULL);
1510         ASSERT(buf->b_data != NULL);
1511 
1512         if (hashed) {
1513                 kmutex_t *hash_lock = HDR_LOCK(hdr);
1514 
1515                 mutex_enter(hash_lock);
1516                 hdr = buf->b_hdr;
1517                 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1518 
1519                 (void) remove_reference(hdr, hash_lock, tag);
1520                 if (hdr->b_datacnt > 1) {
1521                         arc_buf_destroy(buf, FALSE, TRUE);
1522                 } else {
1523                         ASSERT(buf == hdr->b_buf);
1524                         ASSERT(buf->b_efunc == NULL);
1525                         hdr->b_flags |= ARC_BUF_AVAILABLE;
1526                 }
1527                 mutex_exit(hash_lock);
1528         } else if (HDR_IO_IN_PROGRESS(hdr)) {
1529                 int destroy_hdr;
1530                 /*
1531                  * We are in the middle of an async write.  Don't destroy
1532                  * this buffer unless the write completes before we finish
1533                  * decrementing the reference count.
1534                  */
1535                 mutex_enter(&arc_eviction_mtx);
1536                 (void) remove_reference(hdr, NULL, tag);
1537                 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1538                 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
1539                 mutex_exit(&arc_eviction_mtx);
1540                 if (destroy_hdr)
1541                         arc_hdr_destroy(hdr);
1542         } else {
1543                 if (remove_reference(hdr, NULL, tag) > 0)
1544                         arc_buf_destroy(buf, FALSE, TRUE);
1545                 else
1546                         arc_hdr_destroy(hdr);
1547         }
1548 }
1549 
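     /*
      * Drop the tag's reference on a buffer.  If the buffer has no
      * eviction callback it is destroyed here (or marked available when
      * it is the header's last copy of the data).  Returns non-zero if
      * no eviction callback was registered.
      */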
1550 int
1551 arc_buf_remove_ref(arc_buf_t *buf, void *tag)
1552 {
1553         arc_buf_hdr_t *hdr = buf->b_hdr;
1554         kmutex_t *hash_lock = HDR_LOCK(hdr);
1555         int no_callback = (buf->b_efunc == NULL);
1556 
1557         if (hdr->b_state == arc_anon) {
1558                 ASSERT(hdr->b_datacnt == 1);
1559                 arc_buf_free(buf, tag);
1560                 return (no_callback);
1561         }
1562 
1563         mutex_enter(hash_lock);
1564         hdr = buf->b_hdr;
1565         ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1566         ASSERT(hdr->b_state != arc_anon);
1567         ASSERT(buf->b_data != NULL);
1568 
1569         (void) remove_reference(hdr, hash_lock, tag);
1570         if (hdr->b_datacnt > 1) {
1571                 if (no_callback)
1572                         arc_buf_destroy(buf, FALSE, TRUE);
1573         } else if (no_callback) {
1574                 ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
1575                 ASSERT(buf->b_efunc == NULL);
1576                 hdr->b_flags |= ARC_BUF_AVAILABLE;
1577         }
1578         ASSERT(no_callback || hdr->b_datacnt > 1 ||
1579             refcount_is_zero(&hdr->b_refcnt));
1580         mutex_exit(hash_lock);
1581         return (no_callback);
1582 }
1583 
1584 int
1585 arc_buf_size(arc_buf_t *buf)
1586 {
1587         return (buf->b_hdr->b_size);
1588 }
1589 
1590 /*
1591  * Evict buffers from list until we've removed the specified number of
1592  * bytes.  Move the removed buffers to the appropriate evict state.
1593  * If the recycle flag is set, then attempt to "recycle" a buffer:
1594  * - look for a buffer to evict that is `bytes' long.
1595  * - return the data block from this buffer rather than freeing it.
1596  * This flag is used by callers that are trying to make space for a
1597  * new buffer in a full arc cache.
1598  *
1599  * This function makes a "best effort".  It skips over any buffers
1600  * it can't get a hash_lock on, and so may not catch all candidates.
1601  * It may also return without evicting as much space as requested.
1602  */
1603 static void *
1604 arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
1605     arc_buf_contents_t type)
1606 {
1607         arc_state_t *evicted_state;
1608         uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
1609         arc_buf_hdr_t *ab, *ab_prev = NULL;
1610         list_t *list = &state->arcs_list[type];
1611         kmutex_t *hash_lock;
1612         boolean_t have_lock;
1613         void *stolen = NULL;
1614 
1615         ASSERT(state == arc_mru || state == arc_mfu);
1616 
1617         evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
1618 
1619         mutex_enter(&state->arcs_mtx);
1620         mutex_enter(&evicted_state->arcs_mtx);
1621 
1622         for (ab = list_tail(list); ab; ab = ab_prev) {
1623                 ab_prev = list_prev(list, ab);
1624                 /* prefetch buffers have a minimum lifespan */
1625                 if (HDR_IO_IN_PROGRESS(ab) ||
1626                     (spa && ab->b_spa != spa) ||
1627                     (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
1628                     ddi_get_lbolt() - ab->b_arc_access <
1629                     arc_min_prefetch_lifespan)) {
1630                         skipped++;
1631                         continue;
1632                 }
1633                 /* "lookahead" for better eviction candidate */
1634                 if (recycle && ab->b_size != bytes &&
1635                     ab_prev && ab_prev->b_size == bytes)
1636                         continue;
1637                 hash_lock = HDR_LOCK(ab);
1638                 have_lock = MUTEX_HELD(hash_lock);
1639                 if (have_lock || mutex_tryenter(hash_lock)) {
1640                         ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
1641                         ASSERT(ab->b_datacnt > 0);
1642                         while (ab->b_buf) {
1643                                 arc_buf_t *buf = ab->b_buf;
1644                                 if (!mutex_tryenter(&buf->b_evict_lock)) {
1645                                         missed += 1;
1646                                         break;
1647                                 }
1648                                 if (buf->b_data) {
1649                                         bytes_evicted += ab->b_size;
1650                                         if (recycle && ab->b_type == type &&
1651                                             ab->b_size == bytes &&
1652                                             !HDR_L2_WRITING(ab)) {
1653                                                 stolen = buf->b_data;
1654                                                 recycle = FALSE;
1655                                         }
1656                                 }
1657                                 if (buf->b_efunc) {
1658                                         mutex_enter(&arc_eviction_mtx);
1659                                         arc_buf_destroy(buf,
1660                                             buf->b_data == stolen, FALSE);
1661                                         ab->b_buf = buf->b_next;
1662                                         buf->b_hdr = &arc_eviction_hdr;
1663                                         buf->b_next = arc_eviction_list;
1664                                         arc_eviction_list = buf;
1665                                         mutex_exit(&arc_eviction_mtx);
1666                                         mutex_exit(&buf->b_evict_lock);
1667                                 } else {
1668                                         mutex_exit(&buf->b_evict_lock);
1669                                         arc_buf_destroy(buf,
1670                                             buf->b_data == stolen, TRUE);
1671                                 }
1672                         }
1673 
1674                         if (ab->b_l2hdr) {
1675                                 ARCSTAT_INCR(arcstat_evict_l2_cached,
1676                                     ab->b_size);
1677                         } else {
1678                                 if (l2arc_write_eligible(ab->b_spa, ab)) {
1679                                         ARCSTAT_INCR(arcstat_evict_l2_eligible,
1680                                             ab->b_size);
1681                                 } else {
1682                                         ARCSTAT_INCR(
1683                                             arcstat_evict_l2_ineligible,
1684                                             ab->b_size);
1685                                 }
1686                         }
1687 
1688                         if (ab->b_datacnt == 0) {
1689                                 arc_change_state(evicted_state, ab, hash_lock);
1690                                 ASSERT(HDR_IN_HASH_TABLE(ab));
1691                                 ab->b_flags |= ARC_IN_HASH_TABLE;
1692                                 ab->b_flags &= ~ARC_BUF_AVAILABLE;
1693                                 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
1694                         }
1695                         if (!have_lock)
1696                                 mutex_exit(hash_lock);
1697                         if (bytes >= 0 && bytes_evicted >= bytes)
1698                                 break;
1699                 } else {
1700                         missed += 1;
1701                 }
1702         }
1703 
1704         mutex_exit(&evicted_state->arcs_mtx);
1705         mutex_exit(&state->arcs_mtx);
1706 
1707         if (bytes_evicted < bytes)
1708                 dprintf("only evicted %lld bytes from %p",
1709                     (longlong_t)bytes_evicted, state);
1710 
1711         if (skipped)
1712                 ARCSTAT_INCR(arcstat_evict_skip, skipped);
1713 
1714         if (missed)
1715                 ARCSTAT_INCR(arcstat_mutex_miss, missed);
1716 
1717         /*
1718          * We have just evicted some data into the ghost state, make
1719          * sure we also adjust the ghost state size if necessary.
1720          */
1721         if (arc_no_grow &&
1722             arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) {
1723                 int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size +
1724                     arc_mru_ghost->arcs_size - arc_c;
1725 
1726                 if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) {
1727                         int64_t todelete =
1728                             MIN(arc_mru_ghost->arcs_lsize[type], mru_over);
1729                         arc_evict_ghost(arc_mru_ghost, 0, todelete);
1730                 } else if (arc_mfu_ghost->arcs_lsize[type] > 0) {
1731                         int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type],
1732                             arc_mru_ghost->arcs_size +
1733                             arc_mfu_ghost->arcs_size - arc_c);
1734                         arc_evict_ghost(arc_mfu_ghost, 0, todelete);
1735                 }
1736         }
1737 
1738         return (stolen);
1739 }
1740 
1741 /*
1742  * Remove buffers from list until we've removed the specified number of
1743  * bytes.  Destroy the buffers that are removed.
1744  */
1745 static void
1746 arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
1747 {
1748         arc_buf_hdr_t *ab, *ab_prev;
1749         arc_buf_hdr_t marker = { 0 };
1750         list_t *list = &state->arcs_list[ARC_BUFC_DATA];
1751         kmutex_t *hash_lock;
1752         uint64_t bytes_deleted = 0;
1753         uint64_t bufs_skipped = 0;
1754 
1755         ASSERT(GHOST_STATE(state));
1756 top:
1757         mutex_enter(&state->arcs_mtx);
1758         for (ab = list_tail(list); ab; ab = ab_prev) {
1759                 ab_prev = list_prev(list, ab);
1760                 if (spa && ab->b_spa != spa)
1761                         continue;
1762 
1763                 /* ignore markers */
1764                 if (ab->b_spa == 0)
1765                         continue;
1766 
1767                 hash_lock = HDR_LOCK(ab);
1768                 /* caller may be trying to modify this buffer, skip it */
1769                 if (MUTEX_HELD(hash_lock))
1770                         continue;
1771                 if (mutex_tryenter(hash_lock)) {
1772                         ASSERT(!HDR_IO_IN_PROGRESS(ab));
1773                         ASSERT(ab->b_buf == NULL);
1774                         ARCSTAT_BUMP(arcstat_deleted);
1775                         bytes_deleted += ab->b_size;
1776 
1777                         if (ab->b_l2hdr != NULL) {
1778                                 /*
1779                                  * This buffer is cached on the 2nd Level ARC;
1780                                  * don't destroy the header.
1781                                  */
1782                                 arc_change_state(arc_l2c_only, ab, hash_lock);
1783                                 mutex_exit(hash_lock);
1784                         } else {
1785                                 arc_change_state(arc_anon, ab, hash_lock);
1786                                 mutex_exit(hash_lock);
1787                                 arc_hdr_destroy(ab);
1788                         }
1789 
1790                         DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
1791                         if (bytes >= 0 && bytes_deleted >= bytes)
1792                                 break;
1793                 } else if (bytes < 0) {
1794                         /*
1795                          * Insert a list marker and then wait for the
1796                          * hash lock to become available. Once it's
1797                          * available, restart from where we left off.
1798                          */
1799                         list_insert_after(list, ab, &marker);
1800                         mutex_exit(&state->arcs_mtx);
1801                         mutex_enter(hash_lock);
1802                         mutex_exit(hash_lock);
1803                         mutex_enter(&state->arcs_mtx);
1804                         ab_prev = list_prev(list, &marker);
1805                         list_remove(list, &marker);
1806                 } else
1807                         bufs_skipped += 1;
1808         }
1809         mutex_exit(&state->arcs_mtx);
1810 
1811         if (list == &state->arcs_list[ARC_BUFC_DATA] &&
1812             (bytes < 0 || bytes_deleted < bytes)) {
1813                 list = &state->arcs_list[ARC_BUFC_METADATA];
1814                 goto top;
1815         }
1816 
1817         if (bufs_skipped) {
1818                 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
1819                 ASSERT(bytes >= 0);
1820         }
1821 
1822         if (bytes_deleted < bytes)
1823                 dprintf("only deleted %lld bytes from %p",
1824                     (longlong_t)bytes_deleted, state);
1825 }
1826 
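     /*
      * Evict buffers until the cache is back within its targets: first
      * trim the MRU side down toward arc_p, then the MFU side down toward
      * arc_c, and finally trim the ghost lists so that mru + mru_ghost
      * and mru_ghost + mfu_ghost each stay within arc_c.
      */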
1827 static void
1828 arc_adjust(void)
1829 {
1830         int64_t adjustment, delta;
1831 
1832         /*
1833          * Adjust MRU size
1834          */
1835 
1836         adjustment = MIN((int64_t)(arc_size - arc_c),
1837             (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used -
1838             arc_p));
1839 
1840         if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
1841                 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment);
1842                 (void) arc_evict(arc_mru, 0, delta, FALSE, ARC_BUFC_DATA);
1843                 adjustment -= delta;
1844         }
1845 
1846         if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
1847                 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment);
1848                 (void) arc_evict(arc_mru, 0, delta, FALSE,
1849                     ARC_BUFC_METADATA);
1850         }
1851 
1852         /*
1853          * Adjust MFU size
1854          */
1855 
1856         adjustment = arc_size - arc_c;
1857 
1858         if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
1859                 delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]);
1860                 (void) arc_evict(arc_mfu, 0, delta, FALSE, ARC_BUFC_DATA);
1861                 adjustment -= delta;
1862         }
1863 
1864         if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
1865                 int64_t delta = MIN(adjustment,
1866                     arc_mfu->arcs_lsize[ARC_BUFC_METADATA]);
1867                 (void) arc_evict(arc_mfu, 0, delta, FALSE,
1868                     ARC_BUFC_METADATA);
1869         }
1870 
1871         /*
1872          * Adjust ghost lists
1873          */
1874 
1875         adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c;
1876 
1877         if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) {
1878                 delta = MIN(arc_mru_ghost->arcs_size, adjustment);
1879                 arc_evict_ghost(arc_mru_ghost, 0, delta);
1880         }
1881 
1882         adjustment =
1883             arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c;
1884 
1885         if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) {
1886                 delta = MIN(arc_mfu_ghost->arcs_size, adjustment);
1887                 arc_evict_ghost(arc_mfu_ghost, 0, delta);
1888         }
1889 }
1890 
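     /*
      * Invoke the eviction callbacks (b_efunc) for buffers whose data has
      * already been evicted, then free the arc_buf structures.  Called
      * from the reclaim thread and from arc_flush().
      */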
1891 static void
1892 arc_do_user_evicts(void)
1893 {
1894         mutex_enter(&arc_eviction_mtx);
1895         while (arc_eviction_list != NULL) {
1896                 arc_buf_t *buf = arc_eviction_list;
1897                 arc_eviction_list = buf->b_next;
1898                 mutex_enter(&buf->b_evict_lock);
1899                 buf->b_hdr = NULL;
1900                 mutex_exit(&buf->b_evict_lock);
1901                 mutex_exit(&arc_eviction_mtx);
1902 
1903                 if (buf->b_efunc != NULL)
1904                         VERIFY(buf->b_efunc(buf) == 0);
1905 
1906                 buf->b_efunc = NULL;
1907                 buf->b_private = NULL;
1908                 kmem_cache_free(buf_cache, buf);
1909                 mutex_enter(&arc_eviction_mtx);
1910         }
1911         mutex_exit(&arc_eviction_mtx);
1912 }
1913 
1914 /*
1915  * Flush all *evictable* data from the cache for the given spa.
1916  * NOTE: this will not touch "active" (i.e. referenced) data.
1917  */
1918 void
1919 arc_flush(spa_t *spa)
1920 {
1921         uint64_t guid = 0;
1922 
1923         if (spa)
1924                 guid = spa_load_guid(spa);
1925 
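             /*
              * When flushing a single pool, one pass per list suffices;
              * the lists may never drain completely since buffers that
              * belong to other pools remain on them.
              */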
1926         while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) {
1927                 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA);
1928                 if (spa)
1929                         break;
1930         }
1931         while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) {
1932                 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA);
1933                 if (spa)
1934                         break;
1935         }
1936         while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) {
1937                 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA);
1938                 if (spa)
1939                         break;
1940         }
1941         while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) {
1942                 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA);
1943                 if (spa)
1944                         break;
1945         }
1946 
1947         arc_evict_ghost(arc_mru_ghost, guid, -1);
1948         arc_evict_ghost(arc_mfu_ghost, guid, -1);
1949 
1950         mutex_enter(&arc_reclaim_thr_lock);
1951         arc_do_user_evicts();
1952         mutex_exit(&arc_reclaim_thr_lock);
1953         ASSERT(spa || arc_eviction_list == NULL);
1954 }
1955 
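     /*
      * Reduce the target cache size: shrink arc_c by 1/2^arc_shrink_shift
      * (or by however many pages the pageout scanner needs, if that is
      * more), lower arc_p proportionally, and then evict down to the new
      * target.
      */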
1956 void
1957 arc_shrink(void)
1958 {
1959         if (arc_c > arc_c_min) {
1960                 uint64_t to_free;
1961 
1962 #ifdef _KERNEL
1963                 to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree));
1964 #else
1965                 to_free = arc_c >> arc_shrink_shift;
1966 #endif
1967                 if (arc_c > arc_c_min + to_free)
1968                         atomic_add_64(&arc_c, -to_free);
1969                 else
1970                         arc_c = arc_c_min;
1971 
1972                 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
1973                 if (arc_c > arc_size)
1974                         arc_c = MAX(arc_size, arc_c_min);
1975                 if (arc_p > arc_c)
1976                         arc_p = (arc_c >> 1);
1977                 ASSERT(arc_c >= arc_c_min);
1978                 ASSERT((int64_t)arc_p >= 0);
1979         }
1980 
1981         if (arc_size > arc_c)
1982                 arc_adjust();
1983 }
1984 
1985 /*
1986  * Determine if the system is under memory pressure and is asking
1987  * to reclaim memory. A return value of 1 indicates that the system
1988  * is under memory pressure and that the arc should adjust accordingly.
1989  */
1990 static int
1991 arc_reclaim_needed(void)
1992 {
1993         uint64_t extra;
1994 
1995 #ifdef _KERNEL
1996 
1997         if (needfree)
1998                 return (1);
1999 
2000         /*
2001          * take 'desfree' extra pages, so we reclaim sooner, rather than later
2002          */
2003         extra = desfree;
2004 
2005         /*
2006          * check that we're out of range of the pageout scanner.  It starts to
2007          * schedule paging if freemem is less than lotsfree plus needfree.
2008          * lotsfree is the high-water mark for pageout, and needfree is the
2009          * number of needed free pages.  We add extra pages here to make sure
2010          * the scanner doesn't start up while we're freeing memory.
2011          */
2012         if (freemem < lotsfree + needfree + extra)
2013                 return (1);
2014 
2015         /*
2016          * check to make sure that swapfs has enough space so that anon
2017          * reservations can still succeed. anon_resvmem() checks that the
2018          * availrmem is greater than swapfs_minfree plus the number of reserved
2019          * swap pages.  We also add a bit of extra here just to prevent
2020          * circumstances from getting really dire.
2021          */
2022         if (availrmem < swapfs_minfree + swapfs_reserve + extra)
2023                 return (1);
2024 
2025 #if defined(__i386)
2026         /*
2027          * If we're on an i386 platform, it's possible that we'll exhaust the
2028          * kernel heap space before we ever run out of available physical
2029          * memory.  Most checks of the size of the heap_arena compare against
2030          * tune.t_minarmem, which is the minimum available real memory that we
2031          * can have in the system.  However, this is generally fixed at 25 pages
2032          * which is so low that it's useless.  In this comparison, we seek to
2033          * calculate the total heap-size, and reclaim if more than 3/4ths of the
2034          * heap is allocated.  (Or, in the calculation, if less than 1/4th is
2035          * free)
2036          */
2037         if (vmem_size(heap_arena, VMEM_FREE) <
2038             (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2))
2039                 return (1);
2040 #endif
2041 
2042         /*
2043          * If zio data pages are being allocated out of a separate heap segment,
2044          * then enforce that the size of available vmem for this arena remains
2045          * above about 1/16th free.
2046          *
2047          * Note: The 1/16th arena free requirement was put in place
2048          * to aggressively evict memory from the arc in order to avoid
2049          * memory fragmentation issues.
2050          */
2051         if (zio_arena != NULL &&
2052             vmem_size(zio_arena, VMEM_FREE) <
2053             (vmem_size(zio_arena, VMEM_ALLOC) >> 4))
2054                 return (1);
2055 #else
2056         if (spa_get_random(100) == 0)
2057                 return (1);
2058 #endif
2059         return (0);
2060 }
2061 
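     /*
      * Reap unused memory from the kmem caches backing the ARC: the zio
      * buffer caches, the header and buf caches, and, for an aggressive
      * reclaim, the zio arena's quantum caches.  An aggressive reclaim
      * also shrinks the cache target via arc_shrink().
      */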
2062 static void
2063 arc_kmem_reap_now(arc_reclaim_strategy_t strat)
2064 {
2065         size_t                  i;
2066         kmem_cache_t            *prev_cache = NULL;
2067         kmem_cache_t            *prev_data_cache = NULL;
2068         extern kmem_cache_t     *zio_buf_cache[];
2069         extern kmem_cache_t     *zio_data_buf_cache[];
2070 
2071 #ifdef _KERNEL
2072         if (arc_meta_used >= arc_meta_limit) {
2073                 /*
2074                  * We are exceeding our meta-data cache limit.
2075                  * Purge some DNLC entries to release holds on meta-data.
2076                  */
2077                 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
2078         }
2079 #if defined(__i386)
2080         /*
2081          * Reclaim unused memory from all kmem caches.
2082          */
2083         kmem_reap();
2084 #endif
2085 #endif
2086 
2087         /*
2088          * An aggressive reclamation will shrink the cache size as well as
2089          * reap free buffers from the arc kmem caches.
2090          */
2091         if (strat == ARC_RECLAIM_AGGR)
2092                 arc_shrink();
2093 
2094         for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
2095                 if (zio_buf_cache[i] != prev_cache) {
2096                         prev_cache = zio_buf_cache[i];
2097                         kmem_cache_reap_now(zio_buf_cache[i]);
2098                 }
2099                 if (zio_data_buf_cache[i] != prev_data_cache) {
2100                         prev_data_cache = zio_data_buf_cache[i];
2101                         kmem_cache_reap_now(zio_data_buf_cache[i]);
2102                 }
2103         }
2104         kmem_cache_reap_now(buf_cache);
2105         kmem_cache_reap_now(hdr_cache);
2106 
2107         /*
2108          * Ask the vmem arena to reclaim unused memory from its
2109          * quantum caches.
2110          */
2111         if (zio_arena != NULL && strat == ARC_RECLAIM_AGGR)
2112                 vmem_qcache_reap(zio_arena);
2113 }
2114 
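     /*
      * The ARC reclaim thread wakes up about once a second (or when
      * signalled) to decide between conservative and aggressive reclaim,
      * reap the kmem caches when memory is tight, re-enable cache growth
      * once arc_grow_retry seconds have passed, run arc_adjust(), and
      * process any pending user evictions.
      */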
2115 static void
2116 arc_reclaim_thread(void)
2117 {
2118         clock_t                 growtime = 0;
2119         arc_reclaim_strategy_t  last_reclaim = ARC_RECLAIM_CONS;
2120         callb_cpr_t             cpr;
2121 
2122         CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
2123 
2124         mutex_enter(&arc_reclaim_thr_lock);
2125         while (arc_thread_exit == 0) {
2126                 if (arc_reclaim_needed()) {
2127 
2128                         if (arc_no_grow) {
2129                                 if (last_reclaim == ARC_RECLAIM_CONS) {
2130                                         last_reclaim = ARC_RECLAIM_AGGR;
2131                                 } else {
2132                                         last_reclaim = ARC_RECLAIM_CONS;
2133                                 }
2134                         } else {
2135                                 arc_no_grow = TRUE;
2136                                 last_reclaim = ARC_RECLAIM_AGGR;
2137                                 membar_producer();
2138                         }
2139 
2140                         /* reset the growth delay for every reclaim */
2141                         growtime = ddi_get_lbolt() + (arc_grow_retry * hz);
2142 
2143                         arc_kmem_reap_now(last_reclaim);
2144                         arc_warm = B_TRUE;
2145 
2146                 } else if (arc_no_grow && ddi_get_lbolt() >= growtime) {
2147                         arc_no_grow = FALSE;
2148                 }
2149 
2150                 arc_adjust();
2151 
2152                 if (arc_eviction_list != NULL)
2153                         arc_do_user_evicts();
2154 
2155                 /* block until needed, or one second, whichever is shorter */
2156                 CALLB_CPR_SAFE_BEGIN(&cpr);
2157                 (void) cv_timedwait(&arc_reclaim_thr_cv,
2158                     &arc_reclaim_thr_lock, (ddi_get_lbolt() + hz));
2159                 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
2160         }
2161 
2162         arc_thread_exit = 0;
2163         cv_broadcast(&arc_reclaim_thr_cv);
2164         CALLB_CPR_EXIT(&cpr);               /* drops arc_reclaim_thr_lock */
2165         thread_exit();
2166 }
2167 
2168 /*
2169  * Adapt arc info given the number of bytes we are trying to add and
2170  * the state that we are coming from.  This function is only called
2171  * when we are adding new content to the cache.
2172  */
2173 static void
2174 arc_adapt(int bytes, arc_state_t *state)
2175 {
2176         int mult;
2177         uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
2178 
2179         if (state == arc_l2c_only)
2180                 return;
2181 
2182         ASSERT(bytes > 0);
2183         /*
2184          * Adapt the target size of the MRU list:
2185          *      - if we just hit in the MRU ghost list, then increase
2186          *        the target size of the MRU list.
2187          *      - if we just hit in the MFU ghost list, then increase
2188          *        the target size of the MFU list by decreasing the
2189          *        target size of the MRU list.
2190          */
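             /*
              * The adjustment is scaled by the relative size of the two
              * ghost lists: e.g. if the MFU ghost list is three times the
              * size of the MRU ghost list, a hit in the MRU ghost list
              * grows arc_p by 3 * bytes (the multiplier is capped at 10).
              */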
2191         if (state == arc_mru_ghost) {
2192                 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
2193                     1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
2194                 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
2195 
2196                 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
2197         } else if (state == arc_mfu_ghost) {
2198                 uint64_t delta;
2199 
2200                 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
2201                     1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));
2202                 mult = MIN(mult, 10);
2203 
2204                 delta = MIN(bytes * mult, arc_p);
2205                 arc_p = MAX(arc_p_min, arc_p - delta);
2206         }
2207         ASSERT((int64_t)arc_p >= 0);
2208 
2209         if (arc_reclaim_needed()) {
2210                 cv_signal(&arc_reclaim_thr_cv);
2211                 return;
2212         }
2213 
2214         if (arc_no_grow)
2215                 return;
2216 
2217         if (arc_c >= arc_c_max)
2218                 return;
2219 
2220         /*
2221          * If we're within (2 * maxblocksize) bytes of the target
2222          * cache size, increase the target cache size by the requested bytes.
2223          */
2224         if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
2225                 atomic_add_64(&arc_c, (int64_t)bytes);
2226                 if (arc_c > arc_c_max)
2227                         arc_c = arc_c_max;
2228                 else if (state == arc_anon)
2229                         atomic_add_64(&arc_p, (int64_t)bytes);
2230                 if (arc_p > arc_c)
2231                         arc_p = arc_c;
2232         }
2233         ASSERT((int64_t)arc_p >= 0);
2234 }
2235 
2236 /*
2237  * Check if the cache has reached its limits and eviction is required
2238  * prior to insert.
2239  */
2240 static int
2241 arc_evict_needed(arc_buf_contents_t type)
2242 {
2243         if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
2244                 return (1);
2245 
2246         if (arc_reclaim_needed())
2247                 return (1);
2248 
2249         return (arc_size > arc_c);
2250 }
2251 
2252 /*
2253  * The buffer, supplied as the first argument, needs a data block.
2254  * So, if we are at cache max, determine which cache should be victimized.
2255  * We have the following cases:
2256  *
2257  * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
2258  * In this situation if we're out of space, but the resident size of the MFU is
2259  * under the limit, victimize the MFU cache to satisfy this insertion request.
2260  *
2261  * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
2262  * Here, we've used up all of the available space for the MRU, so we need to
2263  * evict from our own cache instead.  Evict from the set of resident MRU
2264  * entries.
2265  *
2266  * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
2267  * c minus p represents the MFU space in the cache, since p is the size of the
2268  * cache that is dedicated to the MRU.  In this situation there's still space on
2269  * the MFU side, so the MRU side needs to be victimized.
2270  *
2271  * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
2272  * MFU's resident set is consuming more space than it has been allotted.  In
2273  * this situation, we must victimize our own cache, the MFU, for this insertion.
2274  */
2275 static void
2276 arc_get_data_buf(arc_buf_t *buf)
2277 {
2278         arc_state_t             *state = buf->b_hdr->b_state;
2279         uint64_t                size = buf->b_hdr->b_size;
2280         arc_buf_contents_t      type = buf->b_hdr->b_type;
2281 
2282         arc_adapt(size, state);
2283 
2284         /*
2285          * We have not yet reached cache maximum size,
2286          * just allocate a new buffer.
2287          */
2288         if (!arc_evict_needed(type)) {
2289                 if (type == ARC_BUFC_METADATA) {
2290                         buf->b_data = zio_buf_alloc(size);
2291                         arc_space_consume(size, ARC_SPACE_DATA);
2292                 } else {
2293                         ASSERT(type == ARC_BUFC_DATA);
2294                         buf->b_data = zio_data_buf_alloc(size);
2295                         ARCSTAT_INCR(arcstat_data_size, size);
2296                         atomic_add_64(&arc_size, size);
2297                 }
2298                 goto out;
2299         }
2300 
2301         /*
2302          * If we are prefetching from the mfu ghost list, this buffer
2303          * will end up on the mru list; so steal space from there.
2304          */
2305         if (state == arc_mfu_ghost)
2306                 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
2307         else if (state == arc_mru_ghost)
2308                 state = arc_mru;
2309 
2310         if (state == arc_mru || state == arc_anon) {
2311                 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
2312                 state = (arc_mfu->arcs_lsize[type] >= size &&
2313                     arc_p > mru_used) ? arc_mfu : arc_mru;
2314         } else {
2315                 /* MFU cases */
2316                 uint64_t mfu_space = arc_c - arc_p;
2317                 state = (arc_mru->arcs_lsize[type] >= size &&
2318                     mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
2319         }
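             /*
              * Try to recycle an evictable buffer of exactly this size
              * from the chosen state; if that fails, fall back to a fresh
              * allocation and record the recycle miss.
              */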
2320         if ((buf->b_data = arc_evict(state, 0, size, TRUE, type)) == NULL) {
2321                 if (type == ARC_BUFC_METADATA) {
2322                         buf->b_data = zio_buf_alloc(size);
2323                         arc_space_consume(size, ARC_SPACE_DATA);
2324                 } else {
2325                         ASSERT(type == ARC_BUFC_DATA);
2326                         buf->b_data = zio_data_buf_alloc(size);
2327                         ARCSTAT_INCR(arcstat_data_size, size);
2328                         atomic_add_64(&arc_size, size);
2329                 }
2330                 ARCSTAT_BUMP(arcstat_recycle_miss);
2331         }
2332         ASSERT(buf->b_data != NULL);
2333 out:
2334         /*
2335          * Update the state size.  Note that ghost states have a
2336          * "ghost size" and so don't need to be updated.
2337          */
2338         if (!GHOST_STATE(buf->b_hdr->b_state)) {
2339                 arc_buf_hdr_t *hdr = buf->b_hdr;
2340 
2341                 atomic_add_64(&hdr->b_state->arcs_size, size);
2342                 if (list_link_active(&hdr->b_arc_node)) {
2343                         ASSERT(refcount_is_zero(&hdr->b_refcnt));
2344                         atomic_add_64(&hdr->b_state->arcs_lsize[type], size);
2345                 }
2346                 /*
2347                  * If we are growing the cache, and we are adding anonymous
2348                  * data, and we have outgrown arc_p, update arc_p
2349                  */
2350                 if (arc_size < arc_c && hdr->b_state == arc_anon &&
2351                     arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
2352                         arc_p = MIN(arc_c, arc_p + size);
2353         }
2354 }
2355 
2356 /*
2357  * This routine is called whenever a buffer is accessed.
2358  * NOTE: the hash lock must be held by the caller and is not dropped here.
2359  */
2360 static void
2361 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
2362 {
2363         clock_t now;
2364 
2365         ASSERT(MUTEX_HELD(hash_lock));
2366 
2367         if (buf->b_state == arc_anon) {
2368                 /*
2369                  * This buffer is not in the cache, and does not
2370                  * appear in our "ghost" list.  Add the new buffer
2371                  * to the MRU state.
2372                  */
2373 
2374                 ASSERT(buf->b_arc_access == 0);
2375                 buf->b_arc_access = ddi_get_lbolt();
2376                 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2377                 arc_change_state(arc_mru, buf, hash_lock);
2378 
2379         } else if (buf->b_state == arc_mru) {
2380                 now = ddi_get_lbolt();
2381 
2382                 /*
2383                  * If this buffer is here because of a prefetch, then either:
2384                  * - clear the flag if this is a "referencing" read
2385                  *   (any subsequent access will bump this into the MFU state).
2386                  * or
2387                  * - move the buffer to the head of the list if this is
2388                  *   another prefetch (to make it less likely to be evicted).
2389                  */
2390                 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2391                         if (refcount_count(&buf->b_refcnt) == 0) {
2392                                 ASSERT(list_link_active(&buf->b_arc_node));
2393                         } else {
2394                                 buf->b_flags &= ~ARC_PREFETCH;
2395                                 ARCSTAT_BUMP(arcstat_mru_hits);
2396                         }
2397                         buf->b_arc_access = now;
2398                         return;
2399                 }
2400 
2401                 /*
2402                  * This buffer has been "accessed" only once so far,
2403                  * but it is still in the cache. Move it to the MFU
2404                  * state.
2405                  */
2406                 if (now > buf->b_arc_access + ARC_MINTIME) {
2407                         /*
2408                          * At least ARC_MINTIME has passed since we
2409                          * instantiated this buffer.  Move it to the
2410                          * most frequently used state.
2411                          */
2412                         buf->b_arc_access = now;
2413                         DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2414                         arc_change_state(arc_mfu, buf, hash_lock);
2415                 }
2416                 ARCSTAT_BUMP(arcstat_mru_hits);
2417         } else if (buf->b_state == arc_mru_ghost) {
2418                 arc_state_t     *new_state;
2419                 /*
2420                  * This buffer has been "accessed" recently, but
2421                  * was evicted from the cache.  Move it to the
2422                  * MFU state.
2423                  */
2424 
2425                 if (buf->b_flags & ARC_PREFETCH) {
2426                         new_state = arc_mru;
2427                         if (refcount_count(&buf->b_refcnt) > 0)
2428                                 buf->b_flags &= ~ARC_PREFETCH;
2429                         DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2430                 } else {
2431                         new_state = arc_mfu;
2432                         DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2433                 }
2434 
2435                 buf->b_arc_access = ddi_get_lbolt();
2436                 arc_change_state(new_state, buf, hash_lock);
2437 
2438                 ARCSTAT_BUMP(arcstat_mru_ghost_hits);
2439         } else if (buf->b_state == arc_mfu) {
2440                 /*
2441                  * This buffer has been accessed more than once and is
2442                  * still in the cache.  Keep it in the MFU state.
2443                  *
2444                  * NOTE: an add_reference() that occurred when we did
2445                  * the arc_read() will have kicked this off the list.
2446                  * If it was a prefetch, we will explicitly move it to
2447                  * the head of the list now.
2448                  */
2449                 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2450                         ASSERT(refcount_count(&buf->b_refcnt) == 0);
2451                         ASSERT(list_link_active(&buf->b_arc_node));
2452                 }
2453                 ARCSTAT_BUMP(arcstat_mfu_hits);
2454                 buf->b_arc_access = ddi_get_lbolt();
2455         } else if (buf->b_state == arc_mfu_ghost) {
2456                 arc_state_t     *new_state = arc_mfu;
2457                 /*
2458                  * This buffer has been accessed more than once but has
2459                  * been evicted from the cache.  Move it back to the
2460                  * MFU state.
2461                  */
2462 
2463                 if (buf->b_flags & ARC_PREFETCH) {
2464                         /*
2465                          * This is a prefetch access...
2466                          * move this block back to the MRU state.
2467                          */
2468                         ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0);
2469                         new_state = arc_mru;
2470                 }
2471 
2472                 buf->b_arc_access = ddi_get_lbolt();
2473                 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2474                 arc_change_state(new_state, buf, hash_lock);
2475 
2476                 ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
2477         } else if (buf->b_state == arc_l2c_only) {
2478                 /*
2479                  * This buffer is on the 2nd Level ARC.
2480                  */
2481 
2482                 buf->b_arc_access = ddi_get_lbolt();
2483                 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2484                 arc_change_state(arc_mfu, buf, hash_lock);
2485         } else {
2486                 ASSERT(!"invalid arc state");
2487         }
2488 }
2489 
2490 /* a generic arc_done_func_t which you can use */
2491 /* ARGSUSED */
2492 void
2493 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
2494 {
2495         if (zio == NULL || zio->io_error == 0)
2496                 bcopy(buf->b_data, arg, buf->b_hdr->b_size);
2497         VERIFY(arc_buf_remove_ref(buf, arg) == 1);
2498 }
2499 
2500 /* a generic arc_done_func_t which stores the buffer in *arg */
2501 void
2502 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
2503 {
2504         arc_buf_t **bufp = arg;
2505         if (zio && zio->io_error) {
2506                 VERIFY(arc_buf_remove_ref(buf, arg) == 1);
2507                 *bufp = NULL;
2508         } else {
2509                 *bufp = buf;
2510                 ASSERT(buf->b_data);
2511         }
2512 }
2513 
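     /*
      * zio completion callback for ARC reads: byteswap the data if
      * needed, compute the checksum, hand a buffer (cloning when there
      * are multiple callers) to each registered callback, and destroy
      * the header if the block was freed while the read was in flight.
      */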
2514 static void
2515 arc_read_done(zio_t *zio)
2516 {
2517         arc_buf_hdr_t   *hdr, *found;
2518         arc_buf_t       *buf;
2519         arc_buf_t       *abuf;  /* buffer we're assigning to callback */
2520         kmutex_t        *hash_lock;
2521         arc_callback_t  *callback_list, *acb;
2522         int             freeable = FALSE;
2523 
2524         buf = zio->io_private;
2525         hdr = buf->b_hdr;
2526 
2527         /*
2528          * The hdr was inserted into the hash table and removed from lists
2529          * prior to starting I/O.  We should find this header, since
2530          * it's in the hash table, and it should be legit since it's
2531          * not possible to evict it during the I/O.  The only possible
2532          * reason for it not to be found is that it was freed during the
2533          * read.
2534          */
2535         found = buf_hash_find(hdr->b_spa, &hdr->b_dva, hdr->b_birth,
2536             &hash_lock);
2537 
2538         ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
2539             (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
2540             (found == hdr && HDR_L2_READING(hdr)));
2541 
2542         hdr->b_flags &= ~ARC_L2_EVICTED;
2543         if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
2544                 hdr->b_flags &= ~ARC_L2CACHE;
2545 
2546         /* byteswap if necessary */
2547         callback_list = hdr->b_acb;
2548         ASSERT(callback_list != NULL);
2549         if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) {
2550                 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ?
2551                     byteswap_uint64_array :
2552                     dmu_ot[BP_GET_TYPE(zio->io_bp)].ot_byteswap;
2553                 func(buf->b_data, hdr->b_size);
2554         }
2555 
2556         arc_cksum_compute(buf, B_FALSE);
2557 
2558         if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) {
2559                 /*
2560                  * Only call arc_access on anonymous buffers.  This is because
2561                  * if we've issued an I/O for an evicted buffer, we've already
2562                  * called arc_access (to prevent any simultaneous readers from
2563                  * getting confused).
2564                  */
2565                 arc_access(hdr, hash_lock);
2566         }
2567 
2568         /* create copies of the data buffer for the callers */
2569         abuf = buf;
2570         for (acb = callback_list; acb; acb = acb->acb_next) {
2571                 if (acb->acb_done) {
2572                         if (abuf == NULL)
2573                                 abuf = arc_buf_clone(buf);
2574                         acb->acb_buf = abuf;
2575                         abuf = NULL;
2576                 }
2577         }
2578         hdr->b_acb = NULL;
2579         hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2580         ASSERT(!HDR_BUF_AVAILABLE(hdr));
2581         if (abuf == buf) {
2582                 ASSERT(buf->b_efunc == NULL);
2583                 ASSERT(hdr->b_datacnt == 1);
2584                 hdr->b_flags |= ARC_BUF_AVAILABLE;
2585         }
2586 
2587         ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
2588 
2589         if (zio->io_error != 0) {
2590                 hdr->b_flags |= ARC_IO_ERROR;
2591                 if (hdr->b_state != arc_anon)
2592                         arc_change_state(arc_anon, hdr, hash_lock);
2593                 if (HDR_IN_HASH_TABLE(hdr))
2594                         buf_hash_remove(hdr);
2595                 freeable = refcount_is_zero(&hdr->b_refcnt);
2596         }
2597 
2598         /*
2599          * Broadcast before we drop the hash_lock to avoid the possibility
2600          * that the hdr (and hence the cv) might be freed before we get to
2601          * the cv_broadcast().
2602          */
2603         cv_broadcast(&hdr->b_cv);
2604 
2605         if (hash_lock) {
2606                 mutex_exit(hash_lock);
2607         } else {
2608                 /*
2609                  * This block was freed while we waited for the read to
2610                  * complete.  It has been removed from the hash table and
2611                  * moved to the anonymous state (so that it won't show up
2612                  * in the cache).
2613                  */
2614                 ASSERT3P(hdr->b_state, ==, arc_anon);
2615                 freeable = refcount_is_zero(&hdr->b_refcnt);
2616         }
2617 
2618         /* execute each callback and free its structure */
2619         while ((acb = callback_list) != NULL) {
2620                 if (acb->acb_done)
2621                         acb->acb_done(zio, acb->acb_buf, acb->acb_private);
2622 
2623                 if (acb->acb_zio_dummy != NULL) {
2624                         acb->acb_zio_dummy->io_error = zio->io_error;
2625                         zio_nowait(acb->acb_zio_dummy);
2626                 }
2627 
2628                 callback_list = acb->acb_next;
2629                 kmem_free(acb, sizeof (arc_callback_t));
2630         }
2631 
2632         if (freeable)
2633                 arc_hdr_destroy(hdr);
2634 }
2635 
2636 /*
2637  * "Read" the block at the specified DVA (in bp) via the
2638  * cache.  If the block is found in the cache, invoke the provided
2639  * callback immediately and return.  Note that the `zio' parameter
2640  * in the callback will be NULL in this case, since no IO was
2641  * required.  If the block is not in the cache pass the read request
2642  * on to the spa with a substitute callback function, so that the
2643  * requested block will be added to the cache.
2644  *
2645  * If a read request arrives for a block that has a read in-progress,
2646  * either wait for the in-progress read to complete (and return the
2647  * results); or, if this is a read with a "done" func, add a record
2648  * to the read to invoke the "done" func when the read completes,
2649  * and return; or just return.
2650  *
2651  * arc_read_done() will invoke all the requested "done" functions
2652  * for readers of this block.
2653  *
2654  * Normal callers should use arc_read and pass the arc buffer and offset
2655  * for the bp.  But if you know you don't need locking, you can use
2656  * arc_read_nolock.
2657  */
2658 int
2659 arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_buf_t *pbuf,
2660     arc_done_func_t *done, void *private, int priority, int zio_flags,
2661     uint32_t *arc_flags, const zbookmark_t *zb)
2662 {
2663         int err;
2664 
2665         if (pbuf == NULL) {
2666                 /*
2667                  * XXX This happens from traverse callback funcs, for
2668                  * the objset_phys_t block.
2669                  */
2670                 return (arc_read_nolock(pio, spa, bp, done, private, priority,
2671                     zio_flags, arc_flags, zb));
2672         }
2673 
2674         ASSERT(!refcount_is_zero(&pbuf->b_hdr->b_refcnt));
2675         ASSERT3U((char *)bp - (char *)pbuf->b_data, <, pbuf->b_hdr->b_size);
2676         rw_enter(&pbuf->b_data_lock, RW_READER);
2677 
2678         err = arc_read_nolock(pio, spa, bp, done, private, priority,
2679             zio_flags, arc_flags, zb);
2680         rw_exit(&pbuf->b_data_lock);
2681 
2682         return (err);
2683 }
2684 
2685 int
2686 arc_read_nolock(zio_t *pio, spa_t *spa, const blkptr_t *bp,
2687     arc_done_func_t *done, void *private, int priority, int zio_flags,
2688     uint32_t *arc_flags, const zbookmark_t *zb)
2689 {
2690         arc_buf_hdr_t *hdr;
2691         arc_buf_t *buf;
2692         kmutex_t *hash_lock;
2693         zio_t *rzio;
2694         uint64_t guid = spa_load_guid(spa);
2695 
2696 top:
2697         hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp),
2698             &hash_lock);
2699         if (hdr && hdr->b_datacnt > 0) {
2700 
2701                 *arc_flags |= ARC_CACHED;
2702 
2703                 if (HDR_IO_IN_PROGRESS(hdr)) {
2704 
2705                         if (*arc_flags & ARC_WAIT) {
2706                                 cv_wait(&hdr->b_cv, hash_lock);
2707                                 mutex_exit(hash_lock);
2708                                 goto top;
2709                         }
2710                         ASSERT(*arc_flags & ARC_NOWAIT);
2711 
2712                         if (done) {
2713                                 arc_callback_t  *acb = NULL;
2714 
2715                                 acb = kmem_zalloc(sizeof (arc_callback_t),
2716                                     KM_SLEEP);
2717                                 acb->acb_done = done;
2718                                 acb->acb_private = private;
2719                                 if (pio != NULL)
2720                                         acb->acb_zio_dummy = zio_null(pio,
2721                                             spa, NULL, NULL, NULL, zio_flags);
2722 
2723                                 ASSERT(acb->acb_done != NULL);
2724                                 acb->acb_next = hdr->b_acb;
2725                                 hdr->b_acb = acb;
2726                                 add_reference(hdr, hash_lock, private);
2727                                 mutex_exit(hash_lock);
2728                                 return (0);
2729                         }
2730                         mutex_exit(hash_lock);
2731                         return (0);
2732                 }
2733 
2734                 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2735 
2736                 if (done) {
2737                         add_reference(hdr, hash_lock, private);
2738                         /*
2739                          * If this block is already in use, create a new
2740                          * copy of the data so that we will be guaranteed
2741                          * that arc_release() will always succeed.
2742                          */
2743                         buf = hdr->b_buf;
2744                         ASSERT(buf);
2745                         ASSERT(buf->b_data);
2746                         if (HDR_BUF_AVAILABLE(hdr)) {
2747                                 ASSERT(buf->b_efunc == NULL);
2748                                 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
2749                         } else {
2750                                 buf = arc_buf_clone(buf);
2751                         }
2752 
2753                 } else if (*arc_flags & ARC_PREFETCH &&
2754                     refcount_count(&hdr->b_refcnt) == 0) {
2755                         hdr->b_flags |= ARC_PREFETCH;
2756                 }
2757                 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
2758                 arc_access(hdr, hash_lock);
2759                 if (*arc_flags & ARC_L2CACHE)
2760                         hdr->b_flags |= ARC_L2CACHE;
2761                 mutex_exit(hash_lock);
2762                 ARCSTAT_BUMP(arcstat_hits);
2763                 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
2764                     demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
2765                     data, metadata, hits);
2766 
2767                 if (done)
2768                         done(NULL, buf, private);
2769         } else {
2770                 uint64_t size = BP_GET_LSIZE(bp);
2771                 arc_callback_t  *acb;
2772                 vdev_t *vd = NULL;
2773                 uint64_t addr;
2774                 boolean_t devw = B_FALSE;
2775 
2776                 if (hdr == NULL) {
2777                         /* this block is not in the cache */
2778                         arc_buf_hdr_t   *exists;
2779                         arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
2780                         buf = arc_buf_alloc(spa, size, private, type);
2781                         hdr = buf->b_hdr;
2782                         hdr->b_dva = *BP_IDENTITY(bp);
2783                         hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
2784                         hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
2785                         exists = buf_hash_insert(hdr, &hash_lock);
2786                         if (exists) {
2787                                 /* somebody beat us to the hash insert */
2788                                 mutex_exit(hash_lock);
2789                                 buf_discard_identity(hdr);
2790                                 (void) arc_buf_remove_ref(buf, private);
2791                                 goto top; /* restart the IO request */
2792                         }
2793                         /* if this is a prefetch, we don't have a reference */
2794                         if (*arc_flags & ARC_PREFETCH) {
2795                                 (void) remove_reference(hdr, hash_lock,
2796                                     private);
2797                                 hdr->b_flags |= ARC_PREFETCH;
2798                         }
2799                         if (*arc_flags & ARC_L2CACHE)
2800                                 hdr->b_flags |= ARC_L2CACHE;
2801                         if (BP_GET_LEVEL(bp) > 0)
2802                                 hdr->b_flags |= ARC_INDIRECT;
2803                 } else {
2804                         /* this block is in the ghost cache */
2805                         ASSERT(GHOST_STATE(hdr->b_state));
2806                         ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2807                         ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0);
2808                         ASSERT(hdr->b_buf == NULL);
2809 
2810                         /* if this is a prefetch, we don't have a reference */
2811                         if (*arc_flags & ARC_PREFETCH)
2812                                 hdr->b_flags |= ARC_PREFETCH;
2813                         else
2814                                 add_reference(hdr, hash_lock, private);
2815                         if (*arc_flags & ARC_L2CACHE)
2816                                 hdr->b_flags |= ARC_L2CACHE;
2817                         buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
2818                         buf->b_hdr = hdr;
2819                         buf->b_data = NULL;
2820                         buf->b_efunc = NULL;
2821                         buf->b_private = NULL;
2822                         buf->b_next = NULL;
2823                         hdr->b_buf = buf;
2824                         ASSERT(hdr->b_datacnt == 0);
2825                         hdr->b_datacnt = 1;
2826                         arc_get_data_buf(buf);
2827                         arc_access(hdr, hash_lock);
2828                 }
2829 
2830                 ASSERT(!GHOST_STATE(hdr->b_state));
2831 
2832                 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
2833                 acb->acb_done = done;
2834                 acb->acb_private = private;
2835 
2836                 ASSERT(hdr->b_acb == NULL);
2837                 hdr->b_acb = acb;
2838                 hdr->b_flags |= ARC_IO_IN_PROGRESS;
2839 
2840                 if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL &&
2841                     (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) {
2842                         devw = hdr->b_l2hdr->b_dev->l2ad_writing;
2843                         addr = hdr->b_l2hdr->b_daddr;
2844                         /*
2845                          * Lock out device removal.
2846                          */
2847                         if (vdev_is_dead(vd) ||
2848                             !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
2849                                 vd = NULL;
2850                 }
2851 
2852                 mutex_exit(hash_lock);
2853 
2854                 ASSERT3U(hdr->b_size, ==, size);
2855                 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp,
2856                     uint64_t, size, zbookmark_t *, zb);
2857                 ARCSTAT_BUMP(arcstat_misses);
2858                 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
2859                     demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
2860                     data, metadata, misses);
2861 
2862                 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
2863                         /*
2864                          * Read from the L2ARC if the following are true:
2865                          * 1. The L2ARC vdev was previously cached.
2866                          * 2. This buffer still has L2ARC metadata.
2867                          * 3. This buffer isn't currently writing to the L2ARC.
2868                          * 4. The L2ARC entry wasn't evicted, which may
2869                          *    also have invalidated the vdev.
2870                          * 5. This isn't a prefetch with l2arc_noprefetch enabled.
2871                          */
2872                         if (hdr->b_l2hdr != NULL &&
2873                             !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
2874                             !(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
2875                                 l2arc_read_callback_t *cb;
2876 
2877                                 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
2878                                 ARCSTAT_BUMP(arcstat_l2_hits);
2879 
2880                                 cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
2881                                     KM_SLEEP);
2882                                 cb->l2rcb_buf = buf;
2883                                 cb->l2rcb_spa = spa;
2884                                 cb->l2rcb_bp = *bp;
2885                                 cb->l2rcb_zb = *zb;
2886                                 cb->l2rcb_flags = zio_flags;
2887 
2888                                 /*
2889                                  * l2arc read.  The SCL_L2ARC lock will be
2890                                  * released by l2arc_read_done().
2891                                  */
2892                                 rzio = zio_read_phys(pio, vd, addr, size,
2893                                     buf->b_data, ZIO_CHECKSUM_OFF,
2894                                     l2arc_read_done, cb, priority, zio_flags |
2895                                     ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL |
2896                                     ZIO_FLAG_DONT_PROPAGATE |
2897                                     ZIO_FLAG_DONT_RETRY, B_FALSE);
2898                                 DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
2899                                     zio_t *, rzio);
2900                                 ARCSTAT_INCR(arcstat_l2_read_bytes, size);
2901 
2902                                 if (*arc_flags & ARC_NOWAIT) {
2903                                         zio_nowait(rzio);
2904                                         return (0);
2905                                 }
2906 
2907                                 ASSERT(*arc_flags & ARC_WAIT);
2908                                 if (zio_wait(rzio) == 0)
2909                                         return (0);
2910 
2911                                 /* l2arc read error; goto zio_read() */
2912                         } else {
2913                                 DTRACE_PROBE1(l2arc__miss,
2914                                     arc_buf_hdr_t *, hdr);
2915                                 ARCSTAT_BUMP(arcstat_l2_misses);
2916                                 if (HDR_L2_WRITING(hdr))
2917                                         ARCSTAT_BUMP(arcstat_l2_rw_clash);
2918                                 spa_config_exit(spa, SCL_L2ARC, vd);
2919                         }
2920                 } else {
2921                         if (vd != NULL)
2922                                 spa_config_exit(spa, SCL_L2ARC, vd);
2923                         if (l2arc_ndev != 0) {
2924                                 DTRACE_PROBE1(l2arc__miss,
2925                                     arc_buf_hdr_t *, hdr);
2926                                 ARCSTAT_BUMP(arcstat_l2_misses);
2927                         }
2928                 }
2929 
2930                 rzio = zio_read(pio, spa, bp, buf->b_data, size,
2931                     arc_read_done, buf, priority, zio_flags, zb);
2932 
2933                 if (*arc_flags & ARC_WAIT)
2934                         return (zio_wait(rzio));
2935 
2936                 ASSERT(*arc_flags & ARC_NOWAIT);
2937                 zio_nowait(rzio);
2938         }
2939         return (0);
2940 }
2941 
2942 void
2943 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
2944 {
2945         ASSERT(buf->b_hdr != NULL);
2946         ASSERT(buf->b_hdr->b_state != arc_anon);
2947         ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
2948         ASSERT(buf->b_efunc == NULL);
2949         ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr));
2950 
2951         buf->b_efunc = func;
2952         buf->b_private = private;
2953 }
2954 
2955 /*
2956  * This is used by the DMU to let the ARC know that a buffer is
2957  * being evicted, so the ARC should clean up.  If this arc buf
2958  * is not yet in the evicted state, it will be put there.
2959  */
2960 int
2961 arc_buf_evict(arc_buf_t *buf)
2962 {
2963         arc_buf_hdr_t *hdr;
2964         kmutex_t *hash_lock;
2965         arc_buf_t **bufp;
2966 
2967         mutex_enter(&buf->b_evict_lock);
2968         hdr = buf->b_hdr;
2969         if (hdr == NULL) {
2970                 /*
2971                  * We are in arc_do_user_evicts().
2972                  */
2973                 ASSERT(buf->b_data == NULL);
2974                 mutex_exit(&buf->b_evict_lock);
2975                 return (0);
2976         } else if (buf->b_data == NULL) {
2977                 arc_buf_t copy = *buf; /* structure assignment */
2978                 /*
2979                  * We are on the eviction list; process this buffer now
2980                  * but let arc_do_user_evicts() do the reaping.
2981                  */
2982                 buf->b_efunc = NULL;
2983                 mutex_exit(&buf->b_evict_lock);
2984                 VERIFY(copy.b_efunc(&copy) == 0);
2985                 return (1);
2986         }
2987         hash_lock = HDR_LOCK(hdr);
2988         mutex_enter(hash_lock);
2989         hdr = buf->b_hdr;
2990         ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
2991 
2992         ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
2993         ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2994 
2995         /*
2996          * Pull this buffer off of the hdr
2997          */
2998         bufp = &hdr->b_buf;
2999         while (*bufp != buf)
3000                 bufp = &(*bufp)->b_next;
3001         *bufp = buf->b_next;
3002 
3003         ASSERT(buf->b_data != NULL);
3004         arc_buf_destroy(buf, FALSE, FALSE);
3005 
3006         if (hdr->b_datacnt == 0) {
3007                 arc_state_t *old_state = hdr->b_state;
3008                 arc_state_t *evicted_state;
3009 
3010                 ASSERT(hdr->b_buf == NULL);
3011                 ASSERT(refcount_is_zero(&hdr->b_refcnt));
3012 
3013                 evicted_state =
3014                     (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
3015 
3016                 mutex_enter(&old_state->arcs_mtx);
3017                 mutex_enter(&evicted_state->arcs_mtx);
3018 
3019                 arc_change_state(evicted_state, hdr, hash_lock);
3020                 ASSERT(HDR_IN_HASH_TABLE(hdr));
3021                 hdr->b_flags |= ARC_IN_HASH_TABLE;
3022                 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3023 
3024                 mutex_exit(&evicted_state->arcs_mtx);
3025                 mutex_exit(&old_state->arcs_mtx);
3026         }
3027         mutex_exit(hash_lock);
3028         mutex_exit(&buf->b_evict_lock);
3029 
3030         VERIFY(buf->b_efunc(buf) == 0);
3031         buf->b_efunc = NULL;
3032         buf->b_private = NULL;
3033         buf->b_hdr = NULL;
3034         buf->b_next = NULL;
3035         kmem_cache_free(buf_cache, buf);
3036         return (1);
3037 }
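
/*
 * Illustrative sketch only (usage assumed for the example, not taken from
 * this file): a consumer such as the DMU pairs arc_set_callback() with
 * arc_buf_evict().  "consumer_evict" and "consumer_state" are hypothetical
 * names.  The registered b_efunc is invoked with the arc_buf_t, and the
 * state passed here is reachable through buf->b_private.
 *
 *	arc_set_callback(buf, consumer_evict, consumer_state);
 *	...
 *	... later, when the consumer tears down its own mapping first:
 *	(void) arc_buf_evict(buf);
 */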
3038 
3039 /*
3040  * Release this buffer from the cache.  This must be done
3041  * after a read and prior to modifying the buffer contents.
3042  * If the buffer has more than one reference, we must make
3043  * a new hdr for the buffer.
3044  */
3045 void
3046 arc_release(arc_buf_t *buf, void *tag)
3047 {
3048         arc_buf_hdr_t *hdr;
3049         kmutex_t *hash_lock = NULL;
3050         l2arc_buf_hdr_t *l2hdr;
3051         uint64_t buf_size;
3052 
3053         /*
3054          * It would be nice to assert that if it's DMU metadata (level >
3055          * 0 || it's the dnode file), then it must be syncing context.
3056          * But we don't know that information at this level.
3057          */
3058 
3059         mutex_enter(&buf->b_evict_lock);
3060         hdr = buf->b_hdr;
3061 
3062         /* this buffer is not on any list */
3063         ASSERT(refcount_count(&hdr->b_refcnt) > 0);
3064 
3065         if (hdr->b_state == arc_anon) {
3066                 /* this buffer is already released */
3067                 ASSERT(buf->b_efunc == NULL);
3068         } else {
3069                 hash_lock = HDR_LOCK(hdr);
3070                 mutex_enter(hash_lock);
3071                 hdr = buf->b_hdr;
3072                 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3073         }
3074 
3075         l2hdr = hdr->b_l2hdr;
3076         if (l2hdr) {
3077                 mutex_enter(&l2arc_buflist_mtx);
3078                 hdr->b_l2hdr = NULL;
3079                 buf_size = hdr->b_size;
3080         }
3081 
3082         /*
3083          * Do we have more than one buf?
3084          */
3085         if (hdr->b_datacnt > 1) {
3086                 arc_buf_hdr_t *nhdr;
3087                 arc_buf_t **bufp;
3088                 uint64_t blksz = hdr->b_size;
3089                 uint64_t spa = hdr->b_spa;
3090                 arc_buf_contents_t type = hdr->b_type;
3091                 uint32_t flags = hdr->b_flags;
3092 
3093                 ASSERT(hdr->b_buf != buf || buf->b_next != NULL);
3094                 /*
3095                  * Pull the data off of this hdr and attach it to
3096                  * a new anonymous hdr.
3097                  */
3098                 (void) remove_reference(hdr, hash_lock, tag);
3099                 bufp = &hdr->b_buf;
3100                 while (*bufp != buf)
3101                         bufp = &(*bufp)->b_next;
3102                 *bufp = buf->b_next;
3103                 buf->b_next = NULL;
3104 
3105                 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
3106                 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
3107                 if (refcount_is_zero(&hdr->b_refcnt)) {
3108                         uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
3109                         ASSERT3U(*size, >=, hdr->b_size);
3110                         atomic_add_64(size, -hdr->b_size);
3111                 }
3112                 hdr->b_datacnt -= 1;
3113                 arc_cksum_verify(buf);
3114 
3115                 mutex_exit(hash_lock);
3116 
3117                 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
3118                 nhdr->b_size = blksz;
3119                 nhdr->b_spa = spa;
3120                 nhdr->b_type = type;
3121                 nhdr->b_buf = buf;
3122                 nhdr->b_state = arc_anon;
3123                 nhdr->b_arc_access = 0;
3124                 nhdr->b_flags = flags & ARC_L2_WRITING;
3125                 nhdr->b_l2hdr = NULL;
3126                 nhdr->b_datacnt = 1;
3127                 nhdr->b_freeze_cksum = NULL;
3128                 (void) refcount_add(&nhdr->b_refcnt, tag);
3129                 buf->b_hdr = nhdr;
3130                 mutex_exit(&buf->b_evict_lock);
3131                 atomic_add_64(&arc_anon->arcs_size, blksz);
3132         } else {
3133                 mutex_exit(&buf->b_evict_lock);
3134                 ASSERT(refcount_count(&hdr->b_refcnt) == 1);
3135                 ASSERT(!list_link_active(&hdr->b_arc_node));
3136                 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3137                 if (hdr->b_state != arc_anon)
3138                         arc_change_state(arc_anon, hdr, hash_lock);
3139                 hdr->b_arc_access = 0;
3140                 if (hash_lock)
3141                         mutex_exit(hash_lock);
3142 
3143                 buf_discard_identity(hdr);
3144                 arc_buf_thaw(buf);
3145         }
3146         buf->b_efunc = NULL;
3147         buf->b_private = NULL;
3148 
3149         if (l2hdr) {
3150                 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
3151                 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
3152                 ARCSTAT_INCR(arcstat_l2_size, -buf_size);
3153                 mutex_exit(&l2arc_buflist_mtx);
3154         }
3155 }
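
/*
 * Illustrative sketch only: the release-before-modify pattern described in
 * the comment above arc_release().  "buf" and "tag" stand for the caller's
 * buffer and reference tag; the names are assumptions for the example.
 *
 *	arc_release(buf, tag);
 *	ASSERT(arc_released(buf));
 *	... it is now safe to modify buf->b_data and rewrite the block,
 *	    e.g. via arc_write() ...
 */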
3156 
3157 /*
3158  * Release this buffer.  If it does not match the provided BP, fill it
3159  * with that block's contents.
3160  */
3161 /* ARGSUSED */
3162 int
3163 arc_release_bp(arc_buf_t *buf, void *tag, blkptr_t *bp, spa_t *spa,
3164     zbookmark_t *zb)
3165 {
3166         arc_release(buf, tag);
3167         return (0);
3168 }
3169 
3170 int
3171 arc_released(arc_buf_t *buf)
3172 {
3173         int released;
3174 
3175         mutex_enter(&buf->b_evict_lock);
3176         released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
3177         mutex_exit(&buf->b_evict_lock);
3178         return (released);
3179 }
3180 
3181 int
3182 arc_has_callback(arc_buf_t *buf)
3183 {
3184         int callback;
3185 
3186         mutex_enter(&buf->b_evict_lock);
3187         callback = (buf->b_efunc != NULL);
3188         mutex_exit(&buf->b_evict_lock);
3189         return (callback);
3190 }
3191 
3192 #ifdef ZFS_DEBUG
3193 int
3194 arc_referenced(arc_buf_t *buf)
3195 {
3196         int referenced;
3197 
3198         mutex_enter(&buf->b_evict_lock);
3199         referenced = (refcount_count(&buf->b_hdr->b_refcnt));
3200         mutex_exit(&buf->b_evict_lock);
3201         return (referenced);
3202 }
3203 #endif
3204 
3205 static void
3206 arc_write_ready(zio_t *zio)
3207 {
3208         arc_write_callback_t *callback = zio->io_private;
3209         arc_buf_t *buf = callback->awcb_buf;
3210         arc_buf_hdr_t *hdr = buf->b_hdr;
3211 
3212         ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
3213         callback->awcb_ready(zio, buf, callback->awcb_private);
3214 
3215         /*
3216          * If the IO is already in progress, then this is a re-write
3217          * attempt, so we need to thaw and re-compute the cksum.
3218          * It is the responsibility of the callback to handle the
3219          * accounting for any re-write attempt.
3220          */
3221         if (HDR_IO_IN_PROGRESS(hdr)) {
3222                 mutex_enter(&hdr->b_freeze_lock);
3223                 if (hdr->b_freeze_cksum != NULL) {
3224                         kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
3225                         hdr->b_freeze_cksum = NULL;
3226                 }
3227                 mutex_exit(&hdr->b_freeze_lock);
3228         }
3229         arc_cksum_compute(buf, B_FALSE);
3230         hdr->b_flags |= ARC_IO_IN_PROGRESS;
3231 }
3232 
3233 static void
3234 arc_write_done(zio_t *zio)
3235 {
3236         arc_write_callback_t *callback = zio->io_private;
3237         arc_buf_t *buf = callback->awcb_buf;
3238         arc_buf_hdr_t *hdr = buf->b_hdr;
3239 
3240         ASSERT(hdr->b_acb == NULL);
3241 
3242         if (zio->io_error == 0) {
3243                 hdr->b_dva = *BP_IDENTITY(zio->io_bp);
3244                 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
3245                 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
3246         } else {
3247                 ASSERT(BUF_EMPTY(hdr));
3248         }
3249 
3250         /*
3251          * If the block to be written was all-zero, we may have
3252          * compressed it away.  In this case no write was performed
3253          * so there will be no dva/birth/checksum.  The buffer must
3254          * therefore remain anonymous (and uncached).
3255          */
3256         if (!BUF_EMPTY(hdr)) {
3257                 arc_buf_hdr_t *exists;
3258                 kmutex_t *hash_lock;
3259 
3260                 ASSERT(zio->io_error == 0);
3261 
3262                 arc_cksum_verify(buf);
3263 
3264                 exists = buf_hash_insert(hdr, &hash_lock);
3265                 if (exists) {
3266                         /*
3267                          * This can only happen if we overwrite for
3268                          * sync-to-convergence, because we remove
3269                          * buffers from the hash table when we arc_free().
3270                          */
3271                         if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
3272                                 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
3273                                         panic("bad overwrite, hdr=%p exists=%p",
3274                                             (void *)hdr, (void *)exists);
3275                                 ASSERT(refcount_is_zero(&exists->b_refcnt));
3276                                 arc_change_state(arc_anon, exists, hash_lock);
3277                                 mutex_exit(hash_lock);
3278                                 arc_hdr_destroy(exists);
3279                                 exists = buf_hash_insert(hdr, &hash_lock);
3280                                 ASSERT3P(exists, ==, NULL);
3281                         } else {
3282                                 /* Dedup */
3283                                 ASSERT(hdr->b_datacnt == 1);
3284                                 ASSERT(hdr->b_state == arc_anon);
3285                                 ASSERT(BP_GET_DEDUP(zio->io_bp));
3286                                 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
3287                         }
3288                 }
3289                 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3290                 /* if it's not anon, we are doing a scrub */
3291                 if (!exists && hdr->b_state == arc_anon)
3292                         arc_access(hdr, hash_lock);
3293                 mutex_exit(hash_lock);
3294         } else {
3295                 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3296         }
3297 
3298         ASSERT(!refcount_is_zero(&hdr->b_refcnt));
3299         callback->awcb_done(zio, buf, callback->awcb_private);
3300 
3301         kmem_free(callback, sizeof (arc_write_callback_t));
3302 }
3303 
3304 zio_t *
3305 arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
3306     blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, const zio_prop_t *zp,
3307     arc_done_func_t *ready, arc_done_func_t *done, void *private,
3308     int priority, int zio_flags, const zbookmark_t *zb)
3309 {
3310         arc_buf_hdr_t *hdr = buf->b_hdr;
3311         arc_write_callback_t *callback;
3312         zio_t *zio;
3313 
3314         ASSERT(ready != NULL);
3315         ASSERT(done != NULL);
3316         ASSERT(!HDR_IO_ERROR(hdr));
3317         ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
3318         ASSERT(hdr->b_acb == NULL);
3319         if (l2arc)
3320                 hdr->b_flags |= ARC_L2CACHE;
3321         callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
3322         callback->awcb_ready = ready;
3323         callback->awcb_done = done;
3324         callback->awcb_private = private;
3325         callback->awcb_buf = buf;
3326 
3327         zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
3328             arc_write_ready, arc_write_done, callback, priority, zio_flags, zb);
3329 
3330         return (zio);
3331 }
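
/*
 * Illustrative sketch only: writing a released (anonymous) buffer.  The
 * callbacks, the zio_prop_t "zp", and the other names are assumptions for
 * the example; real callers (such as the dbuf layer) build them from their
 * own state.  B_TRUE here requests that the block also be cached in the
 * L2ARC.
 *
 *	zio_t *wzio = arc_write(pio, spa, txg, bp, buf, B_TRUE, &zp,
 *	    example_ready, example_done, example_state,
 *	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, zb);
 *	zio_nowait(wzio);
 */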
3332 
3333 static int
3334 arc_memory_throttle(uint64_t reserve, uint64_t inflight_data, uint64_t txg)
3335 {
3336 #ifdef _KERNEL
3337         uint64_t available_memory = ptob(freemem);
3338         static uint64_t page_load = 0;
3339         static uint64_t last_txg = 0;
3340 
3341 #if defined(__i386)
3342         available_memory =
3343             MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
3344 #endif
3345         if (available_memory >= zfs_write_limit_max)
3346                 return (0);
3347 
3348         if (txg > last_txg) {
3349                 last_txg = txg;
3350                 page_load = 0;
3351         }
3352         /*
3353          * If we are in pageout, we know that memory is already tight and
3354          * the ARC is already going to be evicting, so we just want to
3355          * continue to let page writes occur as quickly as possible.
3356          */
3357         if (curproc == proc_pageout) {
3358                 if (page_load > MAX(ptob(minfree), available_memory) / 4)
3359                         return (ERESTART);
3360                 /* Note: reserve is inflated, so we deflate */
3361                 page_load += reserve / 8;
3362                 return (0);
3363         } else if (page_load > 0 && arc_reclaim_needed()) {
3364                 /* memory is low, delay before restarting */
3365                 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3366                 return (EAGAIN);
3367         }
3368         page_load = 0;
3369 
3370         if (arc_size > arc_c_min) {
3371                 uint64_t evictable_memory =
3372                     arc_mru->arcs_lsize[ARC_BUFC_DATA] +
3373                     arc_mru->arcs_lsize[ARC_BUFC_METADATA] +
3374                     arc_mfu->arcs_lsize[ARC_BUFC_DATA] +
3375                     arc_mfu->arcs_lsize[ARC_BUFC_METADATA];
3376                 available_memory += MIN(evictable_memory, arc_size - arc_c_min);
3377         }
3378 
3379         if (inflight_data > available_memory / 4) {
3380                 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3381                 return (ERESTART);
3382         }
3383 #endif
3384         return (0);
3385 }
3386 
3387 void
3388 arc_tempreserve_clear(uint64_t reserve)
3389 {
3390         atomic_add_64(&arc_tempreserve, -reserve);
3391         ASSERT((int64_t)arc_tempreserve >= 0);
3392 }
3393 
3394 int
3395 arc_tempreserve_space(uint64_t reserve, uint64_t txg)
3396 {
3397         int error;
3398         uint64_t anon_size;
3399 
3400 #ifdef ZFS_DEBUG
3401         /*
3402          * Once in a while, fail for no reason.  Everything should cope.
3403          */
3404         if (spa_get_random(10000) == 0) {
3405                 dprintf("forcing random failure\n");
3406                 return (ERESTART);
3407         }
3408 #endif
3409         if (reserve > arc_c/4 && !arc_no_grow)
3410                 arc_c = MIN(arc_c_max, reserve * 4);
3411         if (reserve > arc_c)
3412                 return (ENOMEM);
3413 
3414         /*
3415          * Don't count loaned bufs as in flight dirty data to prevent long
3416          * network delays from blocking transactions that are ready to be
3417          * assigned to a txg.
3418          */
3419         anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0);
3420 
3421         /*
3422          * Writes will, almost always, require additional memory allocations
3423          * in order to compress/encrypt/etc the data.  We therefore need to
3424          * make sure that there is sufficient available memory for this.
3425          */
3426         if (error = arc_memory_throttle(reserve, anon_size, txg))
3427                 return (error);
3428 
3429         /*
3430          * Throttle writes when the amount of dirty data in the cache
3431          * gets too large.  We try to keep the cache less than half full
3432          * of dirty blocks so that our sync times don't grow too large.
3433          * Note: if two requests come in concurrently, we might let them
3434          * both succeed, when one of them should fail.  Not a huge deal.
3435          */
3436 
3437         if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
3438             anon_size > arc_c / 4) {
3439                 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
3440                     "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
3441                     arc_tempreserve>>10,
3442                     arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
3443                     arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
3444                     reserve>>10, arc_c>>10);
3445                 return (ERESTART);
3446         }
3447         atomic_add_64(&arc_tempreserve, reserve);
3448         return (0);
3449 }
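
/*
 * Illustrative sketch only: the reserve/clear pattern a caller (in practice
 * the DMU/DSL dirty-data accounting) is expected to follow.  The error
 * names mirror the returns above; the retry policy is the caller's.
 *
 *	error = arc_tempreserve_space(nbytes, txg);
 *	if (error == 0) {
 *		... dirty the data for this txg ...
 *		arc_tempreserve_clear(nbytes);
 *	} else if (error == ERESTART || error == EAGAIN) {
 *		... back off and retry the reservation later ...
 *	}
 */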
3450 
3451 void
3452 arc_init(void)
3453 {
3454         mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
3455         cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
3456 
3457         /* Convert seconds to clock ticks */
3458         arc_min_prefetch_lifespan = 1 * hz;
3459 
3460         /* Start out with 1/8 of all memory */
3461         arc_c = physmem * PAGESIZE / 8;
3462 
3463 #ifdef _KERNEL
3464         /*
3465          * On architectures where the physical memory can be larger
3466          * than the addressable space (intel in 32-bit mode), we may
3467          * need to limit the cache to 1/8 of VM size.
3468          */
3469         arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
3470 #endif
3471 
3472         /* set min cache to 1/32 of all memory, or 64MB, whichever is more */
3473         arc_c_min = MAX(arc_c / 4, 64<<20);
3474         /* set max to 3/4 of all memory, or all but 1GB, whichever is more */
3475         if (arc_c * 8 >= 1<<30)
3476                 arc_c_max = (arc_c * 8) - (1<<30);
3477         else
3478                 arc_c_max = arc_c_min;
3479         arc_c_max = MAX(arc_c * 6, arc_c_max);
3480 
3481         /*
3482          * Allow the tunables to override our calculations if they are
3483          * reasonable (i.e. over 64MB)
3484          */
3485         if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
3486                 arc_c_max = zfs_arc_max;
3487         if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max)
3488                 arc_c_min = zfs_arc_min;
3489 
3490         arc_c = arc_c_max;
3491         arc_p = (arc_c >> 1);
3492 
3493         /* limit meta-data to 1/4 of the arc capacity */
3494         arc_meta_limit = arc_c_max / 4;
3495 
3496         /* Allow the tunable to override if it is reasonable */
3497         if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
3498                 arc_meta_limit = zfs_arc_meta_limit;
3499 
3500         if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
3501                 arc_c_min = arc_meta_limit / 2;
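
        /*
         * Worked example (illustrative, assuming a 64-bit kernel with 16GB
         * of physical memory, no tunables set, and kmem debugging off):
         * arc_c starts at 2GB, so arc_c_min is initially MAX(512MB, 64MB) =
         * 512MB and arc_c_max is MAX(6 * 2GB, 16GB - 1GB) = 15GB.  arc_c is
         * then set to 15GB, arc_p to 7.5GB, arc_meta_limit to 15GB / 4 =
         * 3.75GB, and arc_c_min is raised to arc_meta_limit / 2 = 1.875GB
         * by the check above.
         */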
3502 
3503         if (zfs_arc_grow_retry > 0)
3504                 arc_grow_retry = zfs_arc_grow_retry;
3505 
3506         if (zfs_arc_shrink_shift > 0)
3507                 arc_shrink_shift = zfs_arc_shrink_shift;
3508 
3509         if (zfs_arc_p_min_shift > 0)
3510                 arc_p_min_shift = zfs_arc_p_min_shift;
3511 
3512         /* if kmem_flags are set, let's try to use less memory */
3513         if (kmem_debugging())
3514                 arc_c = arc_c / 2;
3515         if (arc_c < arc_c_min)
3516                 arc_c = arc_c_min;
3517 
3518         arc_anon = &ARC_anon;
3519         arc_mru = &ARC_mru;
3520         arc_mru_ghost = &ARC_mru_ghost;
3521         arc_mfu = &ARC_mfu;
3522         arc_mfu_ghost = &ARC_mfu_ghost;
3523         arc_l2c_only = &ARC_l2c_only;
3524         arc_size = 0;
3525 
3526         mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3527         mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3528         mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3529         mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3530         mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3531         mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3532 
3533         list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
3534             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3535         list_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
3536             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3537         list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
3538             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3539         list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
3540             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3541         list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
3542             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3543         list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
3544             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3545         list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
3546             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3547         list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
3548             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3549         list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
3550             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3551         list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
3552             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3553 
3554         buf_init();
3555 
3556         arc_thread_exit = 0;
3557         arc_eviction_list = NULL;
3558         mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
3559         bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
3560 
3561         arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
3562             sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
3563 
3564         if (arc_ksp != NULL) {
3565                 arc_ksp->ks_data = &arc_stats;
3566                 kstat_install(arc_ksp);
3567         }
3568 
3569         (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
3570             TS_RUN, minclsyspri);
3571 
3572         arc_dead = FALSE;
3573         arc_warm = B_FALSE;
3574 
3575         if (zfs_write_limit_max == 0)
3576                 zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift;
3577         else
3578                 zfs_write_limit_shift = 0;
3579         mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL);
3580 }
3581 
3582 void
3583 arc_fini(void)
3584 {
3585         mutex_enter(&arc_reclaim_thr_lock);
3586         arc_thread_exit = 1;
3587         while (arc_thread_exit != 0)
3588                 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
3589         mutex_exit(&arc_reclaim_thr_lock);
3590 
3591         arc_flush(NULL);
3592 
3593         arc_dead = TRUE;
3594 
3595         if (arc_ksp != NULL) {
3596                 kstat_delete(arc_ksp);
3597                 arc_ksp = NULL;
3598         }
3599 
3600         mutex_destroy(&arc_eviction_mtx);
3601         mutex_destroy(&arc_reclaim_thr_lock);
3602         cv_destroy(&arc_reclaim_thr_cv);
3603 
3604         list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
3605         list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
3606         list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
3607         list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
3608         list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
3609         list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
3610         list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
3611         list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
3612 
3613         mutex_destroy(&arc_anon->arcs_mtx);
3614         mutex_destroy(&arc_mru->arcs_mtx);
3615         mutex_destroy(&arc_mru_ghost->arcs_mtx);
3616         mutex_destroy(&arc_mfu->arcs_mtx);
3617         mutex_destroy(&arc_mfu_ghost->arcs_mtx);
3618         mutex_destroy(&arc_l2c_only->arcs_mtx);
3619 
3620         mutex_destroy(&zfs_write_limit_lock);
3621 
3622         buf_fini();
3623 
3624         ASSERT(arc_loaned_bytes == 0);
3625 }
3626 
3627 /*
3628  * Level 2 ARC
3629  *
3630  * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
3631  * It uses dedicated storage devices to hold cached data, which are populated
3632  * using large infrequent writes.  The main role of this cache is to boost
3633  * the performance of random read workloads.  The intended L2ARC devices
3634  * include short-stroked disks, solid state disks, and other media with
3635  * substantially faster read latency than disk.
3636  *
3637  *                 +-----------------------+
3638  *                 |         ARC           |
3639  *                 +-----------------------+
3640  *                    |         ^     ^
3641  *                    |         |     |
3642  *      l2arc_feed_thread()    arc_read()
3643  *                    |         |     |
3644  *                    |  l2arc read   |
3645  *                    V         |     |
3646  *               +---------------+    |
3647  *               |     L2ARC     |    |
3648  *               +---------------+    |
3649  *                   |    ^           |
3650  *          l2arc_write() |           |
3651  *                   |    |           |
3652  *                   V    |           |
3653  *                 +-------+      +-------+
3654  *                 | vdev  |      | vdev  |
3655  *                 | cache |      | cache |
3656  *                 +-------+      +-------+
3657  *                 +=========+     .-----.
3658  *                 :  L2ARC  :    |-_____-|
3659  *                 : devices :    | Disks |
3660  *                 +=========+    `-_____-'
3661  *
3662  * Read requests are satisfied from the following sources, in order:
3663  *
3664  *      1) ARC
3665  *      2) vdev cache of L2ARC devices
3666  *      3) L2ARC devices
3667  *      4) vdev cache of disks
3668  *      5) disks
3669  *
3670  * Some L2ARC device types exhibit extremely slow write performance.
3671  * To accommodate this, there are some significant differences between
3672  * the L2ARC and traditional cache design:
3673  *
3674  * 1. There is no eviction path from the ARC to the L2ARC.  Evictions from
3675  * the ARC behave as usual, freeing buffers and placing headers on ghost
3676  * lists.  The ARC does not send buffers to the L2ARC during eviction as
3677  * this would add inflated write latencies for all ARC memory pressure.
3678  *
3679  * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
3680  * It does this by periodically scanning buffers from the eviction-end of
3681  * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
3682  * not already there.  It scans until a headroom of buffers is satisfied,
3683  * which itself acts as a cushion against ARC eviction.  The thread that
3684  * does this is l2arc_feed_thread(), illustrated below; example sizes are
3685  * included to provide a better sense of ratio than this diagram:
3686  *
3687  *             head -->                        tail
3688  *              +---------------------+----------+
3689  *      ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->.   # already on L2ARC
3690  *              +---------------------+----------+   |   o L2ARC eligible
3691  *      ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->|   : ARC buffer
3692  *              +---------------------+----------+   |
3693  *                   15.9 Gbytes      ^ 32 Mbytes    |
3694  *                                 headroom          |
3695  *                                            l2arc_feed_thread()
3696  *                                                   |
3697  *                       l2arc write hand <--[oooo]--'
3698  *                               |           8 Mbyte
3699  *                               |          write max
3700  *                               V
3701  *                +==============================+
3702  *      L2ARC dev |####|#|###|###|    |####| ... |
3703  *                +==============================+
3704  *                           32 Gbytes
3705  *
3706  * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
3707  * evicted, then the L2ARC has cached a buffer much sooner than it probably
3708  * needed to, potentially wasting L2ARC device bandwidth and storage.  It is
3709  * safe to say that this is an uncommon case, since buffers at the end of
3710  * the ARC lists have moved there due to inactivity.
3711  *
3712  * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
3713  * then the L2ARC simply misses copying some buffers.  This serves as a
3714  * pressure valve to prevent heavy read workloads from both stalling the ARC
3715  * with waits and clogging the L2ARC with writes.  This also helps prevent
3716  * the potential for the L2ARC to churn if it attempts to cache content too
3717  * quickly, such as during backups of the entire pool.
3718  *
3719  * 5. After system boot and before the ARC has filled main memory, there are
3720  * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
3721  * lists can remain mostly static.  Instead of searching from the tail of
3722  * these lists as pictured, l2arc_feed_thread() will search from the list
3723  * heads for eligible buffers, greatly increasing its chance of finding them.
3724  *
3725  * The L2ARC device write speed is also boosted during this time so that
3726  * the L2ARC warms up faster.  Since there have been no ARC evictions yet,
3727  * there are no L2ARC reads, and no fear of degrading read performance
3728  * through increased writes.
3729  *
3730  * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
3731  * the vdev queue can aggregate them into larger and fewer writes.  Each
3732  * device is written to in a rotor fashion, sweeping writes through
3733  * available space then repeating.
3734  *
3735  * 7. The L2ARC does not store dirty content.  It never needs to flush
3736  * write buffers back to disk based storage.
3737  *
3738  * 8. If an ARC buffer is written (and dirtied) which also exists in the
3739  * L2ARC, the now stale L2ARC buffer is immediately dropped.
3740  *
3741  * The performance of the L2ARC can be tweaked by a number of tunables, which
3742  * may be necessary for different workloads:
3743  *
3744  *      l2arc_write_max         max write bytes per interval
3745  *      l2arc_write_boost       extra write bytes during device warmup
3746  *      l2arc_noprefetch        skip caching prefetched buffers
3747  *      l2arc_headroom          number of max device writes to precache
3748  *      l2arc_feed_secs         seconds between L2ARC writing
3749  *
3750  * Tunables may be removed or added as future performance improvements are
3751  * integrated, and also may become zpool properties.
3752  *
3753  * There are three key functions that control how the L2ARC warms up:
3754  *
3755  *      l2arc_write_eligible()  check if a buffer is eligible to cache
3756  *      l2arc_write_size()      calculate how much to write
3757  *      l2arc_write_interval()  calculate sleep delay between writes
3758  *
3759  * These three functions determine what to write, how much, and how quickly
3760  * to send writes.
3761  */
3762 
3763 static boolean_t
3764 l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab)
3765 {
3766         /*
3767          * A buffer is *not* eligible for the L2ARC if it:
3768          * 1. belongs to a different spa.
3769          * 2. is already cached on the L2ARC.
3770          * 3. has an I/O in progress (it may be an incomplete read).
3771          * 4. is flagged not eligible (zfs property).
3772          */
3773         if (ab->b_spa != spa_guid || ab->b_l2hdr != NULL ||
3774             HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab))
3775                 return (B_FALSE);
3776 
3777         return (B_TRUE);
3778 }
3779 
3780 static uint64_t
3781 l2arc_write_size(l2arc_dev_t *dev)
3782 {
3783         uint64_t size;
3784 
3785         size = dev->l2ad_write;
3786 
3787         if (arc_warm == B_FALSE)
3788                 size += dev->l2ad_boost;
3789 
3790         return (size);
3791 
3792 }
3793 
3794 static clock_t
3795 l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
3796 {
3797         clock_t interval, next, now;
3798 
3799         /*
3800          * If the ARC lists are busy, increase our write rate; if the
3801          * lists are stale, idle back.  This is achieved by checking
3802          * how much we previously wrote - if it was more than half of
3803          * what we wanted, schedule the next write much sooner.
3804          */
3805         if (l2arc_feed_again && wrote > (wanted / 2))
3806                 interval = (hz * l2arc_feed_min_ms) / 1000;
3807         else
3808                 interval = hz * l2arc_feed_secs;
3809 
3810         now = ddi_get_lbolt();
3811         next = MAX(now, MIN(now + interval, began + interval));
3812 
3813         return (next);
3814 }
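
/*
 * Worked example (illustrative, assuming default tunables of roughly 8MB
 * for l2arc_write_max and l2arc_write_boost, 1 second for l2arc_feed_secs
 * and 200ms for l2arc_feed_min_ms): while arc_warm is B_FALSE,
 * l2arc_write_size() targets about 16MB per pass, dropping to 8MB once the
 * ARC has warmed.  If l2arc_feed_again is set and a pass wrote more than
 * half of its target, l2arc_write_interval() wakes up l2arc_feed_min_ms
 * after the pass began (clamped to be no earlier than now) instead of
 * l2arc_feed_secs later.
 */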
3815 
3816 static void
3817 l2arc_hdr_stat_add(void)
3818 {
3819         ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE);
3820         ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
3821 }
3822 
3823 static void
3824 l2arc_hdr_stat_remove(void)
3825 {
3826         ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE));
3827         ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
3828 }
3829 
3830 /*
3831  * Cycle through L2ARC devices.  This is how L2ARC load balances.
3832  * If a device is returned, it is returned with the spa config lock held.
3833  */
3834 static l2arc_dev_t *
3835 l2arc_dev_get_next(void)
3836 {
3837         l2arc_dev_t *first, *next = NULL;
3838 
3839         /*
3840          * Lock out the removal of spas (spa_namespace_lock), then removal
3841          * of cache devices (l2arc_dev_mtx).  Once a device has been selected,
3842          * both locks will be dropped and a spa config lock held instead.
3843          */
3844         mutex_enter(&spa_namespace_lock);
3845         mutex_enter(&l2arc_dev_mtx);
3846 
3847         /* if there are no vdevs, there is nothing to do */
3848         if (l2arc_ndev == 0)
3849                 goto out;
3850 
3851         first = NULL;
3852         next = l2arc_dev_last;
3853         do {
3854                 /* loop around the list looking for a non-faulted vdev */
3855                 if (next == NULL) {
3856                         next = list_head(l2arc_dev_list);
3857                 } else {
3858                         next = list_next(l2arc_dev_list, next);
3859                         if (next == NULL)
3860                                 next = list_head(l2arc_dev_list);
3861                 }
3862 
3863                 /* if we have come back to the start, bail out */
3864                 if (first == NULL)
3865                         first = next;
3866                 else if (next == first)
3867                         break;
3868 
3869         } while (vdev_is_dead(next->l2ad_vdev));
3870 
3871         /* if we were unable to find any usable vdevs, return NULL */
3872         if (vdev_is_dead(next->l2ad_vdev))
3873                 next = NULL;
3874 
3875         l2arc_dev_last = next;
3876 
3877 out:
3878         mutex_exit(&l2arc_dev_mtx);
3879 
3880         /*
3881          * Grab the config lock to prevent the 'next' device from being
3882          * removed while we are writing to it.
3883          */
3884         if (next != NULL)
3885                 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
3886         mutex_exit(&spa_namespace_lock);
3887 
3888         return (next);
3889 }
3890 
3891 /*
3892  * Free buffers that were tagged for destruction.
3893  */
3894 static void
3895 l2arc_do_free_on_write()
3896 {
3897         list_t *buflist;
3898         l2arc_data_free_t *df, *df_prev;
3899 
3900         mutex_enter(&l2arc_free_on_write_mtx);
3901         buflist = l2arc_free_on_write;
3902 
3903         for (df = list_tail(buflist); df; df = df_prev) {
3904                 df_prev = list_prev(buflist, df);
3905                 ASSERT(df->l2df_data != NULL);
3906                 ASSERT(df->l2df_func != NULL);
3907                 df->l2df_func(df->l2df_data, df->l2df_size);
3908                 list_remove(buflist, df);
3909                 kmem_free(df, sizeof (l2arc_data_free_t));
3910         }
3911 
3912         mutex_exit(&l2arc_free_on_write_mtx);
3913 }
3914 
3915 /*
3916  * A write to a cache device has completed.  Update all headers to allow
3917  * reads from these buffers to begin.
3918  */
3919 static void
3920 l2arc_write_done(zio_t *zio)
3921 {
3922         l2arc_write_callback_t *cb;
3923         l2arc_dev_t *dev;
3924         list_t *buflist;
3925         arc_buf_hdr_t *head, *ab, *ab_prev;
3926         l2arc_buf_hdr_t *abl2;
3927         kmutex_t *hash_lock;
3928 
3929         cb = zio->io_private;
3930         ASSERT(cb != NULL);
3931         dev = cb->l2wcb_dev;
3932         ASSERT(dev != NULL);
3933         head = cb->l2wcb_head;
3934         ASSERT(head != NULL);
3935         buflist = dev->l2ad_buflist;
3936         ASSERT(buflist != NULL);
3937         DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
3938             l2arc_write_callback_t *, cb);
3939 
3940         if (zio->io_error != 0)
3941                 ARCSTAT_BUMP(arcstat_l2_writes_error);
3942 
3943         mutex_enter(&l2arc_buflist_mtx);
3944 
3945         /*
3946          * All writes completed, or an error was hit.
3947          */
3948         for (ab = list_prev(buflist, head); ab; ab = ab_prev) {
3949                 ab_prev = list_prev(buflist, ab);
3950 
3951                 hash_lock = HDR_LOCK(ab);
3952                 if (!mutex_tryenter(hash_lock)) {
3953                         /*
3954                          * This buffer misses out.  It may be in a stage
3955                          * of eviction.  Its ARC_L2_WRITING flag will be
3956                          * left set, denying reads to this buffer.
3957                          */
3958                         ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
3959                         continue;
3960                 }
3961 
3962                 if (zio->io_error != 0) {
3963                         /*
3964                          * Error - drop L2ARC entry.
3965                          */
3966                         list_remove(buflist, ab);
3967                         abl2 = ab->b_l2hdr;
3968                         ab->b_l2hdr = NULL;
3969                         kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
3970                         ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
3971                 }
3972 
3973                 /*
3974                  * Allow ARC to begin reads to this L2ARC entry.
3975                  */
3976                 ab->b_flags &= ~ARC_L2_WRITING;
3977 
3978                 mutex_exit(hash_lock);
3979         }
3980 
3981         atomic_inc_64(&l2arc_writes_done);
3982         list_remove(buflist, head);
3983         kmem_cache_free(hdr_cache, head);
3984         mutex_exit(&l2arc_buflist_mtx);
3985 
3986         l2arc_do_free_on_write();
3987 
3988         kmem_free(cb, sizeof (l2arc_write_callback_t));
3989 }
3990 
3991 /*
3992  * A read to a cache device has completed.  Validate buffer contents before
3993  * handing over to the regular ARC routines.
3994  */
3995 static void
3996 l2arc_read_done(zio_t *zio)
3997 {
3998         l2arc_read_callback_t *cb;
3999         arc_buf_hdr_t *hdr;
4000         arc_buf_t *buf;
4001         kmutex_t *hash_lock;
4002         int equal;
4003 
4004         ASSERT(zio->io_vd != NULL);
4005         ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
4006 
4007         spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
4008 
4009         cb = zio->io_private;
4010         ASSERT(cb != NULL);
4011         buf = cb->l2rcb_buf;
4012         ASSERT(buf != NULL);
4013 
4014         hash_lock = HDR_LOCK(buf->b_hdr);
4015         mutex_enter(hash_lock);
4016         hdr = buf->b_hdr;
4017         ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
4018 
4019         /*
4020          * Check this survived the L2ARC journey.
4021          */
4022         equal = arc_cksum_equal(buf);
4023         if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
4024                 mutex_exit(hash_lock);
4025                 zio->io_private = buf;
4026                 zio->io_bp_copy = cb->l2rcb_bp;   /* XXX fix in L2ARC 2.0 */
4027                 zio->io_bp = &zio->io_bp_copy;        /* XXX fix in L2ARC 2.0 */
4028                 arc_read_done(zio);
4029         } else {
4030                 mutex_exit(hash_lock);
4031                 /*
4032                  * Buffer didn't survive caching.  Increment stats and
4033                  * reissue to the original storage device.
4034                  */
4035                 if (zio->io_error != 0) {
4036                         ARCSTAT_BUMP(arcstat_l2_io_error);
4037                 } else {
4038                         zio->io_error = EIO;
4039                 }
4040                 if (!equal)
4041                         ARCSTAT_BUMP(arcstat_l2_cksum_bad);
4042 
4043                 /*
4044                  * If there's no waiter, issue an async i/o to the primary
4045                  * storage now.  If there *is* a waiter, the caller must
4046                  * issue the i/o in a context where it's OK to block.
4047                  */
4048                 if (zio->io_waiter == NULL) {
4049                         zio_t *pio = zio_unique_parent(zio);
4050 
4051                         ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
4052 
4053                         zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
4054                             buf->b_data, zio->io_size, arc_read_done, buf,
4055                             zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
4056                 }
4057         }
4058 
4059         kmem_free(cb, sizeof (l2arc_read_callback_t));
4060 }
4061 
4062 /*
4063  * This is the list priority order in which the L2ARC searches for buffers to
4064  * cache.  It is used within loops (0..3) to cycle through the lists in the
4065  * desired order.  This order can have a significant effect on cache
4066  * performance.
4067  *
4068  * Currently the metadata lists are hit first, MFU then MRU, followed by
4069  * the data lists.  This function returns a locked list, and also returns
4070  * the lock pointer.
4071  */
4072 static list_t *
4073 l2arc_list_locked(int list_num, kmutex_t **lock)
4074 {
4075         list_t *list;
4076 
4077         ASSERT(list_num >= 0 && list_num <= 3);
4078 
4079         switch (list_num) {
4080         case 0:
4081                 list = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
4082                 *lock = &arc_mfu->arcs_mtx;
4083                 break;
4084         case 1:
4085                 list = &arc_mru->arcs_list[ARC_BUFC_METADATA];
4086                 *lock = &arc_mru->arcs_mtx;
4087                 break;
4088         case 2:
4089                 list = &arc_mfu->arcs_list[ARC_BUFC_DATA];
4090                 *lock = &arc_mfu->arcs_mtx;
4091                 break;
4092         case 3:
4093                 list = &arc_mru->arcs_list[ARC_BUFC_DATA];
4094                 *lock = &arc_mru->arcs_mtx;
4095                 break;
4096         }
4097 
4098         ASSERT(!(MUTEX_HELD(*lock)));
4099         mutex_enter(*lock);
4100         return (list);
4101 }
4102 
4103 /*
4104  * Evict buffers from the device write hand to the distance specified in
4105  * bytes.  This distance may span populated buffers, or it may span nothing.
4106  * It clears a region of the L2ARC device, making it ready for writing.
4107  * If the 'all' boolean is set, every buffer is evicted.
4108  */
4109 static void
4110 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
4111 {
4112         list_t *buflist;
4113         l2arc_buf_hdr_t *abl2;
4114         arc_buf_hdr_t *ab, *ab_prev;
4115         kmutex_t *hash_lock;
4116         uint64_t taddr;
4117 
4118         buflist = dev->l2ad_buflist;
4119 
4120         if (buflist == NULL)
4121                 return;
4122 
4123         if (!all && dev->l2ad_first) {
4124                 /*
4125                  * This is the first sweep through the device.  There is
4126                  * nothing to evict.
4127                  */
4128                 return;
4129         }
4130 
4131         if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
4132                 /*
4133                  * When nearing the end of the device, evict to the end
4134                  * before the device write hand jumps to the start.
4135                  */
4136                 taddr = dev->l2ad_end;
4137         } else {
4138                 taddr = dev->l2ad_hand + distance;
4139         }
4140         DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
4141             uint64_t, taddr, boolean_t, all);
4142 
4143 top:
4144         mutex_enter(&l2arc_buflist_mtx);
4145         for (ab = list_tail(buflist); ab; ab = ab_prev) {
4146                 ab_prev = list_prev(buflist, ab);
4147 
4148                 hash_lock = HDR_LOCK(ab);
4149                 if (!mutex_tryenter(hash_lock)) {
4150                         /*
4151                          * Missed the hash lock.  Retry.
4152                          */
4153                         ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
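                             /*
                              * Drop the buflist lock, wait for the current
                              * holder of the hash lock to release it, then
                              * restart the scan from the tail of the list.
                              */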
4154                         mutex_exit(&l2arc_buflist_mtx);
4155                         mutex_enter(hash_lock);
4156                         mutex_exit(hash_lock);
4157                         goto top;
4158                 }
4159 
4160                 if (HDR_L2_WRITE_HEAD(ab)) {
4161                         /*
4162                          * We hit a write head node.  Leave it for
4163                          * l2arc_write_done().
4164                          */
4165                         list_remove(buflist, ab);
4166                         mutex_exit(hash_lock);
4167                         continue;
4168                 }
4169 
4170                 if (!all && ab->b_l2hdr != NULL &&
4171                     (ab->b_l2hdr->b_daddr > taddr ||
4172                     ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
4173                         /*
4174                          * We've evicted to the target address,
4175                          * or the end of the device.
4176                          */
4177                         mutex_exit(hash_lock);
4178                         break;
4179                 }
4180 
4181                 if (HDR_FREE_IN_PROGRESS(ab)) {
4182                         /*
4183                          * Already on the path to destruction.
4184                          */
4185                         mutex_exit(hash_lock);
4186                         continue;
4187                 }
4188 
4189                 if (ab->b_state == arc_l2c_only) {
4190                         ASSERT(!HDR_L2_READING(ab));
4191                         /*
4192                          * This doesn't exist in the ARC.  Destroy.
4193                          * arc_hdr_destroy() will call list_remove()
4194                          * and decrement arcstat_l2_size.
4195                          */
4196                         arc_change_state(arc_anon, ab, hash_lock);
4197                         arc_hdr_destroy(ab);
4198                 } else {
4199                         /*
4200                          * Invalidate issued or about to be issued
4201                          * reads, since we may be about to write
4202                          * over this location.
4203                          */
4204                         if (HDR_L2_READING(ab)) {
4205                                 ARCSTAT_BUMP(arcstat_l2_evict_reading);
4206                                 ab->b_flags |= ARC_L2_EVICTED;
4207                         }
4208 
4209                         /*
4210                          * Tell ARC this no longer exists in L2ARC.
4211                          */
4212                         if (ab->b_l2hdr != NULL) {
4213                                 abl2 = ab->b_l2hdr;
4214                                 ab->b_l2hdr = NULL;
4215                                 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4216                                 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4217                         }
4218                         list_remove(buflist, ab);
4219 
4220                         /*
4221                          * This may have been left over after a
4222                          * failed write.
4223                          */
4224                         ab->b_flags &= ~ARC_L2_WRITING;
4225                 }
4226                 mutex_exit(hash_lock);
4227         }
4228         mutex_exit(&l2arc_buflist_mtx);
4229 
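             /*
              * Account for the space just reclaimed on the device and
              * advance the evict hand to the target address.
              */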
4230         vdev_space_update(dev->l2ad_vdev, -(taddr - dev->l2ad_evict), 0, 0);
4231         dev->l2ad_evict = taddr;
4232 }
4233 
4234 /*
4235  * Find and write ARC buffers to the L2ARC device.
4236  *
4237  * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
4238  * for reading until they have completed writing.
4239  */
4240 static uint64_t
4241 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
4242 {
4243         arc_buf_hdr_t *ab, *ab_prev, *head;
4244         l2arc_buf_hdr_t *hdrl2;
4245         list_t *list;
4246         uint64_t passed_sz, write_sz, buf_sz, headroom;
4247         void *buf_data;
4248         kmutex_t *hash_lock, *list_lock;
4249         boolean_t have_lock, full;
4250         l2arc_write_callback_t *cb;
4251         zio_t *pio, *wzio;
4252         uint64_t guid = spa_load_guid(spa);
4253 
4254         ASSERT(dev->l2ad_vdev != NULL);
4255 
4256         pio = NULL;
4257         write_sz = 0;
4258         full = B_FALSE;
4259         head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
4260         head->b_flags |= ARC_L2_WRITE_HEAD;
4261 
4262         /*
4263          * Copy buffers for L2ARC writing.
4264          */
4265         mutex_enter(&l2arc_buflist_mtx);
4266         for (int try = 0; try <= 3; try++) {
4267                 list = l2arc_list_locked(try, &list_lock);
4268                 passed_sz = 0;
4269 
4270                 /*
4271                  * L2ARC fast warmup.
4272                  *
4273                  * Until the ARC is warm and starts to evict, read from the
4274                  * head of the ARC lists rather than the tail.
4275                  */
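                     /*
                      * Limit how far down this list we are willing to
                      * scan: at most l2arc_headroom times the write
                      * target size.
                      */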
4276                 headroom = target_sz * l2arc_headroom;
4277                 if (arc_warm == B_FALSE)
4278                         ab = list_head(list);
4279                 else
4280                         ab = list_tail(list);
4281 
4282                 for (; ab; ab = ab_prev) {
4283                         if (arc_warm == B_FALSE)
4284                                 ab_prev = list_next(list, ab);
4285                         else
4286                                 ab_prev = list_prev(list, ab);
4287 
4288                         hash_lock = HDR_LOCK(ab);
4289                         have_lock = MUTEX_HELD(hash_lock);
4290                         if (!have_lock && !mutex_tryenter(hash_lock)) {
4291                                 /*
4292                                  * Skip this buffer rather than waiting.
4293                                  */
4294                                 continue;
4295                         }
4296 
4297                         passed_sz += ab->b_size;
4298                         if (passed_sz > headroom) {
4299                                 /*
4300                                  * Searched too far.
4301                                  */
4302                                 mutex_exit(hash_lock);
4303                                 break;
4304                         }
4305 
4306                         if (!l2arc_write_eligible(guid, ab)) {
4307                                 mutex_exit(hash_lock);
4308                                 continue;
4309                         }
4310 
4311                         if ((write_sz + ab->b_size) > target_sz) {
4312                                 full = B_TRUE;
4313                                 mutex_exit(hash_lock);
4314                                 break;
4315                         }
4316 
4317                         if (pio == NULL) {
4318                                 /*
4319                                  * Insert a dummy header on the buflist so
4320                                  * l2arc_write_done() can find where the
4321                                  * write buffers begin without searching.
4322                                  */
4323                                 list_insert_head(dev->l2ad_buflist, head);
4324 
4325                                 cb = kmem_alloc(
4326                                     sizeof (l2arc_write_callback_t), KM_SLEEP);
4327                                 cb->l2wcb_dev = dev;
4328                                 cb->l2wcb_head = head;
4329                                 pio = zio_root(spa, l2arc_write_done, cb,
4330                                     ZIO_FLAG_CANFAIL);
4331                         }
4332 
4333                         /*
4334                          * Create and add a new L2ARC header.
4335                          */
4336                         hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
4337                         hdrl2->b_dev = dev;
4338                         hdrl2->b_daddr = dev->l2ad_hand;
4339 
4340                         ab->b_flags |= ARC_L2_WRITING;
4341                         ab->b_l2hdr = hdrl2;
4342                         list_insert_head(dev->l2ad_buflist, ab);
4343                         buf_data = ab->b_buf->b_data;
4344                         buf_sz = ab->b_size;
4345 
4346                         /*
4347                          * Compute and store the buffer cksum before
4348                          * writing.  On debug builds, the cksum is verified first.
4349                          */
4350                         arc_cksum_verify(ab->b_buf);
4351                         arc_cksum_compute(ab->b_buf, B_TRUE);
4352 
4353                         mutex_exit(hash_lock);
4354 
4355                         wzio = zio_write_phys(pio, dev->l2ad_vdev,
4356                             dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
4357                             NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
4358                             ZIO_FLAG_CANFAIL, B_FALSE);
4359 
4360                         DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
4361                             zio_t *, wzio);
4362                         (void) zio_nowait(wzio);
4363 
4364                         /*
4365                          * Keep the clock hand suitably device-aligned.
4366                          */
4367                         buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
4368 
4369                         write_sz += buf_sz;
4370                         dev->l2ad_hand += buf_sz;
4371                 }
4372 
4373                 mutex_exit(list_lock);
4374 
4375                 if (full == B_TRUE)
4376                         break;
4377         }
4378         mutex_exit(&l2arc_buflist_mtx);
4379 
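     /*
      * If no write zio was created, no buffers were eligible; free the
      * unused write-head marker and report zero bytes written.
      */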
4380         if (pio == NULL) {
4381                 ASSERT3U(write_sz, ==, 0);
4382                 kmem_cache_free(hdr_cache, head);
4383                 return (0);
4384         }
4385 
4386         ASSERT3U(write_sz, <=, target_sz);
4387         ARCSTAT_BUMP(arcstat_l2_writes_sent);
4388         ARCSTAT_INCR(arcstat_l2_write_bytes, write_sz);
4389         ARCSTAT_INCR(arcstat_l2_size, write_sz);
4390         vdev_space_update(dev->l2ad_vdev, write_sz, 0, 0);
4391 
4392         /*
4393          * Bump device hand to the device start if it is approaching the end.
4394          * l2arc_evict() will already have evicted ahead for this case.
4395          */
4396         if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
4397                 vdev_space_update(dev->l2ad_vdev,
4398                     dev->l2ad_end - dev->l2ad_hand, 0, 0);
4399                 dev->l2ad_hand = dev->l2ad_start;
4400                 dev->l2ad_evict = dev->l2ad_start;
4401                 dev->l2ad_first = B_FALSE;
4402         }
4403 
4404         dev->l2ad_writing = B_TRUE;
4405         (void) zio_wait(pio);
4406         dev->l2ad_writing = B_FALSE;
4407 
4408         return (write_sz);
4409 }
4410 
4411 /*
4412  * This thread feeds the L2ARC at regular intervals.  This is the beating
4413  * heart of the L2ARC.
4414  */
4415 static void
4416 l2arc_feed_thread(void)
4417 {
4418         callb_cpr_t cpr;
4419         l2arc_dev_t *dev;
4420         spa_t *spa;
4421         uint64_t size, wrote;
4422         clock_t begin, next = ddi_get_lbolt();
4423 
4424         CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
4425 
4426         mutex_enter(&l2arc_feed_thr_lock);
4427 
4428         while (l2arc_thread_exit == 0) {
4429                 CALLB_CPR_SAFE_BEGIN(&cpr);
4430                 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
4431                     next);
4432                 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
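                /*
                 * Default wakeup: re-check for work in one second (hz
                 * ticks).  This is recalculated below once a device is
                 * selected and a write pass completes.
                 */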
4433                 next = ddi_get_lbolt() + hz;
4434 
4435                 /*
4436                  * Quick check for L2ARC devices.
4437                  */
4438                 mutex_enter(&l2arc_dev_mtx);
4439                 if (l2arc_ndev == 0) {
4440                         mutex_exit(&l2arc_dev_mtx);
4441                         continue;
4442                 }
4443                 mutex_exit(&l2arc_dev_mtx);
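                /*
                 * Record when this pass began so the next write
                 * interval can be calculated relative to it.
                 */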
4444                 begin = ddi_get_lbolt();
4445 
4446                 /*
4447                  * This selects the next l2arc device to write to, and in
4448                  * doing so the next spa to feed from: dev->l2ad_spa.   This
4449                  * will return NULL if there are now no l2arc devices or if
4450                  * they are all faulted.
4451                  *
4452                  * If a device is returned, its spa's config lock is also
4453                  * held to prevent device removal.  l2arc_dev_get_next()
4454                  * will grab and release l2arc_dev_mtx.
4455                  */
4456                 if ((dev = l2arc_dev_get_next()) == NULL)
4457                         continue;
4458 
4459                 spa = dev->l2ad_spa;
4460                 ASSERT(spa != NULL);
4461 
4462                 /*
4463                  * If the pool is read-only then force the feed thread to
4464                  * sleep a little longer.
4465                  */
4466                 if (!spa_writeable(spa)) {
4467                         next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
4468                         spa_config_exit(spa, SCL_L2ARC, dev);
4469                         continue;
4470                 }
4471 
4472                 /*
4473                  * Avoid contributing to memory pressure.
4474                  */
4475                 if (arc_reclaim_needed()) {
4476                         ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
4477                         spa_config_exit(spa, SCL_L2ARC, dev);
4478                         continue;
4479                 }
4480 
4481                 ARCSTAT_BUMP(arcstat_l2_feeds);
4482 
4483                 size = l2arc_write_size(dev);
4484 
4485                 /*
4486                  * Evict L2ARC buffers that will be overwritten.
4487                  */
4488                 l2arc_evict(dev, size, B_FALSE);
4489 
4490                 /*
4491                  * Write ARC buffers.
4492                  */
4493                 wrote = l2arc_write_buffers(spa, dev, size);
4494 
4495                 /*
4496                  * Calculate interval between writes.
4497                  */
4498                 next = l2arc_write_interval(begin, size, wrote);
4499                 spa_config_exit(spa, SCL_L2ARC, dev);
4500         }
4501 
4502         l2arc_thread_exit = 0;
4503         cv_broadcast(&l2arc_feed_thr_cv);
4504         CALLB_CPR_EXIT(&cpr);               /* drops l2arc_feed_thr_lock */
4505         thread_exit();
4506 }
4507 
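     /*
      * Check whether the given vdev is already registered as an L2ARC
      * cache device.
      */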
4508 boolean_t
4509 l2arc_vdev_present(vdev_t *vd)
4510 {
4511         l2arc_dev_t *dev;
4512 
4513         mutex_enter(&l2arc_dev_mtx);
4514         for (dev = list_head(l2arc_dev_list); dev != NULL;
4515             dev = list_next(l2arc_dev_list, dev)) {
4516                 if (dev->l2ad_vdev == vd)
4517                         break;
4518         }
4519         mutex_exit(&l2arc_dev_mtx);
4520 
4521         return (dev != NULL);
4522 }
4523 
4524 /*
4525  * Add a vdev for use by the L2ARC.  By this point the spa has already
4526  * validated the vdev and opened it.
4527  */
4528 void
4529 l2arc_add_vdev(spa_t *spa, vdev_t *vd)
4530 {
4531         l2arc_dev_t *adddev;
4532 
4533         ASSERT(!l2arc_vdev_present(vd));
4534 
4535         /*
4536          * Create a new l2arc device entry.
4537          */
4538         adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
4539         adddev->l2ad_spa = spa;
4540         adddev->l2ad_vdev = vd;
4541         adddev->l2ad_write = l2arc_write_max;
4542         adddev->l2ad_boost = l2arc_write_boost;
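     /*
      * The usable region begins past the front vdev labels
      * (VDEV_LABEL_START_SIZE); the write hand and the evict hand both
      * start there.
      */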
4543         adddev->l2ad_start = VDEV_LABEL_START_SIZE;
4544         adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
4545         adddev->l2ad_hand = adddev->l2ad_start;
4546         adddev->l2ad_evict = adddev->l2ad_start;
4547         adddev->l2ad_first = B_TRUE;
4548         adddev->l2ad_writing = B_FALSE;
4549         ASSERT3U(adddev->l2ad_write, >, 0);
4550 
4551         /*
4552          * This is a list of all ARC buffers that are still valid on the
4553          * device.
4554          */
4555         adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
4556         list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
4557             offsetof(arc_buf_hdr_t, b_l2node));
4558 
4559         vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
4560 
4561         /*
4562          * Add device to global list
4563          */
4564         mutex_enter(&l2arc_dev_mtx);
4565         list_insert_head(l2arc_dev_list, adddev);
4566         atomic_inc_64(&l2arc_ndev);
4567         mutex_exit(&l2arc_dev_mtx);
4568 }
4569 
4570 /*
4571  * Remove a vdev from the L2ARC.
4572  */
4573 void
4574 l2arc_remove_vdev(vdev_t *vd)
4575 {
4576         l2arc_dev_t *dev, *nextdev, *remdev = NULL;
4577 
4578         /*
4579          * Find the device by vdev
4580          */
4581         mutex_enter(&l2arc_dev_mtx);
4582         for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
4583                 nextdev = list_next(l2arc_dev_list, dev);
4584                 if (vd == dev->l2ad_vdev) {
4585                         remdev = dev;
4586                         break;
4587                 }
4588         }
4589         ASSERT(remdev != NULL);
4590 
4591         /*
4592          * Remove device from global list
4593          */
4594         list_remove(l2arc_dev_list, remdev);
4595         l2arc_dev_last = NULL;          /* may have been invalidated */
4596         atomic_dec_64(&l2arc_ndev);
4597         mutex_exit(&l2arc_dev_mtx);
4598 
4599         /*
4600  * Clear all buflists and ARC references; this flushes the L2ARC device.
4601          */
4602         l2arc_evict(remdev, 0, B_TRUE);
4603         list_destroy(remdev->l2ad_buflist);
4604         kmem_free(remdev->l2ad_buflist, sizeof (list_t));
4605         kmem_free(remdev, sizeof (l2arc_dev_t));
4606 }
4607 
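     /*
      * One-time initialization of global L2ARC state: the feed thread
      * lock and CV, the device and buffer list locks, and the device and
      * free-on-write lists shared by all cache devices.
      */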
4608 void
4609 l2arc_init(void)
4610 {
4611         l2arc_thread_exit = 0;
4612         l2arc_ndev = 0;
4613         l2arc_writes_sent = 0;
4614         l2arc_writes_done = 0;
4615 
4616         mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
4617         cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
4618         mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
4619         mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
4620         mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
4621 
4622         l2arc_dev_list = &L2ARC_dev_list;
4623         l2arc_free_on_write = &L2ARC_free_on_write;
4624         list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
4625             offsetof(l2arc_dev_t, l2ad_node));
4626         list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
4627             offsetof(l2arc_data_free_t, l2df_list_node));
4628 }
4629 
4630 void
4631 l2arc_fini(void)
4632 {
4633         /*
4634  * This is called from dmu_fini(), which is called from spa_fini().
4635          * Because of this, we can assume that all l2arc devices have
4636          * already been removed when the pools themselves were removed.
4637          */
4638 
4639         l2arc_do_free_on_write();
4640 
4641         mutex_destroy(&l2arc_feed_thr_lock);
4642         cv_destroy(&l2arc_feed_thr_cv);
4643         mutex_destroy(&l2arc_dev_mtx);
4644         mutex_destroy(&l2arc_buflist_mtx);
4645         mutex_destroy(&l2arc_free_on_write_mtx);
4646 
4647         list_destroy(l2arc_dev_list);
4648         list_destroy(l2arc_free_on_write);
4649 }
4650 
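     /*
      * Start the L2ARC feed thread.  There is nothing to do if the pools
      * were opened without write access (no FWRITE in spa_mode_global),
      * since nothing will ever be written to the cache devices.
      */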
4651 void
4652 l2arc_start(void)
4653 {
4654         if (!(spa_mode_global & FWRITE))
4655                 return;
4656 
4657         (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
4658             TS_RUN, minclsyspri);
4659 }
4660 
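     /*
      * Ask the L2ARC feed thread to exit and wait until it has done so.
      */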
4661 void
4662 l2arc_stop(void)
4663 {
4664         if (!(spa_mode_global & FWRITE))
4665                 return;
4666 
4667         mutex_enter(&l2arc_feed_thr_lock);
4668         cv_signal(&l2arc_feed_thr_cv);      /* kick thread out of startup */
4669         l2arc_thread_exit = 1;
4670         while (l2arc_thread_exit != 0)
4671                 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
4672         mutex_exit(&l2arc_feed_thr_lock);
4673 }