1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  23  * Copyright (c) 2018, Joyent, Inc.
  24  * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
  25  * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
  26  * Copyright 2019 Nexenta Systems, Inc.  All rights reserved.
  27  */
  28 
  29 /*
  30  * DVA-based Adjustable Replacement Cache
  31  *
  32  * While much of the theory of operation used here is
  33  * based on the self-tuning, low overhead replacement cache
  34  * presented by Megiddo and Modha at FAST 2003, there are some
  35  * significant differences:
  36  *
  37  * 1. The Megiddo and Modha model assumes any page is evictable.
  38  * Pages in its cache cannot be "locked" into memory.  This makes
  39  * the eviction algorithm simple: evict the last page in the list.
  40  * This also makes the performance characteristics easy to reason
  41  * about.  Our cache is not so simple.  At any given moment, some
  42  * subset of the blocks in the cache are un-evictable because we
  43  * have handed out a reference to them.  Blocks are only evictable
  44  * when there are no external references active.  This makes
  45  * eviction far more problematic:  we choose to evict the evictable
  46  * blocks that are the "lowest" in the list.
  47  *
  48  * There are times when it is not possible to evict the requested
  49  * space.  In these circumstances we are unable to adjust the cache
  50  * size.  To prevent the cache from growing unbounded at these times, we
  51  * implement a "cache throttle" that slows the flow of new data
  52  * into the cache until we can make space available.
  53  *
  54  * 2. The Megiddo and Modha model assumes a fixed cache size.
  55  * Pages are evicted when the cache is full and there is a cache
  56  * miss.  Our model has a variable sized cache.  It grows with
  57  * high use, but also tries to react to memory pressure from the
  58  * operating system: decreasing its size when system memory is
  59  * tight.
  60  *
  61  * 3. The Megiddo and Modha model assumes a fixed page size. All
  62  * elements of the cache are therefore exactly the same size.  So
  63  * when adjusting the cache size following a cache miss, it's simply
  64  * a matter of choosing a single page to evict.  In our model, we
  65  * have variable sized cache blocks (ranging from 512 bytes to
  66  * 128K bytes).  We therefore choose a set of blocks to evict to make
  67  * space for a cache miss that approximates as closely as possible
  68  * the space used by the new block.
  69  *
  70  * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
  71  * by N. Megiddo & D. Modha, FAST 2003
  72  */
  73 
  74 /*
  75  * The locking model:
  76  *
  77  * A new reference to a cache buffer can be obtained in two
  78  * ways: 1) via a hash table lookup using the DVA as a key,
  79  * or 2) via one of the ARC lists.  The arc_read() interface
  80  * uses method 1, while the internal ARC algorithms for
  81  * adjusting the cache use method 2.  We therefore provide two
  82  * types of locks: 1) the hash table lock array, and 2) the
  83  * ARC list locks.
  84  *
  85  * Buffers do not have their own mutexes, rather they rely on the
  86  * hash table mutexes for the bulk of their protection (i.e. most
  87  * fields in the arc_buf_hdr_t are protected by these mutexes).
  88  *
  89  * buf_hash_find() returns the appropriate mutex (held) when it
  90  * locates the requested buffer in the hash table.  It returns
  91  * NULL for the mutex if the buffer was not in the table.
  92  *
  93  * buf_hash_remove() expects the appropriate hash mutex to be
  94  * already held before it is invoked.
  95  *
  96  * Each ARC state also has a mutex which is used to protect the
  97  * buffer list associated with the state.  When attempting to
  98  * obtain a hash table lock while holding an ARC list lock you
  99  * must use mutex_tryenter() to avoid deadlock.  Also note that
 100  * the active state mutex must be held before the ghost state mutex.
 101  *
 102  * Note that the majority of the performance stats are manipulated
 103  * with atomic operations.
 104  *
 105  * The L2ARC uses the l2ad_mtx on each vdev for the following:
 106  *
 107  *      - L2ARC buflist creation
 108  *      - L2ARC buflist eviction
 109  *      - L2ARC write completion, which walks L2ARC buflists
 110  *      - ARC header destruction, as it removes from L2ARC buflists
 111  *      - ARC header release, as it removes from L2ARC buflists
 112  */
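     /*
      * For illustration only (a hedged sketch, not part of the implementation):
      * a typical lookup following the rules above obtains the header together
      * with its (held) hash mutex via buf_hash_find(), roughly as follows
      * (argument names here are illustrative):
      *
      *      kmutex_t *hash_lock;
      *      arc_buf_hdr_t *hdr = buf_hash_find(spa, bp, &hash_lock);
      *      if (hdr != NULL) {
      *              ... operate on hdr while hash_lock is held ...
      *              mutex_exit(hash_lock);
      *      }
      *
      * If an ARC list lock is already held, the hash lock must instead be
      * taken with mutex_tryenter() and the operation retried on failure.
      */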
 113 
 114 /*
 115  * ARC operation:
 116  *
 117  * Every block that is in the ARC is tracked by an arc_buf_hdr_t structure.
 118  * This structure can point either to a block that is still in the cache or to
 119  * one that is only accessible in an L2 ARC device, or it can provide
 120  * information about a block that was recently evicted. If a block is
 121  * only accessible in the L2ARC, then the arc_buf_hdr_t only has enough
 122  * information to retrieve it from the L2ARC device. This information is
 123  * stored in the l2arc_buf_hdr_t sub-structure of the arc_buf_hdr_t. A block
 124  * that is in this state cannot have its data accessed directly.
 125  *
 126  * Blocks that are actively being referenced or have not been evicted
 127  * are cached in the L1ARC. The L1ARC (l1arc_buf_hdr_t) is a structure within
 128  * the arc_buf_hdr_t that will point to the data block in memory. A block can
 129  * only be read by a consumer if it has an l1arc_buf_hdr_t. The L1ARC
 130  * caches data in two ways -- in a list of ARC buffers (arc_buf_t) and
 131  * also in the arc_buf_hdr_t's private physical data block pointer (b_pabd).
 132  *
 133  * The L1ARC's data pointer may or may not be uncompressed. The ARC has the
 134  * ability to store the physical data (b_pabd) associated with the DVA of the
 135  * arc_buf_hdr_t. Since the b_pabd is a copy of the on-disk physical block,
 136  * it will match its on-disk compression characteristics. This behavior can be
 137  * disabled by setting 'zfs_compressed_arc_enabled' to B_FALSE. When the
 138  * compressed ARC functionality is disabled, the b_pabd will point to an
 139  * uncompressed version of the on-disk data.
 140  *
 141  * Data in the L1ARC is not accessed by consumers of the ARC directly. Each
 142  * arc_buf_hdr_t can have multiple ARC buffers (arc_buf_t) which reference it.
 143  * Each ARC buffer (arc_buf_t) is being actively accessed by a specific ARC
 144  * consumer. The ARC will provide references to this data and will keep it
 145  * cached until it is no longer in use. The ARC caches only the L1ARC's physical
 146  * data block and will evict any arc_buf_t that is no longer referenced. The
 147  * amount of memory consumed by the arc_buf_ts' data buffers can be seen via the
 148  * "overhead_size" kstat.
 149  *
 150  * Depending on the consumer, an arc_buf_t can be requested in uncompressed or
 151  * compressed form. The typical case is that consumers will want uncompressed
 152  * data, and when that happens a new data buffer is allocated where the data is
 153  * decompressed for them to use. Currently the only consumer who wants
 154  * compressed arc_buf_t's is "zfs send", when it streams data exactly as it
 155  * exists on disk. When this happens, the arc_buf_t's data buffer is shared
 156  * with the arc_buf_hdr_t.
 157  *
 158  * Here is a diagram showing an arc_buf_hdr_t referenced by two arc_buf_t's. The
 159  * first one is owned by a compressed send consumer (and therefore references
 160  * the same compressed data buffer as the arc_buf_hdr_t) and the second could be
 161  * used by any other consumer (and has its own uncompressed copy of the data
 162  * buffer).
 163  *
 164  *   arc_buf_hdr_t
 165  *   +-----------+
 166  *   | fields    |
 167  *   | common to |
 168  *   | L1- and   |
 169  *   | L2ARC     |
 170  *   +-----------+
 171  *   | l2arc_buf_hdr_t
 172  *   |           |
 173  *   +-----------+
 174  *   | l1arc_buf_hdr_t
 175  *   |           |              arc_buf_t
 176  *   | b_buf     +------------>+-----------+      arc_buf_t
 177  *   | b_pabd    +-+           |b_next     +---->+-----------+
 178  *   +-----------+ |           |-----------|     |b_next     +-->NULL
 179  *                 |           |b_comp = T |     +-----------+
 180  *                 |           |b_data     +-+   |b_comp = F |
 181  *                 |           +-----------+ |   |b_data     +-+
 182  *                 +->+------+               |   +-----------+ |
 183  *        compressed  |      |               |                 |
 184  *           data     |      |<--------------+                 | uncompressed
 185  *                    +------+          compressed,            |     data
 186  *                                        shared               +-->+------+
 187  *                                         data                    |      |
 188  *                                                                 |      |
 189  *                                                                 +------+
 190  *
 191  * When a consumer reads a block, the ARC must first look to see if the
 192  * arc_buf_hdr_t is cached. If the hdr is cached then the ARC allocates a new
 193  * arc_buf_t and either copies uncompressed data into a new data buffer from an
 194  * existing uncompressed arc_buf_t, decompresses the hdr's b_pabd buffer into a
 195  * new data buffer, or shares the hdr's b_pabd buffer, depending on whether the
 196  * hdr is compressed and the desired compression characteristics of the
 197  * arc_buf_t consumer. If the arc_buf_t ends up sharing data with the
 198  * arc_buf_hdr_t and both of them are uncompressed then the arc_buf_t must be
 199  * the last buffer in the hdr's b_buf list; however, a shared compressed buf can
 200  * be anywhere in the hdr's list.
 201  *
 202  * The diagram below shows an example of an uncompressed ARC hdr that is
 203  * sharing its data with an arc_buf_t (note that the shared uncompressed buf is
 204  * the last element in the buf list):
 205  *
 206  *                arc_buf_hdr_t
 207  *                +-----------+
 208  *                |           |
 209  *                |           |
 210  *                |           |
 211  *                +-----------+
 212  * l2arc_buf_hdr_t|           |
 213  *                |           |
 214  *                +-----------+
 215  * l1arc_buf_hdr_t|           |
 216  *                |           |                 arc_buf_t    (shared)
 217  *                |    b_buf  +------------>+---------+      arc_buf_t
 218  *                |           |             |b_next   +---->+---------+
 219  *                |  b_pabd   +-+           |---------|     |b_next   +-->NULL
 220  *                +-----------+ |           |         |     +---------+
 221  *                              |           |b_data   +-+   |         |
 222  *                              |           +---------+ |   |b_data   +-+
 223  *                              +->+------+             |   +---------+ |
 224  *                                 |      |             |               |
 225  *                   uncompressed  |      |             |               |
 226  *                        data     +------+             |               |
 227  *                                    ^                 +->+------+     |
 228  *                                    |       uncompressed |      |     |
 229  *                                    |           data     |      |     |
 230  *                                    |                    +------+     |
 231  *                                    +---------------------------------+
 232  *
 233  * Writing to the ARC requires that the ARC first discard the hdr's b_pabd
 234  * since the physical block is about to be rewritten. The new data contents
 235  * will be contained in the arc_buf_t. As the I/O pipeline performs the write,
 236  * it may compress the data before writing it to disk. The ARC will be called
 237  * with the transformed data and will bcopy the transformed on-disk block into
 238  * a newly allocated b_pabd. Writes are always done into buffers which have
 239  * either been loaned (and hence are new and don't have other readers) or
 240  * buffers which have been released (and hence have their own hdr, if there
 241  * were originally other readers of the buf's original hdr). This ensures that
 242  * the ARC only needs to update a single buf and its hdr after a write occurs.
 243  *
 244  * When the L2ARC is in use, it will also take advantage of the b_pabd. The
 245  * L2ARC will always write the contents of b_pabd to the L2ARC. This means
 246  * that when compressed ARC is enabled, the L2ARC blocks are identical
 247  * to the on-disk block in the main data pool. This provides a significant
 248  * advantage since the ARC can leverage the bp's checksum when reading from the
 249  * L2ARC to determine if the contents are valid. However, if the compressed
 250  * ARC is disabled, then the L2ARC's block must be transformed to look
 251  * like the physical block in the main data pool before comparing the
 252  * checksum and determining its validity.
 253  */
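     /*
      * The read-path decision above, sketched in pseudocode (for illustration
      * only; the actual logic lives in the buffer allocation code later in
      * this file):
      *
      *      if (consumer wants compressed data && hdr's b_pabd is compressed)
      *              share b_pabd with the new arc_buf_t;
      *      else if (hdr is uncompressed && sharing rules permit)
      *              share b_pabd (the buf must be last in the hdr's b_buf list);
      *      else if (an uncompressed arc_buf_t already exists)
      *              bcopy() its contents into a newly allocated buffer;
      *      else
      *              decompress b_pabd into a newly allocated buffer;
      */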
 254 
 255 #include <sys/spa.h>
 256 #include <sys/spa_impl.h>
 257 #include <sys/zio.h>
 259 #include <sys/zio_compress.h>
 260 #include <sys/zio_checksum.h>
 261 #include <sys/zfs_context.h>
 262 #include <sys/arc.h>
 263 #include <sys/refcount.h>
 264 #include <sys/vdev.h>
 265 #include <sys/vdev_impl.h>
 266 #include <sys/dsl_pool.h>
 268 #include <sys/multilist.h>
 269 #include <sys/abd.h>
 270 #ifdef _KERNEL
 271 #include <sys/vmsystm.h>
 272 #include <vm/anon.h>
 273 #include <sys/fs/swapnode.h>
 274 #include <sys/dnlc.h>
 275 #endif
 276 #include <sys/callb.h>
 277 #include <sys/kstat.h>
 278 #include <zfs_fletcher.h>
 279 #include <sys/byteorder.h>
 281 
 282 #ifndef _KERNEL
 283 /* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
 284 boolean_t arc_watch = B_FALSE;
 285 int arc_procfd;
 286 #endif
 287 
 288 static kmutex_t         arc_reclaim_lock;
 289 static kcondvar_t       arc_reclaim_thread_cv;
 290 static boolean_t        arc_reclaim_thread_exit;
 291 static kcondvar_t       arc_reclaim_waiters_cv;
 292 
 293 uint_t arc_reduce_dnlc_percent = 3;
 294 
 295 /*
 296  * The number of headers to evict in arc_evict_state_impl() before
 297  * dropping the sublist lock and evicting from another sublist. A lower
 298  * value means we're more likely to evict the "correct" header (i.e. the
 299  * oldest header in the arc state), but comes with higher overhead
 300  * (i.e. more invocations of arc_evict_state_impl()).
 301  */
 302 int zfs_arc_evict_batch_limit = 10;
 303 
 304 /* number of seconds before growing cache again */
 305 static int              arc_grow_retry = 60;
 306 
 307 /* number of milliseconds before attempting a kmem-cache-reap */
 308 static int              arc_kmem_cache_reap_retry_ms = 1000;
 309 
 310 /* shift of arc_c for calculating overflow limit in arc_get_data_impl */
 311 int             zfs_arc_overflow_shift = 8;
 312 
 313 /* shift of arc_c for calculating both min and max arc_p */
 314 static int              arc_p_min_shift = 4;
 315 
 316 /* log2(fraction of arc to reclaim) */
 317 static int              arc_shrink_shift = 7;
 318 
 319 /*
 320  * log2(fraction of ARC which must be free to allow growing).
 321  * I.e., if there is less than arc_c >> arc_no_grow_shift free memory,
 322  * when reading a new block into the ARC, we will evict an equal-sized block
 323  * from the ARC.
 324  *
 325  * This must be less than arc_shrink_shift, so that when we shrink the ARC,
 326  * we will still not allow it to grow.
 327  */
 328 int                     arc_no_grow_shift = 5;
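     /*
      * As an illustrative example: with arc_c at 1 GiB and arc_no_grow_shift
      * at 5, the free-memory threshold is 1 GiB >> 5 = 32 MiB; below that,
      * reading a new block into the ARC evicts an equal-sized block instead
      * of growing the cache.
      */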
 329 
 330 
 331 /*
 332  * minimum lifespan of a prefetch block in clock ticks
 333  * (initialized in arc_init())
 334  */
 335 static int              arc_min_prefetch_lifespan;
 336 
 337 /*
 338  * If this percent of memory is free, don't throttle.
 339  */
 340 int arc_lotsfree_percent = 10;
 341 
 342 static int arc_dead;
 343 
 344 /*
 345  * The arc has filled available memory and has now warmed up.
 346  */
 347 static boolean_t arc_warm;
 348 
 349 /*
 350  * log2 fraction of the zio arena to keep free.
 351  */
 352 int arc_zio_arena_free_shift = 2;
 353 
 354 /*
 355  * These tunables are for performance analysis.
 356  */
 357 uint64_t zfs_arc_max;
 358 uint64_t zfs_arc_min;
 359 uint64_t zfs_arc_meta_limit = 0;
 360 uint64_t zfs_arc_meta_min = 0;
 361 uint64_t zfs_arc_ddt_limit = 0;
 362 /*
 363  * Tunable to control "dedup ceiling"
 364  * Possible values:
 365  *  DDT_NO_LIMIT        - default behaviour, i.e. no ceiling
 366  *  DDT_LIMIT_TO_ARC    - stop DDT growth if DDT is bigger than its "ARC space"
 367  *  DDT_LIMIT_TO_L2ARC  - stop DDT growth when DDT size is bigger than the
 368  *                        L2ARC DDT dev(s) for that pool
 369  */
 370 zfs_ddt_limit_t zfs_ddt_limit_type = DDT_LIMIT_TO_ARC;
 371 /*
 372  * Alternative to the above way of controlling "dedup ceiling":
 373  * Stop DDT growth when the in-core DDT size exceeds this tunable.
 374  * This tunable, when non-zero, overrides the zfs_ddt_limit_type tunable.
 375  */
 376 uint64_t zfs_ddt_byte_ceiling = 0;
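     /*
      * Illustrative precedence sketch (not the actual enforcement code,
      * which lives elsewhere):
      *
      *      if (zfs_ddt_byte_ceiling != 0)
      *              stop DDT growth once the in-core DDT size exceeds
      *              zfs_ddt_byte_ceiling;
      *      else
      *              apply the policy selected by zfs_ddt_limit_type;
      */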
 377 boolean_t zfs_arc_segregate_ddt = B_TRUE;
 378 int zfs_arc_grow_retry = 0;
 379 int zfs_arc_shrink_shift = 0;
 380 int zfs_arc_p_min_shift = 0;
 381 int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
 382 
 383 /* Tunable; the default of 64 is essentially arbitrary */
 384 int zfs_flush_ntasks = 64;
 385 
 386 boolean_t zfs_compressed_arc_enabled = B_TRUE;
 387 
 388 /*
 389  * Note that buffers can be in one of 6 states:
 390  *      ARC_anon        - anonymous (discussed below)
 391  *      ARC_mru         - recently used, currently cached
 392  *      ARC_mru_ghost   - recently used, no longer in cache
 393  *      ARC_mfu         - frequently used, currently cached
 394  *      ARC_mfu_ghost   - frequently used, no longer in cache
 395  *      ARC_l2c_only    - exists in L2ARC but not other states
 396  * When there are no active references to a buffer, it is linked
 397  * onto a list in one of these arc states.  These are
 398  * the only buffers that can be evicted or deleted.  Within each
 399  * state there are multiple lists, one per buffer type (data,
 400  * meta-data, and DDT).  Meta-data (indirect blocks, blocks of dnodes,
 401  * etc.) is tracked separately so that it can be managed more
 402  * explicitly: favored over data, limited explicitly.
 403  *
 404  * Anonymous buffers are buffers that are not associated with
 405  * a DVA.  These are buffers that hold dirty block copies
 406  * before they are written to stable storage.  By definition,
 407  * they are "ref'd" and are considered part of arc_mru
 408  * that cannot be freed.  Generally, they will acquire a DVA
 409  * as they are written and migrate onto the arc_mru list.
 410  *
 411  * The ARC_l2c_only state is for buffers that are in the second
 412  * level ARC but no longer in any of the ARC_m* lists.  The second
 413  * level ARC itself may also contain buffers that are in any of
 414  * the ARC_m* states - meaning that a buffer can exist in two
 415  * places.  The reason for the ARC_l2c_only state is to keep the
 416  * buffer header in the hash table, so that reads that hit the
 417  * second level ARC benefit from these fast lookups.
 418  */
 419 
 420 typedef struct arc_state {
 421         /*
 422          * list of evictable buffers
 423          */
 424         multilist_t *arcs_list[ARC_BUFC_NUMTYPES];
 425         /*
 426          * total amount of evictable data in this state
 427          */
 428         refcount_t arcs_esize[ARC_BUFC_NUMTYPES];
 429         /*
 430          * total amount of data in this state; this includes: evictable,
 431          * non-evictable, ARC_BUFC_DATA, ARC_BUFC_METADATA and ARC_BUFC_DDT.
 432          * ARC_BUFC_DDT list is only populated when zfs_arc_segregate_ddt is
 433          * true.
 434          */
 435         refcount_t arcs_size;
 436 } arc_state_t;
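     /*
      * For illustration (assuming the refcount_count() accessor from
      * sys/refcount.h, and a hypothetical arc_state_t *state pointer):
      * the evictable metadata in a state could be read as
      *
      *      uint64_t esize =
      *          refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
      *
      * while refcount_count(&state->arcs_size) gives the state's total size.
      */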
 437 
 438 /*
 439  * We loop through these in l2arc_write_buffers() starting from
 440  * PRIORITY_MFU_DDT until we reach PRIORITY_NUMTYPES or the buffer that we
 441  * will be writing to the L2ARC device becomes full (see the sketch below).
 442  */
 443 enum l2arc_priorities {
 444         PRIORITY_MFU_DDT,
 445         PRIORITY_MRU_DDT,
 446         PRIORITY_MFU_META,
 447         PRIORITY_MRU_META,
 448         PRIORITY_MFU_DATA,
 449         PRIORITY_MRU_DATA,
 450         PRIORITY_NUMTYPES,
 451 };
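     /*
      * A hedged sketch of the priority walk referenced above (the real loop
      * is in l2arc_write_buffers()):
      *
      *      for (int pri = PRIORITY_MFU_DDT; pri < PRIORITY_NUMTYPES; pri++) {
      *              ... feed buffers from the list for this priority ...
      *              if (the L2ARC write buffer is full)
      *                      break;
      *      }
      */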
 452 
 453 /* The 6 states: */
 454 static arc_state_t ARC_anon;
 455 static arc_state_t ARC_mru;
 456 static arc_state_t ARC_mru_ghost;
 457 static arc_state_t ARC_mfu;
 458 static arc_state_t ARC_mfu_ghost;
 459 static arc_state_t ARC_l2c_only;
 460 
 461 typedef struct arc_stats {
 462         kstat_named_t arcstat_hits;
 463         kstat_named_t arcstat_ddt_hits;
 464         kstat_named_t arcstat_misses;
 465         kstat_named_t arcstat_demand_data_hits;
 466         kstat_named_t arcstat_demand_data_misses;
 467         kstat_named_t arcstat_demand_metadata_hits;
 468         kstat_named_t arcstat_demand_metadata_misses;
 469         kstat_named_t arcstat_demand_ddt_hits;
 470         kstat_named_t arcstat_demand_ddt_misses;
 471         kstat_named_t arcstat_prefetch_data_hits;
 472         kstat_named_t arcstat_prefetch_data_misses;
 473         kstat_named_t arcstat_prefetch_metadata_hits;
 474         kstat_named_t arcstat_prefetch_metadata_misses;
 475         kstat_named_t arcstat_prefetch_ddt_hits;
 476         kstat_named_t arcstat_prefetch_ddt_misses;
 477         kstat_named_t arcstat_mru_hits;
 478         kstat_named_t arcstat_mru_ghost_hits;
 479         kstat_named_t arcstat_mfu_hits;
 480         kstat_named_t arcstat_mfu_ghost_hits;
 481         kstat_named_t arcstat_deleted;
 482         /*
 483          * Number of buffers that could not be evicted because the hash lock
 484          * was held by another thread.  The lock may not necessarily be held
 485          * by something using the same buffer, since hash locks are shared
 486          * by multiple buffers.
 487          */
 488         kstat_named_t arcstat_mutex_miss;
 489         /*
 490          * Number of buffers skipped when updating the access state due to the
 491          * header having already been released after acquiring the hash lock.
 492          */
 493         kstat_named_t arcstat_access_skip;
 494         /*
 495          * Number of buffers skipped because they have I/O in progress, are
 496          * indirect prefetch buffers that have not lived long enough, or are
 497          * not from the spa we're trying to evict from.
 498          */
 499         kstat_named_t arcstat_evict_skip;
 500         /*
 501          * Number of times arc_evict_state() was unable to evict enough
 502  * buffers to reach its target amount.
 503          */
 504         kstat_named_t arcstat_evict_not_enough;
 505         kstat_named_t arcstat_evict_l2_cached;
 506         kstat_named_t arcstat_evict_l2_eligible;
 507         kstat_named_t arcstat_evict_l2_ineligible;
 508         kstat_named_t arcstat_evict_l2_skip;
 509         kstat_named_t arcstat_hash_elements;
 510         kstat_named_t arcstat_hash_elements_max;
 511         kstat_named_t arcstat_hash_collisions;
 512         kstat_named_t arcstat_hash_chains;
 513         kstat_named_t arcstat_hash_chain_max;
 514         kstat_named_t arcstat_p;
 515         kstat_named_t arcstat_c;
 516         kstat_named_t arcstat_c_min;
 517         kstat_named_t arcstat_c_max;
 518         kstat_named_t arcstat_size;
 519         /*
 520          * Number of compressed bytes stored in the arc_buf_hdr_t's b_pabd.
 521          * Note that the compressed bytes may match the uncompressed bytes
 522          * if the block is either not compressed or compressed arc is disabled.
 523          */
 524         kstat_named_t arcstat_compressed_size;
 525         /*
 526          * Uncompressed size of the data stored in b_pabd. If compressed
 527          * arc is disabled then this value will be identical to the stat
 528          * above.
 529          */
 530         kstat_named_t arcstat_uncompressed_size;
 531         /*
 532          * Number of bytes stored in all the arc_buf_t's. This is classified
 533          * as "overhead" since this data is typically short-lived and will
 534          * be evicted from the arc when it becomes unreferenced unless the
 535          * zfs_keep_uncompressed_metadata or zfs_keep_uncompressed_level
 536          * values have been set (see comment in dbuf.c for more information).
 537          */
 538         kstat_named_t arcstat_overhead_size;
 539         /*
 540          * Number of bytes consumed by internal ARC structures necessary
 541          * for tracking purposes; these structures are not actually
 542          * backed by ARC buffers. This includes arc_buf_hdr_t structures
 543          * (allocated via arc_buf_hdr_t_full and arc_buf_hdr_t_l2only
 544          * caches), and arc_buf_t structures (allocated via arc_buf_t
 545          * cache).
 546          */
 547         kstat_named_t arcstat_hdr_size;
 548         /*
 549          * Number of bytes consumed by ARC buffers of type equal to
 550          * ARC_BUFC_DATA. This is generally consumed by buffers backing
 551          * on disk user data (e.g. plain file contents).
 552          */
 553         kstat_named_t arcstat_data_size;
 554         /*
 555          * Number of bytes consumed by ARC buffers of type equal to
 556          * ARC_BUFC_METADATA. This is generally consumed by buffers
 557          * backing on disk data that is used for internal ZFS
 558          * structures (e.g. ZAP, dnode, indirect blocks, etc).
 559          */
 560         kstat_named_t arcstat_metadata_size;
 561         /*
 562          * Number of bytes consumed by ARC buffers of type equal to
 563          * ARC_BUFC_DDT. This is consumed by buffers backing on disk data
 564          * that is used to store DDT (ZAP, ddt stats).
 565          * Only used if zfs_arc_segregate_ddt is true.
 566          */
 567         kstat_named_t arcstat_ddt_size;
 568         /*
 569          * Number of bytes consumed by various buffers and structures
 570          * not actually backed with ARC buffers. This includes bonus
 571          * buffers (allocated directly via zio_buf_* functions),
 572          * dmu_buf_impl_t structures (allocated via dmu_buf_impl_t
 573          * cache), and dnode_t structures (allocated via dnode_t cache).
 574          */
 575         kstat_named_t arcstat_other_size;
 576         /*
 577          * Total number of bytes consumed by ARC buffers residing in the
 578          * arc_anon state. This includes *all* buffers in the arc_anon
 579          * state; e.g. data, metadata, evictable, and unevictable buffers
 580          * are all included in this value.
 581          */
 582         kstat_named_t arcstat_anon_size;
 583         /*
 584          * Number of bytes consumed by ARC buffers that meet the
 585          * following criteria: backing buffers of type ARC_BUFC_DATA,
 586          * residing in the arc_anon state, and are eligible for eviction
 587          * (e.g. have no outstanding holds on the buffer).
 588          */
 589         kstat_named_t arcstat_anon_evictable_data;
 590         /*
 591          * Number of bytes consumed by ARC buffers that meet the
 592          * following criteria: backing buffers of type ARC_BUFC_METADATA,
 593          * residing in the arc_anon state, and are eligible for eviction
 594          * (e.g. have no outstanding holds on the buffer).
 595          */
 596         kstat_named_t arcstat_anon_evictable_metadata;
 597         /*
 598          * Number of bytes consumed by ARC buffers that meet the
 599          * following criteria: backing buffers of type ARC_BUFC_DDT,
 600          * residing in the arc_anon state, and are eligible for eviction
 601          * Only used if zfs_arc_segregate_ddt is true.
 602          */
 603         kstat_named_t arcstat_anon_evictable_ddt;
 604         /*
 605          * Total number of bytes consumed by ARC buffers residing in the
 606          * arc_mru state. This includes *all* buffers in the arc_mru
 607          * state; e.g. data, metadata, evictable, and unevictable buffers
 608          * are all included in this value.
 609          */
 610         kstat_named_t arcstat_mru_size;
 611         /*
 612          * Number of bytes consumed by ARC buffers that meet the
 613          * following criteria: backing buffers of type ARC_BUFC_DATA,
 614          * residing in the arc_mru state, and are eligible for eviction
 615          * (e.g. have no outstanding holds on the buffer).
 616          */
 617         kstat_named_t arcstat_mru_evictable_data;
 618         /*
 619          * Number of bytes consumed by ARC buffers that meet the
 620          * following criteria: backing buffers of type ARC_BUFC_METADATA,
 621          * residing in the arc_mru state, and are eligible for eviction
 622          * (e.g. have no outstanding holds on the buffer).
 623          */
 624         kstat_named_t arcstat_mru_evictable_metadata;
 625         /*
 626          * Number of bytes consumed by ARC buffers that meet the
 627          * following criteria: backing buffers of type ARC_BUFC_DDT,
 628          * residing in the arc_mru state, and are eligible for eviction
 629          * (e.g. have no outstanding holds on the buffer).
 630          * Only used if zfs_arc_segregate_ddt is true.
 631          */
 632         kstat_named_t arcstat_mru_evictable_ddt;
 633         /*
 634          * Total number of bytes that *would have been* consumed by ARC
 635          * buffers in the arc_mru_ghost state. The key thing to note
 636  * here is that this size doesn't actually indicate
 637          * RAM consumption. The ghost lists only consist of headers and
 638          * don't actually have ARC buffers linked off of these headers.
 639          * Thus, *if* the headers had associated ARC buffers, these
 640          * buffers *would have* consumed this number of bytes.
 641          */
 642         kstat_named_t arcstat_mru_ghost_size;
 643         /*
 644          * Number of bytes that *would have been* consumed by ARC
 645          * buffers that are eligible for eviction, of type
 646          * ARC_BUFC_DATA, and linked off the arc_mru_ghost state.
 647          */
 648         kstat_named_t arcstat_mru_ghost_evictable_data;
 649         /*
 650          * Number of bytes that *would have been* consumed by ARC
 651          * buffers that are eligible for eviction, of type
 652          * ARC_BUFC_METADATA, and linked off the arc_mru_ghost state.
 653          */
 654         kstat_named_t arcstat_mru_ghost_evictable_metadata;
 655         /*
 656          * Number of bytes that *would have been* consumed by ARC
 657          * buffers that are eligible for eviction, of type
 658          * ARC_BUFC_DDT, and linked off the arc_mru_ghost state.
 659          * Only used if zfs_arc_segregate_ddt is true.
 660          */
 661         kstat_named_t arcstat_mru_ghost_evictable_ddt;
 662         /*
 663          * Total number of bytes consumed by ARC buffers residing in the
 664          * arc_mfu state. This includes *all* buffers in the arc_mfu
 665          * state; e.g. data, metadata, evictable, and unevictable buffers
 666          * are all included in this value.
 667          */
 668         kstat_named_t arcstat_mfu_size;
 669         /*
 670          * Number of bytes consumed by ARC buffers that are eligible for
 671          * eviction, of type ARC_BUFC_DATA, and reside in the arc_mfu
 672          * state.
 673          */
 674         kstat_named_t arcstat_mfu_evictable_data;
 675         /*
 676          * Number of bytes consumed by ARC buffers that are eligible for
 677          * eviction, of type ARC_BUFC_METADATA, and reside in the
 678          * arc_mfu state.
 679          */
 680         kstat_named_t arcstat_mfu_evictable_metadata;
 681         /*
 682          * Number of bytes consumed by ARC buffers that are eligible for
 683          * eviction, of type ARC_BUFC_DDT, and reside in the
 684          * arc_mfu state.
 685          * Only used if zfs_arc_segregate_ddt is true.
 686          */
 687         kstat_named_t arcstat_mfu_evictable_ddt;
 688         /*
 689          * Total number of bytes that *would have been* consumed by ARC
 690          * buffers in the arc_mfu_ghost state. See the comment above
 691          * arcstat_mru_ghost_size for more details.
 692          */
 693         kstat_named_t arcstat_mfu_ghost_size;
 694         /*
 695          * Number of bytes that *would have been* consumed by ARC
 696          * buffers that are eligible for eviction, of type
 697          * ARC_BUFC_DATA, and linked off the arc_mfu_ghost state.
 698          */
 699         kstat_named_t arcstat_mfu_ghost_evictable_data;
 700         /*
 701          * Number of bytes that *would have been* consumed by ARC
 702          * buffers that are eligible for eviction, of type
 703  * ARC_BUFC_METADATA, and linked off the arc_mfu_ghost state.
 704          */
 705         kstat_named_t arcstat_mfu_ghost_evictable_metadata;
 706         /*
 707          * Number of bytes that *would have been* consumed by ARC
 708          * buffers that are eligible for eviction, of type
 709  * ARC_BUFC_DDT, and linked off the arc_mfu_ghost state.
 710          * Only used if zfs_arc_segregate_ddt is true.
 711          */
 712         kstat_named_t arcstat_mfu_ghost_evictable_ddt;
 713         kstat_named_t arcstat_l2_hits;
 714         kstat_named_t arcstat_l2_ddt_hits;
 715         kstat_named_t arcstat_l2_misses;
 716         kstat_named_t arcstat_l2_feeds;
 717         kstat_named_t arcstat_l2_rw_clash;
 718         kstat_named_t arcstat_l2_read_bytes;
 719         kstat_named_t arcstat_l2_ddt_read_bytes;
 720         kstat_named_t arcstat_l2_write_bytes;
 721         kstat_named_t arcstat_l2_ddt_write_bytes;
 722         kstat_named_t arcstat_l2_writes_sent;
 723         kstat_named_t arcstat_l2_writes_done;
 724         kstat_named_t arcstat_l2_writes_error;
 725         kstat_named_t arcstat_l2_writes_lock_retry;
 726         kstat_named_t arcstat_l2_evict_lock_retry;
 727         kstat_named_t arcstat_l2_evict_reading;
 728         kstat_named_t arcstat_l2_evict_l1cached;
 729         kstat_named_t arcstat_l2_free_on_write;
 730         kstat_named_t arcstat_l2_abort_lowmem;
 731         kstat_named_t arcstat_l2_cksum_bad;
 732         kstat_named_t arcstat_l2_io_error;
 733         kstat_named_t arcstat_l2_lsize;
 734         kstat_named_t arcstat_l2_psize;
 735         kstat_named_t arcstat_l2_hdr_size;
 736         kstat_named_t arcstat_l2_log_blk_writes;
 737         kstat_named_t arcstat_l2_log_blk_avg_size;
 738         kstat_named_t arcstat_l2_data_to_meta_ratio;
 739         kstat_named_t arcstat_l2_rebuild_successes;
 740         kstat_named_t arcstat_l2_rebuild_abort_unsupported;
 741         kstat_named_t arcstat_l2_rebuild_abort_io_errors;
 742         kstat_named_t arcstat_l2_rebuild_abort_cksum_errors;
 743         kstat_named_t arcstat_l2_rebuild_abort_loop_errors;
 744         kstat_named_t arcstat_l2_rebuild_abort_lowmem;
 745         kstat_named_t arcstat_l2_rebuild_size;
 746         kstat_named_t arcstat_l2_rebuild_bufs;
 747         kstat_named_t arcstat_l2_rebuild_bufs_precached;
 748         kstat_named_t arcstat_l2_rebuild_psize;
 749         kstat_named_t arcstat_l2_rebuild_log_blks;
 750         kstat_named_t arcstat_memory_throttle_count;
 751         kstat_named_t arcstat_meta_used;
 752         kstat_named_t arcstat_meta_limit;
 753         kstat_named_t arcstat_meta_max;
 754         kstat_named_t arcstat_meta_min;
 755         kstat_named_t arcstat_ddt_limit;
 756         kstat_named_t arcstat_sync_wait_for_async;
 757         kstat_named_t arcstat_demand_hit_predictive_prefetch;
 758 } arc_stats_t;
 759 
 760 static arc_stats_t arc_stats = {
 761         { "hits",                       KSTAT_DATA_UINT64 },
 762         { "ddt_hits",                   KSTAT_DATA_UINT64 },
 763         { "misses",                     KSTAT_DATA_UINT64 },
 764         { "demand_data_hits",           KSTAT_DATA_UINT64 },
 765         { "demand_data_misses",         KSTAT_DATA_UINT64 },
 766         { "demand_metadata_hits",       KSTAT_DATA_UINT64 },
 767         { "demand_metadata_misses",     KSTAT_DATA_UINT64 },
 768         { "demand_ddt_hits",            KSTAT_DATA_UINT64 },
 769         { "demand_ddt_misses",          KSTAT_DATA_UINT64 },
 770         { "prefetch_data_hits",         KSTAT_DATA_UINT64 },
 771         { "prefetch_data_misses",       KSTAT_DATA_UINT64 },
 772         { "prefetch_metadata_hits",     KSTAT_DATA_UINT64 },
 773         { "prefetch_metadata_misses",   KSTAT_DATA_UINT64 },
 774         { "prefetch_ddt_hits",          KSTAT_DATA_UINT64 },
 775         { "prefetch_ddt_misses",        KSTAT_DATA_UINT64 },
 776         { "mru_hits",                   KSTAT_DATA_UINT64 },
 777         { "mru_ghost_hits",             KSTAT_DATA_UINT64 },
 778         { "mfu_hits",                   KSTAT_DATA_UINT64 },
 779         { "mfu_ghost_hits",             KSTAT_DATA_UINT64 },
 780         { "deleted",                    KSTAT_DATA_UINT64 },
 781         { "mutex_miss",                 KSTAT_DATA_UINT64 },
 782         { "access_skip",                KSTAT_DATA_UINT64 },
 783         { "evict_skip",                 KSTAT_DATA_UINT64 },
 784         { "evict_not_enough",           KSTAT_DATA_UINT64 },
 785         { "evict_l2_cached",            KSTAT_DATA_UINT64 },
 786         { "evict_l2_eligible",          KSTAT_DATA_UINT64 },
 787         { "evict_l2_ineligible",        KSTAT_DATA_UINT64 },
 788         { "evict_l2_skip",              KSTAT_DATA_UINT64 },
 789         { "hash_elements",              KSTAT_DATA_UINT64 },
 790         { "hash_elements_max",          KSTAT_DATA_UINT64 },
 791         { "hash_collisions",            KSTAT_DATA_UINT64 },
 792         { "hash_chains",                KSTAT_DATA_UINT64 },
 793         { "hash_chain_max",             KSTAT_DATA_UINT64 },
 794         { "p",                          KSTAT_DATA_UINT64 },
 795         { "c",                          KSTAT_DATA_UINT64 },
 796         { "c_min",                      KSTAT_DATA_UINT64 },
 797         { "c_max",                      KSTAT_DATA_UINT64 },
 798         { "size",                       KSTAT_DATA_UINT64 },
 799         { "compressed_size",            KSTAT_DATA_UINT64 },
 800         { "uncompressed_size",          KSTAT_DATA_UINT64 },
 801         { "overhead_size",              KSTAT_DATA_UINT64 },
 802         { "hdr_size",                   KSTAT_DATA_UINT64 },
 803         { "data_size",                  KSTAT_DATA_UINT64 },
 804         { "metadata_size",              KSTAT_DATA_UINT64 },
 805         { "ddt_size",                   KSTAT_DATA_UINT64 },
 806         { "other_size",                 KSTAT_DATA_UINT64 },
 807         { "anon_size",                  KSTAT_DATA_UINT64 },
 808         { "anon_evictable_data",        KSTAT_DATA_UINT64 },
 809         { "anon_evictable_metadata",    KSTAT_DATA_UINT64 },
 810         { "anon_evictable_ddt",         KSTAT_DATA_UINT64 },
 811         { "mru_size",                   KSTAT_DATA_UINT64 },
 812         { "mru_evictable_data",         KSTAT_DATA_UINT64 },
 813         { "mru_evictable_metadata",     KSTAT_DATA_UINT64 },
 814         { "mru_evictable_ddt",          KSTAT_DATA_UINT64 },
 815         { "mru_ghost_size",             KSTAT_DATA_UINT64 },
 816         { "mru_ghost_evictable_data",   KSTAT_DATA_UINT64 },
 817         { "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
 818         { "mru_ghost_evictable_ddt",    KSTAT_DATA_UINT64 },
 819         { "mfu_size",                   KSTAT_DATA_UINT64 },
 820         { "mfu_evictable_data",         KSTAT_DATA_UINT64 },
 821         { "mfu_evictable_metadata",     KSTAT_DATA_UINT64 },
 822         { "mfu_evictable_ddt",          KSTAT_DATA_UINT64 },
 823         { "mfu_ghost_size",             KSTAT_DATA_UINT64 },
 824         { "mfu_ghost_evictable_data",   KSTAT_DATA_UINT64 },
 825         { "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
 826         { "mfu_ghost_evictable_ddt",    KSTAT_DATA_UINT64 },
 827         { "l2_hits",                    KSTAT_DATA_UINT64 },
 828         { "l2_ddt_hits",                KSTAT_DATA_UINT64 },
 829         { "l2_misses",                  KSTAT_DATA_UINT64 },
 830         { "l2_feeds",                   KSTAT_DATA_UINT64 },
 831         { "l2_rw_clash",                KSTAT_DATA_UINT64 },
 832         { "l2_read_bytes",              KSTAT_DATA_UINT64 },
 833         { "l2_ddt_read_bytes",          KSTAT_DATA_UINT64 },
 834         { "l2_write_bytes",             KSTAT_DATA_UINT64 },
 835         { "l2_ddt_write_bytes",         KSTAT_DATA_UINT64 },
 836         { "l2_writes_sent",             KSTAT_DATA_UINT64 },
 837         { "l2_writes_done",             KSTAT_DATA_UINT64 },
 838         { "l2_writes_error",            KSTAT_DATA_UINT64 },
 839         { "l2_writes_lock_retry",       KSTAT_DATA_UINT64 },
 840         { "l2_evict_lock_retry",        KSTAT_DATA_UINT64 },
 841         { "l2_evict_reading",           KSTAT_DATA_UINT64 },
 842         { "l2_evict_l1cached",          KSTAT_DATA_UINT64 },
 843         { "l2_free_on_write",           KSTAT_DATA_UINT64 },
 844         { "l2_abort_lowmem",            KSTAT_DATA_UINT64 },
 845         { "l2_cksum_bad",               KSTAT_DATA_UINT64 },
 846         { "l2_io_error",                KSTAT_DATA_UINT64 },
 847         { "l2_size",                    KSTAT_DATA_UINT64 },
 848         { "l2_asize",                   KSTAT_DATA_UINT64 },
 849         { "l2_hdr_size",                KSTAT_DATA_UINT64 },
 850         { "l2_log_blk_writes",          KSTAT_DATA_UINT64 },
 851         { "l2_log_blk_avg_size",        KSTAT_DATA_UINT64 },
 852         { "l2_data_to_meta_ratio",      KSTAT_DATA_UINT64 },
 853         { "l2_rebuild_successes",       KSTAT_DATA_UINT64 },
 854         { "l2_rebuild_unsupported",     KSTAT_DATA_UINT64 },
 855         { "l2_rebuild_io_errors",       KSTAT_DATA_UINT64 },
 856         { "l2_rebuild_cksum_errors",    KSTAT_DATA_UINT64 },
 857         { "l2_rebuild_loop_errors",     KSTAT_DATA_UINT64 },
 858         { "l2_rebuild_lowmem",          KSTAT_DATA_UINT64 },
 859         { "l2_rebuild_size",            KSTAT_DATA_UINT64 },
 860         { "l2_rebuild_bufs",            KSTAT_DATA_UINT64 },
 861         { "l2_rebuild_bufs_precached",  KSTAT_DATA_UINT64 },
 862         { "l2_rebuild_psize",           KSTAT_DATA_UINT64 },
 863         { "l2_rebuild_log_blks",        KSTAT_DATA_UINT64 },
 864         { "memory_throttle_count",      KSTAT_DATA_UINT64 },
 865         { "arc_meta_used",              KSTAT_DATA_UINT64 },
 866         { "arc_meta_limit",             KSTAT_DATA_UINT64 },
 867         { "arc_meta_max",               KSTAT_DATA_UINT64 },
 868         { "arc_meta_min",               KSTAT_DATA_UINT64 },
 869         { "arc_ddt_limit",              KSTAT_DATA_UINT64 },
 870         { "sync_wait_for_async",        KSTAT_DATA_UINT64 },
 871         { "demand_hit_predictive_prefetch", KSTAT_DATA_UINT64 },
 872 };
 873 
 874 #define ARCSTAT(stat)   (arc_stats.stat.value.ui64)
 875 
 876 #define ARCSTAT_INCR(stat, val) \
 877         atomic_add_64(&arc_stats.stat.value.ui64, (val))
 878 
 879 #define ARCSTAT_BUMP(stat)      ARCSTAT_INCR(stat, 1)
 880 #define ARCSTAT_BUMPDOWN(stat)  ARCSTAT_INCR(stat, -1)
 881 
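     /*
      * ARCSTAT_MAX() raises a maximum-value kstat without taking a lock: it
      * rereads the current value and retries the compare-and-swap until either
      * the stored maximum is already >= val or the swap succeeds.
      */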
 882 #define ARCSTAT_MAX(stat, val) {                                        \
 883         uint64_t m;                                                     \
 884         while ((val) > (m = arc_stats.stat.value.ui64) &&            \
 885             (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))     \
 886                 continue;                                               \
 887 }
 888 
 889 #define ARCSTAT_MAXSTAT(stat) \
 890         ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
 891 
 892 /*
 893  * We define a macro to allow ARC hits/misses to be easily broken down by
 894  * two separate conditions, giving a total of four different subtypes for
 895  * each of hits and misses (so eight statistics total).
 896  */
 897 #define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
 898         if (cond1) {                                                    \
 899                 if (cond2) {                                            \
 900                         ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
 901                 } else {                                                \
 902                         ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
 903                 }                                                       \
 904         } else {                                                        \
 905                 if (cond2) {                                            \
 906                         ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
 907                 } else {                                                \
 908                         ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
 909                 }                                                       \
 910         }
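     /*
      * Illustrative use (the condition names are hypothetical): with cond1
      * being "is this a demand read" and cond2 "is this metadata",
      *
      *      ARCSTAT_CONDSTAT(is_demand, demand, prefetch,
      *          is_metadata, metadata, data, hits);
      *
      * bumps arcstat_demand_metadata_hits when both conditions hold,
      * arcstat_prefetch_data_hits when neither does, and so on.
      */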
 911 
 912 /*
 913  * This macro allows us to use kstats as floating averages. Each time we
 914  * update this kstat, we first factor it and the update value by
 915  * ARCSTAT_F_AVG_FACTOR to shrink the new value's contribution to the overall
 916  * average. This macro assumes that integer loads and stores are atomic, but
 917  * is not safe for multiple writers updating the kstat in parallel (only the
 918  * last writer's update will remain).
 919  */
 920 #define ARCSTAT_F_AVG_FACTOR    3
 921 #define ARCSTAT_F_AVG(stat, value) \
 922         do { \
 923                 uint64_t x = ARCSTAT(stat); \
 924                 x = x - x / ARCSTAT_F_AVG_FACTOR + \
 925                     (value) / ARCSTAT_F_AVG_FACTOR; \
 926                 ARCSTAT(stat) = x; \
 927                 _NOTE(CONSTCOND) \
 928         } while (0)
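     /*
      * Worked example: with ARCSTAT_F_AVG_FACTOR of 3, updating a stat whose
      * current value is 900 with a new sample of 300 yields
      * 900 - 900/3 + 300/3 = 700, i.e. the new sample contributes one third.
      */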
 929 
 930 kstat_t                 *arc_ksp;
 931 static arc_state_t      *arc_anon;
 932 static arc_state_t      *arc_mru;
 933 static arc_state_t      *arc_mru_ghost;
 934 static arc_state_t      *arc_mfu;
 935 static arc_state_t      *arc_mfu_ghost;
 936 static arc_state_t      *arc_l2c_only;
 937 
 938 /*
 939  * There are several ARC variables that are critical to export as kstats --
 940  * but we don't want to have to grovel around in the kstat whenever we wish to
 941  * manipulate them.  For these variables, we therefore define them to be in
 942  * terms of the statistic variable.  This assures that we are not introducing
 943  * the possibility of inconsistency by having shadow copies of the variables,
 944  * while still allowing the code to be readable.
 945  */
 946 #define arc_size        ARCSTAT(arcstat_size)   /* actual total arc size */
 947 #define arc_p           ARCSTAT(arcstat_p)      /* target size of MRU */
 948 #define arc_c           ARCSTAT(arcstat_c)      /* target size of cache */
 949 #define arc_c_min       ARCSTAT(arcstat_c_min)  /* min target cache size */
 950 #define arc_c_max       ARCSTAT(arcstat_c_max)  /* max target cache size */
 951 #define arc_meta_limit  ARCSTAT(arcstat_meta_limit) /* max size for metadata */
 952 #define arc_meta_min    ARCSTAT(arcstat_meta_min) /* min size for metadata */
 953 #define arc_meta_used   ARCSTAT(arcstat_meta_used) /* size of metadata */
 954 #define arc_meta_max    ARCSTAT(arcstat_meta_max) /* max size of metadata */
 955 #define arc_ddt_size    ARCSTAT(arcstat_ddt_size) /* ddt size in arc */
 956 #define arc_ddt_limit   ARCSTAT(arcstat_ddt_limit) /* ddt in arc size limit */
 957 
 958 /*
 959  * Used in zio.c to optionally keep the DDT cached in the ARC.
 960  */
 961 uint64_t const *arc_ddt_evict_threshold;
 962 
 963 /* compressed size of entire arc */
 964 #define arc_compressed_size     ARCSTAT(arcstat_compressed_size)
 965 /* uncompressed size of entire arc */
 966 #define arc_uncompressed_size   ARCSTAT(arcstat_uncompressed_size)
 967 /* number of bytes in the arc from arc_buf_t's */
 968 #define arc_overhead_size       ARCSTAT(arcstat_overhead_size)
 969 
 970 
 971 static int              arc_no_grow;    /* Don't try to grow cache size */
 972 static uint64_t         arc_tempreserve;
 973 static uint64_t         arc_loaned_bytes;
 974 
 975 typedef struct arc_callback arc_callback_t;
 976 
 977 struct arc_callback {
 978         void                    *acb_private;
 979         arc_done_func_t         *acb_done;
 980         arc_buf_t               *acb_buf;
 981         boolean_t               acb_compressed;
 982         zio_t                   *acb_zio_dummy;
 983         arc_callback_t          *acb_next;
 984 };
 985 
 986 typedef struct arc_write_callback arc_write_callback_t;
 987 
 988 struct arc_write_callback {
 989         void            *awcb_private;
 990         arc_done_func_t *awcb_ready;
 991         arc_done_func_t *awcb_children_ready;
 992         arc_done_func_t *awcb_physdone;
 993         arc_done_func_t *awcb_done;
 994         arc_buf_t       *awcb_buf;
 995 };
 996 
 997 /*
 998  * ARC buffers are separated into multiple structs as a memory saving measure:
 999  *   - Common fields struct, always defined, and embedded within it:
1000  *       - L2-only fields, always allocated but undefined when not in L2ARC
1001  *       - L1-only fields, only allocated when in L1ARC
1002  *
1003  *           Buffer in L1                     Buffer only in L2
1004  *    +------------------------+          +------------------------+
1005  *    | arc_buf_hdr_t          |          | arc_buf_hdr_t          |
1006  *    |                        |          |                        |
1007  *    |                        |          |                        |
1008  *    |                        |          |                        |
1009  *    +------------------------+          +------------------------+
1010  *    | l2arc_buf_hdr_t        |          | l2arc_buf_hdr_t        |
1011  *    | (undefined if L1-only) |          |                        |
1012  *    +------------------------+          +------------------------+
1013  *    | l1arc_buf_hdr_t        |
1014  *    |                        |
1015  *    |                        |
1016  *    |                        |
1017  *    |                        |
1018  *    +------------------------+
1019  *
1020  * Because it's possible for the L2ARC to become extremely large, we can wind
1021  * up eating a lot of memory in L2ARC buffer headers, so the size of a header
1022  * is minimized by only allocating the fields necessary for an L1-cached buffer
1023  * when a header is actually in the L1 cache. The sub-headers (l1arc_buf_hdr and
1024  * l2arc_buf_hdr) are embedded rather than allocated separately to save a couple
1025  * of pointer words.  arc_hdr_realloc() is used to switch a header between
1026  * these two allocation states.
1027  */
1028 typedef struct l1arc_buf_hdr {
1029         kmutex_t                b_freeze_lock;
1030 #ifdef ZFS_DEBUG
1031         /*
1032          * Used for debugging with kmem_flags - by allocating and freeing
1033          * b_thawed when the buffer is thawed, we get a record of the stack
1034          * trace that thawed it.
1035          */
1036         void                    *b_thawed;
1037 #endif
1038 
1039         /* number of krrp tasks using this buffer */
1040         uint64_t                b_krrp;
1041 
1042         arc_buf_t               *b_buf;
1043         uint32_t                b_bufcnt;
1044         /* for waiting on writes to complete */
1045         kcondvar_t              b_cv;
1046         uint8_t                 b_byteswap;
1047 
1048         /* protected by arc state mutex */
1049         arc_state_t             *b_state;
1050         multilist_node_t        b_arc_node;
1051 
1052         /* updated atomically */
1053         clock_t                 b_arc_access;
1054 
1055         /* self protecting */
1056         refcount_t              b_refcnt;
1057 
1058         arc_callback_t          *b_acb;
1059         abd_t                   *b_pabd;
1060 } l1arc_buf_hdr_t;
1061 
1062 typedef struct l2arc_dev l2arc_dev_t;
1063 
1064 typedef struct l2arc_buf_hdr {
1065         /* protected by arc_buf_hdr mutex */
1066         l2arc_dev_t             *b_dev;         /* L2ARC device */
1067         uint64_t                b_daddr;        /* disk address, offset byte */
1068 
1069         list_node_t             b_l2node;
1070 } l2arc_buf_hdr_t;
1071 
1072 struct arc_buf_hdr {
1073         /* protected by hash lock */
1074         dva_t                   b_dva;
1075         uint64_t                b_birth;
1076 
1077         /*
1078          * Even though this checksum is only set/verified when a buffer is in
1079          * the L1 cache, it needs to be in the set of common fields because it
1080          * must be preserved from the time before a buffer is written out to
1081          * L2ARC until after it is read back in.
1082          */
1083         zio_cksum_t             *b_freeze_cksum;
1084 
1085         arc_buf_contents_t      b_type;
1086         arc_buf_hdr_t           *b_hash_next;
1087         arc_flags_t             b_flags;
1088 
1089         /*
1090          * This field stores the size of the data buffer after
1091          * compression, and is set in the arc's zio completion handlers.
1092          * It is in units of SPA_MINBLOCKSIZE (e.g. 1 == 512 bytes).
1093          *
1094          * While the block pointers can store up to 32MB in their psize
1095          * field, we can only store up to 32MB minus 512B. This is due
1096          * to the bp using a bias of 1, whereas we use a bias of 0 (i.e.
1097          * a field of zeros represents 512B in the bp). We can't use a
1098          * bias of 1 since we need to reserve a psize of zero, here, to
1099          * represent holes and embedded blocks.
1100          *
1101          * This isn't a problem in practice, since the maximum size of a
1102          * buffer is limited to 16MB, so we never need to store 32MB in
1103          * this field. Even in the upstream illumos code base, the
1104          * maximum size of a buffer is limited to 16MB.
1105          */
1106         uint16_t                b_psize;
1107 
1108         /*
1109          * This field stores the size of the data buffer before
1110          * compression, and cannot change once set. It is in units
1111          * of SPA_MINBLOCKSIZE (e.g. 2 == 1024 bytes)
1112          */
1113         uint16_t                b_lsize;        /* immutable */
1114         uint64_t                b_spa;          /* immutable */
1115 
1116         /* L2ARC fields. Undefined when not in L2ARC. */
1117         l2arc_buf_hdr_t         b_l2hdr;
1118         /* L1ARC fields. Undefined when in l2arc_only state */
1119         l1arc_buf_hdr_t         b_l1hdr;
1120 };
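     /*
      * Note (illustrative): since b_psize and b_lsize are stored in units of
      * SPA_MINBLOCKSIZE, converting them to bytes is a shift, e.g.
      *
      *      uint64_t psize = (uint64_t)hdr->b_psize << SPA_MINBLOCKSHIFT;
      *
      * so a b_lsize of 2 denotes a 1024-byte logical block, matching the
      * example in the comment above.
      */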
1121 
1122 #define GHOST_STATE(state)      \
1123         ((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||        \
1124         (state) == arc_l2c_only)
1125 
1126 #define HDR_IN_HASH_TABLE(hdr)  ((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE)
1127 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
1128 #define HDR_IO_ERROR(hdr)       ((hdr)->b_flags & ARC_FLAG_IO_ERROR)
1129 #define HDR_PREFETCH(hdr)       ((hdr)->b_flags & ARC_FLAG_PREFETCH)
1130 #define HDR_COMPRESSION_ENABLED(hdr)    \
1131         ((hdr)->b_flags & ARC_FLAG_COMPRESSED_ARC)
1132 
1133 #define HDR_L2CACHE(hdr)        ((hdr)->b_flags & ARC_FLAG_L2CACHE)
1134 #define HDR_L2_READING(hdr)     \
1135         (((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) &&   \
1136         ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR))
1137 #define HDR_L2_WRITING(hdr)     ((hdr)->b_flags & ARC_FLAG_L2_WRITING)
1138 #define HDR_L2_EVICTED(hdr)     ((hdr)->b_flags & ARC_FLAG_L2_EVICTED)
1139 #define HDR_L2_WRITE_HEAD(hdr)  ((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD)
1140 #define HDR_SHARED_DATA(hdr)    ((hdr)->b_flags & ARC_FLAG_SHARED_DATA)
1141 
1142 #define HDR_ISTYPE_DDT(hdr)     \
	((hdr)->b_flags & ARC_FLAG_BUFC_DDT)
1144 #define HDR_ISTYPE_METADATA(hdr)        \
1145         ((hdr)->b_flags & ARC_FLAG_BUFC_METADATA)
1146 #define HDR_ISTYPE_DATA(hdr)    (!HDR_ISTYPE_METADATA(hdr) && \
1147         !HDR_ISTYPE_DDT(hdr))
1148 
1149 #define HDR_HAS_L1HDR(hdr)      ((hdr)->b_flags & ARC_FLAG_HAS_L1HDR)
1150 #define HDR_HAS_L2HDR(hdr)      ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)
1151 
1152 /* For storing compression mode in b_flags */
1153 #define HDR_COMPRESS_OFFSET     (highbit64(ARC_FLAG_COMPRESS_0) - 1)
1154 
1155 #define HDR_GET_COMPRESS(hdr)   ((enum zio_compress)BF32_GET((hdr)->b_flags, \
1156         HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS))
1157 #define HDR_SET_COMPRESS(hdr, cmp) BF32_SET((hdr)->b_flags, \
1158         HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS, (cmp));
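
/*
 * Illustrative usage (sketch only): with the hash lock held, a caller can
 * check the stored algorithm with, e.g.:
 *
 *	if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF)
 *		... the compressed form is cached ...
 *
 * HDR_SET_COMPRESS() should not be used directly; arc_hdr_set_compress()
 * below also keeps the ARC_FLAG_COMPRESSED_ARC flag consistent.
 */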
1159 
1160 #define ARC_BUF_LAST(buf)       ((buf)->b_next == NULL)
1161 #define ARC_BUF_SHARED(buf)     ((buf)->b_flags & ARC_BUF_FLAG_SHARED)
1162 #define ARC_BUF_COMPRESSED(buf) ((buf)->b_flags & ARC_BUF_FLAG_COMPRESSED)
1163 
1164 /*
1165  * Other sizes
1166  */
1167 
1168 #define HDR_FULL_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
1169 #define HDR_L2ONLY_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr))
1170 
1171 /*
1172  * Hash table routines
1173  */
1174 
1175 struct ht_table {
1176         arc_buf_hdr_t   *hdr;
1177         kmutex_t        lock;
1178 };
1179 
1180 typedef struct buf_hash_table {
1181         uint64_t ht_mask;
1182         struct ht_table *ht_table;
1183 } buf_hash_table_t;
1184 
1185 #pragma align 64(buf_hash_table)
1186 static buf_hash_table_t buf_hash_table;
1187 
1188 #define BUF_HASH_INDEX(spa, dva, birth) \
1189         (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
1190 #define BUF_HASH_LOCK(idx) (&buf_hash_table.ht_table[idx].lock)
1191 #define HDR_LOCK(hdr) \
1192         (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
1193 
1194 uint64_t zfs_crc64_table[256];
1195 
1196 /*
1197  * Level 2 ARC
1198  */
1199 
1200 #define L2ARC_WRITE_SIZE        (8 * 1024 * 1024)       /* initial write max */
1201 #define L2ARC_HEADROOM          2                       /* num of writes */
1202 /*
1203  * If we discover during ARC scan any buffers to be compressed, we boost
1204  * our headroom for the next scanning cycle by this percentage multiple.
1205  */
1206 #define L2ARC_HEADROOM_BOOST    200
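/*
 * The boost is a percentage: with the default of 200, the effective headroom
 * for a boosted cycle is roughly headroom * 200 / 100, i.e. twice the normal
 * value (a sketch; the exact computation lives in the L2ARC feed/write path).
 */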
1207 #define L2ARC_FEED_SECS         1               /* caching interval secs */
1208 #define L2ARC_FEED_MIN_MS       200             /* min caching interval ms */
1209 
1210 #define l2arc_writes_sent       ARCSTAT(arcstat_l2_writes_sent)
1211 #define l2arc_writes_done       ARCSTAT(arcstat_l2_writes_done)
1212 
1213 /* L2ARC Performance Tunables */
1214 uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;    /* default max write size */
1215 uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;  /* extra write during warmup */
1216 uint64_t l2arc_headroom = L2ARC_HEADROOM;       /* number of dev writes */
1217 uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
1218 uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;     /* interval seconds */
1219 uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */
1220 boolean_t l2arc_noprefetch = B_TRUE;            /* don't cache prefetch bufs */
1221 boolean_t l2arc_feed_again = B_TRUE;            /* turbo warmup */
1222 boolean_t l2arc_norw = B_TRUE;                  /* no reads during writes */
1223 
1224 static list_t L2ARC_dev_list;                   /* device list */
1225 static list_t *l2arc_dev_list;                  /* device list pointer */
1226 static kmutex_t l2arc_dev_mtx;                  /* device list mutex */
1227 static l2arc_dev_t *l2arc_dev_last;             /* last device used */
1228 static l2arc_dev_t *l2arc_ddt_dev_last;         /* last DDT device used */
1229 static list_t L2ARC_free_on_write;              /* free after write buf list */
1230 static list_t *l2arc_free_on_write;             /* free after write list ptr */
1231 static kmutex_t l2arc_free_on_write_mtx;        /* mutex for list */
1232 static uint64_t l2arc_ndev;                     /* number of devices */
1233 
1234 typedef struct l2arc_read_callback {
1235         arc_buf_hdr_t           *l2rcb_hdr;             /* read header */
1236         blkptr_t                l2rcb_bp;               /* original blkptr */
1237         zbookmark_phys_t        l2rcb_zb;               /* original bookmark */
1238         int                     l2rcb_flags;            /* original flags */
1239         abd_t                   *l2rcb_abd;             /* temporary buffer */
1240 } l2arc_read_callback_t;
1241 
1242 typedef struct l2arc_write_callback {
1243         l2arc_dev_t     *l2wcb_dev;             /* device info */
1244         arc_buf_hdr_t   *l2wcb_head;            /* head of write buflist */
1245         list_t          l2wcb_log_blk_buflist;  /* in-flight log blocks */
1246 } l2arc_write_callback_t;
1247 
1248 typedef struct l2arc_data_free {
1249         /* protected by l2arc_free_on_write_mtx */
1250         abd_t           *l2df_abd;
1251         size_t          l2df_size;
1252         arc_buf_contents_t l2df_type;
1253         list_node_t     l2df_list_node;
1254 } l2arc_data_free_t;
1255 
1256 static kmutex_t l2arc_feed_thr_lock;
1257 static kcondvar_t l2arc_feed_thr_cv;
1258 static uint8_t l2arc_thread_exit;
1259 
1260 static abd_t *arc_get_data_abd(arc_buf_hdr_t *, uint64_t, void *);
1261 static void *arc_get_data_buf(arc_buf_hdr_t *, uint64_t, void *);
1262 static void arc_get_data_impl(arc_buf_hdr_t *, uint64_t, void *);
1263 static void arc_free_data_abd(arc_buf_hdr_t *, abd_t *, uint64_t, void *);
1264 static void arc_free_data_buf(arc_buf_hdr_t *, void *, uint64_t, void *);
1265 static void arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag);
1266 static void arc_hdr_free_pabd(arc_buf_hdr_t *);
1267 static void arc_hdr_alloc_pabd(arc_buf_hdr_t *);
1268 static void arc_access(arc_buf_hdr_t *, kmutex_t *);
static boolean_t arc_is_overflowing(void);
1270 static void arc_buf_watch(arc_buf_t *);
1271 static l2arc_dev_t *l2arc_vdev_get(vdev_t *vd);
1272 
1273 static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *);
1274 static uint32_t arc_bufc_to_flags(arc_buf_contents_t);
1275 static arc_buf_contents_t arc_flags_to_bufc(uint32_t);
1276 static inline void arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
1277 static inline void arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
1278 
1279 static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *);
1280 static void l2arc_read_done(zio_t *);
1281 
1282 static void
1283 arc_update_hit_stat(arc_buf_hdr_t *hdr, boolean_t hit)
1284 {
1285         boolean_t pf = !HDR_PREFETCH(hdr);
1286         switch (arc_buf_type(hdr)) {
1287         case ARC_BUFC_DATA:
1288                 ARCSTAT_CONDSTAT(pf, demand, prefetch, hit, hits, misses, data);
1289                 break;
1290         case ARC_BUFC_METADATA:
1291                 ARCSTAT_CONDSTAT(pf, demand, prefetch, hit, hits, misses,
1292                     metadata);
1293                 break;
1294         case ARC_BUFC_DDT:
1295                 ARCSTAT_CONDSTAT(pf, demand, prefetch, hit, hits, misses, ddt);
1296                 break;
1297         default:
1298                 break;
1299         }
1300 }
1301 
1302 enum {
1303         L2ARC_DEV_HDR_EVICT_FIRST = (1 << 0)      /* mirror of l2ad_first */
1304 };
1305 
1306 /*
1307  * Pointer used in persistent L2ARC (for pointing to log blocks & ARC buffers).
1308  */
1309 typedef struct l2arc_log_blkptr {
1310         uint64_t        lbp_daddr;      /* device address of log */
1311         /*
1312          * lbp_prop is the same format as the blk_prop in blkptr_t:
1313          *      * logical size (in sectors)
1314          *      * physical size (in sectors)
1315          *      * checksum algorithm (used for lbp_cksum)
1316          *      * object type & level (unused for now)
1317          */
1318         uint64_t        lbp_prop;
1319         zio_cksum_t     lbp_cksum;      /* fletcher4 of log */
1320 } l2arc_log_blkptr_t;
1321 
1322 /*
1323  * The persistent L2ARC device header.
1324  * Byte order of magic determines whether 64-bit bswap of fields is necessary.
1325  */
1326 typedef struct l2arc_dev_hdr_phys {
1327         uint64_t        dh_magic;       /* L2ARC_DEV_HDR_MAGIC_Vx */
1328         zio_cksum_t     dh_self_cksum;  /* fletcher4 of fields below */
1329 
1330         /*
1331          * Global L2ARC device state and metadata.
1332          */
1333         uint64_t        dh_spa_guid;
1334         uint64_t        dh_alloc_space;         /* vdev space alloc status */
1335         uint64_t        dh_flags;               /* l2arc_dev_hdr_flags_t */
1336 
1337         /*
1338          * Start of log block chain. [0] -> newest log, [1] -> one older (used
1339          * for initiating prefetch).
1340          */
1341         l2arc_log_blkptr_t      dh_start_lbps[2];
1342 
1343         const uint64_t  dh_pad[44];             /* pad to 512 bytes */
1344 } l2arc_dev_hdr_phys_t;
1345 CTASSERT(sizeof (l2arc_dev_hdr_phys_t) == SPA_MINBLOCKSIZE);
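/*
 * Illustrative size check behind the CTASSERT above: 8 (dh_magic) +
 * 32 (dh_self_cksum) + 3 * 8 (guid, alloc_space, flags) +
 * 2 * 48 (dh_start_lbps) + 44 * 8 (dh_pad) == 512 bytes.
 */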
1346 
1347 /*
1348  * A single ARC buffer header entry in a l2arc_log_blk_phys_t.
1349  */
1350 typedef struct l2arc_log_ent_phys {
1351         dva_t                   le_dva; /* dva of buffer */
1352         uint64_t                le_birth;       /* birth txg of buffer */
1353         zio_cksum_t             le_freeze_cksum;
1354         /*
1355          * le_prop is the same format as the blk_prop in blkptr_t:
1356          *      * logical size (in sectors)
1357          *      * physical size (in sectors)
1358          *      * checksum algorithm (used for b_freeze_cksum)
1359          *      * object type & level (used to restore arc_buf_contents_t)
1360          */
1361         uint64_t                le_prop;
1362         uint64_t                le_daddr;       /* buf location on l2dev */
1363         const uint64_t          le_pad[7];      /* resv'd for future use */
1364 } l2arc_log_ent_phys_t;
1365 
1366 /*
1367  * These design limits give us the following metadata overhead (before
1368  * compression):
1369  *      avg_blk_sz      overhead
1370  *      1k              12.51 %
1371  *      2k               6.26 %
1372  *      4k               3.13 %
1373  *      8k               1.56 %
1374  *      16k              0.78 %
1375  *      32k              0.39 %
1376  *      64k              0.20 %
1377  *      128k             0.10 %
 * Compression should be able to squeeze these down by about a factor of 2.
1379  */
1380 #define L2ARC_LOG_BLK_SIZE                      (128 * 1024)    /* 128k */
1381 #define L2ARC_LOG_BLK_HEADER_LEN                (128)
1382 #define L2ARC_LOG_BLK_ENTRIES                   /* 1023 entries */      \
1383         ((L2ARC_LOG_BLK_SIZE - L2ARC_LOG_BLK_HEADER_LEN) /              \
1384         sizeof (l2arc_log_ent_phys_t))
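/*
 * Worked example behind the numbers above: sizeof (l2arc_log_ent_phys_t)
 * is 128 bytes (16 + 8 + 32 + 8 + 8 + 7 * 8), so one 128k log block holds
 * (131072 - 128) / 128 == 1023 entries, and with an average 1k buffer the
 * metadata overhead is 128 / 1024, or about 12.5 %, as in the table above.
 */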
1385 /*
1386  * Maximum amount of data in an l2arc log block (used to terminate rebuilding
1387  * before we hit the write head and restore potentially corrupted blocks).
1388  */
1389 #define L2ARC_LOG_BLK_MAX_PAYLOAD_SIZE  \
1390         (SPA_MAXBLOCKSIZE * L2ARC_LOG_BLK_ENTRIES)
1391 /*
1392  * For the persistency and rebuild algorithms to operate reliably we need
1393  * the L2ARC device to at least be able to hold 3 full log blocks (otherwise
1394  * excessive log block looping might confuse the log chain end detection).
1395  * Under normal circumstances this is not a problem, since this is somewhere
1396  * around only 400 MB.
1397  */
1398 #define L2ARC_PERSIST_MIN_SIZE  (3 * L2ARC_LOG_BLK_MAX_PAYLOAD_SIZE)
1399 
1400 /*
1401  * A log block of up to 1023 ARC buffer log entries, chained into the
1402  * persistent L2ARC metadata linked list. Byte order of magic determines
1403  * whether 64-bit bswap of fields is necessary.
1404  */
1405 typedef struct l2arc_log_blk_phys {
1406         /* Header - see L2ARC_LOG_BLK_HEADER_LEN above */
1407         uint64_t                lb_magic;       /* L2ARC_LOG_BLK_MAGIC */
1408         l2arc_log_blkptr_t      lb_back2_lbp;   /* back 2 steps in chain */
1409         uint64_t                lb_pad[9];      /* resv'd for future use */
1410         /* Payload */
1411         l2arc_log_ent_phys_t    lb_entries[L2ARC_LOG_BLK_ENTRIES];
1412 } l2arc_log_blk_phys_t;
1413 
1414 CTASSERT(sizeof (l2arc_log_blk_phys_t) == L2ARC_LOG_BLK_SIZE);
1415 CTASSERT(offsetof(l2arc_log_blk_phys_t, lb_entries) -
1416     offsetof(l2arc_log_blk_phys_t, lb_magic) == L2ARC_LOG_BLK_HEADER_LEN);
1417 
1418 /*
1419  * These structures hold in-flight l2arc_log_blk_phys_t's as they're being
1420  * written to the L2ARC device. They may be compressed, hence the uint8_t[].
1421  */
1422 typedef struct l2arc_log_blk_buf {
1423         uint8_t         lbb_log_blk[sizeof (l2arc_log_blk_phys_t)];
1424         list_node_t     lbb_node;
1425 } l2arc_log_blk_buf_t;
1426 
/* Macros for manipulating fields in the blk_prop format of blkptr_t */
1428 #define BLKPROP_GET_LSIZE(_obj, _field)         \
1429         BF64_GET_SB((_obj)->_field, 0, 16, SPA_MINBLOCKSHIFT, 1)
1430 #define BLKPROP_SET_LSIZE(_obj, _field, x)      \
1431         BF64_SET_SB((_obj)->_field, 0, 16, SPA_MINBLOCKSHIFT, 1, x)
1432 #define BLKPROP_GET_PSIZE(_obj, _field)         \
1433         BF64_GET_SB((_obj)->_field, 16, 16, SPA_MINBLOCKSHIFT, 0)
1434 #define BLKPROP_SET_PSIZE(_obj, _field, x)      \
1435         BF64_SET_SB((_obj)->_field, 16, 16, SPA_MINBLOCKSHIFT, 0, x)
1436 #define BLKPROP_GET_COMPRESS(_obj, _field)      \
1437         BF64_GET((_obj)->_field, 32, 7)
1438 #define BLKPROP_SET_COMPRESS(_obj, _field, x)   \
1439         BF64_SET((_obj)->_field, 32, 7, x)
1440 #define BLKPROP_GET_ARC_COMPRESS(_obj, _field)  \
1441         BF64_GET((_obj)->_field, 39, 1)
1442 #define BLKPROP_SET_ARC_COMPRESS(_obj, _field, x)       \
1443         BF64_SET((_obj)->_field, 39, 1, x)
1444 #define BLKPROP_GET_CHECKSUM(_obj, _field)      \
1445         BF64_GET((_obj)->_field, 40, 8)
1446 #define BLKPROP_SET_CHECKSUM(_obj, _field, x)   \
1447         BF64_SET((_obj)->_field, 40, 8, x)
1448 #define BLKPROP_GET_TYPE(_obj, _field)          \
1449         BF64_GET((_obj)->_field, 48, 8)
1450 #define BLKPROP_SET_TYPE(_obj, _field, x)       \
1451         BF64_SET((_obj)->_field, 48, 8, x)
1452 
1453 /* Macros for manipulating a l2arc_log_blkptr_t->lbp_prop field */
1454 #define LBP_GET_LSIZE(_add)             BLKPROP_GET_LSIZE(_add, lbp_prop)
1455 #define LBP_SET_LSIZE(_add, x)          BLKPROP_SET_LSIZE(_add, lbp_prop, x)
1456 #define LBP_GET_PSIZE(_add)             BLKPROP_GET_PSIZE(_add, lbp_prop)
1457 #define LBP_SET_PSIZE(_add, x)          BLKPROP_SET_PSIZE(_add, lbp_prop, x)
1458 #define LBP_GET_COMPRESS(_add)          BLKPROP_GET_COMPRESS(_add, lbp_prop)
1459 #define LBP_SET_COMPRESS(_add, x)       BLKPROP_SET_COMPRESS(_add, lbp_prop, x)
1460 #define LBP_GET_CHECKSUM(_add)          BLKPROP_GET_CHECKSUM(_add, lbp_prop)
1461 #define LBP_SET_CHECKSUM(_add, x)       BLKPROP_SET_CHECKSUM(_add, lbp_prop, x)
1462 #define LBP_GET_TYPE(_add)              BLKPROP_GET_TYPE(_add, lbp_prop)
1463 #define LBP_SET_TYPE(_add, x)           BLKPROP_SET_TYPE(_add, lbp_prop, x)
1464 
1465 /* Macros for manipulating a l2arc_log_ent_phys_t->le_prop field */
1466 #define LE_GET_LSIZE(_le)       BLKPROP_GET_LSIZE(_le, le_prop)
1467 #define LE_SET_LSIZE(_le, x)    BLKPROP_SET_LSIZE(_le, le_prop, x)
1468 #define LE_GET_PSIZE(_le)       BLKPROP_GET_PSIZE(_le, le_prop)
1469 #define LE_SET_PSIZE(_le, x)    BLKPROP_SET_PSIZE(_le, le_prop, x)
1470 #define LE_GET_COMPRESS(_le)    BLKPROP_GET_COMPRESS(_le, le_prop)
1471 #define LE_SET_COMPRESS(_le, x) BLKPROP_SET_COMPRESS(_le, le_prop, x)
1472 #define LE_GET_ARC_COMPRESS(_le)        BLKPROP_GET_ARC_COMPRESS(_le, le_prop)
1473 #define LE_SET_ARC_COMPRESS(_le, x)     BLKPROP_SET_ARC_COMPRESS(_le, le_prop, x)
1474 #define LE_GET_CHECKSUM(_le)    BLKPROP_GET_CHECKSUM(_le, le_prop)
1475 #define LE_SET_CHECKSUM(_le, x) BLKPROP_SET_CHECKSUM(_le, le_prop, x)
1476 #define LE_GET_TYPE(_le)        BLKPROP_GET_TYPE(_le, le_prop)
1477 #define LE_SET_TYPE(_le, x)     BLKPROP_SET_TYPE(_le, le_prop, x)
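
/*
 * Sketch of how a log entry's packed le_prop field is filled in (the real
 * work happens in l2arc_log_blk_insert(); values here are illustrative):
 *
 *	l2arc_log_ent_phys_t *le = ...;
 *	LE_SET_LSIZE(le, HDR_GET_LSIZE(hdr));
 *	LE_SET_PSIZE(le, HDR_GET_PSIZE(hdr));
 *	LE_SET_COMPRESS(le, HDR_GET_COMPRESS(hdr));
 *	ASSERT3U(LE_GET_LSIZE(le), ==, HDR_GET_LSIZE(hdr));
 *
 * The LBP_* macros manipulate lbp_prop of a l2arc_log_blkptr_t the same way.
 */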
1478 
1479 #define PTR_SWAP(x, y)          \
1480         do {                    \
1481                 void *tmp = (x);\
1482                 x = y;          \
1483                 y = tmp;        \
1484                 _NOTE(CONSTCOND)\
1485         } while (0)
1486 
1487 /*
 * Sadly, after the compressed ARC integration, older kernels would panic
 * when trying to rebuild a persistent L2ARC created by the new code.
1490  */
1491 #define L2ARC_DEV_HDR_MAGIC_V1  0x4c32415243763031LLU   /* ASCII: "L2ARCv01" */
1492 #define L2ARC_LOG_BLK_MAGIC     0x4c4f47424c4b4844LLU   /* ASCII: "LOGBLKHD" */
1493 
1494 /*
1495  * Performance tuning of L2ARC persistency:
1496  *
1497  * l2arc_rebuild_enabled : Controls whether L2ARC device adds (either at
1498  *              pool import or when adding one manually later) will attempt
1499  *              to rebuild L2ARC buffer contents. In special circumstances,
1500  *              the administrator may want to set this to B_FALSE, if they
1501  *              are having trouble importing a pool or attaching an L2ARC
1502  *              device (e.g. the L2ARC device is slow to read in stored log
1503  *              metadata, or the metadata has become somehow
1504  *              fragmented/unusable).
1505  */
1506 boolean_t l2arc_rebuild_enabled = B_TRUE;
1507 
1508 /* L2ARC persistency rebuild control routines. */
1509 static void l2arc_dev_rebuild_start(l2arc_dev_t *dev);
1510 static int l2arc_rebuild(l2arc_dev_t *dev);
1511 
1512 /* L2ARC persistency read I/O routines. */
1513 static int l2arc_dev_hdr_read(l2arc_dev_t *dev);
1514 static int l2arc_log_blk_read(l2arc_dev_t *dev,
1515     const l2arc_log_blkptr_t *this_lp, const l2arc_log_blkptr_t *next_lp,
1516     l2arc_log_blk_phys_t *this_lb, l2arc_log_blk_phys_t *next_lb,
1517     uint8_t *this_lb_buf, uint8_t *next_lb_buf,
1518     zio_t *this_io, zio_t **next_io);
1519 static zio_t *l2arc_log_blk_prefetch(vdev_t *vd,
1520     const l2arc_log_blkptr_t *lp, uint8_t *lb_buf);
1521 static void l2arc_log_blk_prefetch_abort(zio_t *zio);
1522 
1523 /* L2ARC persistency block restoration routines. */
1524 static void l2arc_log_blk_restore(l2arc_dev_t *dev, uint64_t load_guid,
1525     const l2arc_log_blk_phys_t *lb, uint64_t lb_psize);
1526 static void l2arc_hdr_restore(const l2arc_log_ent_phys_t *le,
1527     l2arc_dev_t *dev, uint64_t guid);
1528 
1529 /* L2ARC persistency write I/O routines. */
1530 static void l2arc_dev_hdr_update(l2arc_dev_t *dev, zio_t *pio);
1531 static void l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio,
1532     l2arc_write_callback_t *cb);
1533 
/* L2ARC persistency auxiliary routines. */
1535 static boolean_t l2arc_log_blkptr_valid(l2arc_dev_t *dev,
1536     const l2arc_log_blkptr_t *lp);
1537 static void l2arc_dev_hdr_checksum(const l2arc_dev_hdr_phys_t *hdr,
1538     zio_cksum_t *cksum);
1539 static boolean_t l2arc_log_blk_insert(l2arc_dev_t *dev,
1540     const arc_buf_hdr_t *ab);
1541 static inline boolean_t l2arc_range_check_overlap(uint64_t bottom,
1542     uint64_t top, uint64_t check);
1543 
1544 /*
1545  * L2ARC Internals
1546  */
1547 struct l2arc_dev {
1548         vdev_t                  *l2ad_vdev;     /* vdev */
1549         spa_t                   *l2ad_spa;      /* spa */
1550         uint64_t                l2ad_hand;      /* next write location */
1551         uint64_t                l2ad_start;     /* first addr on device */
1552         uint64_t                l2ad_end;       /* last addr on device */
1553         boolean_t               l2ad_first;     /* first sweep through */
1554         boolean_t               l2ad_writing;   /* currently writing */
1555         kmutex_t                l2ad_mtx;       /* lock for buffer list */
1556         list_t                  l2ad_buflist;   /* buffer list */
1557         list_node_t             l2ad_node;      /* device list node */
1558         refcount_t              l2ad_alloc;     /* allocated bytes */
1559         l2arc_dev_hdr_phys_t    *l2ad_dev_hdr;  /* persistent device header */
1560         uint64_t                l2ad_dev_hdr_asize; /* aligned hdr size */
1561         l2arc_log_blk_phys_t    l2ad_log_blk;   /* currently open log block */
1562         int                     l2ad_log_ent_idx; /* index into cur log blk */
1563         /* number of bytes in current log block's payload */
1564         uint64_t                l2ad_log_blk_payload_asize;
1565         /* flag indicating whether a rebuild is scheduled or is going on */
1566         boolean_t               l2ad_rebuild;
1567         boolean_t               l2ad_rebuild_cancel;
1568         kt_did_t                l2ad_rebuild_did;
1569 };
1570 
1571 static inline uint64_t
1572 buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
1573 {
1574         uint8_t *vdva = (uint8_t *)dva;
1575         uint64_t crc = -1ULL;
1576         int i;
1577 
1578         ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
1579 
1580         for (i = 0; i < sizeof (dva_t); i++)
1581                 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
1582 
1583         crc ^= (spa>>8) ^ birth;
1584 
1585         return (crc);
1586 }
1587 
1588 #define HDR_EMPTY(hdr)                                          \
1589         ((hdr)->b_dva.dva_word[0] == 0 &&                    \
1590         (hdr)->b_dva.dva_word[1] == 0)
1591 
1592 #define HDR_EQUAL(spa, dva, birth, hdr)                         \
1593         ((hdr)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&       \
1594         ((hdr)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&       \
1595         ((hdr)->b_birth == birth) && ((hdr)->b_spa == spa)
1596 
1597 static void
1598 buf_discard_identity(arc_buf_hdr_t *hdr)
1599 {
1600         hdr->b_dva.dva_word[0] = 0;
1601         hdr->b_dva.dva_word[1] = 0;
1602         hdr->b_birth = 0;
1603 }
1604 
1605 static arc_buf_hdr_t *
1606 buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp)
1607 {
1608         const dva_t *dva = BP_IDENTITY(bp);
1609         uint64_t birth = BP_PHYSICAL_BIRTH(bp);
1610         uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
1611         kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
1612         arc_buf_hdr_t *hdr;
1613 
1614         mutex_enter(hash_lock);
1615         for (hdr = buf_hash_table.ht_table[idx].hdr; hdr != NULL;
1616             hdr = hdr->b_hash_next) {
1617                 if (HDR_EQUAL(spa, dva, birth, hdr)) {
1618                         *lockp = hash_lock;
1619                         return (hdr);
1620                 }
1621         }
1622         mutex_exit(hash_lock);
1623         *lockp = NULL;
1624         return (NULL);
1625 }
1626 
1627 /*
 * Insert an entry into the hash table.  If there is already a header
 * equal to hdr in the hash table, the existing header is returned and
 * the new header is not inserted.  Otherwise returns NULL.
1632  * If lockp == NULL, the caller is assumed to already hold the hash lock.
1633  */
1634 static arc_buf_hdr_t *
1635 buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp)
1636 {
1637         uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
1638         kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
1639         arc_buf_hdr_t *fhdr;
1640         uint32_t i;
1641 
1642         ASSERT(!DVA_IS_EMPTY(&hdr->b_dva));
1643         ASSERT(hdr->b_birth != 0);
1644         ASSERT(!HDR_IN_HASH_TABLE(hdr));
1645 
1646         if (lockp != NULL) {
1647                 *lockp = hash_lock;
1648                 mutex_enter(hash_lock);
1649         } else {
1650                 ASSERT(MUTEX_HELD(hash_lock));
1651         }
1652 
1653         for (fhdr = buf_hash_table.ht_table[idx].hdr, i = 0; fhdr != NULL;
1654             fhdr = fhdr->b_hash_next, i++) {
1655                 if (HDR_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr))
1656                         return (fhdr);
1657         }
1658 
1659         hdr->b_hash_next = buf_hash_table.ht_table[idx].hdr;
1660         buf_hash_table.ht_table[idx].hdr = hdr;
1661         arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
1662 
1663         /* collect some hash table performance data */
1664         if (i > 0) {
1665                 ARCSTAT_BUMP(arcstat_hash_collisions);
1666                 if (i == 1)
1667                         ARCSTAT_BUMP(arcstat_hash_chains);
1668 
1669                 ARCSTAT_MAX(arcstat_hash_chain_max, i);
1670         }
1671 
1672         ARCSTAT_BUMP(arcstat_hash_elements);
1673         ARCSTAT_MAXSTAT(arcstat_hash_elements);
1674 
1675         return (NULL);
1676 }
1677 
1678 static void
1679 buf_hash_remove(arc_buf_hdr_t *hdr)
1680 {
1681         arc_buf_hdr_t *fhdr, **hdrp;
1682         uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
1683 
1684         ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
1685         ASSERT(HDR_IN_HASH_TABLE(hdr));
1686 
1687         hdrp = &buf_hash_table.ht_table[idx].hdr;
1688         while ((fhdr = *hdrp) != hdr) {
1689                 ASSERT3P(fhdr, !=, NULL);
1690                 hdrp = &fhdr->b_hash_next;
1691         }
1692         *hdrp = hdr->b_hash_next;
1693         hdr->b_hash_next = NULL;
1694         arc_hdr_clear_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
1695 
1696         /* collect some hash table performance data */
1697         ARCSTAT_BUMPDOWN(arcstat_hash_elements);
1698 
1699         if (buf_hash_table.ht_table[idx].hdr &&
1700             buf_hash_table.ht_table[idx].hdr->b_hash_next == NULL)
1701                 ARCSTAT_BUMPDOWN(arcstat_hash_chains);
1702 }
1703 
1704 /*
1705  * Global data structures and functions for the buf kmem cache.
1706  */
1707 static kmem_cache_t *hdr_full_cache;
1708 static kmem_cache_t *hdr_l2only_cache;
1709 static kmem_cache_t *buf_cache;
1710 
1711 static void
1712 buf_fini(void)
1713 {
1714         int i;
1715 
1716         for (i = 0; i < buf_hash_table.ht_mask + 1; i++)
1717                 mutex_destroy(&buf_hash_table.ht_table[i].lock);
1718         kmem_free(buf_hash_table.ht_table,
1719             (buf_hash_table.ht_mask + 1) * sizeof (struct ht_table));
1720         kmem_cache_destroy(hdr_full_cache);
1721         kmem_cache_destroy(hdr_l2only_cache);
1722         kmem_cache_destroy(buf_cache);
1723 }
1724 
1725 /*
1726  * Constructor callback - called when the cache is empty
1727  * and a new buf is requested.
1728  */
1729 /* ARGSUSED */
1730 static int
1731 hdr_full_cons(void *vbuf, void *unused, int kmflag)
1732 {
1733         arc_buf_hdr_t *hdr = vbuf;
1734 
1735         bzero(hdr, HDR_FULL_SIZE);
1736         cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL);
1737         refcount_create(&hdr->b_l1hdr.b_refcnt);
1738         mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
1739         multilist_link_init(&hdr->b_l1hdr.b_arc_node);
1740         arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS);
1741 
1742         return (0);
1743 }
1744 
1745 /* ARGSUSED */
1746 static int
1747 hdr_l2only_cons(void *vbuf, void *unused, int kmflag)
1748 {
1749         arc_buf_hdr_t *hdr = vbuf;
1750 
1751         bzero(hdr, HDR_L2ONLY_SIZE);
1752         arc_space_consume(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
1753 
1754         return (0);
1755 }
1756 
1757 /* ARGSUSED */
1758 static int
1759 buf_cons(void *vbuf, void *unused, int kmflag)
1760 {
1761         arc_buf_t *buf = vbuf;
1762 
1763         bzero(buf, sizeof (arc_buf_t));
1764         mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
1765         arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
1766 
1767         return (0);
1768 }
1769 
1770 /*
1771  * Destructor callback - called when a cached buf is
1772  * no longer required.
1773  */
1774 /* ARGSUSED */
1775 static void
1776 hdr_full_dest(void *vbuf, void *unused)
1777 {
1778         arc_buf_hdr_t *hdr = vbuf;
1779 
1780         ASSERT(HDR_EMPTY(hdr));
1781         cv_destroy(&hdr->b_l1hdr.b_cv);
1782         refcount_destroy(&hdr->b_l1hdr.b_refcnt);
1783         mutex_destroy(&hdr->b_l1hdr.b_freeze_lock);
1784         ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
1785         arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS);
1786 }
1787 
1788 /* ARGSUSED */
1789 static void
1790 hdr_l2only_dest(void *vbuf, void *unused)
1791 {
1792         arc_buf_hdr_t *hdr = vbuf;
1793 
1794         ASSERT(HDR_EMPTY(hdr));
1795         arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
1796 }
1797 
1798 /* ARGSUSED */
1799 static void
1800 buf_dest(void *vbuf, void *unused)
1801 {
1802         arc_buf_t *buf = vbuf;
1803 
1804         mutex_destroy(&buf->b_evict_lock);
1805         arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
1806 }
1807 
1808 /*
1809  * Reclaim callback -- invoked when memory is low.
1810  */
1811 /* ARGSUSED */
1812 static void
1813 hdr_recl(void *unused)
1814 {
1815         dprintf("hdr_recl called\n");
1816         /*
1817          * umem calls the reclaim func when we destroy the buf cache,
1818          * which is after we do arc_fini().
1819          */
1820         if (!arc_dead)
1821                 cv_signal(&arc_reclaim_thread_cv);
1822 }
1823 
1824 static void
1825 buf_init(void)
1826 {
1827         uint64_t *ct;
1828         uint64_t hsize = 1ULL << 12;
1829         int i, j;
1830 
1831         /*
1832          * The hash table is big enough to fill all of physical memory
1833          * with an average block size of zfs_arc_average_blocksize (default 8K).
1834          * By default, the table will take up
1835          * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
1836          */
1837         while (hsize * zfs_arc_average_blocksize < physmem * PAGESIZE)
1838                 hsize <<= 1;
1839 retry:
1840         buf_hash_table.ht_mask = hsize - 1;
1841         buf_hash_table.ht_table =
1842             kmem_zalloc(hsize * sizeof (struct ht_table), KM_NOSLEEP);
1843         if (buf_hash_table.ht_table == NULL) {
1844                 ASSERT(hsize > (1ULL << 8));
1845                 hsize >>= 1;
1846                 goto retry;
1847         }
1848 
1849         hdr_full_cache = kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE,
1850             0, hdr_full_cons, hdr_full_dest, hdr_recl, NULL, NULL, 0);
1851         hdr_l2only_cache = kmem_cache_create("arc_buf_hdr_t_l2only",
1852             HDR_L2ONLY_SIZE, 0, hdr_l2only_cons, hdr_l2only_dest, hdr_recl,
1853             NULL, NULL, 0);
1854         buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
1855             0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
1856 
1857         for (i = 0; i < 256; i++)
1858                 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
1859                         *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
1860 
1861         for (i = 0; i < hsize; i++) {
1862                 mutex_init(&buf_hash_table.ht_table[i].lock,
1863                     NULL, MUTEX_DEFAULT, NULL);
1864         }
1865 }
1866 
1867 /* wait until krrp releases the buffer */
1868 static inline void
1869 arc_wait_for_krrp(arc_buf_hdr_t *hdr)
1870 {
1871         while (HDR_HAS_L1HDR(hdr) && hdr->b_l1hdr.b_krrp != 0)
1872                 cv_wait(&hdr->b_l1hdr.b_cv, HDR_LOCK(hdr));
1873 }
1874 
1875 /*
1876  * This is the size that the buf occupies in memory. If the buf is compressed,
1877  * it will correspond to the compressed size. You should use this method of
1878  * getting the buf size unless you explicitly need the logical size.
1879  */
1880 int32_t
1881 arc_buf_size(arc_buf_t *buf)
1882 {
1883         return (ARC_BUF_COMPRESSED(buf) ?
1884             HDR_GET_PSIZE(buf->b_hdr) : HDR_GET_LSIZE(buf->b_hdr));
1885 }
1886 
1887 int32_t
1888 arc_buf_lsize(arc_buf_t *buf)
1889 {
1890         return (HDR_GET_LSIZE(buf->b_hdr));
1891 }
1892 
1893 enum zio_compress
1894 arc_get_compression(arc_buf_t *buf)
1895 {
1896         return (ARC_BUF_COMPRESSED(buf) ?
1897             HDR_GET_COMPRESS(buf->b_hdr) : ZIO_COMPRESS_OFF);
1898 }
1899 
1900 #define ARC_MINTIME     (hz>>4) /* 62 ms */
1901 
1902 static inline boolean_t
1903 arc_buf_is_shared(arc_buf_t *buf)
1904 {
1905         boolean_t shared = (buf->b_data != NULL &&
1906             buf->b_hdr->b_l1hdr.b_pabd != NULL &&
1907             abd_is_linear(buf->b_hdr->b_l1hdr.b_pabd) &&
1908             buf->b_data == abd_to_buf(buf->b_hdr->b_l1hdr.b_pabd));
1909         IMPLY(shared, HDR_SHARED_DATA(buf->b_hdr));
1910         IMPLY(shared, ARC_BUF_SHARED(buf));
1911         IMPLY(shared, ARC_BUF_COMPRESSED(buf) || ARC_BUF_LAST(buf));
1912 
1913         /*
1914          * It would be nice to assert arc_can_share() too, but the "hdr isn't
1915          * already being shared" requirement prevents us from doing that.
1916          */
1917 
1918         return (shared);
1919 }
1920 
1921 /*
1922  * Free the checksum associated with this header. If there is no checksum, this
1923  * is a no-op.
1924  */
1925 static inline void
1926 arc_cksum_free(arc_buf_hdr_t *hdr)
1927 {
1928         ASSERT(HDR_HAS_L1HDR(hdr));
1929         mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
1930         if (hdr->b_freeze_cksum != NULL) {
1931                 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1932                 hdr->b_freeze_cksum = NULL;
1933         }
1934         mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
1935 }
1936 
1937 /*
1938  * Return true iff at least one of the bufs on hdr is not compressed.
1939  */
1940 static boolean_t
1941 arc_hdr_has_uncompressed_buf(arc_buf_hdr_t *hdr)
1942 {
1943         for (arc_buf_t *b = hdr->b_l1hdr.b_buf; b != NULL; b = b->b_next) {
1944                 if (!ARC_BUF_COMPRESSED(b)) {
1945                         return (B_TRUE);
1946                 }
1947         }
1948         return (B_FALSE);
1949 }
1950 
1951 /*
1952  * If we've turned on the ZFS_DEBUG_MODIFY flag, verify that the buf's data
1953  * matches the checksum that is stored in the hdr. If there is no checksum,
1954  * or if the buf is compressed, this is a no-op.
1955  */
1956 static void
1957 arc_cksum_verify(arc_buf_t *buf)
1958 {
1959         arc_buf_hdr_t *hdr = buf->b_hdr;
1960         zio_cksum_t zc;
1961 
1962         if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1963                 return;
1964 
1965         if (ARC_BUF_COMPRESSED(buf)) {
1966                 ASSERT(hdr->b_freeze_cksum == NULL ||
1967                     arc_hdr_has_uncompressed_buf(hdr));
1968                 return;
1969         }
1970 
1971         ASSERT(HDR_HAS_L1HDR(hdr));
1972 
1973         mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
1974         if (hdr->b_freeze_cksum == NULL || HDR_IO_ERROR(hdr)) {
1975                 mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
1976                 return;
1977         }
1978 
1979         fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL, &zc);
1980         if (!ZIO_CHECKSUM_EQUAL(*hdr->b_freeze_cksum, zc))
1981                 panic("buffer modified while frozen!");
1982         mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
1983 }
1984 
1985 static boolean_t
1986 arc_cksum_is_equal(arc_buf_hdr_t *hdr, zio_t *zio)
1987 {
1988         enum zio_compress compress = BP_GET_COMPRESS(zio->io_bp);
1989         boolean_t valid_cksum;
1990 
1991         ASSERT(!BP_IS_EMBEDDED(zio->io_bp));
1992         VERIFY3U(BP_GET_PSIZE(zio->io_bp), ==, HDR_GET_PSIZE(hdr));
1993 
1994         /*
1995          * We rely on the blkptr's checksum to determine if the block
1996          * is valid or not. When compressed arc is enabled, the l2arc
1997          * writes the block to the l2arc just as it appears in the pool.
1998          * This allows us to use the blkptr's checksum to validate the
1999          * data that we just read off of the l2arc without having to store
2000          * a separate checksum in the arc_buf_hdr_t. However, if compressed
2001          * arc is disabled, then the data written to the l2arc is always
2002          * uncompressed and won't match the block as it exists in the main
2003          * pool. When this is the case, we must first compress it if it is
2004          * compressed on the main pool before we can validate the checksum.
2005          */
2006         if (!HDR_COMPRESSION_ENABLED(hdr) && compress != ZIO_COMPRESS_OFF) {
2007                 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF);
2008                 uint64_t lsize = HDR_GET_LSIZE(hdr);
2009                 uint64_t csize;
2010 
2011                 void *cbuf = zio_buf_alloc(HDR_GET_PSIZE(hdr));
2012                 csize = zio_compress_data(compress, zio->io_abd, cbuf, lsize);
2013                 abd_t *cdata = abd_get_from_buf(cbuf, HDR_GET_PSIZE(hdr));
2014                 abd_take_ownership_of_buf(cdata, B_TRUE);
2015 
2016                 ASSERT3U(csize, <=, HDR_GET_PSIZE(hdr));
2017                 if (csize < HDR_GET_PSIZE(hdr)) {
2018                         /*
2019                          * Compressed blocks are always a multiple of the
2020                          * smallest ashift in the pool. Ideally, we would
2021                          * like to round up the csize to the next
2022                          * spa_min_ashift but that value may have changed
2023                          * since the block was last written. Instead,
2024                          * we rely on the fact that the hdr's psize
2025                          * was set to the psize of the block when it was
2026                          * last written. We set the csize to that value
2027                          * and zero out any part that should not contain
2028                          * data.
2029                          */
2030                         abd_zero_off(cdata, csize, HDR_GET_PSIZE(hdr) - csize);
2031                         csize = HDR_GET_PSIZE(hdr);
2032                 }
2033                 zio_push_transform(zio, cdata, csize, HDR_GET_PSIZE(hdr), NULL);
2034         }
2035 
2036         /*
2037          * Block pointers always store the checksum for the logical data.
2038          * If the block pointer has the gang bit set, then the checksum
2039          * it represents is for the reconstituted data and not for an
2040          * individual gang member. The zio pipeline, however, must be able to
2041          * determine the checksum of each of the gang constituents so it
2042          * treats the checksum comparison differently than what we need
2043          * for l2arc blocks. This prevents us from using the
2044          * zio_checksum_error() interface directly. Instead we must call the
2045          * zio_checksum_error_impl() so that we can ensure the checksum is
2046          * generated using the correct checksum algorithm and accounts for the
2047          * logical I/O size and not just a gang fragment.
2048          */
2049         valid_cksum = (zio_checksum_error_impl(zio->io_spa, zio->io_bp,
2050             BP_GET_CHECKSUM(zio->io_bp), zio->io_abd, zio->io_size,
2051             zio->io_offset, NULL) == 0);
2052         zio_pop_transforms(zio);
2053         return (valid_cksum);
2054 }
2055 
2056 /*
2057  * Given a buf full of data, if ZFS_DEBUG_MODIFY is enabled this computes a
2058  * checksum and attaches it to the buf's hdr so that we can ensure that the buf
2059  * isn't modified later on. If buf is compressed or there is already a checksum
2060  * on the hdr, this is a no-op (we only checksum uncompressed bufs).
2061  */
2062 static void
2063 arc_cksum_compute(arc_buf_t *buf)
2064 {
2065         arc_buf_hdr_t *hdr = buf->b_hdr;
2066 
2067         if (!(zfs_flags & ZFS_DEBUG_MODIFY))
2068                 return;
2069 
2070         ASSERT(HDR_HAS_L1HDR(hdr));
2071 
2072         mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock);
2073         if (hdr->b_freeze_cksum != NULL) {
2074                 ASSERT(arc_hdr_has_uncompressed_buf(hdr));
2075                 mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
2076                 return;
2077         } else if (ARC_BUF_COMPRESSED(buf)) {
2078                 mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
2079                 return;
2080         }
2081 
2082         ASSERT(!ARC_BUF_COMPRESSED(buf));
2083         hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t),
2084             KM_SLEEP);
2085         fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL,
2086             hdr->b_freeze_cksum);
2087         mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
2088         arc_buf_watch(buf);
2089 }
2090 
2091 #ifndef _KERNEL
2092 typedef struct procctl {
2093         long cmd;
2094         prwatch_t prwatch;
2095 } procctl_t;
2096 #endif
2097 
2098 /* ARGSUSED */
2099 static void
2100 arc_buf_unwatch(arc_buf_t *buf)
2101 {
2102 #ifndef _KERNEL
2103         if (arc_watch) {
2104                 int result;
2105                 procctl_t ctl;
2106                 ctl.cmd = PCWATCH;
2107                 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
2108                 ctl.prwatch.pr_size = 0;
2109                 ctl.prwatch.pr_wflags = 0;
2110                 result = write(arc_procfd, &ctl, sizeof (ctl));
2111                 ASSERT3U(result, ==, sizeof (ctl));
2112         }
2113 #endif
2114 }
2115 
2116 /* ARGSUSED */
2117 static void
2118 arc_buf_watch(arc_buf_t *buf)
2119 {
2120 #ifndef _KERNEL
2121         if (arc_watch) {
2122                 int result;
2123                 procctl_t ctl;
2124                 ctl.cmd = PCWATCH;
2125                 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
2126                 ctl.prwatch.pr_size = arc_buf_size(buf);
2127                 ctl.prwatch.pr_wflags = WA_WRITE;
2128                 result = write(arc_procfd, &ctl, sizeof (ctl));
2129                 ASSERT3U(result, ==, sizeof (ctl));
2130         }
2131 #endif
2132 }
2133 
2134 static arc_buf_contents_t
2135 arc_buf_type(arc_buf_hdr_t *hdr)
2136 {
2137         arc_buf_contents_t type;
2138 
2139         if (HDR_ISTYPE_METADATA(hdr)) {
2140                 type = ARC_BUFC_METADATA;
2141         } else if (HDR_ISTYPE_DDT(hdr)) {
2142                 type = ARC_BUFC_DDT;
2143         } else {
2144                 type = ARC_BUFC_DATA;
2145         }
2146         VERIFY3U(hdr->b_type, ==, type);
2147         return (type);
2148 }
2149 
2150 boolean_t
2151 arc_is_metadata(arc_buf_t *buf)
2152 {
2153         return (HDR_ISTYPE_METADATA(buf->b_hdr) != 0);
2154 }
2155 
2156 static uint32_t
2157 arc_bufc_to_flags(arc_buf_contents_t type)
2158 {
2159         switch (type) {
2160         case ARC_BUFC_DATA:
2161                 /* metadata field is 0 if buffer contains normal data */
2162                 return (0);
2163         case ARC_BUFC_METADATA:
2164                 return (ARC_FLAG_BUFC_METADATA);
2165         case ARC_BUFC_DDT:
2166                 return (ARC_FLAG_BUFC_DDT);
2167         default:
2168                 break;
2169         }
2170         panic("undefined ARC buffer type!");
2171         return ((uint32_t)-1);
2172 }
2173 
2174 static arc_buf_contents_t
2175 arc_flags_to_bufc(uint32_t flags)
2176 {
2177         if (flags & ARC_FLAG_BUFC_DDT)
2178                 return (ARC_BUFC_DDT);
2179         if (flags & ARC_FLAG_BUFC_METADATA)
2180                 return (ARC_BUFC_METADATA);
2181         return (ARC_BUFC_DATA);
2182 }
2183 
2184 void
2185 arc_buf_thaw(arc_buf_t *buf)
2186 {
2187         arc_buf_hdr_t *hdr = buf->b_hdr;
2188 
2189         ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
2190         ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2191 
2192         arc_cksum_verify(buf);
2193 
2194         /*
2195          * Compressed buffers do not manipulate the b_freeze_cksum or
2196          * allocate b_thawed.
2197          */
2198         if (ARC_BUF_COMPRESSED(buf)) {
2199                 ASSERT(hdr->b_freeze_cksum == NULL ||
2200                     arc_hdr_has_uncompressed_buf(hdr));
2201                 return;
2202         }
2203 
2204         ASSERT(HDR_HAS_L1HDR(hdr));
2205         arc_cksum_free(hdr);
2206 
2207         mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
2208 #ifdef ZFS_DEBUG
2209         if (zfs_flags & ZFS_DEBUG_MODIFY) {
2210                 if (hdr->b_l1hdr.b_thawed != NULL)
2211                         kmem_free(hdr->b_l1hdr.b_thawed, 1);
2212                 hdr->b_l1hdr.b_thawed = kmem_alloc(1, KM_SLEEP);
2213         }
2214 #endif
2215 
2216         mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
2217 
2218         arc_buf_unwatch(buf);
2219 }
2220 
2221 void
2222 arc_buf_freeze(arc_buf_t *buf)
2223 {
2224         arc_buf_hdr_t *hdr = buf->b_hdr;
2225         kmutex_t *hash_lock;
2226 
2227         if (!(zfs_flags & ZFS_DEBUG_MODIFY))
2228                 return;
2229 
2230         if (ARC_BUF_COMPRESSED(buf)) {
2231                 ASSERT(hdr->b_freeze_cksum == NULL ||
2232                     arc_hdr_has_uncompressed_buf(hdr));
2233                 return;
2234         }
2235 
2236         hash_lock = HDR_LOCK(hdr);
2237         mutex_enter(hash_lock);
2238 
2239         ASSERT(HDR_HAS_L1HDR(hdr));
2240         ASSERT(hdr->b_freeze_cksum != NULL ||
2241             hdr->b_l1hdr.b_state == arc_anon);
2242         arc_cksum_compute(buf);
2243         mutex_exit(hash_lock);
2244 }
2245 
2246 /*
2247  * The arc_buf_hdr_t's b_flags should never be modified directly. Instead,
2248  * the following functions should be used to ensure that the flags are
2249  * updated in a thread-safe way. When manipulating the flags either
2250  * the hash_lock must be held or the hdr must be undiscoverable. This
2251  * ensures that we're not racing with any other threads when updating
2252  * the flags.
2253  */
2254 static inline void
2255 arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
2256 {
2257         ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
2258         hdr->b_flags |= flags;
2259 }
2260 
2261 static inline void
2262 arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
2263 {
2264         ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
2265         hdr->b_flags &= ~flags;
2266 }
2267 
2268 /*
2269  * Setting the compression bits in the arc_buf_hdr_t's b_flags is
2270  * done in a special way since we have to clear and set bits
2271  * at the same time. Consumers that wish to set the compression bits
 * must use this function to ensure that the flags are updated in a
 * thread-safe manner.
2274  */
2275 static void
2276 arc_hdr_set_compress(arc_buf_hdr_t *hdr, enum zio_compress cmp)
2277 {
2278         ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
2279 
2280         /*
2281          * Holes and embedded blocks will always have a psize = 0 so
2282          * we ignore the compression of the blkptr and set the
2283          * arc_buf_hdr_t's compression to ZIO_COMPRESS_OFF.
2284          * Holes and embedded blocks remain anonymous so we don't
2285          * want to uncompress them. Mark them as uncompressed.
2286          */
2287         if (!zfs_compressed_arc_enabled || HDR_GET_PSIZE(hdr) == 0) {
2288                 arc_hdr_clear_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
2289                 HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_OFF);
2290                 ASSERT(!HDR_COMPRESSION_ENABLED(hdr));
2291                 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF);
2292         } else {
2293                 arc_hdr_set_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
2294                 HDR_SET_COMPRESS(hdr, cmp);
2295                 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, cmp);
2296                 ASSERT(HDR_COMPRESSION_ENABLED(hdr));
2297         }
2298 }
2299 
2300 /*
2301  * Looks for another buf on the same hdr which has the data decompressed, copies
2302  * from it, and returns true. If no such buf exists, returns false.
2303  */
2304 static boolean_t
2305 arc_buf_try_copy_decompressed_data(arc_buf_t *buf)
2306 {
2307         arc_buf_hdr_t *hdr = buf->b_hdr;
2308         boolean_t copied = B_FALSE;
2309 
2310         ASSERT(HDR_HAS_L1HDR(hdr));
2311         ASSERT3P(buf->b_data, !=, NULL);
2312         ASSERT(!ARC_BUF_COMPRESSED(buf));
2313 
2314         for (arc_buf_t *from = hdr->b_l1hdr.b_buf; from != NULL;
2315             from = from->b_next) {
2316                 /* can't use our own data buffer */
2317                 if (from == buf) {
2318                         continue;
2319                 }
2320 
2321                 if (!ARC_BUF_COMPRESSED(from)) {
2322                         bcopy(from->b_data, buf->b_data, arc_buf_size(buf));
2323                         copied = B_TRUE;
2324                         break;
2325                 }
2326         }
2327 
2328         /*
2329          * There were no decompressed bufs, so there should not be a
2330          * checksum on the hdr either.
2331          */
2332         EQUIV(!copied, hdr->b_freeze_cksum == NULL);
2333 
2334         return (copied);
2335 }
2336 
2337 /*
2338  * Given a buf that has a data buffer attached to it, this function will
2339  * efficiently fill the buf with data of the specified compression setting from
2340  * the hdr and update the hdr's b_freeze_cksum if necessary. If the buf and hdr
2341  * are already sharing a data buf, no copy is performed.
2342  *
2343  * If the buf is marked as compressed but uncompressed data was requested, this
2344  * will allocate a new data buffer for the buf, remove that flag, and fill the
2345  * buf with uncompressed data. You can't request a compressed buf on a hdr with
2346  * uncompressed data, and (since we haven't added support for it yet) if you
2347  * want compressed data your buf must already be marked as compressed and have
2348  * the correct-sized data buffer.
2349  */
2350 static int
2351 arc_buf_fill(arc_buf_t *buf, boolean_t compressed)
2352 {
2353         arc_buf_hdr_t *hdr = buf->b_hdr;
2354         boolean_t hdr_compressed = (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF);
2355         dmu_object_byteswap_t bswap = hdr->b_l1hdr.b_byteswap;
2356 
2357         ASSERT3P(buf->b_data, !=, NULL);
2358         IMPLY(compressed, hdr_compressed);
2359         IMPLY(compressed, ARC_BUF_COMPRESSED(buf));
2360 
2361         if (hdr_compressed == compressed) {
2362                 if (!arc_buf_is_shared(buf)) {
2363                         abd_copy_to_buf(buf->b_data, hdr->b_l1hdr.b_pabd,
2364                             arc_buf_size(buf));
2365                 }
2366         } else {
2367                 ASSERT(hdr_compressed);
2368                 ASSERT(!compressed);
2369                 ASSERT3U(HDR_GET_LSIZE(hdr), !=, HDR_GET_PSIZE(hdr));
2370 
2371                 /*
2372                  * If the buf is sharing its data with the hdr, unlink it and
2373                  * allocate a new data buffer for the buf.
2374                  */
2375                 if (arc_buf_is_shared(buf)) {
2376                         ASSERT(ARC_BUF_COMPRESSED(buf));
2377 
			/* We need to give the buf its own b_data */
2379                         buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
2380                         buf->b_data =
2381                             arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
2382                         arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
2383 
2384                         /* Previously overhead was 0; just add new overhead */
2385                         ARCSTAT_INCR(arcstat_overhead_size, HDR_GET_LSIZE(hdr));
2386                 } else if (ARC_BUF_COMPRESSED(buf)) {
2387                         /* We need to reallocate the buf's b_data */
2388                         arc_free_data_buf(hdr, buf->b_data, HDR_GET_PSIZE(hdr),
2389                             buf);
2390                         buf->b_data =
2391                             arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
2392 
2393                         /* We increased the size of b_data; update overhead */
2394                         ARCSTAT_INCR(arcstat_overhead_size,
2395                             HDR_GET_LSIZE(hdr) - HDR_GET_PSIZE(hdr));
2396                 }
2397 
2398                 /*
2399                  * Regardless of the buf's previous compression settings, it
2400                  * should not be compressed at the end of this function.
2401                  */
2402                 buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
2403 
2404                 /*
2405                  * Try copying the data from another buf which already has a
2406                  * decompressed version. If that's not possible, it's time to
2407                  * bite the bullet and decompress the data from the hdr.
2408                  */
2409                 if (arc_buf_try_copy_decompressed_data(buf)) {
2410                         /* Skip byteswapping and checksumming (already done) */
2411                         ASSERT3P(hdr->b_freeze_cksum, !=, NULL);
2412                         return (0);
2413                 } else {
2414                         int error = zio_decompress_data(HDR_GET_COMPRESS(hdr),
2415                             hdr->b_l1hdr.b_pabd, buf->b_data,
2416                             HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr));
2417 
2418                         /*
2419                          * Absent hardware errors or software bugs, this should
2420                          * be impossible, but log it anyway so we can debug it.
2421                          */
2422                         if (error != 0) {
2423                                 zfs_dbgmsg(
2424                                     "hdr %p, compress %d, psize %d, lsize %d",
2425                                     hdr, HDR_GET_COMPRESS(hdr),
2426                                     HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr));
2427                                 return (SET_ERROR(EIO));
2428                         }
2429                 }
2430         }
2431 
2432         /* Byteswap the buf's data if necessary */
2433         if (bswap != DMU_BSWAP_NUMFUNCS) {
2434                 ASSERT(!HDR_SHARED_DATA(hdr));
2435                 ASSERT3U(bswap, <, DMU_BSWAP_NUMFUNCS);
2436                 dmu_ot_byteswap[bswap].ob_func(buf->b_data, HDR_GET_LSIZE(hdr));
2437         }
2438 
2439         /* Compute the hdr's checksum if necessary */
2440         arc_cksum_compute(buf);
2441 
2442         return (0);
2443 }
2444 
2445 int
2446 arc_decompress(arc_buf_t *buf)
2447 {
2448         return (arc_buf_fill(buf, B_FALSE));
2449 }
2450 
2451 /*
2452  * Return the size of the block, b_pabd, that is stored in the arc_buf_hdr_t.
2453  */
2454 static uint64_t
2455 arc_hdr_size(arc_buf_hdr_t *hdr)
2456 {
2457         uint64_t size;
2458 
2459         if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
2460             HDR_GET_PSIZE(hdr) > 0) {
2461                 size = HDR_GET_PSIZE(hdr);
2462         } else {
2463                 ASSERT3U(HDR_GET_LSIZE(hdr), !=, 0);
2464                 size = HDR_GET_LSIZE(hdr);
2465         }
2466         return (size);
2467 }
2468 
2469 /*
2470  * Increment the amount of evictable space in the arc_state_t's refcount.
2471  * We account for the space used by the hdr and the arc buf individually
2472  * so that we can add and remove them from the refcount individually.
2473  */
2474 static void
2475 arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state)
2476 {
2477         arc_buf_contents_t type = arc_buf_type(hdr);
2478 
2479         ASSERT(HDR_HAS_L1HDR(hdr));
2480 
2481         if (GHOST_STATE(state)) {
2482                 ASSERT0(hdr->b_l1hdr.b_bufcnt);
2483                 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
2484                 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
2485                 (void) refcount_add_many(&state->arcs_esize[type],
2486                     HDR_GET_LSIZE(hdr), hdr);
2487                 return;
2488         }
2489 
2490         ASSERT(!GHOST_STATE(state));
2491         if (hdr->b_l1hdr.b_pabd != NULL) {
2492                 (void) refcount_add_many(&state->arcs_esize[type],
2493                     arc_hdr_size(hdr), hdr);
2494         }
2495         for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
2496             buf = buf->b_next) {
2497                 if (arc_buf_is_shared(buf))
2498                         continue;
2499                 (void) refcount_add_many(&state->arcs_esize[type],
2500                     arc_buf_size(buf), buf);
2501         }
2502 }
2503 
2504 /*
2505  * Decrement the amount of evictable space in the arc_state_t's refcount.
2506  * We account for the space used by the hdr and the arc buf individually
2507  * so that we can add and remove them from the refcount individually.
2508  */
2509 static void
2510 arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state)
2511 {
2512         arc_buf_contents_t type = arc_buf_type(hdr);
2513 
2514         ASSERT(HDR_HAS_L1HDR(hdr));
2515 
2516         if (GHOST_STATE(state)) {
2517                 ASSERT0(hdr->b_l1hdr.b_bufcnt);
2518                 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
2519                 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
2520                 (void) refcount_remove_many(&state->arcs_esize[type],
2521                     HDR_GET_LSIZE(hdr), hdr);
2522                 return;
2523         }
2524 
2525         ASSERT(!GHOST_STATE(state));
2526         if (hdr->b_l1hdr.b_pabd != NULL) {
2527                 (void) refcount_remove_many(&state->arcs_esize[type],
2528                     arc_hdr_size(hdr), hdr);
2529         }
2530         for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
2531             buf = buf->b_next) {
2532                 if (arc_buf_is_shared(buf))
2533                         continue;
2534                 (void) refcount_remove_many(&state->arcs_esize[type],
2535                     arc_buf_size(buf), buf);
2536         }
2537 }
2538 
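/*
 * Illustrative sketch (an assumption, not part of the original code): the
 * increment/decrement pair above is always used symmetrically around a
 * multilist transition, as in add_reference()/remove_reference() below:
 *
 *	multilist_remove(state->arcs_list[type], hdr);
 *	arc_evictable_space_decrement(hdr, state);	now un-evictable
 *	...
 *	multilist_insert(state->arcs_list[type], hdr);
 *	arc_evictable_space_increment(hdr, state);	evictable again
 *
 * so arcs_esize[type] always reflects the bytes that could be reclaimed
 * from that state at a given instant.
 */
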
2539 /*
2540  * Add a reference to this hdr indicating that someone is actively
2541  * referencing that memory. When the refcount transitions from 0 to 1,
2542  * we remove it from the respective arc_state_t list to indicate that
2543  * it is not evictable.
2544  */
2545 static void
2546 add_reference(arc_buf_hdr_t *hdr, void *tag)
2547 {
2548         ASSERT(HDR_HAS_L1HDR(hdr));
2549         if (!MUTEX_HELD(HDR_LOCK(hdr))) {
2550                 ASSERT(hdr->b_l1hdr.b_state == arc_anon);
2551                 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
2552                 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
2553         }
2554 
2555         arc_state_t *state = hdr->b_l1hdr.b_state;
2556 
2557         if ((refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
2558             (state != arc_anon)) {
2559                 /* We don't use the L2-only state list. */
2560                 if (state != arc_l2c_only) {
2561                         multilist_remove(state->arcs_list[arc_buf_type(hdr)],
2562                             hdr);
2563                         arc_evictable_space_decrement(hdr, state);
2564                 }
2565                 /* remove the prefetch flag if we get a reference */
2566                 arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
2567         }
2568 }
2569 
2570 /*
2571  * Remove a reference from this hdr. When the refcount transitions from
2572  * 1 to 0 and the state is not anonymous, we add this hdr to the
2573  * arc_state_t's list, making it eligible for eviction.
2574  */
2575 static int
2576 remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
2577 {
2578         int cnt;
2579         arc_state_t *state = hdr->b_l1hdr.b_state;
2580 
2581         ASSERT(HDR_HAS_L1HDR(hdr));
2582         ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
2583         ASSERT(!GHOST_STATE(state));
2584 
2585         /*
2586          * arc_l2c_only counts as a ghost state so we don't need to explicitly
2587          * check to prevent usage of the arc_l2c_only list.
2588          */
2589         if (((cnt = refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
2590             (state != arc_anon)) {
2591                 multilist_insert(state->arcs_list[arc_buf_type(hdr)], hdr);
2592                 ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
2593                 arc_evictable_space_increment(hdr, state);
2594         }
2595         return (cnt);
2596 }
2597 
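/*
 * Illustrative sketch (an assumption): these two functions bracket every
 * external hold on a header. For example, arc_buf_alloc_impl() below takes
 * the hold when it hands out a buf, and arc_buf_destroy() drops it:
 *
 *	add_reference(hdr, tag);			0 -> 1: pulled off the
 *							evictable multilist
 *	... caller uses buf->b_data ...
 *	(void) remove_reference(hdr, hash_lock, tag);	1 -> 0: reinserted,
 *							evictable again
 */
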
2598 /*
2599  * Move the supplied buffer to the indicated state. The hash lock
2600  * for the buffer must be held by the caller.
2601  */
2602 static void
2603 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
2604     kmutex_t *hash_lock)
2605 {
2606         arc_state_t *old_state;
2607         int64_t refcnt;
2608         uint32_t bufcnt;
2609         boolean_t update_old, update_new;
2610         arc_buf_contents_t buftype = arc_buf_type(hdr);
2611 
2612         /*
2613          * We almost always have an L1 hdr here, since we call arc_hdr_realloc()
2614          * in arc_read() when bringing a buffer out of the L2ARC.  However, the
2615          * L1 hdr doesn't always exist when we change state to arc_anon before
2616          * destroying a header, in which case reallocating to add the L1 hdr is
2617          * pointless.
2618          */
2619         if (HDR_HAS_L1HDR(hdr)) {
2620                 old_state = hdr->b_l1hdr.b_state;
2621                 refcnt = refcount_count(&hdr->b_l1hdr.b_refcnt);
2622                 bufcnt = hdr->b_l1hdr.b_bufcnt;
2623                 update_old = (bufcnt > 0 || hdr->b_l1hdr.b_pabd != NULL);
2624         } else {
2625                 old_state = arc_l2c_only;
2626                 refcnt = 0;
2627                 bufcnt = 0;
2628                 update_old = B_FALSE;
2629         }
2630         update_new = update_old;
2631 
2632         ASSERT(MUTEX_HELD(hash_lock));
2633         ASSERT3P(new_state, !=, old_state);
2634         ASSERT(!GHOST_STATE(new_state) || bufcnt == 0);
2635         ASSERT(old_state != arc_anon || bufcnt <= 1);
2636 
2637         /*
2638          * If this buffer is evictable, transfer it from the
2639          * old state list to the new state list.
2640          */
2641         if (refcnt == 0) {
2642                 if (old_state != arc_anon && old_state != arc_l2c_only) {
2643                         ASSERT(HDR_HAS_L1HDR(hdr));
2644                         multilist_remove(old_state->arcs_list[buftype], hdr);
2645 
2646                         if (GHOST_STATE(old_state)) {
2647                                 ASSERT0(bufcnt);
2648                                 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
2649                                 update_old = B_TRUE;
2650                         }
2651                         arc_evictable_space_decrement(hdr, old_state);
2652                 }
2653                 if (new_state != arc_anon && new_state != arc_l2c_only) {
2654 
2655                         /*
2656                          * An L1 header always exists here, since if we're
2657                          * moving to some L1-cached state (i.e. not l2c_only or
2658                          * anonymous), we realloc the header to add an L1hdr
2659                          * beforehand.
2660                          */
2661                         ASSERT(HDR_HAS_L1HDR(hdr));
2662                         multilist_insert(new_state->arcs_list[buftype], hdr);
2663 
2664                         if (GHOST_STATE(new_state)) {
2665                                 ASSERT0(bufcnt);
2666                                 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
2667                                 update_new = B_TRUE;
2668                         }
2669                         arc_evictable_space_increment(hdr, new_state);
2670                 }
2671         }
2672 
2673         ASSERT(!HDR_EMPTY(hdr));
2674         if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr)) {
2675                 arc_wait_for_krrp(hdr);
2676                 buf_hash_remove(hdr);
2677         }
2678 
2679         /* adjust state sizes (ignore arc_l2c_only) */
2680 
2681         if (update_new && new_state != arc_l2c_only) {
2682                 ASSERT(HDR_HAS_L1HDR(hdr));
2683                 if (GHOST_STATE(new_state)) {
2684                         ASSERT0(bufcnt);
2685 
2686                         /*
2687                          * When moving a header to a ghost state, we first
2688                          * remove all arc buffers. Thus, we'll have a
2689                          * bufcnt of zero, and no arc buffer to use for
2690                          * the reference. As a result, we use the arc
2691                          * header pointer for the reference.
2692                          */
2693                         (void) refcount_add_many(&new_state->arcs_size,
2694                             HDR_GET_LSIZE(hdr), hdr);
2695                         ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
2696                 } else {
2697                         uint32_t buffers = 0;
2698 
2699                         /*
2700                          * Each individual buffer holds a unique reference,
2701                          * thus we must remove each of these references one
2702                          * at a time.
2703                          */
2704                         for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
2705                             buf = buf->b_next) {
2706                                 ASSERT3U(bufcnt, !=, 0);
2707                                 buffers++;
2708 
2709                                 /*
2710                                  * When the arc_buf_t is sharing the data
2711                                  * block with the hdr, the owner of the
2712                                  * reference belongs to the hdr. Only
2713                                  * add to the refcount if the arc_buf_t is
2714                                  * not shared.
2715                                  */
2716                                 if (arc_buf_is_shared(buf))
2717                                         continue;
2718 
2719                                 (void) refcount_add_many(&new_state->arcs_size,
2720                                     arc_buf_size(buf), buf);
2721                         }
2722                         ASSERT3U(bufcnt, ==, buffers);
2723 
2724                         if (hdr->b_l1hdr.b_pabd != NULL) {
2725                                 (void) refcount_add_many(&new_state->arcs_size,
2726                                     arc_hdr_size(hdr), hdr);
2727                         } else {
2728                                 ASSERT(GHOST_STATE(old_state));
2729                         }
2730                 }
2731         }
2732 
2733         if (update_old && old_state != arc_l2c_only) {
2734                 ASSERT(HDR_HAS_L1HDR(hdr));
2735                 if (GHOST_STATE(old_state)) {
2736                         ASSERT0(bufcnt);
2737                         ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
2738 
2739                         /*
2740                          * When moving a header off of a ghost state,
2741                          * the header will not contain any arc buffers.
2742                          * We use the arc header pointer for the reference
2743                          * which is exactly what we did when we put the
2744                          * header on the ghost state.
2745                          */
2746 
2747                         (void) refcount_remove_many(&old_state->arcs_size,
2748                             HDR_GET_LSIZE(hdr), hdr);
2749                 } else {
2750                         uint32_t buffers = 0;
2751 
2752                         /*
2753                          * Each individual buffer holds a unique reference,
2754                          * thus we must remove each of these references one
2755                          * at a time.
2756                          */
2757                         for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
2758                             buf = buf->b_next) {
2759                                 ASSERT3U(bufcnt, !=, 0);
2760                                 buffers++;
2761 
2762                                 /*
2763                                  * When the arc_buf_t is sharing the data
2764                                  * block with the hdr, the owner of the
2765                                  * reference belongs to the hdr. Only
2766                                  * add to the refcount if the arc_buf_t is
2767                                  * not shared.
2768                                  */
2769                                 if (arc_buf_is_shared(buf))
2770                                         continue;
2771 
2772                                 (void) refcount_remove_many(
2773                                     &old_state->arcs_size, arc_buf_size(buf),
2774                                     buf);
2775                         }
2776                         ASSERT3U(bufcnt, ==, buffers);
2777                         ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
2778                         (void) refcount_remove_many(
2779                             &old_state->arcs_size, arc_hdr_size(hdr), hdr);
2780                 }
2781         }
2782 
2783         if (HDR_HAS_L1HDR(hdr))
2784                 hdr->b_l1hdr.b_state = new_state;
2785 
2786         /*
2787          * L2 headers should never be on the L2 state list since they don't
2788          * have L1 headers allocated.
2789          */
2790         ASSERT(multilist_is_empty(arc_l2c_only->arcs_list[ARC_BUFC_DATA]));
2791         ASSERT(multilist_is_empty(arc_l2c_only->arcs_list[ARC_BUFC_METADATA]));
2792         ASSERT(multilist_is_empty(arc_l2c_only->arcs_list[ARC_BUFC_DDT]));
2793 }
2794 
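/*
 * Illustrative example (an assumption, not part of the original code): a
 * header only reaches a ghost state after its bufs and b_pabd are gone, so
 * the ghost state is charged HDR_GET_LSIZE(hdr) with the hdr pointer itself
 * as the reference holder. Moving a resident header between caching states
 * (e.g. arc_mru -> arc_mfu) instead transfers one arcs_size reference of
 * arc_buf_size(buf) per unshared buf plus one of arc_hdr_size(hdr) for the
 * hdr's b_pabd, exactly mirroring what arc_change_state() does above.
 */
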
2795 void
2796 arc_space_consume(uint64_t space, arc_space_type_t type)
2797 {
2798         ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
2799 
2800         switch (type) {
2801         case ARC_SPACE_DATA:
2802                 ARCSTAT_INCR(arcstat_data_size, space);
2803                 break;
2804         case ARC_SPACE_META:
2805                 ARCSTAT_INCR(arcstat_metadata_size, space);
2806                 break;
2807         case ARC_SPACE_DDT:
2808                 ARCSTAT_INCR(arcstat_ddt_size, space);
2809                 break;
2810         case ARC_SPACE_OTHER:
2811                 ARCSTAT_INCR(arcstat_other_size, space);
2812                 break;
2813         case ARC_SPACE_HDRS:
2814                 ARCSTAT_INCR(arcstat_hdr_size, space);
2815                 break;
2816         case ARC_SPACE_L2HDRS:
2817                 ARCSTAT_INCR(arcstat_l2_hdr_size, space);
2818                 break;
2819         }
2820 
2821         if (type != ARC_SPACE_DATA && type != ARC_SPACE_DDT)
2822                 ARCSTAT_INCR(arcstat_meta_used, space);
2823 
2824         atomic_add_64(&arc_size, space);
2825 }
2826 
2827 void
2828 arc_space_return(uint64_t space, arc_space_type_t type)
2829 {
2830         ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
2831 
2832         switch (type) {
2833         case ARC_SPACE_DATA:
2834                 ARCSTAT_INCR(arcstat_data_size, -space);
2835                 break;
2836         case ARC_SPACE_META:
2837                 ARCSTAT_INCR(arcstat_metadata_size, -space);
2838                 break;
2839         case ARC_SPACE_DDT:
2840                 ARCSTAT_INCR(arcstat_ddt_size, -space);
2841                 break;
2842         case ARC_SPACE_OTHER:
2843                 ARCSTAT_INCR(arcstat_other_size, -space);
2844                 break;
2845         case ARC_SPACE_HDRS:
2846                 ARCSTAT_INCR(arcstat_hdr_size, -space);
2847                 break;
2848         case ARC_SPACE_L2HDRS:
2849                 ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
2850                 break;
2851         }
2852 
2853         if (type != ARC_SPACE_DATA && type != ARC_SPACE_DDT) {
2854                 ASSERT(arc_meta_used >= space);
2855                 if (arc_meta_max < arc_meta_used)
2856                         arc_meta_max = arc_meta_used;
2857                 ARCSTAT_INCR(arcstat_meta_used, -space);
2858         }
2859 
2860         ASSERT(arc_size >= space);
2861         atomic_add_64(&arc_size, -space);
2862 }
2863 
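/*
 * Illustrative sketch (an assumption, not part of the original code):
 * callers pair these two functions around the lifetime of an allocation so
 * arc_size and the per-type kstats stay balanced, e.g. for a hypothetical
 * 4K metadata structure charged to the ARC:
 *
 *	arc_space_consume(4096, ARC_SPACE_META);
 *		arc_size, arcstat_metadata_size and arcstat_meta_used += 4096
 *	...
 *	arc_space_return(4096, ARC_SPACE_META);
 *		all of the above -= 4096
 */
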
2864 /*
2865  * Given a hdr and a buf, returns whether that buf can share its b_data buffer
2866  * with the hdr's b_pabd.
2867  */
2868 static boolean_t
2869 arc_can_share(arc_buf_hdr_t *hdr, arc_buf_t *buf)
2870 {
2871         /*
2872          * The criteria for sharing a hdr's data are:
2873          * 1. the hdr's compression matches the buf's compression
2874          * 2. the hdr doesn't need to be byteswapped
2875          * 3. the hdr isn't already being shared
2876          * 4. the buf is either compressed or it is the last buf in the hdr list
2877          *
2878          * Criterion #4 maintains the invariant that shared uncompressed
2879          * bufs must be the final buf in the hdr's b_buf list. Reading this, you
2880          * might ask, "if a compressed buf is allocated first, won't that be the
2881          * last thing in the list?", but in that case it's impossible to create
2882          * a shared uncompressed buf anyway (because the hdr must be compressed
2883          * to have the compressed buf). You might also think that #3 is
2884          * sufficient to make this guarantee, however it's possible
2885          * (specifically in the rare L2ARC write race mentioned in
2886          * arc_buf_alloc_impl()) there will be an existing uncompressed buf that
2887          * is sharable, but wasn't at the time of its allocation. Rather than
2888          * allow a new shared uncompressed buf to be created and then shuffle
2889          * the list around to make it the last element, this simply disallows
2890          * sharing if the new buf isn't the first to be added.
2891          */
2892         ASSERT3P(buf->b_hdr, ==, hdr);
2893         boolean_t hdr_compressed = HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF;
2894         boolean_t buf_compressed = ARC_BUF_COMPRESSED(buf) != 0;
2895         return (buf_compressed == hdr_compressed &&
2896             hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS &&
2897             !HDR_SHARED_DATA(hdr) &&
2898             (ARC_BUF_LAST(buf) || ARC_BUF_COMPRESSED(buf)));
2899 }
2900 
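/*
 * Illustrative example (an assumption): given a hdr compressed with LZ4 that
 * is not yet shared and needs no byteswap, a request for a compressed buf
 * satisfies all four criteria above and may share b_pabd directly, while a
 * request for an uncompressed buf fails criterion #1 (ZIO_COMPRESS_OFF does
 * not match the hdr's compression) and therefore gets its own copy via
 * arc_get_data_buf() in arc_buf_alloc_impl() below.
 */
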
2901 /*
2902  * Allocate a buf for this hdr. If you care about the data that's in the hdr,
2903  * or if you want a compressed buffer, pass those flags in. Returns 0 if the
2904  * copy was made successfully, or an error code otherwise.
2905  */
2906 static int
2907 arc_buf_alloc_impl(arc_buf_hdr_t *hdr, void *tag, boolean_t compressed,
2908     boolean_t fill, arc_buf_t **ret)
2909 {
2910         arc_buf_t *buf;
2911 
2912         ASSERT(HDR_HAS_L1HDR(hdr));
2913         ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
2914         VERIFY(hdr->b_type == ARC_BUFC_DATA ||
2915             hdr->b_type == ARC_BUFC_METADATA ||
2916             hdr->b_type == ARC_BUFC_DDT);
2917         ASSERT3P(ret, !=, NULL);
2918         ASSERT3P(*ret, ==, NULL);
2919 
2920         buf = *ret = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
2921         buf->b_hdr = hdr;
2922         buf->b_data = NULL;
2923         buf->b_next = hdr->b_l1hdr.b_buf;
2924         buf->b_flags = 0;
2925 
2926         add_reference(hdr, tag);
2927 
2928         /*
2929          * We're about to change the hdr's b_flags. We must either
2930          * hold the hash_lock or be undiscoverable.
2931          */
2932         ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
2933 
2934         /*
2935          * Only honor requests for compressed bufs if the hdr is actually
2936          * compressed.
2937          */
2938         if (compressed && HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF)
2939                 buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
2940 
2941         /*
2942          * If the hdr's data can be shared then we share the data buffer and
2943          * set the appropriate bit in the hdr's b_flags to indicate the hdr is
2944          * sharing its b_pabd with the arc_buf_t. Otherwise, we allocate a new
2945          * buffer to store the buf's data.
2946          *
2947          * There are two additional restrictions here because we're sharing
2948          * hdr -> buf instead of the usual buf -> hdr. First, the hdr can't be
2949          * actively involved in an L2ARC write, because if this buf is used by
2950          * an arc_write() then the hdr's data buffer will be released when the
2951          * write completes, even though the L2ARC write might still be using it.
2952          * Second, the hdr's ABD must be linear so that the buf's user doesn't
2953          * need to be ABD-aware.
2954          */
2955         boolean_t can_share = arc_can_share(hdr, buf) && !HDR_L2_WRITING(hdr) &&
2956             abd_is_linear(hdr->b_l1hdr.b_pabd);
2957 
2958         /* Set up b_data and sharing */
2959         if (can_share) {
2960                 buf->b_data = abd_to_buf(hdr->b_l1hdr.b_pabd);
2961                 buf->b_flags |= ARC_BUF_FLAG_SHARED;
2962                 arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
2963         } else {
2964                 buf->b_data =
2965                     arc_get_data_buf(hdr, arc_buf_size(buf), buf);
2966                 ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
2967         }
2968         VERIFY3P(buf->b_data, !=, NULL);
2969 
2970         hdr->b_l1hdr.b_buf = buf;
2971         hdr->b_l1hdr.b_bufcnt += 1;
2972 
2973         /*
2974          * If the user wants the data from the hdr, we need to either copy or
2975          * decompress the data.
2976          */
2977         if (fill) {
2978                 return (arc_buf_fill(buf, ARC_BUF_COMPRESSED(buf) != 0));
2979         }
2980 
2981         return (0);
2982 }
2983 
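/*
 * Illustrative usage (an assumption, not part of the original code):
 * arc_alloc_buf() and arc_alloc_compressed_buf() below call this with
 * fill == B_FALSE because a freshly allocated buf has no existing data to
 * copy, whereas a hypothetical caller that wants the hdr's current contents
 * would pass fill == B_TRUE:
 *
 *	arc_buf_t *buf = NULL;
 *	int err = arc_buf_alloc_impl(hdr, tag, B_FALSE, B_TRUE, &buf);
 *		on success buf->b_data holds the (decompressed) hdr data
 */
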
2984 static char *arc_onloan_tag = "onloan";
2985 
2986 static inline void
2987 arc_loaned_bytes_update(int64_t delta)
2988 {
2989         atomic_add_64(&arc_loaned_bytes, delta);
2990 
2991         /* assert that it did not wrap around */
2992         ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
2993 }
2994 
2995 /*
2996  * Allocates an ARC buf header that's in an evicted & L2-cached state.
2997  * This is used during l2arc reconstruction to make empty ARC buffers
2998  * which circumvent the regular disk->arc->l2arc path and instead come
2999  * into being in the reverse order, i.e. l2arc->arc.
3000  */
3001 static arc_buf_hdr_t *
3002 arc_buf_alloc_l2only(uint64_t load_guid, arc_buf_contents_t type,
3003     l2arc_dev_t *dev, dva_t dva, uint64_t daddr, uint64_t lsize,
3004     uint64_t psize, uint64_t birth, zio_cksum_t cksum, int checksum_type,
3005     enum zio_compress compress, boolean_t arc_compress)
3006 {
3007         arc_buf_hdr_t *hdr;
3008 
3009         if (type == ARC_BUFC_DDT && !zfs_arc_segregate_ddt)
3010                 type = ARC_BUFC_METADATA;
3011 
3012         ASSERT(lsize != 0);
3013         hdr = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE);
3014         ASSERT(HDR_EMPTY(hdr));
3015         ASSERT3P(hdr->b_freeze_cksum, ==, NULL);
3016 
3017         hdr->b_spa = load_guid;
3018         hdr->b_type = type;
3019         hdr->b_flags = 0;
3020 
3021         if (arc_compress)
3022                 arc_hdr_set_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
3023         else
3024                 arc_hdr_clear_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
3025 
3026         HDR_SET_COMPRESS(hdr, compress);
3027 
3028         arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L2HDR);
3029         hdr->b_dva = dva;
3030         hdr->b_birth = birth;
3031         if (checksum_type != ZIO_CHECKSUM_OFF) {
3032                 hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
3033                 bcopy(&cksum, hdr->b_freeze_cksum, sizeof (cksum));
3034         }
3035 
3036         HDR_SET_PSIZE(hdr, psize);
3037         HDR_SET_LSIZE(hdr, lsize);
3038 
3039         hdr->b_l2hdr.b_dev = dev;
3040         hdr->b_l2hdr.b_daddr = daddr;
3041 
3042         return (hdr);
3043 }
3044 
3045 /*
3046  * Loan out an anonymous arc buffer. Loaned buffers are not counted as
3047  * in-flight data by arc_tempreserve_space() until they are "returned". Loaned
3048  * buffers must be returned to the arc before they can be used by the DMU or
3049  * freed.
3050  */
3051 arc_buf_t *
3052 arc_loan_buf(spa_t *spa, boolean_t is_metadata, int size)
3053 {
3054         arc_buf_t *buf = arc_alloc_buf(spa, arc_onloan_tag,
3055             is_metadata ? ARC_BUFC_METADATA : ARC_BUFC_DATA, size);
3056 
3057         arc_loaned_bytes_update(size);
3058 
3059         return (buf);
3060 }
3061 
3062 arc_buf_t *
3063 arc_loan_compressed_buf(spa_t *spa, uint64_t psize, uint64_t lsize,
3064     enum zio_compress compression_type)
3065 {
3066         arc_buf_t *buf = arc_alloc_compressed_buf(spa, arc_onloan_tag,
3067             psize, lsize, compression_type);
3068 
3069         arc_loaned_bytes_update(psize);
3070 
3071         return (buf);
3072 }
3073 
3075 /*
3076  * Return a loaned arc buffer to the arc.
3077  */
3078 void
3079 arc_return_buf(arc_buf_t *buf, void *tag)
3080 {
3081         arc_buf_hdr_t *hdr = buf->b_hdr;
3082 
3083         ASSERT3P(buf->b_data, !=, NULL);
3084         ASSERT(HDR_HAS_L1HDR(hdr));
3085         (void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
3086         (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
3087 
3088         arc_loaned_bytes_update(-arc_buf_size(buf));
3089 }
3090 
3091 /* Detach an arc_buf from a dbuf (tag) */
3092 void
3093 arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
3094 {
3095         arc_buf_hdr_t *hdr = buf->b_hdr;
3096 
3097         ASSERT3P(buf->b_data, !=, NULL);
3098         ASSERT(HDR_HAS_L1HDR(hdr));
3099         (void) refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
3100         (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
3101 
3102         arc_loaned_bytes_update(arc_buf_size(buf));
3103 }
3104 
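/*
 * Illustrative sketch (an assumption, not part of the original code): the
 * loan lifecycle always balances arc_loaned_bytes, e.g. for a hypothetical
 * 8K data buffer:
 *
 *	arc_buf_t *buf = arc_loan_buf(spa, B_FALSE, 8192);
 *		arc_loaned_bytes += 8192
 *	... fill buf->b_data ...
 *	arc_return_buf(buf, tag);
 *		arc_loaned_bytes -= 8192, the hold moves to 'tag'
 *
 * arc_loan_inuse_buf() above is the inverse of arc_return_buf(): it moves
 * the hold from a dbuf tag back to arc_onloan_tag.
 */
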
3105 static void
3106 l2arc_free_abd_on_write(abd_t *abd, size_t size, arc_buf_contents_t type)
3107 {
3108         l2arc_data_free_t *df = kmem_alloc(sizeof (*df), KM_SLEEP);
3109 
3110         df->l2df_abd = abd;
3111         df->l2df_size = size;
3112         df->l2df_type = type;
3113         mutex_enter(&l2arc_free_on_write_mtx);
3114         list_insert_head(l2arc_free_on_write, df);
3115         mutex_exit(&l2arc_free_on_write_mtx);
3116 }
3117 
3118 static void
3119 arc_hdr_free_on_write(arc_buf_hdr_t *hdr)
3120 {
3121         arc_state_t *state = hdr->b_l1hdr.b_state;
3122         arc_buf_contents_t type = arc_buf_type(hdr);
3123         uint64_t size = arc_hdr_size(hdr);
3124 
3125         /* protected by hash lock, if in the hash table */
3126         if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
3127                 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
3128                 ASSERT(state != arc_anon && state != arc_l2c_only);
3129 
3130                 (void) refcount_remove_many(&state->arcs_esize[type],
3131                     size, hdr);
3132         }
3133         (void) refcount_remove_many(&state->arcs_size, size, hdr);
3134         if (type == ARC_BUFC_DDT) {
3135                 arc_space_return(size, ARC_SPACE_DDT);
3136         } else if (type == ARC_BUFC_METADATA) {
3137                 arc_space_return(size, ARC_SPACE_META);
3138         } else {
3139                 ASSERT(type == ARC_BUFC_DATA);
3140                 arc_space_return(size, ARC_SPACE_DATA);
3141         }
3142 
3143         l2arc_free_abd_on_write(hdr->b_l1hdr.b_pabd, size, type);
3144 }
3145 
3146 /*
3147  * Share the arc_buf_t's data with the hdr. Whenever we are sharing the
3148  * data buffer, we transfer the refcount ownership to the hdr and update
3149  * the appropriate kstats.
3150  */
3151 static void
3152 arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
3153 {
3154         arc_state_t *state = hdr->b_l1hdr.b_state;
3155 
3156         ASSERT(arc_can_share(hdr, buf));
3157         ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
3158         ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
3159 
3160         /*
3161          * Start sharing the data buffer. We transfer the
3162          * refcount ownership to the hdr since it always owns
3163          * the refcount whenever an arc_buf_t is shared.
3164          */
3165         refcount_transfer_ownership(&state->arcs_size, buf, hdr);
3166         hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf));
3167         abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd,
3168             !HDR_ISTYPE_DATA(hdr));
3169         arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
3170         buf->b_flags |= ARC_BUF_FLAG_SHARED;
3171 
3172         /*
3173          * Since we've transferred ownership to the hdr we need
3174          * to increment its compressed and uncompressed kstats and
3175          * decrement the overhead size.
3176          */
3177         ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr));
3178         ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
3179         ARCSTAT_INCR(arcstat_overhead_size, -arc_buf_size(buf));
3180 }
3181 
3182 static void
3183 arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
3184 {
3185         arc_state_t *state = hdr->b_l1hdr.b_state;
3186 
3187         ASSERT(arc_buf_is_shared(buf));
3188         ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
3189         ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
3190 
3191         /*
3192          * We are no longer sharing this buffer so we need
3193          * to transfer its ownership to the rightful owner.
3194          */
3195         refcount_transfer_ownership(&state->arcs_size, hdr, buf);
3196         arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
3197         abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd);
3198         abd_put(hdr->b_l1hdr.b_pabd);
3199         hdr->b_l1hdr.b_pabd = NULL;
3200         buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
3201 
3202         /*
3203          * Since the buffer is no longer shared between
3204          * the arc buf and the hdr, count it as overhead.
3205          */
3206         ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr));
3207         ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
3208         ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
3209 }
3210 
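/*
 * Illustrative summary (an assumption): arc_share_buf() and arc_unshare_buf()
 * above are exact inverses with respect to accounting:
 *
 *	share:		b_pabd = abd_get_from_buf(buf->b_data, ...)
 *			compressed/uncompressed kstats += hdr sizes
 *			overhead kstat -= arc_buf_size(buf)
 *	unshare:	compressed/uncompressed kstats -= hdr sizes
 *			overhead kstat += arc_buf_size(buf)
 *			b_pabd released via abd_put() and set to NULL
 */
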
3211 /*
3212  * Remove an arc_buf_t from the hdr's buf list and return the last
3213  * arc_buf_t on the list. If no buffers remain on the list then return
3214  * NULL.
3215  */
3216 static arc_buf_t *
3217 arc_buf_remove(arc_buf_hdr_t *hdr, arc_buf_t *buf)
3218 {
3219         ASSERT(HDR_HAS_L1HDR(hdr));
3220         ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
3221 
3222         arc_buf_t **bufp = &hdr->b_l1hdr.b_buf;
3223         arc_buf_t *lastbuf = NULL;
3224 
3225         /*
3226          * Remove the buf from the hdr list and locate the last
3227          * remaining buffer on the list.
3228          */
3229         while (*bufp != NULL) {
3230                 if (*bufp == buf)
3231                         *bufp = buf->b_next;
3232 
3233                 /*
3234                  * If we've removed a buffer in the middle of
3235                  * the list then update the lastbuf and update
3236                  * bufp.
3237                  */
3238                 if (*bufp != NULL) {
3239                         lastbuf = *bufp;
3240                         bufp = &(*bufp)->b_next;
3241                 }
3242         }
3243         buf->b_next = NULL;
3244         ASSERT3P(lastbuf, !=, buf);
3245         IMPLY(hdr->b_l1hdr.b_bufcnt > 0, lastbuf != NULL);
3246         IMPLY(hdr->b_l1hdr.b_bufcnt > 0, hdr->b_l1hdr.b_buf != NULL);
3247         IMPLY(lastbuf != NULL, ARC_BUF_LAST(lastbuf));
3248 
3249         return (lastbuf);
3250 }
3251 
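/*
 * Worked example (illustrative): with a buf list head -> B3 -> B2 -> B1,
 * arc_buf_remove(hdr, B2) unlinks B2, leaves head -> B3 -> B1 and returns
 * B1, the last remaining buf, which a caller such as arc_buf_destroy_impl()
 * below may then re-share with the hdr. Removing the only buf on the list
 * returns NULL.
 */
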
3252 /*
3253  * Free up buf->b_data and pull the arc_buf_t off of the arc_buf_hdr_t's
3254  * list and free it.
3255  */
3256 static void
3257 arc_buf_destroy_impl(arc_buf_t *buf)
3258 {
3259         arc_buf_hdr_t *hdr = buf->b_hdr;
3260 
3261         /*
3262          * Free up the data associated with the buf but only if we're not
3263          * sharing this with the hdr. If we are sharing it with the hdr, the
3264          * hdr is responsible for doing the free.
3265          */
3266         if (buf->b_data != NULL) {
3267                 /*
3268                  * We're about to change the hdr's b_flags. We must either
3269                  * hold the hash_lock or be undiscoverable.
3270                  */
3271                 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
3272 
3273                 arc_cksum_verify(buf);
3274                 arc_buf_unwatch(buf);
3275 
3276                 if (arc_buf_is_shared(buf)) {
3277                         arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
3278                 } else {
3279                         uint64_t size = arc_buf_size(buf);
3280                         arc_free_data_buf(hdr, buf->b_data, size, buf);
3281                         ARCSTAT_INCR(arcstat_overhead_size, -size);
3282                 }
3283                 buf->b_data = NULL;
3284 
3285                 ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
3286                 hdr->b_l1hdr.b_bufcnt -= 1;
3287         }
3288 
3289         arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
3290 
3291         if (ARC_BUF_SHARED(buf) && !ARC_BUF_COMPRESSED(buf)) {
3292                 /*
3293                  * If the current arc_buf_t is sharing its data buffer with the
3294                  * hdr, then reassign the hdr's b_pabd to share it with the new
3295                  * buffer at the end of the list. The shared buffer is always
3296                  * the last one on the hdr's buffer list.
3297                  *
3298                  * There is an equivalent case for compressed bufs, but since
3299                  * they aren't guaranteed to be the last buf in the list and
3300                  * that is an exceedingly rare case, we just allow that
3301                  * space to be wasted temporarily.
3302                  */
3303                 if (lastbuf != NULL) {
3304                         /* Only one buf can be shared at once */
3305                         VERIFY(!arc_buf_is_shared(lastbuf));
3306                         /* hdr is uncompressed so can't have compressed buf */
3307                         VERIFY(!ARC_BUF_COMPRESSED(lastbuf));
3308 
3309                         ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
3310                         arc_hdr_free_pabd(hdr);
3311 
3312                         /*
3313                          * We must setup a new shared block between the
3314                          * last buffer and the hdr. The data would have
3315                          * been allocated by the arc buf so we need to transfer
3316                          * ownership to the hdr since it's now being shared.
3317                          */
3318                         arc_share_buf(hdr, lastbuf);
3319                 }
3320         } else if (HDR_SHARED_DATA(hdr)) {
3321                 /*
3322                  * Uncompressed shared buffers are always at the end
3323                  * of the list. Compressed buffers don't have the
3324                  * same requirements. This makes it hard to
3325                  * simply assert that the lastbuf is shared so
3326                  * we rely on the hdr's compression flags to determine
3327                  * if we have a compressed, shared buffer.
3328                  */
3329                 ASSERT3P(lastbuf, !=, NULL);
3330                 ASSERT(arc_buf_is_shared(lastbuf) ||
3331                     HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF);
3332         }
3333 
3334         /*
3335          * Free the checksum if we're removing the last uncompressed buf from
3336          * this hdr.
3337          */
3338         if (!arc_hdr_has_uncompressed_buf(hdr)) {
3339                 arc_cksum_free(hdr);
3340         }
3341 
3342         /* clean up the buf */
3343         buf->b_hdr = NULL;
3344         kmem_cache_free(buf_cache, buf);
3345 }
3346 
3347 static void
3348 arc_hdr_alloc_pabd(arc_buf_hdr_t *hdr)
3349 {
3350         ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
3351         ASSERT(HDR_HAS_L1HDR(hdr));
3352         ASSERT(!HDR_SHARED_DATA(hdr));
3353 
3354         ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
3355         hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr);
3356         hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
3357         ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
3358 
3359         ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr));
3360         ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
3361         arc_update_hit_stat(hdr, B_TRUE);
3362 }
3363 
3364 static void
3365 arc_hdr_free_pabd(arc_buf_hdr_t *hdr)
3366 {
3367         ASSERT(HDR_HAS_L1HDR(hdr));
3368         ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
3369 
3370         /*
3371          * If the hdr is currently being written to the l2arc then
3372          * we defer freeing the data by adding it to the l2arc_free_on_write
3373          * list. The l2arc will free the data once it's finished
3374          * writing it to the l2arc device.
3375          */
3376         if (HDR_L2_WRITING(hdr)) {
3377                 arc_hdr_free_on_write(hdr);
3378                 ARCSTAT_BUMP(arcstat_l2_free_on_write);
3379         } else {
3380                 arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
3381                     arc_hdr_size(hdr), hdr);
3382         }
3383         hdr->b_l1hdr.b_pabd = NULL;
3384         hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
3385 
3386         ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr));
3387         ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
3388 }
3389 
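/*
 * Illustrative sketch (an assumption): arc_hdr_alloc_pabd() and
 * arc_hdr_free_pabd() above bracket the lifetime of a header's backing ABD,
 * and the size kstats they adjust always cancel out:
 *
 *	arc_hdr_alloc_pabd(hdr);
 *		b_pabd = arc_get_data_abd(...), compressed/uncompressed
 *		kstats += arc_hdr_size(hdr) / HDR_GET_LSIZE(hdr)
 *	...
 *	arc_hdr_free_pabd(hdr);
 *		kstats -= the same amounts; the data is freed immediately,
 *		or deferred to the l2arc free-on-write list if HDR_L2_WRITING
 */
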
3390 static arc_buf_hdr_t *
3391 arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
3392     enum zio_compress compression_type, arc_buf_contents_t type)
3393 {
3394         arc_buf_hdr_t *hdr;
3395 
3396         ASSERT3U(lsize, >, 0);
3397 
3398         if (type == ARC_BUFC_DDT && !zfs_arc_segregate_ddt)
3399                 type = ARC_BUFC_METADATA;
3400         VERIFY(type == ARC_BUFC_DATA || type == ARC_BUFC_METADATA ||
3401             type == ARC_BUFC_DDT);
3402 
3403         hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE);
3404         ASSERT(HDR_EMPTY(hdr));
3405         ASSERT3P(hdr->b_freeze_cksum, ==, NULL);
3406         ASSERT3P(hdr->b_l1hdr.b_thawed, ==, NULL);
3407         HDR_SET_PSIZE(hdr, psize);
3408         HDR_SET_LSIZE(hdr, lsize);
3409         hdr->b_spa = spa;
3410         hdr->b_type = type;
3411         hdr->b_flags = 0;
3412         arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L1HDR);
3413         arc_hdr_set_compress(hdr, compression_type);
3414 
3415         hdr->b_l1hdr.b_state = arc_anon;
3416         hdr->b_l1hdr.b_arc_access = 0;
3417         hdr->b_l1hdr.b_bufcnt = 0;
3418         hdr->b_l1hdr.b_buf = NULL;
3419 
3420         /*
3421          * Allocate the hdr's buffer. This will contain either
3422          * the compressed or uncompressed data depending on the block
3423          * it references and compressed arc enablement.
3424          */
3425         arc_hdr_alloc_pabd(hdr);
3426         ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
3427 
3428         return (hdr);
3429 }
3430 
3431 /*
3432  * Transition between the two allocation states for the arc_buf_hdr struct.
3433  * The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without
3434  * (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller
3435  * version is used when a cache buffer is only in the L2ARC in order to reduce
3436  * memory usage.
3437  */
3438 static arc_buf_hdr_t *
3439 arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
3440 {
3441         ASSERT(HDR_HAS_L2HDR(hdr));
3442 
3443         arc_buf_hdr_t *nhdr;
3444         l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
3445 
3446         ASSERT((old == hdr_full_cache && new == hdr_l2only_cache) ||
3447             (old == hdr_l2only_cache && new == hdr_full_cache));
3448 
3449         nhdr = kmem_cache_alloc(new, KM_PUSHPAGE);
3450 
3451         ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
3452         buf_hash_remove(hdr);
3453 
3454         bcopy(hdr, nhdr, HDR_L2ONLY_SIZE);
3455 
3456         if (new == hdr_full_cache) {
3457                 arc_hdr_set_flags(nhdr, ARC_FLAG_HAS_L1HDR);
3458                 /*
3459                  * arc_access and arc_change_state need to be aware that a
3460                  * header has just come out of L2ARC, so we set its state to
3461                  * l2c_only even though it's about to change.
3462                  */
3463                 nhdr->b_l1hdr.b_state = arc_l2c_only;
3464 
3465                 /* Verify previous threads set to NULL before freeing */
3466                 ASSERT3P(nhdr->b_l1hdr.b_pabd, ==, NULL);
3467         } else {
3468                 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
3469                 ASSERT0(hdr->b_l1hdr.b_bufcnt);
3470                 ASSERT3P(hdr->b_freeze_cksum, ==, NULL);
3471 
3472                 /*
3473                  * If we've reached here, we must have been called from
3474                  * arc_evict_hdr(), as such we should have already been
3475                  * removed from any ghost list we were previously on
3476                  * (which protects us from racing with arc_evict_state),
3477                  * thus no locking is needed during this check.
3478                  */
3479                 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
3480 
3481                 /*
3482                  * A buffer must not be moved into the arc_l2c_only
3483                  * state if it's not finished being written out to the
3484                  * l2arc device. Otherwise, the b_l1hdr.b_pabd field
3485                  * might try to be accessed, even though it was removed.
3486                  */
3487                 VERIFY(!HDR_L2_WRITING(hdr));
3488                 VERIFY3P(hdr->b_l1hdr.b_pabd, ==, NULL);
3489 
3490 #ifdef ZFS_DEBUG
3491                 if (hdr->b_l1hdr.b_thawed != NULL) {
3492                         kmem_free(hdr->b_l1hdr.b_thawed, 1);
3493                         hdr->b_l1hdr.b_thawed = NULL;
3494                 }
3495 #endif
3496 
3497                 arc_hdr_clear_flags(nhdr, ARC_FLAG_HAS_L1HDR);
3498         }
3499         /*
3500          * The header has been reallocated so we need to re-insert it into any
3501          * lists it was on.
3502          */
3503         (void) buf_hash_insert(nhdr, NULL);
3504 
3505         ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node));
3506 
3507         mutex_enter(&dev->l2ad_mtx);
3508 
3509         /*
3510          * We must place the realloc'ed header back into the list at
3511          * the same spot. Otherwise, if it's placed earlier in the list,
3512          * l2arc_write_buffers() could find it during the function's
3513          * write phase, and try to write it out to the l2arc.
3514          */
3515         list_insert_after(&dev->l2ad_buflist, hdr, nhdr);
3516         list_remove(&dev->l2ad_buflist, hdr);
3517 
3518         mutex_exit(&dev->l2ad_mtx);
3519 
3520         /*
3521          * Since we're using the pointer address as the tag when
3522          * incrementing and decrementing the l2ad_alloc refcount, we
3523          * must remove the old pointer (that we're about to destroy) and
3524          * add the new pointer to the refcount. Otherwise we'd remove
3525          * the wrong pointer address when calling arc_hdr_destroy() later.
3526          */
3527 
3528         (void) refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr);
3529         (void) refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(nhdr), nhdr);
3530 
3531         buf_discard_identity(hdr);
3532         kmem_cache_free(old, hdr);
3533 
3534         return (nhdr);
3535 }
3536 
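/*
 * Illustrative note (an assumption): the only two reallocation directions
 * are the ones asserted above:
 *
 *	full -> l2only:	arc_evict_hdr() dropping a ghost, L2-cached header
 *			down to hdr_l2only_cache (see below)
 *	l2only -> full:	arc_read() bringing a block back in from the L2ARC,
 *			which needs the L1 fields again
 */
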
3537 /*
3538  * Allocate a new arc_buf_hdr_t and arc_buf_t and return the buf to the caller.
3539  * The buf is returned thawed since we expect the consumer to modify it.
3540  */
3541 arc_buf_t *
3542 arc_alloc_buf(spa_t *spa, void *tag, arc_buf_contents_t type, int32_t size)
3543 {
3544         arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), size, size,
3545             ZIO_COMPRESS_OFF, type);
3546         ASSERT(!MUTEX_HELD(HDR_LOCK(hdr)));
3547 
3548         arc_buf_t *buf = NULL;
3549         VERIFY0(arc_buf_alloc_impl(hdr, tag, B_FALSE, B_FALSE, &buf));
3550         arc_buf_thaw(buf);
3551 
3552         return (buf);
3553 }
3554 
3555 /*
3556  * Allocate a compressed buf in the same manner as arc_alloc_buf. Don't use this
3557  * for bufs containing metadata.
3558  */
3559 arc_buf_t *
3560 arc_alloc_compressed_buf(spa_t *spa, void *tag, uint64_t psize, uint64_t lsize,
3561     enum zio_compress compression_type)
3562 {
3563         ASSERT3U(lsize, >, 0);
3564         ASSERT3U(lsize, >=, psize);
3565         ASSERT(compression_type > ZIO_COMPRESS_OFF);
3566         ASSERT(compression_type < ZIO_COMPRESS_FUNCTIONS);
3567 
3568         arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
3569             compression_type, ARC_BUFC_DATA);
3570         ASSERT(!MUTEX_HELD(HDR_LOCK(hdr)));
3571 
3572         arc_buf_t *buf = NULL;
3573         VERIFY0(arc_buf_alloc_impl(hdr, tag, B_TRUE, B_FALSE, &buf));
3574         arc_buf_thaw(buf);
3575         ASSERT3P(hdr->b_freeze_cksum, ==, NULL);
3576 
3577         if (!arc_buf_is_shared(buf)) {
3578                 /*
3579                  * To ensure that the hdr has the correct data in it if we call
3580                  * arc_decompress() on this buf before it's been written to
3581                  * disk, it's easiest if we just set up sharing between the
3582                  * buf and the hdr.
3583                  */
3584                 ASSERT(!abd_is_linear(hdr->b_l1hdr.b_pabd));
3585                 arc_hdr_free_pabd(hdr);
3586                 arc_share_buf(hdr, buf);
3587         }
3588 
3589         return (buf);
3590 }
3591 
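/*
 * Illustrative usage (an assumption, not part of the original code): a
 * hypothetical caller staging an already-compressed 128K block that LZ4
 * squeezed into 16K would do:
 *
 *	arc_buf_t *buf = arc_alloc_compressed_buf(spa, tag,
 *	    16384, 131072, ZIO_COMPRESS_LZ4);
 *		buf->b_data is 16K of compressed bytes, shared with b_pabd
 *
 * whereas arc_alloc_buf(spa, tag, ARC_BUFC_DATA, 131072) above yields an
 * uncompressed 128K buf with ZIO_COMPRESS_OFF.
 */
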
3592 static void
3593 arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr)
3594 {
3595         l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
3596         l2arc_dev_t *dev = l2hdr->b_dev;
3597         uint64_t psize = arc_hdr_size(hdr);
3598 
3599         ASSERT(MUTEX_HELD(&dev->l2ad_mtx));
3600         ASSERT(HDR_HAS_L2HDR(hdr));
3601 
3602         list_remove(&dev->l2ad_buflist, hdr);
3603 
3604         ARCSTAT_INCR(arcstat_l2_psize, -psize);
3605         ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr));
3606 
3607         /*
3608          * l2ad_vdev can be NULL here if the device was evicted asynchronously.
3609          */
3610         if (dev->l2ad_vdev != NULL)
3611                 vdev_space_update(dev->l2ad_vdev, -psize, 0, 0);
3612 
3613         (void) refcount_remove_many(&dev->l2ad_alloc, psize, hdr);
3614         arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
3615 }
3616 
3617 static void
3618 arc_hdr_destroy(arc_buf_hdr_t *hdr)
3619 {
3620         if (HDR_HAS_L1HDR(hdr)) {
3621                 ASSERT(hdr->b_l1hdr.b_buf == NULL ||
3622                     hdr->b_l1hdr.b_bufcnt > 0);
3623                 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
3624                 ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
3625         }
3626         ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3627         ASSERT(!HDR_IN_HASH_TABLE(hdr));
3628 
3629         if (HDR_HAS_L2HDR(hdr)) {
3630                 l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
3631                 boolean_t buflist_held = MUTEX_HELD(&dev->l2ad_mtx);
3632 
3633                 /* To avoid racing with L2ARC the header needs to be locked */
3634                 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
3635 
3636                 if (!buflist_held)
3637                         mutex_enter(&dev->l2ad_mtx);
3638 
3639                 /*
3640                  * The L2ARC buflist is held, so we can safely discard the
3641                  * identity; otherwise the L2ARC could select the wrong
3642                  * mutex for the hdr and cause a panic, since the mutex is
3643                  * chosen based on the hdr's identity.
3644                  */
3645                 if (!HDR_EMPTY(hdr))
3646                         buf_discard_identity(hdr);
3647 
3648                 /*
3649                  * Even though we checked this conditional above, we
3650                  * need to check this again now that we have the
3651                  * l2ad_mtx. This is because we could be racing with
3652                  * another thread calling l2arc_evict() which might have
3653                  * destroyed this header's L2 portion as we were waiting
3654                  * to acquire the l2ad_mtx. If that happens, we don't
3655                  * want to re-destroy the header's L2 portion.
3656                  */
3657                 if (HDR_HAS_L2HDR(hdr))
3658                         arc_hdr_l2hdr_destroy(hdr);
3659 
3660                 if (!buflist_held)
3661                         mutex_exit(&dev->l2ad_mtx);
3662         }
3663 
3664         if (!HDR_EMPTY(hdr))
3665                 buf_discard_identity(hdr);
3666 
3667         if (HDR_HAS_L1HDR(hdr)) {
3668                 arc_cksum_free(hdr);
3669 
3670                 while (hdr->b_l1hdr.b_buf != NULL)
3671                         arc_buf_destroy_impl(hdr->b_l1hdr.b_buf);
3672 
3673 #ifdef ZFS_DEBUG
3674                 if (hdr->b_l1hdr.b_thawed != NULL) {
3675                         kmem_free(hdr->b_l1hdr.b_thawed, 1);
3676                         hdr->b_l1hdr.b_thawed = NULL;
3677                 }
3678 #endif
3679 
3680                 if (hdr->b_l1hdr.b_pabd != NULL) {
3681                         arc_hdr_free_pabd(hdr);
3682                 }
3683         }
3684 
3685         ASSERT3P(hdr->b_hash_next, ==, NULL);
3686         if (HDR_HAS_L1HDR(hdr)) {
3687                 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
3688                 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
3689                 kmem_cache_free(hdr_full_cache, hdr);
3690         } else {
3691                 kmem_cache_free(hdr_l2only_cache, hdr);
3692         }
3693 }
3694 
3695 void
3696 arc_buf_destroy(arc_buf_t *buf, void* tag)
3697 {
3698         arc_buf_hdr_t *hdr = buf->b_hdr;
3699         kmutex_t *hash_lock = HDR_LOCK(hdr);
3700 
3701         if (hdr->b_l1hdr.b_state == arc_anon) {
3702                 ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
3703                 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3704                 VERIFY0(remove_reference(hdr, NULL, tag));
3705                 arc_hdr_destroy(hdr);
3706                 return;
3707         }
3708 
3709         mutex_enter(hash_lock);
3710         ASSERT3P(hdr, ==, buf->b_hdr);
3711         ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
3712         ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3713         ASSERT3P(hdr->b_l1hdr.b_state, !=, arc_anon);
3714         ASSERT3P(buf->b_data, !=, NULL);
3715 
3716         (void) remove_reference(hdr, hash_lock, tag);
3717         arc_buf_destroy_impl(buf);
3718         mutex_exit(hash_lock);
3719 }
3720 
3721 /*
3722  * Evict the arc_buf_hdr that is provided as a parameter. The resultant
3723  * state of the header is dependent on its state prior to entering this
3724  * function. The following transitions are possible:
3725  *
3726  *    - arc_mru -> arc_mru_ghost
3727  *    - arc_mfu -> arc_mfu_ghost
3728  *    - arc_mru_ghost -> arc_l2c_only
3729  *    - arc_mru_ghost -> deleted
3730  *    - arc_mfu_ghost -> arc_l2c_only
3731  *    - arc_mfu_ghost -> deleted
3732  */
3733 static int64_t
3734 arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
3735 {
3736         arc_state_t *evicted_state, *state;
3737         int64_t bytes_evicted = 0;
3738 
3739         ASSERT(MUTEX_HELD(hash_lock));
3740         ASSERT(HDR_HAS_L1HDR(hdr));
3741 
3742         arc_wait_for_krrp(hdr);
3743 
3744         state = hdr->b_l1hdr.b_state;
3745         if (GHOST_STATE(state)) {
3746                 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3747                 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
3748 
3749                 /*
3750                  * l2arc_write_buffers() relies on a header's L1 portion
3751                  * (i.e. its b_pabd field) during its write phase.
3752                  * Thus, we cannot push a header onto the arc_l2c_only
3753                  * state (removing its L1 piece) until the header is
3754                  * done being written to the l2arc.
3755                  */
3756                 if (HDR_HAS_L2HDR(hdr) && HDR_L2_WRITING(hdr)) {
3757                         ARCSTAT_BUMP(arcstat_evict_l2_skip);
3758                         return (bytes_evicted);
3759                 }
3760 
3761                 ARCSTAT_BUMP(arcstat_deleted);
3762                 bytes_evicted += HDR_GET_LSIZE(hdr);
3763 
3764                 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);
3765 
3766                 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
3767                 if (HDR_HAS_L2HDR(hdr)) {
3768                         /*
3769                          * This buffer is cached on the 2nd Level ARC;
3770                          * don't destroy the header.
3771                          */
3772                         arc_change_state(arc_l2c_only, hdr, hash_lock);
3773                         /*
3774                          * dropping from L1+L2 cached to L2-only,
3775                          * realloc to remove the L1 header.
3776                          */
3777                         hdr = arc_hdr_realloc(hdr, hdr_full_cache,
3778                             hdr_l2only_cache);
3779                 } else {
3780                         arc_change_state(arc_anon, hdr, hash_lock);
3781                         arc_hdr_destroy(hdr);
3782                 }
3783                 return (bytes_evicted);
3784         }
3785 
3786         ASSERT(state == arc_mru || state == arc_mfu);
3787         evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
3788 
3789         /* prefetch buffers have a minimum lifespan */
3790         if (HDR_IO_IN_PROGRESS(hdr) ||
3791             ((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) &&
3792             ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access <
3793             arc_min_prefetch_lifespan)) {
3794                 ARCSTAT_BUMP(arcstat_evict_skip);
3795                 return (bytes_evicted);
3796         }
3797 
3798         ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
3799         while (hdr->b_l1hdr.b_buf) {
3800                 arc_buf_t *buf = hdr->b_l1hdr.b_buf;
3801                 if (!mutex_tryenter(&buf->b_evict_lock)) {
3802                         ARCSTAT_BUMP(arcstat_mutex_miss);
3803                         break;
3804                 }
3805                 if (buf->b_data != NULL)
3806                         bytes_evicted += HDR_GET_LSIZE(hdr);
3807                 mutex_exit(&buf->b_evict_lock);
3808                 arc_buf_destroy_impl(buf);
3809         }
3810 
3811         if (HDR_HAS_L2HDR(hdr)) {
3812                 ARCSTAT_INCR(arcstat_evict_l2_cached, HDR_GET_LSIZE(hdr));
3813         } else {
3814                 if (l2arc_write_eligible(hdr->b_spa, hdr)) {
3815                         ARCSTAT_INCR(arcstat_evict_l2_eligible,
3816                             HDR_GET_LSIZE(hdr));
3817                 } else {
3818                         ARCSTAT_INCR(arcstat_evict_l2_ineligible,
3819                             HDR_GET_LSIZE(hdr));
3820                 }
3821         }
3822 
3823         if (hdr->b_l1hdr.b_bufcnt == 0) {
3824                 arc_cksum_free(hdr);
3825 
3826                 bytes_evicted += arc_hdr_size(hdr);
3827 
3828                 /*
3829                  * If this hdr is being evicted and has a compressed
3830                  * buffer then we discard it here before we change states.
3831                  * This ensures that the accounting is updated correctly
3832                  * in arc_free_data_impl().
3833                  */
3834                 arc_hdr_free_pabd(hdr);
3835 
3836                 arc_change_state(evicted_state, hdr, hash_lock);
3837                 ASSERT(HDR_IN_HASH_TABLE(hdr));
3838                 arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
3839                 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr);
3840         }
3841 
3842         return (bytes_evicted);
3843 }
3844 
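/*
 * Illustrative walk-through (an assumption): evicting a resident arc_mru
 * header with no L2 copy destroys each remaining arc_buf_t, frees the
 * header's b_pabd and then moves it to arc_mru_ghost, where it is accounted
 * only as HDR_GET_LSIZE(hdr). Evicting that ghost later either drops it to
 * arc_l2c_only (if an L2 copy exists) via arc_hdr_realloc(), or destroys it
 * entirely via arc_change_state(arc_anon, ...) and arc_hdr_destroy().
 */
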
3845 static uint64_t
3846 arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker,
3847     uint64_t spa, int64_t bytes)
3848 {
3849         multilist_sublist_t *mls;
3850         uint64_t bytes_evicted = 0;
3851         arc_buf_hdr_t *hdr;
3852         kmutex_t *hash_lock;
3853         int evict_count = 0;
3854 
3855         ASSERT3P(marker, !=, NULL);
3856         IMPLY(bytes < 0, bytes == ARC_EVICT_ALL);
3857 
3858         mls = multilist_sublist_lock(ml, idx);
3859 
3860         for (hdr = multilist_sublist_prev(mls, marker); hdr != NULL;
3861             hdr = multilist_sublist_prev(mls, marker)) {
3862                 if ((bytes != ARC_EVICT_ALL && bytes_evicted >= bytes) ||
3863                     (evict_count >= zfs_arc_evict_batch_limit))
3864                         break;
3865 
3866                 /*
3867                  * To keep our iteration location, move the marker
3868                  * forward. Since we're not holding hdr's hash lock, we
3869                  * must be very careful and not remove 'hdr' from the
3870                  * sublist. Otherwise, other consumers might mistake the
3871                  * 'hdr' as not being on a sublist when they call the
3872                  * multilist_link_active() function (they all rely on
3873                  * the hash lock protecting concurrent insertions and
3874                  * removals). multilist_sublist_move_forward() was
3875                  * specifically implemented to ensure this is the case
3876                  * (only 'marker' will be removed and re-inserted).
3877                  */
3878                 multilist_sublist_move_forward(mls, marker);
3879 
3880                 /*
3881                  * The only headers whose b_spa field should ever be
3882                  * zero are the markers inserted by
3883                  * arc_evict_state(). It's possible for multiple threads
3884                  * to be calling arc_evict_state() concurrently (e.g.
3885                  * dsl_pool_close() and zio_inject_fault()), so we must
3886                  * skip any markers we see from these other threads.
3887                  */
3888                 if (hdr->b_spa == 0)
3889                         continue;
3890 
3891                 /* we're only interested in evicting buffers of a certain spa */
3892                 if (spa != 0 && hdr->b_spa != spa) {
3893                         ARCSTAT_BUMP(arcstat_evict_skip);
3894                         continue;
3895                 }
3896 
3897                 hash_lock = HDR_LOCK(hdr);
3898 
3899                 /*
3900                  * We aren't calling this function from any code path
3901                  * that would already be holding a hash lock, so we're
3902                  * asserting on this assumption to be defensive in case
3903                  * this ever changes. Without this check, it would be
3904                  * possible to incorrectly increment arcstat_mutex_miss
3905                  * below (e.g. if the code changed such that we called
3906                  * this function with a hash lock held).
3907                  */
3908                 ASSERT(!MUTEX_HELD(hash_lock));
3909 
3910                 if (mutex_tryenter(hash_lock)) {
3911                         uint64_t evicted = arc_evict_hdr(hdr, hash_lock);
3912                         mutex_exit(hash_lock);
3913 
3914                         bytes_evicted += evicted;
3915 
3916                         /*
3917                          * If evicted is zero, arc_evict_hdr() must have
3918                          * decided to skip this header; don't increment
3919                          * evict_count in this case.
3920                          */
3921                         if (evicted != 0)
3922                                 evict_count++;
3923 
3924                         /*
3925                          * If arc_size isn't overflowing, signal any
3926                          * threads that might happen to be waiting.
3927                          *
3928                          * For each header evicted, we wake up a single
3929                          * thread. If we used cv_broadcast, we could
3930                          * wake up "too many" threads causing arc_size
3931                          * to significantly overflow arc_c; since
3932                          * arc_get_data_impl() doesn't check for overflow
3933                          * when it's woken up (it doesn't because it's
3934                          * possible for the ARC to be overflowing while
3935                          * full of un-evictable buffers, and the
3936                          * function should proceed in this case).
3937                          *
3938                          * If threads are left sleeping, due to not
3939                          * using cv_broadcast, they will be woken up
3940                          * just before arc_reclaim_thread() sleeps.
3941                          */
3942                         mutex_enter(&arc_reclaim_lock);
3943                         if (!arc_is_overflowing())
3944                                 cv_signal(&arc_reclaim_waiters_cv);
3945                         mutex_exit(&arc_reclaim_lock);
3946                 } else {
3947                         ARCSTAT_BUMP(arcstat_mutex_miss);
3948                 }
3949         }
3950 
3951         multilist_sublist_unlock(mls);
3952 
3953         return (bytes_evicted);
3954 }
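
     /*
      * Illustrative sketch (not compiled) of the marker walk above: the
      * marker starts at the sublist tail (placed there by arc_evict_state())
      * and is stepped one header toward the head on every iteration, so the
      * walk never depends on a real header staying linked:
      *
      *	hdr = multilist_sublist_prev(mls, marker);	(header behind marker)
      *	multilist_sublist_move_forward(mls, marker);	(marker takes hdr's slot)
      *	if hdr is not itself a marker (b_spa != 0) and its hash lock can be
      *	acquired with mutex_tryenter(), then
      *		bytes_evicted += arc_evict_hdr(hdr, hash_lock);
      *	otherwise the header is skipped (bumping arcstat_mutex_miss on a
      *	lock miss).
      */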
3955 
3956 /*
3957  * Evict buffers from the given arc state, until we've removed the
3958  * specified number of bytes. Move the removed buffers to the
3959  * appropriate evict state.
3960  *
3961  * This function makes a "best effort". It skips over any buffers
3962  * it can't get a hash_lock on, and so may not catch all candidates.
3963  * It may also return without evicting as much space as requested.
3964  *
3965  * If bytes is specified using the special value ARC_EVICT_ALL, this
3966  * will evict all available (i.e. unlocked and evictable) buffers from
3967  * the given arc state; which is used by arc_flush().
3968  */
3969 static uint64_t
3970 arc_evict_state(arc_state_t *state, uint64_t spa, int64_t bytes,
3971     arc_buf_contents_t type)
3972 {
3973         uint64_t total_evicted = 0;
3974         multilist_t *ml = state->arcs_list[type];
3975         int num_sublists;
3976         arc_buf_hdr_t **markers;
3977 
3978         IMPLY(bytes < 0, bytes == ARC_EVICT_ALL);
3979 
3980         num_sublists = multilist_get_num_sublists(ml);
3981 
3982         /*
3983          * If we've tried to evict from each sublist, made some
3984          * progress, but still have not hit the target number of bytes
3985          * to evict, we want to keep trying. The markers allow us to
3986          * pick up where we left off for each individual sublist, rather
3987          * than starting from the tail each time.
3988          */
3989         markers = kmem_zalloc(sizeof (*markers) * num_sublists, KM_SLEEP);
3990         for (int i = 0; i < num_sublists; i++) {
3991                 markers[i] = kmem_cache_alloc(hdr_full_cache, KM_SLEEP);
3992 
3993                 /*
3994                  * A b_spa of 0 is used to indicate that this header is
3995                  * a marker. This fact is used in arc_adjust_type() and
3996                  * arc_evict_state_impl().
3997                  */
3998                 markers[i]->b_spa = 0;
3999 
4000                 multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
4001                 multilist_sublist_insert_tail(mls, markers[i]);
4002                 multilist_sublist_unlock(mls);
4003         }
4004 
4005         /*
4006          * While we haven't hit our target number of bytes to evict, or
4007          * we're evicting all available buffers.
4008          */
4009         while (total_evicted < bytes || bytes == ARC_EVICT_ALL) {
4010                 /*
4011                  * Start eviction using a randomly selected sublist,
4012                  * this is to try and evenly balance eviction across all
4013                  * sublists. Always starting at the same sublist
4014                  * (e.g. index 0) would cause evictions to favor certain
4015                  * sublists over others.
4016                  */
4017                 int sublist_idx = multilist_get_random_index(ml);
4018                 uint64_t scan_evicted = 0;
4019 
4020                 for (int i = 0; i < num_sublists; i++) {
4021                         uint64_t bytes_remaining;
4022                         uint64_t bytes_evicted;
4023 
4024                         if (bytes == ARC_EVICT_ALL)
4025                                 bytes_remaining = ARC_EVICT_ALL;
4026                         else if (total_evicted < bytes)
4027                                 bytes_remaining = bytes - total_evicted;
4028                         else
4029                                 break;
4030 
4031                         bytes_evicted = arc_evict_state_impl(ml, sublist_idx,
4032                             markers[sublist_idx], spa, bytes_remaining);
4033 
4034                         scan_evicted += bytes_evicted;
4035                         total_evicted += bytes_evicted;
4036 
4037                         /* we've reached the end, wrap to the beginning */
4038                         if (++sublist_idx >= num_sublists)
4039                                 sublist_idx = 0;
4040                 }
4041 
4042                 /*
4043                  * If we didn't evict anything during this scan, we have
4044                  * no reason to believe we'll evict more during another
4045                  * scan, so break the loop.
4046                  */
4047                 if (scan_evicted == 0) {
4048                         /* This isn't possible, let's make that obvious */
4049                         ASSERT3S(bytes, !=, 0);
4050 
4051                         /*
4052                          * When bytes is ARC_EVICT_ALL, the only way to
4053                          * break the loop is when scan_evicted is zero.
4054                          * In that case, we actually have evicted enough,
4055                          * so we don't want to increment the kstat.
4056                          */
4057                         if (bytes != ARC_EVICT_ALL) {
4058                                 ASSERT3S(total_evicted, <, bytes);
4059                                 ARCSTAT_BUMP(arcstat_evict_not_enough);
4060                         }
4061 
4062                         break;
4063                 }
4064         }
4065 
4066         for (int i = 0; i < num_sublists; i++) {
4067                 multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
4068                 multilist_sublist_remove(mls, markers[i]);
4069                 multilist_sublist_unlock(mls);
4070 
4071                 kmem_cache_free(hdr_full_cache, markers[i]);
4072         }
4073         kmem_free(markers, sizeof (*markers) * num_sublists);
4074 
4075         return (total_evicted);
4076 }
4077 
4078 /*
4079  * Flush all "evictable" data of the given type from the arc state
4080  * specified. This will not evict any "active" buffers (i.e. referenced).
4081  *
4082  * When 'retry' is set to B_FALSE, the function will make a single pass
4083  * over the state and evict any buffers that it can. Since it doesn't
4084  * continually retry the eviction, it might end up leaving some buffers
4085  * in the ARC due to lock misses.
4086  *
4087  * When 'retry' is set to B_TRUE, the function will continually retry the
4088  * eviction until *all* evictable buffers have been removed from the
4089  * state. As a result, if concurrent insertions into the state are
4090  * allowed (e.g. if the ARC isn't shutting down), this function might
4091  * wind up in an infinite loop, continually trying to evict buffers.
4092  */
4093 static uint64_t
4094 arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type,
4095     boolean_t retry)
4096 {
4097         uint64_t evicted = 0;
4098 
4099         while (refcount_count(&state->arcs_esize[type]) != 0) {
4100                 evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type);
4101 
4102                 if (!retry)
4103                         break;
4104         }
4105 
4106         return (evicted);
4107 }
4108 
4109 /*
4110  * Evict the specified number of bytes from the state specified,
4111  * restricting eviction to the spa and type given. This function
4112  * prevents us from trying to evict more from a state's list than
4113  * is "evictable", and to skip evicting altogether when passed a
4114  * negative value for "bytes". In contrast, arc_evict_state() will
4115  * evict everything it can, when passed a negative value for "bytes".
4116  */
4117 static uint64_t
4118 arc_adjust_impl(arc_state_t *state, uint64_t spa, int64_t bytes,
4119     arc_buf_contents_t type)
4120 {
4121         int64_t delta;
4122 
4123         if (bytes > 0 && refcount_count(&state->arcs_esize[type]) > 0) {
4124                 delta = MIN(refcount_count(&state->arcs_esize[type]), bytes);
4125                 return (arc_evict_state(state, spa, delta, type));
4126         }
4127 
4128         return (0);
4129 }
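
     /*
      * For example (hypothetical numbers): if a caller asks to evict
      * bytes = 4 MB of metadata but only 1 MB of that state's metadata is
      * evictable (arcs_esize), delta is clamped to 1 MB before calling
      * arc_evict_state(); and if bytes <= 0, nothing is evicted at all,
      * unlike arc_evict_state() itself, where a negative value means
      * "evict everything".
      */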
4130 
4131 /*
4132  * Depending on the value of the adjust_ddt arg, evict either DDT (B_TRUE)
4133  * or metadata (B_FALSE) buffers.
4134  * Buffers are evicted from the cache such that arc_ddt_size is capped by
4135  * the arc_ddt_limit tunable, or arc_meta_used by the arc_meta_limit tunable.
4136  */
4137 static uint64_t
4138 arc_adjust_meta_or_ddt(boolean_t adjust_ddt)
4139 {
4140         uint64_t total_evicted = 0;
4141         int64_t target, over_limit;
4142         arc_buf_contents_t type;
4143 
4144         if (adjust_ddt) {
4145                 over_limit = arc_ddt_size - arc_ddt_limit;
4146                 type = ARC_BUFC_DDT;
4147         } else {
4148                 over_limit = arc_meta_used - arc_meta_limit;
4149                 type = ARC_BUFC_METADATA;
4150         }
4151 
4152         /*
4153          * If we're over the limit, we want to evict enough
4154          * to get back under the limit. We don't want to
4155          * evict so much that we drop the MRU below arc_p, though.
4156          * If we're over the limit by more than the MRU is over arc_p,
4157          * we evict some from the MRU here, and some from the MFU below.
4158          */
4159         target = MIN(over_limit,
4160             (int64_t)(refcount_count(&arc_anon->arcs_size) +
4161             refcount_count(&arc_mru->arcs_size) - arc_p));
4162 
4163         total_evicted += arc_adjust_impl(arc_mru, 0, target, type);
4164 
4165         over_limit = adjust_ddt ? arc_ddt_size - arc_ddt_limit :
4166             arc_meta_used - arc_meta_limit;
4167 
4168         /*
4169          * Similar to the above, we want to evict enough bytes to get us
4170          * below the limit, but not so much as to drop us below the
4171          * space allotted to the MFU (which is defined as arc_c - arc_p).
4172          */
4173         target = MIN(over_limit,
4174             (int64_t)(refcount_count(&arc_mfu->arcs_size) - (arc_c - arc_p)));
4175 
4176         total_evicted += arc_adjust_impl(arc_mfu, 0, target, type);
4177 
4178         return (total_evicted);
4179 }
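
     /*
      * Worked example with hypothetical numbers: suppose arc_meta_used is
      * 300 MB over arc_meta_limit, anon + MRU exceed arc_p by 100 MB, and
      * the MFU exceeds (arc_c - arc_p) by 500 MB.  The first pass evicts
      * MIN(300, 100) = 100 MB of metadata from the MRU; assuming all of it
      * counted against arc_meta_used, we are then 200 MB over the limit,
      * so the second pass evicts MIN(200, 500) = 200 MB of metadata from
      * the MFU.
      */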
4180 
4181 /*
4182  * Return the type of the oldest buffer in the given arc state
4183  *
4184  * This function selects a random sublist of each of the types
4185  * ARC_BUFC_DATA, ARC_BUFC_METADATA, and ARC_BUFC_DDT. The tails of
4186  * these sublists are compared, and the type containing the oldest
4187  * buffer is returned.
4188  */
4189 static arc_buf_contents_t
4190 arc_adjust_type(arc_state_t *state)
4191 {
4192         multilist_t *data_ml = state->arcs_list[ARC_BUFC_DATA];
4193         multilist_t *meta_ml = state->arcs_list[ARC_BUFC_METADATA];
4194         multilist_t *ddt_ml = state->arcs_list[ARC_BUFC_DDT];
4195         int data_idx = multilist_get_random_index(data_ml);
4196         int meta_idx = multilist_get_random_index(meta_ml);
4197         int ddt_idx = multilist_get_random_index(ddt_ml);
4198         multilist_sublist_t *data_mls;
4199         multilist_sublist_t *meta_mls;
4200         multilist_sublist_t *ddt_mls;
4201         arc_buf_contents_t type = ARC_BUFC_DATA; /* silence compiler warning */
4202         arc_buf_hdr_t *data_hdr;
4203         arc_buf_hdr_t *meta_hdr;
4204         arc_buf_hdr_t *ddt_hdr;
4205         clock_t oldest;
4206 
4207         /*
4208          * We keep the sublist lock until we're finished, to prevent
4209          * the headers from being destroyed via arc_evict_state().
4210          */
4211         data_mls = multilist_sublist_lock(data_ml, data_idx);
4212         meta_mls = multilist_sublist_lock(meta_ml, meta_idx);
4213         ddt_mls = multilist_sublist_lock(ddt_ml, ddt_idx);
4214 
4215         /*
4216          * These loops ensure that we skip any markers that
4217          * might be at the tail of the lists due to arc_evict_state().
4218          */
4219 
4220         for (data_hdr = multilist_sublist_tail(data_mls); data_hdr != NULL;
4221             data_hdr = multilist_sublist_prev(data_mls, data_hdr)) {
4222                 if (data_hdr->b_spa != 0)
4223                         break;
4224         }
4225 
4226         for (meta_hdr = multilist_sublist_tail(meta_mls); meta_hdr != NULL;
4227             meta_hdr = multilist_sublist_prev(meta_mls, meta_hdr)) {
4228                 if (meta_hdr->b_spa != 0)
4229                         break;
4230         }
4231 
4232         for (ddt_hdr = multilist_sublist_tail(ddt_mls); ddt_hdr != NULL;
4233             ddt_hdr = multilist_sublist_prev(ddt_mls, ddt_hdr)) {
4234                 if (ddt_hdr->b_spa != 0)
4235                         break;
4236         }
4237 
4238         if (data_hdr == NULL && meta_hdr == NULL && ddt_hdr == NULL) {
4239                 type = ARC_BUFC_DATA;
4240         } else if (data_hdr != NULL && meta_hdr != NULL && ddt_hdr != NULL) {
4241                 /* The headers can't be on the sublist without an L1 header */
4242                 ASSERT(HDR_HAS_L1HDR(data_hdr));
4243                 ASSERT(HDR_HAS_L1HDR(meta_hdr));
4244                 ASSERT(HDR_HAS_L1HDR(ddt_hdr));
4245 
4246                 oldest = data_hdr->b_l1hdr.b_arc_access;
4247                 type = ARC_BUFC_DATA;
4248                 if (oldest > meta_hdr->b_l1hdr.b_arc_access) {
4249                         oldest = meta_hdr->b_l1hdr.b_arc_access;
4250                         type = ARC_BUFC_METADATA;
4251                 }
4252                 if (oldest > ddt_hdr->b_l1hdr.b_arc_access) {
4253                         type = ARC_BUFC_DDT;
4254                 }
4255         } else if (data_hdr == NULL && ddt_hdr == NULL) {
4256                 ASSERT3P(meta_hdr, !=, NULL);
4257                 type = ARC_BUFC_METADATA;
4258         } else if (meta_hdr == NULL && ddt_hdr == NULL) {
4259                 ASSERT3P(data_hdr, !=, NULL);
4260                 type = ARC_BUFC_DATA;
4261         } else if (meta_hdr == NULL && data_hdr == NULL) {
4262                 ASSERT3P(ddt_hdr, !=, NULL);
4263                 type = ARC_BUFC_DDT;
4264         } else if (data_hdr != NULL && ddt_hdr != NULL) {
4265                 ASSERT3P(meta_hdr, ==, NULL);
4266 
4267                 /* The headers can't be on the sublist without an L1 header */
4268                 ASSERT(HDR_HAS_L1HDR(data_hdr));
4269                 ASSERT(HDR_HAS_L1HDR(ddt_hdr));
4270 
4271                 if (data_hdr->b_l1hdr.b_arc_access <
4272                     ddt_hdr->b_l1hdr.b_arc_access) {
4273                         type = ARC_BUFC_DATA;
4274                 } else {
4275                         type = ARC_BUFC_DDT;
4276                 }
4277         } else if (meta_hdr != NULL && ddt_hdr != NULL) {
4278                 ASSERT3P(data_hdr, ==, NULL);
4279 
4280                 /* The headers can't be on the sublist without an L1 header */
4281                 ASSERT(HDR_HAS_L1HDR(meta_hdr));
4282                 ASSERT(HDR_HAS_L1HDR(ddt_hdr));
4283 
4284                 if (meta_hdr->b_l1hdr.b_arc_access <
4285                     ddt_hdr->b_l1hdr.b_arc_access) {
4286                         type = ARC_BUFC_METADATA;
4287                 } else {
4288                         type = ARC_BUFC_DDT;
4289                 }
4290         } else if (meta_hdr != NULL && data_hdr != NULL) {
4291                 ASSERT3P(ddt_hdr, ==, NULL);
4292 
4293                 /* The headers can't be on the sublist without an L1 header */
4294                 ASSERT(HDR_HAS_L1HDR(data_hdr));
4295                 ASSERT(HDR_HAS_L1HDR(meta_hdr));
4296 
4297                 if (data_hdr->b_l1hdr.b_arc_access <
4298                     meta_hdr->b_l1hdr.b_arc_access) {
4299                         type = ARC_BUFC_DATA;
4300                 } else {
4301                         type = ARC_BUFC_METADATA;
4302                 }
4303         } else {
4304                 /* should never get here */
4305                 ASSERT(0);
4306         }
4307 
4308         multilist_sublist_unlock(ddt_mls);
4309         multilist_sublist_unlock(meta_mls);
4310         multilist_sublist_unlock(data_mls);
4311 
4312         return (type);
4313 }
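
     /*
      * The case analysis above reduces to a simple rule, sketched here
      * (not compiled): treat a missing tail as newer than anything, then
      * return the type whose surviving tail has the smallest (oldest)
      * b_arc_access:
      *
      *	oldest = <larger than any clock value>; type = ARC_BUFC_DATA;
      *	for each pair (t, hdr) in { (ARC_BUFC_DATA, data_hdr),
      *	    (ARC_BUFC_METADATA, meta_hdr), (ARC_BUFC_DDT, ddt_hdr) }:
      *		if (hdr != NULL && hdr->b_l1hdr.b_arc_access < oldest)
      *			oldest = hdr->b_l1hdr.b_arc_access, type = t;
      */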
4314 
4315 /*
4316  * Evict buffers from the cache, such that arc_size is capped by arc_c.
4317  */
4318 static uint64_t
4319 arc_adjust(void)
4320 {
4321         uint64_t total_evicted = 0;
4322         uint64_t bytes;
4323         int64_t target;
4324 
4325         /*
4326          * If we're over arc_meta_limit, we want to correct that before
4327          * potentially evicting data buffers below.
4328          */
4329         total_evicted += arc_adjust_meta_or_ddt(B_FALSE);
4330 
4331         /*
4332          * If we're over arc_ddt_limit, we want to correct that before
4333          * potentially evicting data buffers below.
4334          */
4335         total_evicted += arc_adjust_meta_or_ddt(B_TRUE);
4336 
4337         /*
4338          * Adjust MRU size
4339          *
4340          * If we're over the target cache size, we want to evict enough
4341          * from the list to get back to our target size. We don't want
4342          * to evict too much from the MRU, such that it drops below
4343          * arc_p. So, if we're over our target cache size more than
4344          * the MRU is over arc_p, we'll evict enough to get back to
4345          * arc_p here, and then evict more from the MFU below.
4346          */
4347         target = MIN((int64_t)(arc_size - arc_c),
4348             (int64_t)(refcount_count(&arc_anon->arcs_size) +
4349             refcount_count(&arc_mru->arcs_size) + arc_meta_used - arc_p));
4350 
4351         /*
4352          * If we're below arc_meta_min, always prefer to evict data.
4353          * Otherwise, try to satisfy the requested number of bytes to
4354          * evict from the type which contains older buffers; in an
4355          * effort to keep newer buffers in the cache regardless of their
4356          * type. If we cannot satisfy the number of bytes from this
4357          * type, spill over into the next type.
4358          */
4359         if (arc_adjust_type(arc_mru) == ARC_BUFC_METADATA &&
4360             arc_meta_used > arc_meta_min) {
4361                 bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
4362                 total_evicted += bytes;
4363 
4364                 /*
4365                  * If we couldn't evict our target number of bytes from
4366                  * metadata, we try to get the rest from data.
4367                  */
4368                 target -= bytes;
4369 
4370                 bytes += arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA);
4371                 total_evicted += bytes;
4372         } else {
4373                 bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA);
4374                 total_evicted += bytes;
4375 
4376                 /*
4377                  * If we couldn't evict our target number of bytes from
4378                  * data, we try to get the rest from metadata.
4379                  */
4380                 target -= bytes;
4381 
4382                 bytes += arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
4383                 total_evicted += bytes;
4384         }
4385 
4386         /*
4387          * If we couldn't evict our target number of bytes from
4388          * data and metadata, we try to get the rest from ddt.
4389          */
4390         target -= bytes;
4391         total_evicted +=
4392             arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DDT);
4393 
4394         /*
4395          * Adjust MFU size
4396          *
4397          * Now that we've tried to evict enough from the MRU to get its
4398          * size back to arc_p, if we're still above the target cache
4399          * size, we evict the rest from the MFU.
4400          */
4401         target = arc_size - arc_c;
4402 
4403         if (arc_adjust_type(arc_mfu) == ARC_BUFC_METADATA &&
4404             arc_meta_used > arc_meta_min) {
4405                 bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
4406                 total_evicted += bytes;
4407 
4408                 /*
4409                  * If we couldn't evict our target number of bytes from
4410                  * metadata, we try to get the rest from data.
4411                  */
4412                 target -= bytes;
4413 
4414                 bytes += arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
4415                 total_evicted += bytes;
4416         } else {
4417                 bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
4418                 total_evicted += bytes;
4419 
4420                 /*
4421                  * If we couldn't evict our target number of bytes from
4422                  * data, we try to get the rest from metadata.
4423                  */
4424                 target -= bytes;
4425 
4426                 bytes += arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
4427                 total_evicted += bytes;
4428         }
4429 
4430         /*
4431          * If we couldn't evict our target number of bytes from
4432          * data and metadata, we try to get the rest from ddt.
4433          */
4434         target -= bytes;
4435         total_evicted +=
4436             arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DDT);
4437 
4438         /*
4439          * Adjust ghost lists
4440          *
4441          * In addition to the above, the ARC also defines target values
4442          * for the ghost lists. The sum of the mru list and mru ghost
4443          * list should never exceed the target size of the cache, and
4444          * the sum of the mru list, mfu list, mru ghost list, and mfu
4445          * ghost list should never exceed twice the target size of the
4446          * cache. The following logic enforces these limits on the ghost
4447          * caches, and evicts from them as needed.
4448          */
4449         target = refcount_count(&arc_mru->arcs_size) +
4450             refcount_count(&arc_mru_ghost->arcs_size) - arc_c;
4451 
4452         bytes = arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA);
4453         total_evicted += bytes;
4454 
4455         target -= bytes;
4456 
4457         bytes += arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_METADATA);
4458         total_evicted += bytes;
4459 
4460         target -= bytes;
4461 
4462         total_evicted +=
4463             arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_DDT);
4464 
4465         /*
4466          * We assume the sum of the mru list and mfu list is less than
4467          * or equal to arc_c (we enforced this above), which means we
4468          * can use the simpler of the two equations below:
4469          *
4470          *      mru + mfu + mru ghost + mfu ghost <= 2 * arc_c
4471          *                  mru ghost + mfu ghost <= arc_c
4472          */
4473         target = refcount_count(&arc_mru_ghost->arcs_size) +
4474             refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;
4475 
4476         bytes = arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA);
4477         total_evicted += bytes;
4478 
4479         target -= bytes;
4480 
4481         bytes += arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_METADATA);
4482         total_evicted += bytes;
4483 
4484         target -= bytes;
4485 
4486         total_evicted +=
4487             arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DDT);
4488 
4489         return (total_evicted);
4490 }
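
     /*
      * Ghost-list targets, with hypothetical sizes: if arc_c = 8 GB,
      * MRU = 5 GB and MRU-ghost = 4 GB, the first target is
      * 5 + 4 - 8 = 1 GB evicted from the MRU ghost list.  If that leaves
      * MRU-ghost = 3 GB with MFU-ghost = 6 GB, the second target is
      * 3 + 6 - 8 = 1 GB evicted from the MFU ghost list, restoring both
      * mru + mru-ghost <= c and mru-ghost + mfu-ghost <= c.
      */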
4491 
4492 typedef struct arc_async_flush_data {
4493         uint64_t        aaf_guid;
4494         boolean_t       aaf_retry;
4495 } arc_async_flush_data_t;
4496 
4497 static taskq_t *arc_flush_taskq;
4498 
4499 static void
4500 arc_flush_impl(uint64_t guid, boolean_t retry)
4501 {
4502         arc_buf_contents_t arcs;
4503 
4504         for (arcs = ARC_BUFC_DATA; arcs < ARC_BUFC_NUMTYPES; ++arcs) {
4505                 (void) arc_flush_state(arc_mru, guid, arcs, retry);
4506                 (void) arc_flush_state(arc_mfu, guid, arcs, retry);
4507                 (void) arc_flush_state(arc_mru_ghost, guid, arcs, retry);
4508                 (void) arc_flush_state(arc_mfu_ghost, guid, arcs, retry);
4509         }
4510 }
4511 
4512 static void
4513 arc_flush_task(void *arg)
4514 {
4515         arc_async_flush_data_t *aaf = (arc_async_flush_data_t *)arg;
4516         arc_flush_impl(aaf->aaf_guid, aaf->aaf_retry);
4517         kmem_free(aaf, sizeof (arc_async_flush_data_t));
4518 }
4519 
4520 boolean_t zfs_fastflush = B_TRUE;
4521 
4522 void
4523 arc_flush(spa_t *spa, boolean_t retry)
4524 {
4525         uint64_t guid = 0;
4526         boolean_t async_flush = (spa != NULL ? zfs_fastflush : FALSE);
4527         arc_async_flush_data_t *aaf = NULL;
4528 
4529         /*
4530          * If retry is B_TRUE, a spa must not be specified since we have
4531          * no good way to determine if all of a spa's buffers have been
4532          * evicted from an arc state.
4533          */
4534         ASSERT(!retry || spa == NULL);
4535 
4536         if (spa != NULL) {
4537                 guid = spa_load_guid(spa);
4538                 if (async_flush) {
4539                         aaf = kmem_alloc(sizeof (arc_async_flush_data_t),
4540                             KM_SLEEP);
4541                         aaf->aaf_guid = guid;
4542                         aaf->aaf_retry = retry;
4543                 }
4544         }
4545 
4546         /*
4547          * Try to flush the spa's remaining ARC (ghost) buffers
4548          * asynchronously while the pool is being closed.
4549          * An ARC buffer is bound to a spa only by its guid, so a buffer can
4550          * exist even after the pool is gone. If dispatching the asynchronous
4551          * flush fails, we fall back to a regular (synchronous) flush.
4552          * NOTE: It is not a problem if an asynchronous flush has not finished
4553          * by the time the pool is imported again, even if the guids before
4554          * and after export/import are the same: we can evict only
4555          * unreferenced buffers; all others are skipped.
4556          */
4557         if (!async_flush || (taskq_dispatch(arc_flush_taskq, arc_flush_task,
4558             aaf, TQ_NOSLEEP) == NULL)) {
4559                 arc_flush_impl(guid, retry);
4560                 if (async_flush)
4561                         kmem_free(aaf, sizeof (arc_async_flush_data_t));
4562         }
4563 }
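
     /*
      * Hypothetical usage sketch: a pool-teardown path might call
      * arc_flush(spa, B_FALSE) to opportunistically drop that pool's
      * buffers (dispatched to arc_flush_taskq when zfs_fastflush is set),
      * while a caller that must guarantee an empty cache would call
      * arc_flush(NULL, B_TRUE) and synchronously retry until every
      * evictable buffer is gone.
      */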
4564 
4565 void
4566 arc_shrink(int64_t to_free)
4567 {
4568         if (arc_c > arc_c_min) {
4569 
4570                 if (arc_c > arc_c_min + to_free)
4571                         atomic_add_64(&arc_c, -to_free);
4572                 else
4573                         arc_c = arc_c_min;
4574 
4575                 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
4576                 if (arc_c > arc_size)
4577                         arc_c = MAX(arc_size, arc_c_min);
4578                 if (arc_p > arc_c)
4579                         arc_p = (arc_c >> 1);
4580                 ASSERT(arc_c >= arc_c_min);
4581                 ASSERT((int64_t)arc_p >= 0);
4582         }
4583 
4584         if (arc_size > arc_c)
4585                 (void) arc_adjust();
4586 }
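
     /*
      * Example with hypothetical values, assuming arc_shrink_shift = 7:
      * arc_shrink(1 GB) on an 8 GB arc_c lowers arc_c to 7 GB (clamped at
      * arc_c_min if that would undershoot) and trims arc_p by
      * arc_p >> 7, roughly 0.8%.  If arc_size now exceeds the new arc_c,
      * arc_adjust() is invoked to evict the difference.
      */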
4587 
4588 typedef enum free_memory_reason_t {
4589         FMR_UNKNOWN,
4590         FMR_NEEDFREE,
4591         FMR_LOTSFREE,
4592         FMR_SWAPFS_MINFREE,
4593         FMR_PAGES_PP_MAXIMUM,
4594         FMR_HEAP_ARENA,
4595         FMR_ZIO_ARENA,
4596 } free_memory_reason_t;
4597 
4598 int64_t last_free_memory;
4599 free_memory_reason_t last_free_reason;
4600 
4601 /*
4602  * Additional reserve of pages for pp_reserve.
4603  */
4604 int64_t arc_pages_pp_reserve = 64;
4605 
4606 /*
4607  * Additional reserve of pages for swapfs.
4608  */
4609 int64_t arc_swapfs_reserve = 64;
4610 
4611 /*
4612  * Return the amount of memory that can be consumed before reclaim will be
4613  * needed.  A positive value means there is sufficient free memory; a
4614  * negative value indicates the amount of memory that needs to be freed up.
4615  */
4616 static int64_t
4617 arc_available_memory(void)
4618 {
4619         int64_t lowest = INT64_MAX;
4620         int64_t n;
4621         free_memory_reason_t r = FMR_UNKNOWN;
4622 
4623 #ifdef _KERNEL
4624         if (needfree > 0) {
4625                 n = PAGESIZE * (-needfree);
4626                 if (n < lowest) {
4627                         lowest = n;
4628                         r = FMR_NEEDFREE;
4629                 }
4630         }
4631 
4632         /*
4633          * check that we're out of range of the pageout scanner.  It starts to
4634          * schedule paging if freemem is less than lotsfree and needfree.
4635          * lotsfree is the high-water mark for pageout, and needfree is the
4636          * number of needed free pages.  We add extra pages here to make sure
4637          * the scanner doesn't start up while we're freeing memory.
4638          */
4639         n = PAGESIZE * (freemem - lotsfree - needfree - desfree);
4640         if (n < lowest) {
4641                 lowest = n;
4642                 r = FMR_LOTSFREE;
4643         }
4644 
4645         /*
4646          * check to make sure that swapfs has enough space so that anon
4647          * reservations can still succeed. anon_resvmem() checks that the
4648          * availrmem is greater than swapfs_minfree, and the number of reserved
4649          * swap pages.  We also add a bit of extra here just to prevent
4650          * circumstances from getting really dire.
4651          */
4652         n = PAGESIZE * (availrmem - swapfs_minfree - swapfs_reserve -
4653             desfree - arc_swapfs_reserve);
4654         if (n < lowest) {
4655                 lowest = n;
4656                 r = FMR_SWAPFS_MINFREE;
4657         }
4658 
4659 
4660         /*
4661          * Check that we have enough availrmem that memory locking (e.g., via
4662          * mlock(3C) or memcntl(2)) can still succeed.  (pages_pp_maximum
4663          * stores the number of pages that cannot be locked; when availrmem
4664          * drops below pages_pp_maximum, page locking mechanisms such as
4665          * page_pp_lock() will fail.)
4666          */
4667         n = PAGESIZE * (availrmem - pages_pp_maximum -
4668             arc_pages_pp_reserve);
4669         if (n < lowest) {
4670                 lowest = n;
4671                 r = FMR_PAGES_PP_MAXIMUM;
4672         }
4673 
4674 #if defined(__i386)
4675         /*
4676          * If we're on an i386 platform, it's possible that we'll exhaust the
4677          * kernel heap space before we ever run out of available physical
4678          * memory.  Most checks of the size of the heap_area compare against
4679          * tune.t_minarmem, which is the minimum available real memory that we
4680          * can have in the system.  However, this is generally fixed at 25 pages
4681          * which is so low that it's useless.  In this comparison, we seek to
4682          * calculate the total heap-size, and reclaim if more than 3/4ths of the
4683          * heap is allocated.  (Or, in the calculation, if less than 1/4th is
4684          * free)
4685          */
4686         n = (int64_t)vmem_size(heap_arena, VMEM_FREE) -
4687             (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2);
4688         if (n < lowest) {
4689                 lowest = n;
4690                 r = FMR_HEAP_ARENA;
4691         }
4692 #endif
4693 
4694         /*
4695          * If zio data pages are being allocated out of a separate heap segment,
4696          * then enforce that the size of available vmem for this arena remains
4697          * above about 1/4th (1/(2^arc_zio_arena_free_shift)) free.
4698          *
4699          * Note that reducing the arc_zio_arena_free_shift keeps more virtual
4700          * memory (in the zio_arena) free, which can avoid memory
4701          * fragmentation issues.
4702          */
4703         if (zio_arena != NULL) {
4704                 n = (int64_t)vmem_size(zio_arena, VMEM_FREE) -
4705                     (vmem_size(zio_arena, VMEM_ALLOC) >>
4706                     arc_zio_arena_free_shift);
4707                 if (n < lowest) {
4708                         lowest = n;
4709                         r = FMR_ZIO_ARENA;
4710                 }
4711         }
4712 #else
4713         /* Every 100 calls, free a small amount */
4714         if (spa_get_random(100) == 0)
4715                 lowest = -1024;
4716 #endif
4717 
4718         last_free_memory = lowest;
4719         last_free_reason = r;
4720 
4721         return (lowest);
4722 }
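
     /*
      * Worked example (hypothetical counts, 4 KB pages): with
      * freemem = 10000, lotsfree = 4000, needfree = 0 and desfree = 2000,
      * the FMR_LOTSFREE term is 4096 * (10000 - 4000 - 0 - 2000) = 16 MB.
      * If every other term is larger, arc_available_memory() returns
      * +16 MB and records FMR_LOTSFREE in last_free_reason; a negative
      * return instead tells the reclaim thread how much must be freed.
      */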
4723 
4724 
4725 /*
4726  * Determine if the system is under memory pressure and is asking
4727  * to reclaim memory. A return value of B_TRUE indicates that the system
4728  * is under memory pressure and that the arc should adjust accordingly.
4729  */
4730 static boolean_t
4731 arc_reclaim_needed(void)
4732 {
4733         return (arc_available_memory() < 0);
4734 }
4735 
4736 static void
4737 arc_kmem_reap_now(void)
4738 {
4739         size_t                  i;
4740         kmem_cache_t            *prev_cache = NULL;
4741         kmem_cache_t            *prev_data_cache = NULL;
4742         extern kmem_cache_t     *zio_buf_cache[];
4743         extern kmem_cache_t     *zio_data_buf_cache[];
4744         extern kmem_cache_t     *range_seg_cache;
4745         extern kmem_cache_t     *abd_chunk_cache;
4746 
4747 #ifdef _KERNEL
4748         if (arc_meta_used >= arc_meta_limit || arc_ddt_size >= arc_ddt_limit) {
4749                 /*
4750                  * We are exceeding our meta-data or DDT cache limit.
4751                  * Purge some DNLC entries to release holds on meta-data/DDT.
4752                  */
4753                 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
4754         }
4755 #if defined(__i386)
4756         /*
4757          * Reclaim unused memory from all kmem caches.
4758          */
4759         kmem_reap();
4760 #endif
4761 #endif
4762 
4763         /*
4764          * If a kmem reap is already active, don't schedule more.  We must
4765          * check for this because kmem_cache_reap_soon() won't actually
4766          * block on the cache being reaped (this is to prevent callers from
4767          * becoming implicitly blocked by a system-wide kmem reap -- which,
4768          * on a system with many, many full magazines, can take minutes).
4769          */
4770         if (kmem_cache_reap_active())
4771                 return;
4772 
4773         for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
4774                 if (zio_buf_cache[i] != prev_cache) {
4775                         prev_cache = zio_buf_cache[i];
4776                         kmem_cache_reap_soon(zio_buf_cache[i]);
4777                 }
4778                 if (zio_data_buf_cache[i] != prev_data_cache) {
4779                         prev_data_cache = zio_data_buf_cache[i];
4780                         kmem_cache_reap_soon(zio_data_buf_cache[i]);
4781                 }
4782         }
4783         kmem_cache_reap_soon(abd_chunk_cache);
4784         kmem_cache_reap_soon(buf_cache);
4785         kmem_cache_reap_soon(hdr_full_cache);
4786         kmem_cache_reap_soon(hdr_l2only_cache);
4787         kmem_cache_reap_soon(range_seg_cache);
4788 
4789         if (zio_arena != NULL) {
4790                 /*
4791                  * Ask the vmem arena to reclaim unused memory from its
4792                  * quantum caches.
4793                  */
4794                 vmem_qcache_reap(zio_arena);
4795         }
4796 }
4797 
4798 /*
4799  * Threads can block in arc_get_data_impl() waiting for this thread to evict
4800  * enough data and signal them to proceed. When this happens, the threads in
4801  * arc_get_data_impl() are sleeping while holding the hash lock for their
4802  * particular arc header. Thus, we must be careful to never sleep on a
4803  * hash lock in this thread. This is to prevent the following deadlock:
4804  *
4805  *  - Thread A sleeps on CV in arc_get_data_impl() holding hash lock "L",
4806  *    waiting for the reclaim thread to signal it.
4807  *
4808  *  - arc_reclaim_thread() tries to acquire hash lock "L" using mutex_enter,
4809  *    fails, and goes to sleep forever.
4810  *
4811  * This possible deadlock is avoided by always acquiring a hash lock
4812  * using mutex_tryenter() from arc_reclaim_thread().
4813  */
4814 /* ARGSUSED */
4815 static void
4816 arc_reclaim_thread(void *unused)
4817 {
4818         hrtime_t                growtime = 0;
4819         hrtime_t                kmem_reap_time = 0;
4820         callb_cpr_t             cpr;
4821 
4822         CALLB_CPR_INIT(&cpr, &arc_reclaim_lock, callb_generic_cpr, FTAG);
4823 
4824         mutex_enter(&arc_reclaim_lock);
4825         while (!arc_reclaim_thread_exit) {
4826                 uint64_t evicted = 0;
4827 
4828                 /*
4829                  * This is necessary in order for the mdb ::arc dcmd to
4830                  * show up to date information. Since the ::arc command
4831                  * does not call the kstat's update function, without
4832                  * this call, the command may show stale stats for the
4833                  * anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even
4834                  * with this change, the data might be up to 1 second
4835                  * out of date; but that should suffice. The arc_state_t
4836                  * structures can be queried directly if more accurate
4837                  * information is needed.
4838                  */
4839                 if (arc_ksp != NULL)
4840                         arc_ksp->ks_update(arc_ksp, KSTAT_READ);
4841 
4842                 mutex_exit(&arc_reclaim_lock);
4843 
4844                 /*
4845                  * We call arc_adjust() before (possibly) calling
4846                  * arc_kmem_reap_now(), so that we can wake up
4847                  * arc_get_data_impl() sooner.
4848                  */
4849                 evicted = arc_adjust();
4850 
4851                 int64_t free_memory = arc_available_memory();
4852                 if (free_memory < 0) {
4853                         hrtime_t curtime = gethrtime();
4854                         arc_no_grow = B_TRUE;
4855                         arc_warm = B_TRUE;
4856 
4857                         /*
4858                          * Wait at least zfs_grow_retry (default 60) seconds
4859                          * before considering growing.
4860                          */
4861                         growtime = curtime + SEC2NSEC(arc_grow_retry);
4862 
4863                         /*
4864                          * Wait at least arc_kmem_cache_reap_retry_ms
4865                          * between arc_kmem_reap_now() calls. Without
4866                          * this check it is possible to end up in a
4867                          * situation where we spend lots of time
4868                          * reaping caches, while we're near arc_c_min.
4869                          */
4870                         if (curtime >= kmem_reap_time) {
4871                                 arc_kmem_reap_now();
4872                                 kmem_reap_time = gethrtime() +
4873                                     MSEC2NSEC(arc_kmem_cache_reap_retry_ms);
4874                         }
4875 
4876                         /*
4877                          * If we are still low on memory, shrink the ARC
4878                          * so that we have arc_shrink_min free space.
4879                          */
4880                         free_memory = arc_available_memory();
4881 
4882                         int64_t to_free =
4883                             (arc_c >> arc_shrink_shift) - free_memory;
4884                         if (to_free > 0) {
4885 #ifdef _KERNEL
4886                                 to_free = MAX(to_free, ptob(needfree));
4887 #endif
4888                                 arc_shrink(to_free);
4889                         }
4890                 } else if (free_memory < arc_c >> arc_no_grow_shift) {
4891                         arc_no_grow = B_TRUE;
4892                 } else if (gethrtime() >= growtime) {
4893                         arc_no_grow = B_FALSE;
4894                 }
4895 
4896                 mutex_enter(&arc_reclaim_lock);
4897 
4898                 /*
4899                  * If evicted is zero, we couldn't evict anything via
4900                  * arc_adjust(). This could be due to hash lock
4901                  * collisions, but more likely due to the majority of
4902                  * arc buffers being unevictable. Therefore, even if
4903                  * arc_size is above arc_c, another pass is unlikely to
4904                  * be helpful and could potentially cause us to enter an
4905                  * infinite loop.
4906                  */
4907                 if (arc_size <= arc_c || evicted == 0) {
4908                         /*
4909                          * We're either no longer overflowing, or we
4910                          * can't evict anything more, so we should wake
4911                          * up any threads before we go to sleep.
4912                          */
4913                         cv_broadcast(&arc_reclaim_waiters_cv);
4914 
4915                         /*
4916                          * Block until signaled, or after one second (we
4917                          * might need to perform arc_kmem_reap_now()
4918                          * even if we aren't being signalled)
4919                          */
4920                         CALLB_CPR_SAFE_BEGIN(&cpr);
4921                         (void) cv_timedwait_hires(&arc_reclaim_thread_cv,
4922                             &arc_reclaim_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
4923                         CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_lock);
4924                 }
4925         }
4926 
4927         arc_reclaim_thread_exit = B_FALSE;
4928         cv_broadcast(&arc_reclaim_thread_cv);
4929         CALLB_CPR_EXIT(&cpr);               /* drops arc_reclaim_lock */
4930         thread_exit();
4931 }
4932 
4933 /*
4934  * Adapt arc info given the number of bytes we are trying to add and
4935  * the state that we are coming from.  This function is only called
4936  * when we are adding new content to the cache.
4937  */
4938 static void
4939 arc_adapt(int bytes, arc_state_t *state)
4940 {
4941         int mult;
4942         uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
4943         int64_t mrug_size = refcount_count(&arc_mru_ghost->arcs_size);
4944         int64_t mfug_size = refcount_count(&arc_mfu_ghost->arcs_size);
4945 
4946         if (state == arc_l2c_only)
4947                 return;
4948 
4949         ASSERT(bytes > 0);
4950         /*
4951          * Adapt the target size of the MRU list:
4952          *      - if we just hit in the MRU ghost list, then increase
4953          *        the target size of the MRU list.
4954          *      - if we just hit in the MFU ghost list, then increase
4955          *        the target size of the MFU list by decreasing the
4956          *        target size of the MRU list.
4957          */
4958         if (state == arc_mru_ghost) {
4959                 mult = (mrug_size >= mfug_size) ? 1 : (mfug_size / mrug_size);
4960                 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
4961 
4962                 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
4963         } else if (state == arc_mfu_ghost) {
4964                 uint64_t delta;
4965 
4966                 mult = (mfug_size >= mrug_size) ? 1 : (mrug_size / mfug_size);
4967                 mult = MIN(mult, 10);
4968 
4969                 delta = MIN(bytes * mult, arc_p);
4970                 arc_p = MAX(arc_p_min, arc_p - delta);
4971         }
4972         ASSERT((int64_t)arc_p >= 0);
4973 
4974         if (arc_reclaim_needed()) {
4975                 cv_signal(&arc_reclaim_thread_cv);
4976                 return;
4977         }
4978 
4979         if (arc_no_grow)
4980                 return;
4981 
4982         if (arc_c >= arc_c_max)
4983                 return;
4984 
4985         /*
4986          * If we're within (2 * maxblocksize) bytes of the target
4987          * cache size, increment the target cache size
4988          */
4989         if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
4990                 atomic_add_64(&arc_c, (int64_t)bytes);
4991                 if (arc_c > arc_c_max)
4992                         arc_c = arc_c_max;
4993                 else if (state == arc_anon)
4994                         atomic_add_64(&arc_p, (int64_t)bytes);
4995                 if (arc_p > arc_c)
4996                         arc_p = arc_c;
4997         }
4998         ASSERT((int64_t)arc_p >= 0);
4999 }
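
     /*
      * Example with hypothetical sizes: on a 4 KB hit in the MRU ghost
      * list with mrug_size = 1 GB and mfug_size = 3 GB, mult = 3 and
      * arc_p grows by 12 KB (capped at arc_c - arc_p_min).  A hit in the
      * MFU ghost list with the ratio reversed shrinks arc_p by
      * MIN(12 KB, arc_p) instead, never dropping below arc_p_min.
      */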
5000 
5001 /*
5002  * Check if arc_size has grown past our upper threshold, determined by
5003  * zfs_arc_overflow_shift.
5004  */
5005 static boolean_t
5006 arc_is_overflowing(void)
5007 {
5008         /* Always allow at least one block of overflow */
5009         uint64_t overflow = MAX(SPA_MAXBLOCKSIZE,
5010             arc_c >> zfs_arc_overflow_shift);
5011 
5012         return (arc_size >= arc_c + overflow);
5013 }
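
     /*
      * For example, assuming zfs_arc_overflow_shift = 8 and a 16 MB
      * SPA_MAXBLOCKSIZE: with arc_c = 4 GB the allowed overflow is
      * MAX(16 MB, 4 GB >> 8) = 16 MB, so callers of arc_get_data_impl()
      * begin to block once arc_size reaches 4 GB + 16 MB.
      */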
5014 
5015 static abd_t *
5016 arc_get_data_abd(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
5017 {
5018         arc_buf_contents_t type = arc_buf_type(hdr);
5019 
5020         arc_get_data_impl(hdr, size, tag);
5021         if (type == ARC_BUFC_METADATA || type == ARC_BUFC_DDT) {
5022                 return (abd_alloc(size, B_TRUE));
5023         } else {
5024                 ASSERT(type == ARC_BUFC_DATA);
5025                 return (abd_alloc(size, B_FALSE));
5026         }
5027 }
5028 
5029 static void *
5030 arc_get_data_buf(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
5031 {
5032         arc_buf_contents_t type = arc_buf_type(hdr);
5033 
5034         arc_get_data_impl(hdr, size, tag);
5035         if (type == ARC_BUFC_METADATA || type == ARC_BUFC_DDT) {
5036                 return (zio_buf_alloc(size));
5037         } else {
5038                 ASSERT(type == ARC_BUFC_DATA);
5039                 return (zio_data_buf_alloc(size));
5040         }
5041 }
5042 
5043 /*
5044  * Allocate a block and return it to the caller. If we are hitting the
5045  * hard limit for the cache size, we must sleep, waiting for the eviction
5046  * thread to catch up. If we're past the target size but below the hard
5047  * limit, we'll only signal the reclaim thread and continue on.
5048  */
5049 static void
5050 arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
5051 {
5052         arc_state_t *state = hdr->b_l1hdr.b_state;
5053         arc_buf_contents_t type = arc_buf_type(hdr);
5054 
5055         arc_adapt(size, state);
5056 
5057         /*
5058          * If arc_size is currently overflowing, and has grown past our
5059          * upper limit, we must be adding data faster than the evict
5060          * thread can evict. Thus, to ensure we don't compound the
5061          * problem by adding more data and forcing arc_size to grow even
5062          * further past its target size, we halt and wait for the
5063          * eviction thread to catch up.
5064          *
5065          * It's also possible that the reclaim thread is unable to evict
5066          * enough buffers to get arc_size below the overflow limit (e.g.
5067          * due to buffers being un-evictable, or hash lock collisions).
5068          * In this case, we want to proceed regardless if we're
5069          * overflowing; thus we don't use a while loop here.
5070          */
5071         if (arc_is_overflowing()) {
5072                 mutex_enter(&arc_reclaim_lock);
5073 
5074                 /*
5075                  * Now that we've acquired the lock, we may no longer be
5076                  * over the overflow limit, so let's check.
5077                  *
5078                  * We're ignoring the case of spurious wake ups. If that
5079                  * were to happen, it'd let this thread consume an ARC
5080                  * buffer before it should have (i.e. before we're under
5081                  * the overflow limit and were signalled by the reclaim
5082                  * thread). As long as that is a rare occurrence, it
5083                  * shouldn't cause any harm.
5084                  */
5085                 if (arc_is_overflowing()) {
5086                         cv_signal(&arc_reclaim_thread_cv);
5087                         cv_wait(&arc_reclaim_waiters_cv, &arc_reclaim_lock);
5088                 }
5089 
5090                 mutex_exit(&arc_reclaim_lock);
5091         }
5092 
5093         VERIFY3U(hdr->b_type, ==, type);
5094         if (type == ARC_BUFC_DDT) {
5095                 arc_space_consume(size, ARC_SPACE_DDT);
5096         } else if (type == ARC_BUFC_METADATA) {
5097                 arc_space_consume(size, ARC_SPACE_META);
5098         } else {
5099                 arc_space_consume(size, ARC_SPACE_DATA);
5100         }
5101 
5102         /*
5103          * Update the state size.  Note that ghost states have a
5104          * "ghost size" and so don't need to be updated.
5105          */
5106         if (!GHOST_STATE(state)) {
5107 
5108                 (void) refcount_add_many(&state->arcs_size, size, tag);
5109 
5110                 /*
5111                  * If this is reached via arc_read, the link is
5112                  * protected by the hash lock. If reached via
5113                  * arc_buf_alloc, the header should not be accessed by
5114                  * any other thread. And, if reached via arc_read_done,
5115                  * the hash lock will protect it if it's found in the
5116                  * hash table; otherwise no other thread should be
5117                  * trying to [add|remove]_reference it.
5118                  */
5119                 if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
5120                         ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
5121                         (void) refcount_add_many(&state->arcs_esize[type],
5122                             size, tag);
5123                 }
5124 
5125                 /*
5126                  * If we are growing the cache, and we are adding anonymous
5127                  * data, and we have outgrown arc_p, update arc_p
5128                  */
5129                 if (arc_size < arc_c && hdr->b_l1hdr.b_state == arc_anon &&
5130                     (refcount_count(&arc_anon->arcs_size) +
5131                     refcount_count(&arc_mru->arcs_size) > arc_p))
5132                         arc_p = MIN(arc_c, arc_p + size);
5133         }
5134 }
5135 
5136 static void
5137 arc_free_data_abd(arc_buf_hdr_t *hdr, abd_t *abd, uint64_t size, void *tag)
5138 {
5139         arc_free_data_impl(hdr, size, tag);
5140         abd_free(abd);
5141 }
5142 
5143 static void
5144 arc_free_data_buf(arc_buf_hdr_t *hdr, void *buf, uint64_t size, void *tag)
5145 {
5146         arc_buf_contents_t type = arc_buf_type(hdr);
5147 
5148         arc_free_data_impl(hdr, size, tag);
5149         if (type == ARC_BUFC_METADATA || type == ARC_BUFC_DDT) {
5150                 zio_buf_free(buf, size);
5151         } else {
5152                 ASSERT(type == ARC_BUFC_DATA);
5153                 zio_data_buf_free(buf, size);
5154         }
5155 }
5156 
5157 /*
5158  * Free the arc data buffer.
5159  */
5160 static void
5161 arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
5162 {
5163         arc_state_t *state = hdr->b_l1hdr.b_state;
5164         arc_buf_contents_t type = arc_buf_type(hdr);
5165 
5166         /* protected by hash lock, if in the hash table */
5167         if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
5168                 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
5169                 ASSERT(state != arc_anon && state != arc_l2c_only);
5170 
5171                 (void) refcount_remove_many(&state->arcs_esize[type],
5172                     size, tag);
5173         }
5174         (void) refcount_remove_many(&state->arcs_size, size, tag);
5175 
5176         VERIFY3U(hdr->b_type, ==, type);
5177         if (type == ARC_BUFC_DDT) {
5178                 arc_space_return(size, ARC_SPACE_DDT);
5179         } else if (type == ARC_BUFC_METADATA) {
5180                 arc_space_return(size, ARC_SPACE_META);
5181         } else {
5182                 ASSERT(type == ARC_BUFC_DATA);
5183                 arc_space_return(size, ARC_SPACE_DATA);
5184         }
5185 }
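/*
 * The data allocation and free paths above are intentionally symmetric.
 * A condensed sketch of the per-state accounting they maintain, assuming
 * the hash-lock protection described in the comments above:
 *
 *	allocate:	arcs_size        += size;
 *			arcs_esize[type] += size;	(only if evictable)
 *			arc_space_consume(size, ...);
 *
 *	free:		arcs_esize[type] -= size;	(only if evictable)
 *			arcs_size        -= size;
 *			arc_space_return(size, ...);
 */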
5186 
5187 /*
5188  * This routine is called whenever a buffer is accessed.
5189  * NOTE: the hash lock must be held by the caller and is not dropped here.
5190  */
5191 static void
5192 arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
5193 {
5194         clock_t now;
5195 
5196         ASSERT(MUTEX_HELD(hash_lock));
5197         ASSERT(HDR_HAS_L1HDR(hdr));
5198 
5199         if (hdr->b_l1hdr.b_state == arc_anon) {
5200                 /*
5201                  * This buffer is not in the cache, and does not
5202                  * appear in our "ghost" list.  Add the new buffer
5203                  * to the MRU state.
5204                  */
5205 
5206                 ASSERT0(hdr->b_l1hdr.b_arc_access);
5207                 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
5208                 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
5209                 arc_change_state(arc_mru, hdr, hash_lock);
5210 
5211         } else if (hdr->b_l1hdr.b_state == arc_mru) {
5212                 now = ddi_get_lbolt();
5213 
5214                 /*
5215                  * If this buffer is here because of a prefetch, then either:
5216                  * - clear the flag if this is a "referencing" read
5217                  *   (any subsequent access will bump this into the MFU state).
5218                  * or
5219                  * - move the buffer to the head of the list if this is
5220                  *   another prefetch (to make it less likely to be evicted).
5221                  */
5222                 if (HDR_PREFETCH(hdr)) {
5223                         if (refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
5224                                 /* link protected by hash lock */
5225                                 ASSERT(multilist_link_active(
5226                                     &hdr->b_l1hdr.b_arc_node));
5227                         } else {
5228                                 arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
5229                                 ARCSTAT_BUMP(arcstat_mru_hits);
5230                         }
5231                         hdr->b_l1hdr.b_arc_access = now;
5232                         return;
5233                 }
5234 
5235                 /*
5236                  * This buffer has been "accessed" only once so far,
5237                  * but it is still in the cache. Move it to the MFU
5238                  * state.
5239                  */
5240                 if (now > hdr->b_l1hdr.b_arc_access + ARC_MINTIME) {
5241                         /*
5242                          * More than 125ms have passed since we
5243                          * instantiated this buffer.  Move it to the
5244                          * most frequently used state.
5245                          */
5246                         hdr->b_l1hdr.b_arc_access = now;
5247                         DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
5248                         arc_change_state(arc_mfu, hdr, hash_lock);
5249                 }
5250                 ARCSTAT_BUMP(arcstat_mru_hits);
5251         } else if (hdr->b_l1hdr.b_state == arc_mru_ghost) {
5252                 arc_state_t     *new_state;
5253                 /*
5254                  * This buffer has been "accessed" recently, but
5255                  * was evicted from the cache.  Move it to the
5256                  * MFU state.
5257                  */
5258 
5259                 if (HDR_PREFETCH(hdr)) {
5260                         new_state = arc_mru;
5261                         if (refcount_count(&hdr->b_l1hdr.b_refcnt) > 0)
5262                                 arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
5263                         DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
5264                 } else {
5265                         new_state = arc_mfu;
5266                         DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
5267                 }
5268 
5269                 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
5270                 arc_change_state(new_state, hdr, hash_lock);
5271 
5272                 ARCSTAT_BUMP(arcstat_mru_ghost_hits);
5273         } else if (hdr->b_l1hdr.b_state == arc_mfu) {
5274                 /*
5275                  * This buffer has been accessed more than once and is
5276                  * still in the cache.  Keep it in the MFU state.
5277                  *
5278                  * NOTE: an add_reference() that occurred when we did
5279                  * the arc_read() will have kicked this off the list.
5280                  * If it was a prefetch, we will explicitly move it to
5281                  * the head of the list now.
5282                  */
5283                 if (HDR_PREFETCH(hdr) != 0) {
5284                         ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
5285                         /* link protected by hash_lock */
5286                         ASSERT(multilist_link_active(&hdr->b_l1hdr.b_arc_node));
5287                 }
5288                 ARCSTAT_BUMP(arcstat_mfu_hits);
5289                 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
5290         } else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) {
5291                 arc_state_t     *new_state = arc_mfu;
5292                 /*
5293                  * This buffer has been accessed more than once but has
5294                  * been evicted from the cache.  Move it back to the
5295                  * MFU state.
5296                  */
5297 
5298                 if (HDR_PREFETCH(hdr)) {
5299                         /*
5300                          * This is a prefetch access...
5301                          * move this block back to the MRU state.
5302                          */
5303                         ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
5304                         new_state = arc_mru;
5305                 }
5306 
5307                 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
5308                 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
5309                 arc_change_state(new_state, hdr, hash_lock);
5310 
5311                 ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
5312         } else if (hdr->b_l1hdr.b_state == arc_l2c_only) {
5313                 /*
5314                  * This buffer is on the 2nd Level ARC.
5315                  */
5316 
5317                 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
5318                 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
5319                 arc_change_state(arc_mfu, hdr, hash_lock);
5320         } else {
5321                 ASSERT(!"invalid arc state");
5322         }
5323 }
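/*
 * For reference, the state transitions performed by arc_access() above can
 * be summarized as follows (statistics omitted; prefetch special cases
 * shown in parentheses):
 *
 *	current state		next state
 *	-------------		----------
 *	arc_anon		arc_mru
 *	arc_mru			arc_mfu		(only after ARC_MINTIME has
 *						passed; prefetched buffers
 *						stay in arc_mru)
 *	arc_mru_ghost		arc_mfu		(arc_mru if still a prefetch)
 *	arc_mfu			arc_mfu
 *	arc_mfu_ghost		arc_mfu		(arc_mru if still a prefetch)
 *	arc_l2c_only		arc_mfu
 */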
5324 
5325 /*
5326  * This routine is called by dbuf_hold() to update the arc_access() state
5327  * which otherwise would be skipped for entries in the dbuf cache.
5328  */
5329 void
5330 arc_buf_access(arc_buf_t *buf)
5331 {
5332         mutex_enter(&buf->b_evict_lock);
5333         arc_buf_hdr_t *hdr = buf->b_hdr;
5334 
5335         /*
5336          * Avoid taking the hash_lock when possible as an optimization.
5337          * The header must be checked again under the hash_lock in order
5338          * to handle the case where it is concurrently being released.
5339          */
5340         if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) {
5341                 mutex_exit(&buf->b_evict_lock);
5342                 return;
5343         }
5344 
5345         kmutex_t *hash_lock = HDR_LOCK(hdr);
5346         mutex_enter(hash_lock);
5347 
5348         if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) {
5349                 mutex_exit(hash_lock);
5350                 mutex_exit(&buf->b_evict_lock);
5351                 ARCSTAT_BUMP(arcstat_access_skip);
5352                 return;
5353         }
5354 
5355         mutex_exit(&buf->b_evict_lock);
5356 
5357         ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
5358             hdr->b_l1hdr.b_state == arc_mfu);
5359 
5360         DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
5361         arc_access(hdr, hash_lock);
5362         mutex_exit(hash_lock);
5363 
5364         ARCSTAT_BUMP(arcstat_hits);
5365         /*
5366          * Upstream used the ARCSTAT_CONDSTAT macro here, but they changed
5367          * the argument format for that macro, which would require that we
5368          * go and modify all other uses of it. So it's easier to just expand
5369          * this one invocation of the macro to do the right thing.
5370          */
5371         if (!HDR_PREFETCH(hdr)) {
5372                 if (!HDR_ISTYPE_METADATA(hdr))
5373                         ARCSTAT_BUMP(arcstat_demand_data_hits);
5374                 else
5375                         ARCSTAT_BUMP(arcstat_demand_metadata_hits);
5376         } else {
5377                 if (!HDR_ISTYPE_METADATA(hdr))
5378                         ARCSTAT_BUMP(arcstat_prefetch_data_hits);
5379                 else
5380                         ARCSTAT_BUMP(arcstat_prefetch_metadata_hits);
5381         }
5382 }
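/*
 * The locking in arc_buf_access() above follows a check/recheck pattern:
 * the header state is tested once without the hash lock (cheap but racy)
 * and, if that test does not allow an early return, tested again under the
 * lock before acting.  A minimal sketch of the pattern, where
 * is_anon_or_empty() merely stands in for the b_state/HDR_EMPTY test above:
 *
 *	if (is_anon_or_empty(hdr))
 *		return;				(no hash lock taken)
 *	mutex_enter(hash_lock);
 *	if (is_anon_or_empty(hdr)) {
 *		mutex_exit(hash_lock);		(lost the race; bail out)
 *		return;
 *	}
 *	... update ARC state via arc_access() ...
 *	mutex_exit(hash_lock);
 */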
5383 
5384 /* a generic arc_done_func_t which you can use */
5385 /* ARGSUSED */
5386 void
5387 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
5388 {
5389         if (zio == NULL || zio->io_error == 0)
5390                 bcopy(buf->b_data, arg, arc_buf_size(buf));
5391         arc_buf_destroy(buf, arg);
5392 }
5393 
5394 /* a generic arc_done_func_t */
5395 void
5396 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
5397 {
5398         arc_buf_t **bufp = arg;
5399         if (zio && zio->io_error) {
5400                 arc_buf_destroy(buf, arg);
5401                 *bufp = NULL;
5402         } else {
5403                 *bufp = buf;
5404                 ASSERT(buf->b_data);
5405         }
5406 }
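/*
 * A sketch of how a caller typically uses arc_getbuf_func() together with
 * arc_read() (defined below) for a blocking read; spa, bp and zb are
 * assumed to be supplied by the caller, and the flags shown are only one
 * reasonable choice:
 *
 *	arc_flags_t aflags = ARC_FLAG_WAIT;
 *	arc_buf_t *abuf = NULL;
 *	int err;
 *
 *	err = arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
 *	if (err != 0)
 *		return (err);
 *	... consume abuf->b_data ...
 *	arc_buf_destroy(abuf, &abuf);
 */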
5407 
5408 static void
5409 arc_hdr_verify(arc_buf_hdr_t *hdr, blkptr_t *bp)
5410 {
5411         if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
5412                 ASSERT3U(HDR_GET_PSIZE(hdr), ==, 0);
5413                 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF);
5414         } else {
5415                 if (HDR_COMPRESSION_ENABLED(hdr)) {
5416                         ASSERT3U(HDR_GET_COMPRESS(hdr), ==,
5417                             BP_GET_COMPRESS(bp));
5418                 }
5419                 ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp));
5420                 ASSERT3U(HDR_GET_PSIZE(hdr), ==, BP_GET_PSIZE(bp));
5421         }
5422 }
5423 
5424 static void
5425 arc_read_done(zio_t *zio)
5426 {
5427         arc_buf_hdr_t   *hdr = zio->io_private;
5428         kmutex_t        *hash_lock = NULL;
5429         arc_callback_t  *callback_list;
5430         arc_callback_t  *acb;
5431         boolean_t       freeable = B_FALSE;
5432         boolean_t       no_zio_error = (zio->io_error == 0);
5433 
5434         /*
5435          * The hdr was inserted into the hash table and removed from lists
5436          * prior to starting I/O.  We should find this header, since
5437          * it's in the hash table, and it should be legit since it's
5438          * not possible to evict it during the I/O.  The only possible
5439          * reason for it not to be found is if the buffer was freed
5440          * during the read.
5441          */
5442         if (HDR_IN_HASH_TABLE(hdr)) {
5443                 ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp));
5444                 ASSERT3U(hdr->b_dva.dva_word[0], ==,
5445                     BP_IDENTITY(zio->io_bp)->dva_word[0]);
5446                 ASSERT3U(hdr->b_dva.dva_word[1], ==,
5447                     BP_IDENTITY(zio->io_bp)->dva_word[1]);
5448 
5449                 arc_buf_hdr_t *found = buf_hash_find(hdr->b_spa, zio->io_bp,
5450                     &hash_lock);
5451 
5452                 ASSERT((found == hdr &&
5453                     DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
5454                     (found == hdr && HDR_L2_READING(hdr)));
5455                 ASSERT3P(hash_lock, !=, NULL);
5456         }
5457 
5458         if (no_zio_error) {
5459                 /* byteswap if necessary */
5460                 if (BP_SHOULD_BYTESWAP(zio->io_bp)) {
5461                         if (BP_GET_LEVEL(zio->io_bp) > 0) {
5462                                 hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
5463                         } else {
5464                                 hdr->b_l1hdr.b_byteswap =
5465                                     DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
5466                         }
5467                 } else {
5468                         hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
5469                 }
5470         }
5471 
5472         arc_hdr_clear_flags(hdr, ARC_FLAG_L2_EVICTED);
5473         if (l2arc_noprefetch && HDR_PREFETCH(hdr))
5474                 arc_hdr_clear_flags(hdr, ARC_FLAG_L2CACHE);
5475 
5476         callback_list = hdr->b_l1hdr.b_acb;
5477         ASSERT3P(callback_list, !=, NULL);
5478 
5479         if (hash_lock && no_zio_error && hdr->b_l1hdr.b_state == arc_anon) {
5480                 /*
5481                  * Only call arc_access on anonymous buffers.  This is because
5482                  * if we've issued an I/O for an evicted buffer, we've already
5483                  * called arc_access (to prevent any simultaneous readers from
5484                  * getting confused).
5485                  */
5486                 arc_access(hdr, hash_lock);
5487         }
5488 
5489         /*
5490          * If a read request has a callback (i.e. acb_done is not NULL), then we
5491          * make a buf containing the data according to the parameters which were
5492          * passed in. The implementation of arc_buf_alloc_impl() ensures that we
5493          * aren't needlessly decompressing the data multiple times.
5494          */
5495         int callback_cnt = 0;
5496         for (acb = callback_list; acb != NULL; acb = acb->acb_next) {
5497                 if (!acb->acb_done)
5498                         continue;
5499 
5500                 /* This is a demand read since prefetches don't use callbacks */
5501                 callback_cnt++;
5502 
5503                 int error = arc_buf_alloc_impl(hdr, acb->acb_private,
5504                     acb->acb_compressed, no_zio_error, &acb->acb_buf);
5505                 if (no_zio_error) {
5506                         zio->io_error = error;
5507                 }
5508         }
5509         hdr->b_l1hdr.b_acb = NULL;
5510         arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
5511         if (callback_cnt == 0) {
5512                 ASSERT(HDR_PREFETCH(hdr));
5513                 ASSERT0(hdr->b_l1hdr.b_bufcnt);
5514                 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
5515         }
5516 
5517         ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
5518             callback_list != NULL);
5519 
5520         if (no_zio_error) {
5521                 arc_hdr_verify(hdr, zio->io_bp);
5522         } else {
5523                 arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
5524                 if (hdr->b_l1hdr.b_state != arc_anon)
5525                         arc_change_state(arc_anon, hdr, hash_lock);
5526                 if (HDR_IN_HASH_TABLE(hdr)) {
5527                         if (hash_lock)
5528                                 arc_wait_for_krrp(hdr);
5529                         buf_hash_remove(hdr);
5530                 }
5531                 freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
5532         }
5533 
5534         /*
5535          * Broadcast before we drop the hash_lock to avoid the possibility
5536          * that the hdr (and hence the cv) might be freed before we get to
5537          * the cv_broadcast().
5538          */
5539         cv_broadcast(&hdr->b_l1hdr.b_cv);
5540 
5541         if (hash_lock != NULL) {
5542                 mutex_exit(hash_lock);
5543         } else {
5544                 /*
5545                  * This block was freed while we waited for the read to
5546                  * complete.  It has been removed from the hash table and
5547                  * moved to the anonymous state (so that it won't show up
5548                  * in the cache).
5549                  */
5550                 ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
5551                 freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
5552         }
5553 
5554         /* execute each callback and free its structure */
5555         while ((acb = callback_list) != NULL) {
5556                 if (acb->acb_done)
5557                         acb->acb_done(zio, acb->acb_buf, acb->acb_private);
5558 
5559                 if (acb->acb_zio_dummy != NULL) {
5560                         acb->acb_zio_dummy->io_error = zio->io_error;
5561                         zio_nowait(acb->acb_zio_dummy);
5562                 }
5563 
5564                 callback_list = acb->acb_next;
5565                 kmem_free(acb, sizeof (arc_callback_t));
5566         }
5567 
5568         if (freeable)
5569                 arc_hdr_destroy(hdr);
5570 }
5571 
5572 /*
5573  * Process a block's data from the ARC via a callback.
5574  * The main purpose is to copy data directly from the ARC into a target buffer.
5575  */
5576 int
5577 arc_io_bypass(spa_t *spa, const blkptr_t *bp,
5578     arc_bypass_io_func func, void *arg)
5579 {
5580         arc_buf_hdr_t *hdr;
5581         kmutex_t *hash_lock = NULL;
5582         int error = 0;
5583         uint64_t guid = spa_load_guid(spa);
5584 
5585 top:
5586         hdr = buf_hash_find(guid, bp, &hash_lock);
5587         if (hdr && HDR_HAS_L1HDR(hdr) && hdr->b_l1hdr.b_bufcnt > 0 &&
5588             hdr->b_l1hdr.b_buf->b_data) {
5589                 if (HDR_IO_IN_PROGRESS(hdr)) {
5590                         cv_wait(&hdr->b_l1hdr.b_cv, hash_lock);
5591                         mutex_exit(hash_lock);
5592                         DTRACE_PROBE(arc_bypass_wait);
5593                         goto top;
5594                 }
5595 
5596                 /*
5597                  * Since func is an arbitrary callback that may block, the
5598                  * hash lock must be released so that other threads are not
5599                  * stalled. A counter is used to hold a reference on blocks
5600                  * that are in use by krrp.
5601                  */
5602 
5603                 hdr->b_l1hdr.b_krrp++;
5604                 mutex_exit(hash_lock);
5605 
5606                 error = func(hdr->b_l1hdr.b_buf->b_data, hdr->b_lsize, arg);
5607 
5608                 mutex_enter(hash_lock);
5609                 hdr->b_l1hdr.b_krrp--;
5610                 cv_broadcast(&hdr->b_l1hdr.b_cv);
5611                 mutex_exit(hash_lock);
5612 
5613                 return (error);
5614         } else {
5615                 if (hash_lock)
5616                         mutex_exit(hash_lock);
5617                 return (ENODATA);
5618         }
5619 }
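/*
 * The b_krrp counter incremented above acts as a hold that keeps the header
 * usable while the callback runs without the hash lock.  Code that needs to
 * tear a header down (see the arc_wait_for_krrp() calls elsewhere in this
 * file) presumably waits for the count to drain using the usual
 * condition-variable pattern; a simplified sketch of that wait, not the
 * actual arc_wait_for_krrp() body:
 *
 *	while (hdr->b_l1hdr.b_krrp != 0)
 *		cv_wait(&hdr->b_l1hdr.b_cv, hash_lock);
 */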
5620 
5621 /*
5622  * "Read" the block at the specified DVA (in bp) via the
5623  * cache.  If the block is found in the cache, invoke the provided
5624  * callback immediately and return.  Note that the `zio' parameter
5625  * in the callback will be NULL in this case, since no IO was
5626  * required.  If the block is not in the cache pass the read request
5627  * on to the spa with a substitute callback function, so that the
5628  * requested block will be added to the cache.
5629  *
5630  * If a read request arrives for a block that has a read in-progress,
5631  * either wait for the in-progress read to complete (and return the
5632  * results); or, if this is a read with a "done" func, add a record
5633  * to the read to invoke the "done" func when the read completes,
5634  * and return; or just return.
5635  *
5636  * arc_read_done() will invoke all the requested "done" functions
5637  * for readers of this block.
5638  */
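/*
 * Schematically, a caller that passes a "done" callback sees one of three
 * outcomes (a condensed restatement of the description above):
 *
 *	cache hit:		done(NULL, buf, private) runs before
 *				arc_read() returns.
 *	read in progress:	the callback is chained onto the pending
 *				read and runs from arc_read_done().
 *	cache miss:		a zio is issued and the callback runs from
 *				arc_read_done() on completion.
 */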
5639 int
5640 arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
5641     void *private, zio_priority_t priority, int zio_flags,
5642     arc_flags_t *arc_flags, const zbookmark_phys_t *zb)
5643 {
5644         arc_buf_hdr_t *hdr = NULL;
5645         kmutex_t *hash_lock = NULL;
5646         zio_t *rzio;
5647         uint64_t guid = spa_load_guid(spa);
5648         boolean_t compressed_read = (zio_flags & ZIO_FLAG_RAW) != 0;
5649 
5650         ASSERT(!BP_IS_EMBEDDED(bp) ||
5651             BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);
5652 
5653 top:
5654         if (!BP_IS_EMBEDDED(bp)) {
5655                 /*
5656                  * Embedded BP's have no DVA and require no I/O to "read".
5657                  * Create an anonymous arc buf to back it.
5658                  */
5659                 hdr = buf_hash_find(guid, bp, &hash_lock);
5660         }
5661 
5662         if (hdr != NULL && HDR_HAS_L1HDR(hdr) && hdr->b_l1hdr.b_pabd != NULL) {
5663                 arc_buf_t *buf = NULL;
5664                 *arc_flags |= ARC_FLAG_CACHED;
5665 
5666                 if (HDR_IO_IN_PROGRESS(hdr)) {
5667 
5668                         if ((hdr->b_flags & ARC_FLAG_PRIO_ASYNC_READ) &&
5669                             priority == ZIO_PRIORITY_SYNC_READ) {
5670                                 /*
5671                                  * This sync read must wait for an
5672                                  * in-progress async read (e.g. a predictive
5673                                  * prefetch).  Async reads are queued
5674                                  * separately at the vdev_queue layer, so
5675                                  * this is a form of priority inversion.
5676                                  * Ideally, we would "inherit" the demand
5677                                  * i/o's priority by moving the i/o from
5678                                  * the async queue to the synchronous queue,
5679                                  * but there is currently no mechanism to do
5680                                  * so.  Track this so that we can evaluate
5681                                  * the magnitude of this potential performance
5682                                  * problem.
5683                                  *
5684                                  * Note that if the prefetch i/o is already
5685                                  * active (has been issued to the device),
5686                                  * the prefetch improved performance, because
5687                                  * we issued it sooner than we would have
5688                                  * without the prefetch.
5689                                  */
5690                                 DTRACE_PROBE1(arc__sync__wait__for__async,
5691                                     arc_buf_hdr_t *, hdr);
5692                                 ARCSTAT_BUMP(arcstat_sync_wait_for_async);
5693                         }
5694                         if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) {
5695                                 arc_hdr_clear_flags(hdr,
5696                                     ARC_FLAG_PREDICTIVE_PREFETCH);
5697                         }
5698 
5699                         if (*arc_flags & ARC_FLAG_WAIT) {
5700                                 cv_wait(&hdr->b_l1hdr.b_cv, hash_lock);
5701                                 mutex_exit(hash_lock);
5702                                 goto top;
5703                         }
5704                         ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
5705 
5706                         if (done) {
5707                                 arc_callback_t *acb = NULL;
5708 
5709                                 acb = kmem_zalloc(sizeof (arc_callback_t),
5710                                     KM_SLEEP);
5711                                 acb->acb_done = done;
5712                                 acb->acb_private = private;
5713                                 acb->acb_compressed = compressed_read;
5714                                 if (pio != NULL)
5715                                         acb->acb_zio_dummy = zio_null(pio,
5716                                             spa, NULL, NULL, NULL, zio_flags);
5717 
5718                                 ASSERT3P(acb->acb_done, !=, NULL);
5719                                 acb->acb_next = hdr->b_l1hdr.b_acb;
5720                                 hdr->b_l1hdr.b_acb = acb;
5721                                 mutex_exit(hash_lock);
5722                                 return (0);
5723                         }
5724                         mutex_exit(hash_lock);
5725                         return (0);
5726                 }
5727 
5728                 ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
5729                     hdr->b_l1hdr.b_state == arc_mfu);
5730 
5731                 if (done) {
5732                         if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) {
5733                                 /*
5734                                  * This is a demand read which does not have to
5735                                  * wait for i/o because we did a predictive
5736                                  * prefetch i/o for it, which has completed.
5737                                  */
5738                                 DTRACE_PROBE1(
5739                                     arc__demand__hit__predictive__prefetch,
5740                                     arc_buf_hdr_t *, hdr);
5741                                 ARCSTAT_BUMP(
5742                                     arcstat_demand_hit_predictive_prefetch);
5743                                 arc_hdr_clear_flags(hdr,
5744                                     ARC_FLAG_PREDICTIVE_PREFETCH);
5745                         }
5746                         ASSERT(!BP_IS_EMBEDDED(bp) || !BP_IS_HOLE(bp));
5747 
5748                         /* Get a buf with the desired data in it. */
5749                         VERIFY0(arc_buf_alloc_impl(hdr, private,
5750                             compressed_read, B_TRUE, &buf));
5751                 } else if (*arc_flags & ARC_FLAG_PREFETCH &&
5752                     refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
5753                         arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
5754                 }
5755                 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
5756                 arc_access(hdr, hash_lock);
5757                 if (*arc_flags & ARC_FLAG_L2CACHE)
5758                         arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
5759                 mutex_exit(hash_lock);
5760                 ARCSTAT_BUMP(arcstat_hits);
5761                 if (HDR_ISTYPE_DDT(hdr))
5762                         ARCSTAT_BUMP(arcstat_ddt_hits);
5763                 arc_update_hit_stat(hdr, B_TRUE);
5764 
5765                 if (done)
5766                         done(NULL, buf, private);
5767         } else {
5768                 uint64_t lsize = BP_GET_LSIZE(bp);
5769                 uint64_t psize = BP_GET_PSIZE(bp);
5770                 arc_callback_t *acb;
5771                 vdev_t *vd = NULL;
5772                 uint64_t addr = 0;
5773                 boolean_t devw = B_FALSE;
5774                 uint64_t size;
5775 
5776                 if (hdr == NULL) {
5777                         /* this block is not in the cache */
5778                         arc_buf_hdr_t *exists = NULL;
5779                         arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
5780                         hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
5781                             BP_GET_COMPRESS(bp), type);
5782 
5783                         if (!BP_IS_EMBEDDED(bp)) {
5784                                 hdr->b_dva = *BP_IDENTITY(bp);
5785                                 hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
5786                                 exists = buf_hash_insert(hdr, &hash_lock);
5787                         }
5788                         if (exists != NULL) {
5789                                 /* somebody beat us to the hash insert */
5790                                 arc_hdr_destroy(hdr);
5791                                 mutex_exit(hash_lock);
5792                                 goto top; /* restart the IO request */
5793                         }
5794                 } else {
5795                         /*
5796                          * This block is in the ghost cache. If it was L2-only
5797                          * (and thus didn't have an L1 hdr), we realloc the
5798                          * header to add an L1 hdr.
5799                          */
5800                         if (!HDR_HAS_L1HDR(hdr)) {
5801                                 hdr = arc_hdr_realloc(hdr, hdr_l2only_cache,
5802                                     hdr_full_cache);
5803                         }
5804                         ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
5805                         ASSERT(GHOST_STATE(hdr->b_l1hdr.b_state));
5806                         ASSERT(!HDR_IO_IN_PROGRESS(hdr));
5807                         ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
5808                         ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
5809                         ASSERT3P(hdr->b_freeze_cksum, ==, NULL);
5810 
5811                         /*
5812                          * This is a delicate dance that we play here.
5813                          * This hdr is in the ghost list so we access it
5814                          * to move it out of the ghost list before we
5815                          * initiate the read. If it's a prefetch then
5816                          * it won't have a callback so we'll remove the
5817                          * reference that arc_buf_alloc_impl() created. We
5818                          * do this after we've called arc_access() to
5819                          * avoid hitting an assert in remove_reference().
5820                          */
5821                         arc_access(hdr, hash_lock);
5822                         arc_hdr_alloc_pabd(hdr);
5823                 }
5824                 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
5825                 size = arc_hdr_size(hdr);
5826 
5827                 /*
5828                  * If compression is enabled on the hdr, then we will do
5829                  * RAW I/O and will store the compressed data in the hdr's
5830                  * data block. Otherwise, the hdr's data block will contain
5831                  * the uncompressed data.
5832                  */
5833                 if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF) {
5834                         zio_flags |= ZIO_FLAG_RAW;
5835                 }
5836 
5837                 if (*arc_flags & ARC_FLAG_PREFETCH)
5838                         arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
5839                 if (*arc_flags & ARC_FLAG_L2CACHE)
5840                         arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
5841                 if (BP_GET_LEVEL(bp) > 0)
5842                         arc_hdr_set_flags(hdr, ARC_FLAG_INDIRECT);
5843                 if (*arc_flags & ARC_FLAG_PREDICTIVE_PREFETCH)
5844                         arc_hdr_set_flags(hdr, ARC_FLAG_PREDICTIVE_PREFETCH);
5845                 ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state));
5846 
5847                 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
5848                 acb->acb_done = done;
5849                 acb->acb_private = private;
5850                 acb->acb_compressed = compressed_read;
5851 
5852                 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
5853                 hdr->b_l1hdr.b_acb = acb;
5854                 arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
5855 
5856                 if (HDR_HAS_L2HDR(hdr) &&
5857                     (vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) {
5858                         devw = hdr->b_l2hdr.b_dev->l2ad_writing;
5859                         addr = hdr->b_l2hdr.b_daddr;
5860                         /*
5861                          * Lock out device removal.
5862                          */
5863                         if (vdev_is_dead(vd) ||
5864                             !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
5865                                 vd = NULL;
5866                 }
5867 
5868                 if (priority == ZIO_PRIORITY_ASYNC_READ)
5869                         arc_hdr_set_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
5870                 else
5871                         arc_hdr_clear_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
5872 
5873                 if (hash_lock != NULL)
5874                         mutex_exit(hash_lock);
5875 
5876                 /*
5877                  * At this point, we have a level 1 cache miss.  Try again in
5878                  * L2ARC if possible.
5879                  */
5880                 ASSERT3U(HDR_GET_LSIZE(hdr), ==, lsize);
5881 
5882                 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp,
5883                     uint64_t, lsize, zbookmark_phys_t *, zb);
5884                 ARCSTAT_BUMP(arcstat_misses);
5885                 arc_update_hit_stat(hdr, B_FALSE);
5886 
5887                 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
5888                         /*
5889                          * Read from the L2ARC if the following are true:
5890                          * 1. The L2ARC vdev was previously cached.
5891                          * 2. This buffer still has L2ARC metadata.
5892                          * 3. This buffer isn't currently being written to the L2ARC.
5893                          * 4. The L2ARC entry wasn't evicted, which may
5894                          *    also have invalidated the vdev.
5895                          * 5. This isn't a prefetch with l2arc_noprefetch enabled.
5896                          */
5897                         if (HDR_HAS_L2HDR(hdr) &&
5898                             !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
5899                             !(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
5900                                 l2arc_read_callback_t *cb;
5901                                 abd_t *abd;
5902                                 uint64_t asize;
5903 
5904                                 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
5905                                 ARCSTAT_BUMP(arcstat_l2_hits);
5906                                 if (vdev_type_is_ddt(vd))
5907                                         ARCSTAT_BUMP(arcstat_l2_ddt_hits);
5908 
5909                                 cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
5910                                     KM_SLEEP);
5911                                 cb->l2rcb_hdr = hdr;
5912                                 cb->l2rcb_bp = *bp;
5913                                 cb->l2rcb_zb = *zb;
5914                                 cb->l2rcb_flags = zio_flags;
5915 
5916                                 asize = vdev_psize_to_asize(vd, size);
5917                                 if (asize != size) {
5918                                         abd = abd_alloc_for_io(asize,
5919                                             !HDR_ISTYPE_DATA(hdr));
5920                                         cb->l2rcb_abd = abd;
5921                                 } else {
5922                                         abd = hdr->b_l1hdr.b_pabd;
5923                                 }
5924 
5925                                 ASSERT(addr >= VDEV_LABEL_START_SIZE &&
5926                                     addr + asize <= vd->vdev_psize -
5927                                     VDEV_LABEL_END_SIZE);
5928 
5929                                 /*
5930                                  * l2arc read.  The SCL_L2ARC lock will be
5931                                  * released by l2arc_read_done().
5932                                  * Issue a null zio if the underlying buffer
5933                                  * was squashed to zero size by compression.
5934                                  */
5935                                 ASSERT3U(HDR_GET_COMPRESS(hdr), !=,
5936                                     ZIO_COMPRESS_EMPTY);
5937                                 rzio = zio_read_phys(pio, vd, addr,
5938                                     asize, abd,
5939                                     ZIO_CHECKSUM_OFF,
5940                                     l2arc_read_done, cb, priority,
5941                                     zio_flags | ZIO_FLAG_DONT_CACHE |
5942                                     ZIO_FLAG_CANFAIL |
5943                                     ZIO_FLAG_DONT_PROPAGATE |
5944                                     ZIO_FLAG_DONT_RETRY, B_FALSE);
5945                                 DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
5946                                     zio_t *, rzio);
5947 
5948                                 ARCSTAT_INCR(arcstat_l2_read_bytes, size);
5949                                 if (vdev_type_is_ddt(vd))
5950                                         ARCSTAT_INCR(arcstat_l2_ddt_read_bytes,
5951                                             size);
5952 
5953                                 if (*arc_flags & ARC_FLAG_NOWAIT) {
5954                                         zio_nowait(rzio);
5955                                         return (0);
5956                                 }
5957 
5958                                 ASSERT(*arc_flags & ARC_FLAG_WAIT);
5959                                 if (zio_wait(rzio) == 0)
5960                                         return (0);
5961 
5962                                 /* l2arc read error; goto zio_read() */
5963                         } else {
5964                                 DTRACE_PROBE1(l2arc__miss,
5965                                     arc_buf_hdr_t *, hdr);
5966                                 ARCSTAT_BUMP(arcstat_l2_misses);
5967                                 if (HDR_L2_WRITING(hdr))
5968                                         ARCSTAT_BUMP(arcstat_l2_rw_clash);
5969                                 spa_config_exit(spa, SCL_L2ARC, vd);
5970                         }
5971                 } else {
5972                         if (vd != NULL)
5973                                 spa_config_exit(spa, SCL_L2ARC, vd);
5974                         if (l2arc_ndev != 0) {
5975                                 DTRACE_PROBE1(l2arc__miss,
5976                                     arc_buf_hdr_t *, hdr);
5977                                 ARCSTAT_BUMP(arcstat_l2_misses);
5978                         }
5979                 }
5980 
5981                 rzio = zio_read(pio, spa, bp, hdr->b_l1hdr.b_pabd, size,
5982                     arc_read_done, hdr, priority, zio_flags, zb);
5983 
5984                 if (*arc_flags & ARC_FLAG_WAIT)
5985                         return (zio_wait(rzio));
5986 
5987                 ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
5988                 zio_nowait(rzio);
5989         }
5990         return (0);
5991 }
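/*
 * For reference, the L2ARC read path in arc_read() above is only taken when
 * all of the following hold (a condensed restatement of the checks in the
 * function body):
 *
 *	vd != NULL && l2arc_ndev != 0 &&
 *	!(l2arc_norw && devw) &&
 *	HDR_HAS_L2HDR(hdr) &&
 *	!HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
 *	!(l2arc_noprefetch && HDR_PREFETCH(hdr))
 *
 * Otherwise the miss is serviced by a regular zio_read() of the original
 * block pointer.
 */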
5992 
5993 /*
5994  * Notify the arc that a block was freed, and thus will never be used again.
5995  */
5996 void
5997 arc_freed(spa_t *spa, const blkptr_t *bp)
5998 {
5999         arc_buf_hdr_t *hdr;
6000         kmutex_t *hash_lock;
6001         uint64_t guid = spa_load_guid(spa);
6002 
6003         ASSERT(!BP_IS_EMBEDDED(bp));
6004 
6005         hdr = buf_hash_find(guid, bp, &hash_lock);
6006         if (hdr == NULL)
6007                 return;
6008 
6009         /*
6010          * We might be trying to free a block that is still doing I/O
6011          * (i.e. prefetch) or has a reference (i.e. a dedup-ed,
6012          * dmu_sync-ed block). If this block is being prefetched, then it
6013          * would still have the ARC_FLAG_IO_IN_PROGRESS flag set on the hdr
6014          * until the I/O completes. A block may also have a reference if it is
6015          * part of a dedup-ed, dmu_synced write. The dmu_sync() function would
6016          * have written the new block to its final resting place on disk but
6017          * without the dedup flag set. This would have left the hdr in the MRU
6018          * state and discoverable. When the txg finally syncs it detects that
6019          * the block was overridden in open context and issues an override I/O.
6020          * Since this is a dedup block, the override I/O will determine if the
6021          * block is already in the DDT. If so, then it will replace the io_bp
6022          * with the bp from the DDT and allow the I/O to finish. When the I/O
6023          * reaches the done callback, dbuf_write_override_done, it will
6024          * check to see if the io_bp and io_bp_override are identical.
6025          * If they are not, then it indicates that the bp was replaced with
6026          * the bp in the DDT and the override bp is freed. This allows
6027          * us to arrive here with a reference on a block that is being
6028          * freed. So if we have an I/O in progress, or a reference to
6029          * this hdr, then we don't destroy the hdr.
6030          */
6031         if (!HDR_HAS_L1HDR(hdr) || (!HDR_IO_IN_PROGRESS(hdr) &&
6032             refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) {
6033                 arc_change_state(arc_anon, hdr, hash_lock);
6034                 arc_hdr_destroy(hdr);
6035                 mutex_exit(hash_lock);
6036         } else {
6037                 mutex_exit(hash_lock);
6038         }
6039 
6040 }
6041 
6042 /*
6043  * Release this buffer from the cache, making it an anonymous buffer.  This
6044  * must be done after a read and prior to modifying the buffer contents.
6045  * If the buffer has more than one reference, we must make
6046  * a new hdr for the buffer.
6047  */
6048 void
6049 arc_release(arc_buf_t *buf, void *tag)
6050 {
6051         arc_buf_hdr_t *hdr = buf->b_hdr;
6052 
6053         /*
6054          * It would be nice to assert that if it's DMU metadata (level >
6055          * 0 || it's the dnode file), then it must be syncing context.
6056          * But we don't know that information at this level.
6057          */
6058 
6059         mutex_enter(&buf->b_evict_lock);
6060 
6061         ASSERT(HDR_HAS_L1HDR(hdr));
6062 
6063         /*
6064          * We don't grab the hash lock prior to this check, because if
6065          * the buffer's header is in the arc_anon state, it won't be
6066          * linked into the hash table.
6067          */
6068         if (hdr->b_l1hdr.b_state == arc_anon) {
6069                 mutex_exit(&buf->b_evict_lock);
6070                 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
6071                 ASSERT(!HDR_IN_HASH_TABLE(hdr));
6072                 ASSERT(!HDR_HAS_L2HDR(hdr));
6073                 ASSERT(HDR_EMPTY(hdr));
6074 
6075                 ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
6076                 ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
6077                 ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));
6078 
6079                 hdr->b_l1hdr.b_arc_access = 0;
6080 
6081                 /*
6082                  * If the buf is being overridden then it may already
6083                  * have a hdr that is not empty.
6084                  */
6085                 buf_discard_identity(hdr);
6086                 arc_buf_thaw(buf);
6087 
6088                 return;
6089         }
6090 
6091         kmutex_t *hash_lock = HDR_LOCK(hdr);
6092         mutex_enter(hash_lock);
6093 
6094         /*
6095          * This assignment is only valid as long as the hash_lock is
6096          * held; we must be careful not to reference state or the
6097          * b_state field after dropping the lock.
6098          */
6099         arc_state_t *state = hdr->b_l1hdr.b_state;
6100         ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
6101         ASSERT3P(state, !=, arc_anon);
6102 
6103         /* this buffer is not on any list */
6104         ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);
6105 
6106         if (HDR_HAS_L2HDR(hdr)) {
6107                 mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx);
6108 
6109                 /*
6110                  * We have to recheck this conditional again now that
6111                  * we're holding the l2ad_mtx to prevent a race with
6112                  * another thread which might be concurrently calling
6113                  * l2arc_evict(). In that case, l2arc_evict() might have
6114                  * destroyed the header's L2 portion as we were waiting
6115                  * to acquire the l2ad_mtx.
6116                  */
6117                 if (HDR_HAS_L2HDR(hdr))
6118                         arc_hdr_l2hdr_destroy(hdr);
6119 
6120                 mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx);
6121         }
6122 
6123         /*
6124          * Do we have more than one buf?
6125          */
6126         if (hdr->b_l1hdr.b_bufcnt > 1) {
6127                 arc_buf_hdr_t *nhdr;
6128                 uint64_t spa = hdr->b_spa;
6129                 uint64_t psize = HDR_GET_PSIZE(hdr);
6130                 uint64_t lsize = HDR_GET_LSIZE(hdr);
6131                 enum zio_compress compress = HDR_GET_COMPRESS(hdr);
6132                 arc_buf_contents_t type = arc_buf_type(hdr);
6133                 VERIFY3U(hdr->b_type, ==, type);
6134 
6135                 ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL);
6136                 (void) remove_reference(hdr, hash_lock, tag);
6137 
6138                 if (arc_buf_is_shared(buf) && !ARC_BUF_COMPRESSED(buf)) {
6139                         ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf);
6140                         ASSERT(ARC_BUF_LAST(buf));
6141                 }
6142 
6143                 /*
6144                  * Pull the data off of this hdr and attach it to
6145                  * a new anonymous hdr. Also find the last buffer
6146                  * in the hdr's buffer list.
6147                  */
6148                 arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
6149                 ASSERT3P(lastbuf, !=, NULL);
6150 
6151                 /*
6152                  * If the current arc_buf_t and the hdr are sharing their data
6153                  * buffer, then we must stop sharing that block.
6154                  */
6155                 if (arc_buf_is_shared(buf)) {
6156                         VERIFY(!arc_buf_is_shared(lastbuf));
6157 
6158                         /*
6159                          * First, sever the block sharing relationship between
6160                          * buf and the arc_buf_hdr_t.
6161                          */
6162                         arc_unshare_buf(hdr, buf);
6163 
6164                         /*
6165                          * Now we need to recreate the hdr's b_pabd. Since we
6166                          * have lastbuf handy, we try to share with it, but if
6167                          * we can't then we allocate a new b_pabd and copy the
6168                          * data from buf into it.
6169                          */
6170                         if (arc_can_share(hdr, lastbuf)) {
6171                                 arc_share_buf(hdr, lastbuf);
6172                         } else {
6173                                 arc_hdr_alloc_pabd(hdr);
6174                                 abd_copy_from_buf(hdr->b_l1hdr.b_pabd,
6175                                     buf->b_data, psize);
6176                         }
6177                         VERIFY3P(lastbuf->b_data, !=, NULL);
6178                 } else if (HDR_SHARED_DATA(hdr)) {
6179                         /*
6180                          * Uncompressed shared buffers are always at the end
6181                          * of the list. Compressed buffers don't have the
6182                          * same requirements. This makes it hard to
6183                          * simply assert that the lastbuf is shared so
6184                          * we rely on the hdr's compression flags to determine
6185                          * if we have a compressed, shared buffer.
6186                          */
6187                         ASSERT(arc_buf_is_shared(lastbuf) ||
6188                             HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF);
6189                         ASSERT(!ARC_BUF_SHARED(buf));
6190                 }
6191                 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
6192                 ASSERT3P(state, !=, arc_l2c_only);
6193 
6194                 (void) refcount_remove_many(&state->arcs_size,
6195                     arc_buf_size(buf), buf);
6196 
6197                 if (refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
6198                         ASSERT3P(state, !=, arc_l2c_only);
6199                         (void) refcount_remove_many(&state->arcs_esize[type],
6200                             arc_buf_size(buf), buf);
6201                 }
6202 
6203                 hdr->b_l1hdr.b_bufcnt -= 1;
6204                 arc_cksum_verify(buf);
6205                 arc_buf_unwatch(buf);
6206 
6207                 mutex_exit(hash_lock);
6208 
6209                 /*
6210                  * Allocate a new hdr. The new hdr will contain a b_pabd
6211                  * buffer which will be freed in arc_write().
6212                  */
6213                 nhdr = arc_hdr_alloc(spa, psize, lsize, compress, type);
6214                 ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL);
6215                 ASSERT0(nhdr->b_l1hdr.b_bufcnt);
6216                 ASSERT0(refcount_count(&nhdr->b_l1hdr.b_refcnt));
6217                 VERIFY3U(nhdr->b_type, ==, type);
6218                 ASSERT(!HDR_SHARED_DATA(nhdr));
6219 
6220                 nhdr->b_l1hdr.b_buf = buf;
6221                 nhdr->b_l1hdr.b_bufcnt = 1;
6222                 (void) refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
6223                 nhdr->b_l1hdr.b_krrp = 0;
6224 
6225                 buf->b_hdr = nhdr;
6226 
6227                 mutex_exit(&buf->b_evict_lock);
6228                 (void) refcount_add_many(&arc_anon->arcs_size,
6229                     arc_buf_size(buf), buf);
6230         } else {
6231                 mutex_exit(&buf->b_evict_lock);
6232                 ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
6233                 /* protected by hash lock, or hdr is on arc_anon */
6234                 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
6235                 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
6236                 arc_change_state(arc_anon, hdr, hash_lock);
6237                 hdr->b_l1hdr.b_arc_access = 0;
6238                 mutex_exit(hash_lock);
6239 
6240                 buf_discard_identity(hdr);
6241                 arc_buf_thaw(buf);
6242         }
6243 }
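/*
 * When the released buffer shares its header with other buffers, the code
 * above performs, in order: drop this buffer's reference, unlink it from
 * the header's buffer list, break any data sharing with the old header
 * (re-sharing with or copying into the remaining buffers as needed), fix up
 * the old state's size accounting, and finally attach the buffer to a
 * freshly allocated anonymous header.  A condensed sketch, with the hash
 * lock held as in the code above:
 *
 *	(void) remove_reference(hdr, hash_lock, tag);
 *	lastbuf = arc_buf_remove(hdr, buf);
 *	if (arc_buf_is_shared(buf))
 *		arc_unshare_buf(hdr, buf);	(then re-share or realloc)
 *	(void) refcount_remove_many(&state->arcs_size, arc_buf_size(buf), buf);
 *	nhdr = arc_hdr_alloc(spa, psize, lsize, compress, type);
 *	buf->b_hdr = nhdr;
 */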
6244 
6245 int
6246 arc_released(arc_buf_t *buf)
6247 {
6248         int released;
6249 
6250         mutex_enter(&buf->b_evict_lock);
6251         released = (buf->b_data != NULL &&
6252             buf->b_hdr->b_l1hdr.b_state == arc_anon);
6253         mutex_exit(&buf->b_evict_lock);
6254         return (released);
6255 }
6256 
6257 #ifdef ZFS_DEBUG
6258 int
6259 arc_referenced(arc_buf_t *buf)
6260 {
6261         int referenced;
6262 
6263         mutex_enter(&buf->b_evict_lock);
6264         referenced = (refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
6265         mutex_exit(&buf->b_evict_lock);
6266         return (referenced);
6267 }
6268 #endif
6269 
6270 static void
6271 arc_write_ready(zio_t *zio)
6272 {
6273         arc_write_callback_t *callback = zio->io_private;
6274         arc_buf_t *buf = callback->awcb_buf;
6275         arc_buf_hdr_t *hdr = buf->b_hdr;
6276         uint64_t psize = BP_IS_HOLE(zio->io_bp) ? 0 : BP_GET_PSIZE(zio->io_bp);
6277 
6278         ASSERT(HDR_HAS_L1HDR(hdr));
6279         ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
6280         ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
6281 
6282         /*
6283          * If we're reexecuting this zio because the pool suspended, then
6284          * clean up any state that was previously set the first time the
6285          * callback was invoked.
6286          */
6287         if (zio->io_flags & ZIO_FLAG_REEXECUTED) {
6288                 arc_cksum_free(hdr);
6289                 arc_buf_unwatch(buf);
6290                 if (hdr->b_l1hdr.b_pabd != NULL) {
6291                         if (arc_buf_is_shared(buf)) {
6292                                 arc_unshare_buf(hdr, buf);
6293                         } else {
6294                                 arc_hdr_free_pabd(hdr);
6295                         }
6296                 }
6297         }
6298         ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
6299         ASSERT(!HDR_SHARED_DATA(hdr));
6300         ASSERT(!arc_buf_is_shared(buf));
6301 
6302         callback->awcb_ready(zio, buf, callback->awcb_private);
6303 
6304         if (HDR_IO_IN_PROGRESS(hdr))
6305                 ASSERT(zio->io_flags & ZIO_FLAG_REEXECUTED);
6306 
6307         arc_cksum_compute(buf);
6308         arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
6309 
6310         enum zio_compress compress;
6311         if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
6312                 compress = ZIO_COMPRESS_OFF;
6313         } else {
6314                 ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(zio->io_bp));
6315                 compress = BP_GET_COMPRESS(zio->io_bp);
6316         }
6317         HDR_SET_PSIZE(hdr, psize);
6318         arc_hdr_set_compress(hdr, compress);
6319 
6320 
6321         /*
6322          * Fill the hdr with data. If the hdr is compressed, the data we want
6323          * is available from the zio, otherwise we can take it from the buf.
6324          *
6325          * We might be able to share the buf's data with the hdr here. However,
6326          * doing so would cause the ARC to be full of linear ABDs if we write a
6327          * lot of shareable data. As a compromise, we check whether scattered
6328          * ABDs are allowed, and assume that if they are then the user wants
6329          * the ARC to be primarily filled with them regardless of the data being
6330          * written. Therefore, if they're allowed then we allocate one and copy
6331          * the data into it; otherwise, we share the data directly if we can.
6332          */
6333         if (zfs_abd_scatter_enabled || !arc_can_share(hdr, buf)) {
6334                 arc_hdr_alloc_pabd(hdr);
6335 
6336                 /*
6337                  * Ideally, we would always copy the io_abd into b_pabd, but the
6338                  * user may have disabled compressed ARC, thus we must check the
6339                  * hdr's compression setting rather than the io_bp's.
6340                  */
6341                 if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF) {
6342                         ASSERT3U(BP_GET_COMPRESS(zio->io_bp), !=,
6343                             ZIO_COMPRESS_OFF);
6344                         ASSERT3U(psize, >, 0);
6345 
6346                         abd_copy(hdr->b_l1hdr.b_pabd, zio->io_abd, psize);
6347                 } else {
6348                         ASSERT3U(zio->io_orig_size, ==, arc_hdr_size(hdr));
6349 
6350                         abd_copy_from_buf(hdr->b_l1hdr.b_pabd, buf->b_data,
6351                             arc_buf_size(buf));
6352                 }
6353         } else {
6354                 ASSERT3P(buf->b_data, ==, abd_to_buf(zio->io_orig_abd));
6355                 ASSERT3U(zio->io_orig_size, ==, arc_buf_size(buf));
6356                 ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
6357 
6358                 arc_share_buf(hdr, buf);
6359         }
6360 
6361         arc_hdr_verify(hdr, zio->io_bp);
6362 }
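/*
 * The data-placement decision in arc_write_ready() above reduces to the
 * following sketch (a simplified restatement of the branch on
 * zfs_abd_scatter_enabled):
 *
 *	if (zfs_abd_scatter_enabled || !arc_can_share(hdr, buf)) {
 *		arc_hdr_alloc_pabd(hdr);
 *		copy into b_pabd, from zio->io_abd if the hdr is
 *		compressed, otherwise from buf->b_data
 *	} else {
 *		arc_share_buf(hdr, buf);	(no copy)
 *	}
 */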
6363 
6364 static void
6365 arc_write_children_ready(zio_t *zio)
6366 {
6367         arc_write_callback_t *callback = zio->io_private;
6368         arc_buf_t *buf = callback->awcb_buf;
6369 
6370         callback->awcb_children_ready(zio, buf, callback->awcb_private);
6371 }
6372 
6373 /*
6374  * The SPA calls this callback for each physical write that happens on behalf
6375  * of a logical write.  See the comment in dbuf_write_physdone() for details.
6376  */
6377 static void
6378 arc_write_physdone(zio_t *zio)
6379 {
6380         arc_write_callback_t *cb = zio->io_private;
6381         if (cb->awcb_physdone != NULL)
6382                 cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private);
6383 }
6384 
6385 static void
6386 arc_write_done(zio_t *zio)
6387 {
6388         arc_write_callback_t *callback = zio->io_private;
6389         arc_buf_t *buf = callback->awcb_buf;
6390         arc_buf_hdr_t *hdr = buf->b_hdr;
6391 
6392         ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
6393 
6394         if (zio->io_error == 0) {
6395                 arc_hdr_verify(hdr, zio->io_bp);
6396 
6397                 if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
6398                         buf_discard_identity(hdr);
6399                 } else {
6400                         hdr->b_dva = *BP_IDENTITY(zio->io_bp);
6401                         hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
6402                 }
6403         } else {
6404                 ASSERT(HDR_EMPTY(hdr));
6405         }
6406 
6407         /*
6408          * If the block to be written was all-zero or compressed enough to be
6409          * embedded in the BP, no write was performed so there will be no
6410          * dva/birth/checksum.  The buffer must therefore remain anonymous
6411          * (and uncached).
6412          */
6413         if (!HDR_EMPTY(hdr)) {
6414                 arc_buf_hdr_t *exists;
6415                 kmutex_t *hash_lock;
6416 
6417                 ASSERT3U(zio->io_error, ==, 0);
6418 
6419                 arc_cksum_verify(buf);
6420 
6421                 exists = buf_hash_insert(hdr, &hash_lock);
6422                 if (exists != NULL) {
6423                         /*
6424                          * This can only happen if we overwrite for
6425                          * sync-to-convergence, because we remove
6426                          * buffers from the hash table when we arc_free().
6427                          */
6428                         if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
6429                                 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
6430                                         panic("bad overwrite, hdr=%p exists=%p",
6431                                             (void *)hdr, (void *)exists);
6432                                 ASSERT(refcount_is_zero(
6433                                     &exists->b_l1hdr.b_refcnt));
6434                                 arc_change_state(arc_anon, exists, hash_lock);
6435                                 arc_wait_for_krrp(exists);
6436                                 arc_hdr_destroy(exists);
6437                                 mutex_exit(hash_lock);
6438                                 exists = buf_hash_insert(hdr, &hash_lock);
6439                                 ASSERT3P(exists, ==, NULL);
6440                         } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
6441                                 /* nopwrite */
6442                                 ASSERT(zio->io_prop.zp_nopwrite);
6443                                 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
6444                                         panic("bad nopwrite, hdr=%p exists=%p",
6445                                             (void *)hdr, (void *)exists);
6446                         } else {
6447                                 /* Dedup */
6448                                 ASSERT(hdr->b_l1hdr.b_bufcnt == 1);
6449                                 ASSERT(hdr->b_l1hdr.b_state == arc_anon);
6450                                 ASSERT(BP_GET_DEDUP(zio->io_bp));
6451                                 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
6452                         }
6453                 }
6454                 arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
6455                 /* if it's not anon, we are doing a scrub */
6456                 if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon)
6457                         arc_access(hdr, hash_lock);
6458                 mutex_exit(hash_lock);
6459         } else {
6460                 arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
6461         }
6462 
6463         ASSERT(!refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
6464         callback->awcb_done(zio, buf, callback->awcb_private);
6465 
6466         abd_put(zio->io_abd);
6467         kmem_free(callback, sizeof (arc_write_callback_t));
6468 }
6469 
6470 zio_t *
6471 arc_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
6472     boolean_t l2arc, const zio_prop_t *zp, arc_done_func_t *ready,
6473     arc_done_func_t *children_ready, arc_done_func_t *physdone,
6474     arc_done_func_t *done, void *private, zio_priority_t priority,
6475     int zio_flags, const zbookmark_phys_t *zb,
6476     const zio_smartcomp_info_t *smartcomp)
6477 {
6478         arc_buf_hdr_t *hdr = buf->b_hdr;
6479         arc_write_callback_t *callback;
6480         zio_t *zio;
6481         zio_prop_t localprop = *zp;
6482 
6483         ASSERT3P(ready, !=, NULL);
6484         ASSERT3P(done, !=, NULL);
6485         ASSERT(!HDR_IO_ERROR(hdr));
6486         ASSERT(!HDR_IO_IN_PROGRESS(hdr));
6487         ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
6488         ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
6489         if (l2arc)
6490                 arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
6491         if (ARC_BUF_COMPRESSED(buf)) {
6492                 /*
6493                  * We're writing a pre-compressed buffer.  Make the
6494                  * compression algorithm requested by the zio_prop_t match
6495                  * the pre-compressed buffer's compression algorithm.
6496                  */
6497                 localprop.zp_compress = HDR_GET_COMPRESS(hdr);
6498 
6499                 ASSERT3U(HDR_GET_LSIZE(hdr), !=, arc_buf_size(buf));
6500                 zio_flags |= ZIO_FLAG_RAW;
6501         }
6502         callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
6503         callback->awcb_ready = ready;
6504         callback->awcb_children_ready = children_ready;
6505         callback->awcb_physdone = physdone;
6506         callback->awcb_done = done;
6507         callback->awcb_private = private;
6508         callback->awcb_buf = buf;
6509 
6510         /*
6511          * The hdr's b_pabd is now stale, free it now. A new data block
6512          * will be allocated when the zio pipeline calls arc_write_ready().
6513          */
6514         if (hdr->b_l1hdr.b_pabd != NULL) {
6515                 /*
6516                  * If the buf is currently sharing the data block with
6517                  * the hdr then we need to break that relationship here.
6518                  * The hdr will remain with a NULL data pointer and the
6519                  * buf will take sole ownership of the block.
6520                  */
6521                 if (arc_buf_is_shared(buf)) {
6522                         arc_unshare_buf(hdr, buf);
6523                 } else {
6524                         arc_hdr_free_pabd(hdr);
6525                 }
6526                 VERIFY3P(buf->b_data, !=, NULL);
6527                 arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF);
6528         }
6529         ASSERT(!arc_buf_is_shared(buf));
6530         ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
6531 
6532         zio = zio_write(pio, spa, txg, bp,
6533             abd_get_from_buf(buf->b_data, HDR_GET_LSIZE(hdr)),
6534             HDR_GET_LSIZE(hdr), arc_buf_size(buf), &localprop, arc_write_ready,
6535             (children_ready != NULL) ? arc_write_children_ready : NULL,
6536             arc_write_physdone, arc_write_done, callback,
6537             priority, zio_flags, zb, smartcomp);
6538 
6539         return (zio);
6540 }
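
/*
 * Illustrative caller sketch (not taken from this file; my_ready_cb,
 * my_done_cb, my_arg, and the zio_prop_t/zbookmark setup are hypothetical
 * stand-ins):
 *
 *	zio_t *wzio = arc_write(pio, spa, txg, bp, buf,
 *	    B_TRUE, &zp, my_ready_cb, NULL, NULL, my_done_cb, my_arg,
 *	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb, NULL);
 *	zio_nowait(wzio);
 *
 * The boolean_t argument requests L2ARC caching.  The ready callback fires
 * once the data to be written is finalized, physdone (passed as NULL here)
 * would fire after each physical write, and the done callback fires when
 * the logical write completes.
 */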
6541 
6542 static int
6543 arc_memory_throttle(uint64_t reserve, uint64_t txg)
6544 {
6545 #ifdef _KERNEL
6546         uint64_t available_memory = ptob(freemem);
6547         static uint64_t page_load = 0;
6548         static uint64_t last_txg = 0;
6549 
6550 #if defined(__i386)
6551         available_memory =
6552             MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
6553 #endif
6554 
6555         if (freemem > physmem * arc_lotsfree_percent / 100)
6556                 return (0);
6557 
6558         if (txg > last_txg) {
6559                 last_txg = txg;
6560                 page_load = 0;
6561         }
6562         /*
6563          * If we are in pageout, we know that memory is already tight
6564          * and the ARC is already evicting, so we just want to let
6565          * page writes continue to occur as quickly as possible.
6566          */
6567         if (curproc == proc_pageout) {
6568                 if (page_load > MAX(ptob(minfree), available_memory) / 4)
6569                         return (SET_ERROR(ERESTART));
6570                 /* Note: reserve is inflated, so we deflate */
6571                 page_load += reserve / 8;
6572                 return (0);
6573         } else if (page_load > 0 && arc_reclaim_needed()) {
6574                 /* memory is low, delay before restarting */
6575                 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
6576                 return (SET_ERROR(EAGAIN));
6577         }
6578         page_load = 0;
6579 #endif
6580         return (0);
6581 }
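
/*
 * Illustrative arithmetic for the throttle above (assuming, for the sake of
 * the example, 4 GB of physical memory and arc_lotsfree_percent of 10):
 * the function is a no-op until freemem falls below ~400 MB.  Below that,
 * a pageout-context reservation of 1 MB adds 128 KB (reserve / 8) to
 * page_load, and ERESTART is returned once page_load exceeds one quarter of
 * MAX(ptob(minfree), available_memory); other contexts get EAGAIN when
 * page_load is non-zero and arc_reclaim_needed() says memory is still low.
 */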
6582 
6583 void
6584 arc_tempreserve_clear(uint64_t reserve)
6585 {
6586         atomic_add_64(&arc_tempreserve, -reserve);
6587         ASSERT((int64_t)arc_tempreserve >= 0);
6588 }
6589 
6590 int
6591 arc_tempreserve_space(uint64_t reserve, uint64_t txg)
6592 {
6593         int error;
6594         uint64_t anon_size;
6595 
6596         if (reserve > arc_c/4 && !arc_no_grow)
6597                 arc_c = MIN(arc_c_max, reserve * 4);
6598         if (reserve > arc_c)
6599                 return (SET_ERROR(ENOMEM));
6600 
6601         /*
6602          * Don't count loaned bufs as in flight dirty data to prevent long
6603          * network delays from blocking transactions that are ready to be
6604          * assigned to a txg.
6605          */
6606 
6607         /* assert that it has not wrapped around */
6608         ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
6609 
6610         anon_size = MAX((int64_t)(refcount_count(&arc_anon->arcs_size) -
6611             arc_loaned_bytes), 0);
6612 
6613         /*
6614          * Writes will, almost always, require additional memory allocations
6615          * in order to compress/encrypt/etc the data.  We therefore need to
6616          * make sure that there is sufficient available memory for this.
6617          */
6618         error = arc_memory_throttle(reserve, txg);
6619         if (error != 0)
6620                 return (error);
6621 
6622         /*
6623          * Throttle writes when the amount of dirty data in the cache
6624          * gets too large.  We try to keep the cache less than half full
6625          * of dirty blocks so that our sync times don't grow too large.
6626          * Note: if two requests come in concurrently, we might let them
6627          * both succeed, when one of them should fail.  Not a huge deal.
6628          */
6629         if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
6630             anon_size > arc_c / 4) {
6631                 DTRACE_PROBE4(arc__tempreserve__space__throttle, uint64_t,
6632                     arc_tempreserve, arc_state_t *, arc_anon, uint64_t,
6633                     reserve, uint64_t, arc_c);
6634 
6635                 uint64_t meta_esize =
6636                     refcount_count(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
6637                 uint64_t data_esize =
6638                     refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
6639                 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
6640                     "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
6641                     arc_tempreserve >> 10, meta_esize >> 10,
6642                     data_esize >> 10, reserve >> 10, arc_c >> 10);
6643                 return (SET_ERROR(ERESTART));
6644         }
6645         atomic_add_64(&arc_tempreserve, reserve);
6646         return (0);
6647 }
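
/*
 * Worked example of the dirty-data throttle above, assuming arc_c = 4 GB:
 * a reservation is pushed back with ERESTART only when anonymous (dirty)
 * data already exceeds 1 GB (arc_c / 4) *and* reserve + arc_tempreserve +
 * anon_size would exceed 2 GB (arc_c / 2).  Smaller amounts of dirty data
 * are admitted and simply added to arc_tempreserve.
 */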
6648 
6649 static void
6650 arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
6651     kstat_named_t *evict_data, kstat_named_t *evict_metadata,
6652     kstat_named_t *evict_ddt)
6653 {
6654         size->value.ui64 = refcount_count(&state->arcs_size);
6655         evict_data->value.ui64 =
6656             refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
6657         evict_metadata->value.ui64 =
6658             refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
6659         evict_ddt->value.ui64 =
6660             refcount_count(&state->arcs_esize[ARC_BUFC_DDT]);
6661 }
6662 
6663 static int
6664 arc_kstat_update(kstat_t *ksp, int rw)
6665 {
6666         arc_stats_t *as = ksp->ks_data;
6667 
6668         if (rw == KSTAT_WRITE) {
6669                 return (EACCES);
6670         } else {
6671                 arc_kstat_update_state(arc_anon,
6672                     &as->arcstat_anon_size,
6673                     &as->arcstat_anon_evictable_data,
6674                     &as->arcstat_anon_evictable_metadata,
6675                     &as->arcstat_anon_evictable_ddt);
6676                 arc_kstat_update_state(arc_mru,
6677                     &as->arcstat_mru_size,
6678                     &as->arcstat_mru_evictable_data,
6679                     &as->arcstat_mru_evictable_metadata,
6680                     &as->arcstat_mru_evictable_ddt);
6681                 arc_kstat_update_state(arc_mru_ghost,
6682                     &as->arcstat_mru_ghost_size,
6683                     &as->arcstat_mru_ghost_evictable_data,
6684                     &as->arcstat_mru_ghost_evictable_metadata,
6685                     &as->arcstat_mru_ghost_evictable_ddt);
6686                 arc_kstat_update_state(arc_mfu,
6687                     &as->arcstat_mfu_size,
6688                     &as->arcstat_mfu_evictable_data,
6689                     &as->arcstat_mfu_evictable_metadata,
6690                     &as->arcstat_mfu_evictable_ddt);
6691                 arc_kstat_update_state(arc_mfu_ghost,
6692                     &as->arcstat_mfu_ghost_size,
6693                     &as->arcstat_mfu_ghost_evictable_data,
6694                     &as->arcstat_mfu_ghost_evictable_metadata,
6695                     &as->arcstat_mfu_ghost_evictable_ddt);
6696         }
6697 
6698         return (0);
6699 }
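
/*
 * The resulting "arcstats" kstat is read-only (writes return EACCES above)
 * and can be inspected from userland with any kstat consumer, e.g.
 * "kstat -m zfs -n arcstats" on illumos-derived systems.
 */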
6700 
6701 /*
6702  * This function *must* return indices evenly distributed between all
6703  * sublists of the multilist. This is needed due to how the ARC eviction
6704  * code is laid out; arc_evict_state() assumes ARC buffers are evenly
6705  * distributed between all sublists and uses this assumption when
6706  * deciding which sublist to evict from and how much to evict from it.
6707  */
6708 unsigned int
6709 arc_state_multilist_index_func(multilist_t *ml, void *obj)
6710 {
6711         arc_buf_hdr_t *hdr = obj;
6712 
6713         /*
6714          * We rely on b_dva to generate evenly distributed index
6715          * numbers using buf_hash below. So, as an added precaution,
6716          * let's make sure we never add empty buffers to the arc lists.
6717          */
6718         ASSERT(!HDR_EMPTY(hdr));
6719 
6720         /*
6721          * The assumption here is that the hash value for a given
6722          * arc_buf_hdr_t will remain constant throughout its lifetime
6723          * (i.e. its b_spa, b_dva, and b_birth fields don't change).
6724          * Thus, we don't need to store the header's sublist index
6725          * on insertion, as this index can be recalculated on removal.
6726          *
6727          * Also, the low order bits of the hash value are thought to be
6728          * distributed evenly. Otherwise, in the case that the multilist
6729          * has a power of two number of sublists, each sublist's usage
6730          * would not be evenly distributed.
6731          */
6732         return (buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) %
6733             multilist_get_num_sublists(ml));
6734 }
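
/*
 * For example, with 64 sublists a header whose buf_hash() value is
 * 0x1234567 lands in sublist 0x1234567 % 64 = 39.  Because the low-order
 * bits of the hash are evenly distributed, headers spread evenly across
 * the sublists, which is what arc_evict_state() relies on.
 */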
6735 
6736 static void
6737 arc_state_init(void)
6738 {
6739         arc_anon = &ARC_anon;
6740         arc_mru = &ARC_mru;
6741         arc_mru_ghost = &ARC_mru_ghost;
6742         arc_mfu = &ARC_mfu;
6743         arc_mfu_ghost = &ARC_mfu_ghost;
6744         arc_l2c_only = &ARC_l2c_only;
6745         arc_buf_contents_t arcs;
6746 
6747         for (arcs = ARC_BUFC_DATA; arcs < ARC_BUFC_NUMTYPES; ++arcs) {
6748                 arc_mru->arcs_list[arcs] =
6749                     multilist_create(sizeof (arc_buf_hdr_t),
6750                     offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
6751                     arc_state_multilist_index_func);
6752                 arc_mru_ghost->arcs_list[arcs] =
6753                     multilist_create(sizeof (arc_buf_hdr_t),
6754                     offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
6755                         arc_state_multilist_index_func);
6756                 arc_mfu->arcs_list[arcs] =
6757                     multilist_create(sizeof (arc_buf_hdr_t),
6758                     offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
6759                     arc_state_multilist_index_func);
6760                 arc_mfu_ghost->arcs_list[arcs] =
6761                     multilist_create(sizeof (arc_buf_hdr_t),
6762                     offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
6763                     arc_state_multilist_index_func);
6764                 arc_l2c_only->arcs_list[arcs] =
6765                     multilist_create(sizeof (arc_buf_hdr_t),
6766                     offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
6767                     arc_state_multilist_index_func);
6768 
6769                 refcount_create(&arc_anon->arcs_esize[arcs]);
6770                 refcount_create(&arc_mru->arcs_esize[arcs]);
6771                 refcount_create(&arc_mru_ghost->arcs_esize[arcs]);
6772                 refcount_create(&arc_mfu->arcs_esize[arcs]);
6773                 refcount_create(&arc_mfu_ghost->arcs_esize[arcs]);
6774                 refcount_create(&arc_l2c_only->arcs_esize[arcs]);
6775         }
6776 
6777         arc_flush_taskq = taskq_create("arc_flush_tq",
6778             max_ncpus, minclsyspri, 1, zfs_flush_ntasks, TASKQ_DYNAMIC);
6779 
6780         refcount_create(&arc_anon->arcs_size);
6781         refcount_create(&arc_mru->arcs_size);
6782         refcount_create(&arc_mru_ghost->arcs_size);
6783         refcount_create(&arc_mfu->arcs_size);
6784         refcount_create(&arc_mfu_ghost->arcs_size);
6785         refcount_create(&arc_l2c_only->arcs_size);
6786 }
6787 
6788 static void
6789 arc_state_fini(void)
6790 {
6791         arc_buf_contents_t arcs;
6792 
6793         refcount_destroy(&arc_anon->arcs_size);
6794         refcount_destroy(&arc_mru->arcs_size);
6795         refcount_destroy(&arc_mru_ghost->arcs_size);
6796         refcount_destroy(&arc_mfu->arcs_size);
6797         refcount_destroy(&arc_mfu_ghost->arcs_size);
6798         refcount_destroy(&arc_l2c_only->arcs_size);
6799 
6800         for (arcs = ARC_BUFC_DATA; arcs < ARC_BUFC_NUMTYPES; ++arcs) {
6801                 multilist_destroy(arc_mru->arcs_list[arcs]);
6802                 multilist_destroy(arc_mru_ghost->arcs_list[arcs]);
6803                 multilist_destroy(arc_mfu->arcs_list[arcs]);
6804                 multilist_destroy(arc_mfu_ghost->arcs_list[arcs]);
6805                 multilist_destroy(arc_l2c_only->arcs_list[arcs]);
6806 
6807                 refcount_destroy(&arc_anon->arcs_esize[arcs]);
6808                 refcount_destroy(&arc_mru->arcs_esize[arcs]);
6809                 refcount_destroy(&arc_mru_ghost->arcs_esize[arcs]);
6810                 refcount_destroy(&arc_mfu->arcs_esize[arcs]);
6811                 refcount_destroy(&arc_mfu_ghost->arcs_esize[arcs]);
6812                 refcount_destroy(&arc_l2c_only->arcs_esize[arcs]);
6813         }
6814 }
6815 
6816 uint64_t
6817 arc_max_bytes(void)
6818 {
6819         return (arc_c_max);
6820 }
6821 
6822 void
6823 arc_init(void)
6824 {
6825         /*
6826          * allmem is "all memory that we could possibly use".
6827          */
6828 #ifdef _KERNEL
6829         uint64_t allmem = ptob(physmem - swapfs_minfree);
6830 #else
6831         uint64_t allmem = (physmem * PAGESIZE) / 2;
6832 #endif
6833 
6834         mutex_init(&arc_reclaim_lock, NULL, MUTEX_DEFAULT, NULL);
6835         cv_init(&arc_reclaim_thread_cv, NULL, CV_DEFAULT, NULL);
6836         cv_init(&arc_reclaim_waiters_cv, NULL, CV_DEFAULT, NULL);
6837 
6838         /* Convert seconds to clock ticks */
6839         arc_min_prefetch_lifespan = 1 * hz;
6840 
6841         /* set min cache to 1/32 of all memory, or 64MB, whichever is more */
6842         arc_c_min = MAX(allmem / 32, 64 << 20);
6843         /* set max to 3/4 of all memory, or all but 1GB, whichever is more */
6844         if (allmem >= 1 << 30)
6845                 arc_c_max = allmem - (1 << 30);
6846         else
6847                 arc_c_max = arc_c_min;
6848         arc_c_max = MAX(allmem * 3 / 4, arc_c_max);
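
        /*
         * Worked example of the defaults above: with allmem = 16 GB,
         * arc_c_min = MAX(16 GB / 32, 64 MB) = 512 MB and
         * arc_c_max = MAX(16 GB * 3/4, 16 GB - 1 GB) = 15 GB.
         */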
6849 
6850         /*
6851          * In userland, there's only the memory pressure that we artificially
6852          * create (see arc_available_memory()).  Don't let arc_c get too
6853          * small, because it can cause transactions to be larger than
6854          * arc_c, causing arc_tempreserve_space() to fail.
6855          */
6856 #ifndef _KERNEL
6857         arc_c_min = arc_c_max / 2;
6858 #endif
6859 
6860         /*
6861          * Allow the tunables to override our calculations if they are
6862          * reasonable (i.e. over 64MB).
6863          */
6864         if (zfs_arc_max > 64 << 20 && zfs_arc_max < allmem) {
6865                 arc_c_max = zfs_arc_max;
6866                 arc_c_min = MIN(arc_c_min, arc_c_max);
6867         }
6868         if (zfs_arc_min > 64 << 20 && zfs_arc_min <= arc_c_max)
6869                 arc_c_min = zfs_arc_min;
6870 
6871         arc_c = arc_c_max;
6872         arc_p = (arc_c >> 1);
6873         arc_size = 0;
6874 
6875         /* limit ddt meta-data to 1/4 of the arc capacity */
6876         arc_ddt_limit = arc_c_max / 4;
6877         /* limit meta-data to 1/4 of the arc capacity */
6878         arc_meta_limit = arc_c_max / 4;
6879 
6880 #ifdef _KERNEL
6881         /*
6882          * Metadata is stored in the kernel's heap.  Don't let us
6883          * use more than half the heap for the ARC.
6884          */
6885         arc_meta_limit = MIN(arc_meta_limit,
6886             vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 2);
6887 #endif
6888 
6889         /* Allow the tunable to override if it is reasonable */
6890         if (zfs_arc_ddt_limit > 0 && zfs_arc_ddt_limit <= arc_c_max)
6891                 arc_ddt_limit = zfs_arc_ddt_limit;
6892         arc_ddt_evict_threshold =
6893             zfs_arc_segregate_ddt ? &arc_ddt_limit : &arc_meta_limit;
6894 
6895         /* Allow the tunable to override if it is reasonable */
6896         if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
6897                 arc_meta_limit = zfs_arc_meta_limit;
6898 
6899         if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
6900                 arc_c_min = arc_meta_limit / 2;
6901 
6902         if (zfs_arc_meta_min > 0) {
6903                 arc_meta_min = zfs_arc_meta_min;
6904         } else {
6905                 arc_meta_min = arc_c_min / 2;
6906         }
6907 
6908         if (zfs_arc_grow_retry > 0)
6909                 arc_grow_retry = zfs_arc_grow_retry;
6910 
6911         if (zfs_arc_shrink_shift > 0)
6912                 arc_shrink_shift = zfs_arc_shrink_shift;
6913 
6914         /*
6915          * Ensure that arc_no_grow_shift is less than arc_shrink_shift.
6916          */
6917         if (arc_no_grow_shift >= arc_shrink_shift)
6918                 arc_no_grow_shift = arc_shrink_shift - 1;
6919 
6920         if (zfs_arc_p_min_shift > 0)
6921                 arc_p_min_shift = zfs_arc_p_min_shift;
6922 
6923         /* if kmem_flags are set, let's try to use less memory */
6924         if (kmem_debugging())
6925                 arc_c = arc_c / 2;
6926         if (arc_c < arc_c_min)
6927                 arc_c = arc_c_min;
6928 
6929         arc_state_init();
6930         buf_init();
6931 
6932         arc_reclaim_thread_exit = B_FALSE;
6933 
6934         arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
6935             sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
6936 
6937         if (arc_ksp != NULL) {
6938                 arc_ksp->ks_data = &arc_stats;
6939                 arc_ksp->ks_update = arc_kstat_update;
6940                 kstat_install(arc_ksp);
6941         }
6942 
6943         (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
6944             TS_RUN, minclsyspri);
6945 
6946         arc_dead = B_FALSE;
6947         arc_warm = B_FALSE;
6948 
6949         /*
6950          * Calculate maximum amount of dirty data per pool.
6951          *
6952          * If it has been set by /etc/system, take that.
6953          * Otherwise, use a percentage of physical memory defined by
6954          * zfs_dirty_data_max_percent (default 10%) with a cap at
6955          * zfs_dirty_data_max_max (default 4GB).
6956          */
6957         if (zfs_dirty_data_max == 0) {
6958                 zfs_dirty_data_max = physmem * PAGESIZE *
6959                     zfs_dirty_data_max_percent / 100;
6960                 zfs_dirty_data_max = MIN(zfs_dirty_data_max,
6961                     zfs_dirty_data_max_max);
6962         }
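
        /*
         * E.g. with 32 GB of physical memory and the default 10% above,
         * zfs_dirty_data_max comes out to ~3.2 GB; with 64 GB it would be
         * capped at the 4 GB zfs_dirty_data_max_max default.
         */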
6963 }
6964 
6965 void
6966 arc_fini(void)
6967 {
6968         mutex_enter(&arc_reclaim_lock);
6969         arc_reclaim_thread_exit = B_TRUE;
6970         /*
6971          * The reclaim thread will set arc_reclaim_thread_exit back to
6972          * B_FALSE when it is finished exiting; we're waiting for that.
6973          */
6974         while (arc_reclaim_thread_exit) {
6975                 cv_signal(&arc_reclaim_thread_cv);
6976                 cv_wait(&arc_reclaim_thread_cv, &arc_reclaim_lock);
6977         }
6978         mutex_exit(&arc_reclaim_lock);
6979 
6980         /* Use B_TRUE to ensure *all* buffers are evicted */
6981         arc_flush(NULL, B_TRUE);
6982 
6983         arc_dead = B_TRUE;
6984 
6985         if (arc_ksp != NULL) {
6986                 kstat_delete(arc_ksp);
6987                 arc_ksp = NULL;
6988         }
6989 
6990         taskq_destroy(arc_flush_taskq);
6991 
6992         mutex_destroy(&arc_reclaim_lock);
6993         cv_destroy(&arc_reclaim_thread_cv);
6994         cv_destroy(&arc_reclaim_waiters_cv);
6995 
6996         arc_state_fini();
6997         buf_fini();
6998 
6999         ASSERT0(arc_loaned_bytes);
7000 }
7001 
7002 /*
7003  * Level 2 ARC
7004  *
7005  * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
7006  * It uses dedicated storage devices to hold cached data, which are populated
7007  * using large infrequent writes.  The main role of this cache is to boost
7008  * the performance of random read workloads.  The intended L2ARC devices
7009  * include short-stroked disks, solid state disks, and other media with
7010  * substantially faster read latency than disk.
7011  *
7012  *                 +-----------------------+
7013  *                 |         ARC           |
7014  *                 +-----------------------+
7015  *                    |         ^     ^
7016  *                    |         |     |
7017  *      l2arc_feed_thread()    arc_read()
7018  *                    |         |     |
7019  *                    |  l2arc read   |
7020  *                    V         |     |
7021  *               +---------------+    |
7022  *               |     L2ARC     |    |
7023  *               +---------------+    |
7024  *                   |    ^           |
7025  *          l2arc_write() |           |
7026  *                   |    |           |
7027  *                   V    |           |
7028  *                 +-------+      +-------+
7029  *                 | vdev  |      | vdev  |
7030  *                 | cache |      | cache |
7031  *                 +-------+      +-------+
7032  *                 +=========+     .-----.
7033  *                 :  L2ARC  :    |-_____-|
7034  *                 : devices :    | Disks |
7035  *                 +=========+    `-_____-'
7036  *
7037  * Read requests are satisfied from the following sources, in order:
7038  *
7039  *      1) ARC
7040  *      2) vdev cache of L2ARC devices
7041  *      3) L2ARC devices
7042  *      4) vdev cache of disks
7043  *      5) disks
7044  *
7045  * Some L2ARC device types exhibit extremely slow write performance.
7046  * To accommodate this, there are some significant differences between
7047  * the L2ARC and traditional cache design:
7048  *
7049  * 1. There is no eviction path from the ARC to the L2ARC.  Evictions from
7050  * the ARC behave as usual, freeing buffers and placing headers on ghost
7051  * lists.  The ARC does not send buffers to the L2ARC during eviction as
7052  * this would add inflated write latencies for all ARC memory pressure.
7053  *
7054  * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
7055  * It does this by periodically scanning buffers from the eviction-end of
7056  * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
7057  * not already there. It scans until a headroom of buffers is satisfied,
7058  * which itself is a buffer for ARC eviction. If a compressible buffer is
7059  * found during scanning and selected for writing to an L2ARC device, we
7060  * temporarily boost scanning headroom during the next scan cycle to make
7061  * sure we adapt to compression effects (which might significantly reduce
7062  * the data volume we write to L2ARC). The thread that does this is
7063  * l2arc_feed_thread(), illustrated below; example sizes are included to
7064  * provide a better sense of ratio than this diagram:
7065  *
7066  *             head -->                        tail
7067  *              +---------------------+----------+
7068  *      ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->.   # already on L2ARC
7069  *              +---------------------+----------+   |   o L2ARC eligible
7070  *      ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->|   : ARC buffer
7071  *              +---------------------+----------+   |
7072  *                   15.9 Gbytes      ^ 32 Mbytes    |
7073  *                                 headroom          |
7074  *                                            l2arc_feed_thread()
7075  *                                                   |
7076  *                       l2arc write hand <--[oooo]--'
7077  *                               |           8 Mbyte
7078  *                               |          write max
7079  *                               V
7080  *                +==============================+
7081  *      L2ARC dev |####|#|###|###|    |####| ... |
7082  *                +==============================+
7083  *                           32 Gbytes
7084  *
7085  * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
7086  * evicted, then the L2ARC has cached a buffer much sooner than it probably
7087  * needed to, potentially wasting L2ARC device bandwidth and storage.  It is
7088  * safe to say that this is an uncommon case, since buffers at the end of
7089  * the ARC lists have moved there due to inactivity.
7090  *
7091  * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
7092  * then the L2ARC simply misses copying some buffers.  This serves as a
7093  * pressure valve to prevent heavy read workloads from both stalling the ARC
7094  * with waits and clogging the L2ARC with writes.  This also helps prevent
7095  * the potential for the L2ARC to churn if it attempts to cache content too
7096  * quickly, such as during backups of the entire pool.
7097  *
7098  * 5. After system boot and before the ARC has filled main memory, there are
7099  * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
7100  * lists can remain mostly static.  Instead of searching from the tail of these
7101  * lists as pictured, the l2arc_feed_thread() will search from the list heads
7102  * for eligible buffers, greatly increasing its chance of finding them.
7103  *
7104  * The L2ARC device write speed is also boosted during this time so that
7105  * the L2ARC warms up faster.  Since there have been no ARC evictions yet,
7106  * there are no L2ARC reads, and no fear of degrading read performance
7107  * through increased writes.
7108  *
7109  * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
7110  * the vdev queue can aggregate them into larger and fewer writes.  Each
7111  * device is written to in a rotor fashion, sweeping writes through
7112  * available space then repeating.
7113  *
7114  * 7. The L2ARC does not store dirty content.  It never needs to flush
7115  * write buffers back to disk based storage.
7116  *
7117  * 8. If an ARC buffer is written (and dirtied) which also exists in the
7118  * L2ARC, the now stale L2ARC buffer is immediately dropped.
7119  *
7120  * The performance of the L2ARC can be tweaked by a number of tunables, which
7121  * may be necessary for different workloads:
7122  *
7123  *      l2arc_write_max         max write bytes per interval
7124  *      l2arc_write_boost       extra write bytes during device warmup
7125  *      l2arc_noprefetch        skip caching prefetched buffers
7126  *      l2arc_headroom          number of max device writes to precache
7127  *      l2arc_headroom_boost    when we find compressed buffers during ARC
7128  *                              scanning, we multiply headroom by this
7129  *                              percentage factor for the next scan cycle,
7130  *                              since more compressed buffers are likely to
7131  *                              be present
7132  *      l2arc_feed_secs         seconds between L2ARC writing
7133  *
7134  * Tunables may be removed or added as future performance improvements are
7135  * integrated, and also may become zpool properties.
7136  *
7137  * There are three key functions that control how the L2ARC warms up:
7138  *
7139  *      l2arc_write_eligible()  check if a buffer is eligible to cache
7140  *      l2arc_write_size()      calculate how much to write
7141  *      l2arc_write_interval()  calculate sleep delay between writes
7142  *
7143  * These three functions determine what to write, how much, and how quickly
7144  * to send writes.
7145  *
7146  * L2ARC persistency:
7147  *
7148  * When writing buffers to L2ARC, we periodically add some metadata to
7149  * make sure we can pick them up after reboot, thus dramatically reducing
7150  * the impact that any downtime has on the performance of storage systems
7151  * with large caches.
7152  *
7153  * The implementation works fairly simply by integrating the following two
7154  * modifications:
7155  *
7156  * *) Every now and then we mix in a piece of metadata (called a log block)
7157  *    into the L2ARC write. This allows us to understand what's been written,
7158  *    so that we can rebuild the arc_buf_hdr_t structures of the main ARC
7159  *    buffers. The log block also includes a "2-back-reference" pointer to
7160  *    the second-to-previous block, forming a back-linked list of blocks on
7161  *    the L2ARC device.
7162  *
7163  * *) We reserve SPA_MINBLOCKSIZE of space at the start of each L2ARC device
7164  *    for our header bookkeeping purposes. This contains a device header,
7165  *    which contains our top-level reference structures. We update it each
7166  *    time we write a new log block, so that we're able to locate it in the
7167  *    L2ARC device. If this write results in an inconsistent device header
7168  *    (e.g. due to power failure), we detect this by verifying the header's
7169  *    checksum and simply drop the entries from L2ARC.
7170  *
7171  * Implementation diagram:
7172  *
7173  * +=== L2ARC device (not to scale) ======================================+
7174  * |       ___two newest log block pointers__.__________                  |
7175  * |      /                                   \1 back   \latest           |
7176  * |.____/_.                                   V         V                |
7177  * ||L2 dev|....|lb |bufs |lb |bufs |lb |bufs |lb |bufs |lb |---(empty)---|
7178  * ||   hdr|      ^         /^       /^        /         /                |
7179  * |+------+  ...--\-------/  \-----/--\------/         /                 |
7180  * |                \--------------/    \--------------/                  |
7181  * +======================================================================+
7182  *
7183  * As can be seen in the diagram, rather than using a simple linked
7184  * list, we use a pair of linked lists with alternating elements. This
7185  * is a performance enhancement, because we only find out the address
7186  * of the next log block once the current block has been completely
7187  * read in. With a single list, this would keep the device's I/O queue
7188  * only one operation deep, thus incurring a large amount of I/O
7189  * round-trip latency. Having two lists allows us to "prefetch" two
7190  * log blocks ahead of where we are currently rebuilding L2ARC
7191  * buffers.
7192  *
7193  * On-device data structures:
7194  *
7195  * L2ARC device header: l2arc_dev_hdr_phys_t
7196  * L2ARC log block:     l2arc_log_blk_phys_t
7197  *
7198  * L2ARC reconstruction:
7199  *
7200  * When writing data, we simply write in the standard rotary fashion,
7201  * evicting buffers as we go and simply writing new data over them (writing
7202  * a new log block every now and then). This obviously means that once we
7203  * loop around the end of the device, we will start cutting into an already
7204  * committed log block (and its referenced data buffers), like so:
7205  *
7206  *    current write head__       __old tail
7207  *                        \     /
7208  *                        V    V
7209  * <--|bufs |lb |bufs |lb |    |bufs |lb |bufs |lb |-->
7210  *                         ^    ^^^^^^^^^___________________________________
7211  *                         |                                                \
7212  *                   <<nextwrite>> may overwrite this blk and/or its bufs --'
7213  *
7214  * When importing the pool, we detect this situation and use it to stop
7215  * our scanning process (see l2arc_rebuild).
7216  *
7217  * There is one significant caveat to consider when rebuilding ARC contents
7218  * from an L2ARC device: what about invalidated buffers? Given the above
7219  * construction, we cannot update blocks which we've already written to amend
7220  * them to remove buffers which were invalidated. Thus, during reconstruction,
7221  * we might be populating the cache with buffers for data that's not on the
7222  * main pool anymore, or may have been overwritten!
7223  *
7224  * As it turns out, this isn't a problem. Every arc_read request includes
7225  * both the DVA and, crucially, the birth TXG of the BP the caller is
7226  * looking for. So even if the cache were populated by completely rotten
7227  * blocks for data that had long been deleted and/or overwritten, we'll
7228  * never actually return bad data from the cache, since the DVA together
7229  * with the birth TXG uniquely identifies a block in space and time; once
7230  * created, a block is immutable on disk. The worst we can do is waste
7231  * some time and memory at l2arc rebuild to reconstruct outdated ARC
7232  * entries that will get dropped from the l2arc as it is being updated
7233  * with new blocks.
7234  */
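
/*
 * A minimal sketch of the read-ahead that the paired log block lists make
 * possible during rebuild (illustrative pseudocode only; the steps and
 * names below are hypothetical, not the actual implementation):
 *
 *	issue read of newest log block (located via the device header)
 *	issue read of second-newest log block
 *	while (the oldest outstanding read returns a valid block lb) {
 *		issue read of the block two back from lb
 *		recreate ARC headers for the entries recorded in lb
 *	}
 *
 * At any point two log block reads are in flight, hiding most of the
 * per-block I/O round-trip latency described above.
 */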
7235 
7236 static boolean_t
7237 l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr)
7238 {
7239         /*
7240          * A buffer is *not* eligible for the L2ARC if it:
7241          * 1. belongs to a different spa.
7242          * 2. is already cached on the L2ARC.
7243          * 3. has an I/O in progress (it may be an incomplete read).
7244          * 4. is flagged not eligible (zfs property).
7245          */
7246         if (hdr->b_spa != spa_guid || HDR_HAS_L2HDR(hdr) ||
7247             HDR_IO_IN_PROGRESS(hdr) || !HDR_L2CACHE(hdr))
7248                 return (B_FALSE);
7249 
7250         return (B_TRUE);
7251 }
7252 
7253 static uint64_t
7254 l2arc_write_size(void)
7255 {
7256         uint64_t size;
7257 
7258         /*
7259          * Make sure our globals have meaningful values in case the user
7260          * altered them.
7261          */
7262         size = l2arc_write_max;
7263         if (size == 0) {
7264                 cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
7265                     "be greater than zero, resetting it to the default (%d)",
7266                     L2ARC_WRITE_SIZE);
7267                 size = l2arc_write_max = L2ARC_WRITE_SIZE;
7268         }
7269 
7270         if (arc_warm == B_FALSE)
7271                 size += l2arc_write_boost;
7272 
7273         return (size);
7274 
7275 }
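
/*
 * Using the example sizes from the diagram above (an 8 MB write max), a
 * warm system feeds at most 8 MB per interval; while arc_warm is still
 * B_FALSE the boost is added on top, e.g. 16 MB per interval if
 * l2arc_write_boost is also set to 8 MB (an assumed value for this
 * illustration).
 */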
7276 
7277 static clock_t
7278 l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
7279 {
7280         clock_t interval, next, now;
7281 
7282         /*
7283          * If the ARC lists are busy, increase our write rate; if the
7284          * lists are stale, idle back.  This is achieved by checking
7285          * how much we previously wrote - if it was more than half of
7286          * what we wanted, schedule the next write much sooner.
7287          */
7288         if (l2arc_feed_again && wrote > (wanted / 2))
7289                 interval = (hz * l2arc_feed_min_ms) / 1000;
7290         else
7291                 interval = hz * l2arc_feed_secs;
7292 
7293         now = ddi_get_lbolt();
7294         next = MAX(now, MIN(now + interval, began + interval));
7295 
7296         return (next);
7297 }
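
/*
 * For example, assuming l2arc_feed_secs = 1 and l2arc_feed_min_ms = 200
 * (common defaults): if the previous pass wrote more than half of what it
 * wanted and l2arc_feed_again is set, the next pass is scheduled ~200 ms
 * after the previous one began; otherwise ~1 second after it began, but
 * never earlier than the current lbolt.
 */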
7298 
7299 typedef enum l2ad_feed {
7300         L2ARC_FEED_ALL = 1,
7301         L2ARC_FEED_DDT_DEV,
7302         L2ARC_FEED_NON_DDT_DEV,
7303 } l2ad_feed_t;
7304 
7305 /*
7306  * Cycle through L2ARC devices.  This is how L2ARC load balances.
7307  * If a device is returned, the spa config lock is also held on return.
7308  */
7309 static l2arc_dev_t *
7310 l2arc_dev_get_next(l2ad_feed_t feed_type)
7311 {
7312         l2arc_dev_t *start = NULL, *next = NULL;
7313 
7314         /*
7315          * Lock out the removal of spas (spa_namespace_lock), then removal
7316          * of cache devices (l2arc_dev_mtx).  Once a device has been selected,
7317          * both locks will be dropped and a spa config lock held instead.
7318          */
7319         mutex_enter(&spa_namespace_lock);
7320         mutex_enter(&l2arc_dev_mtx);
7321 
7322         /* if there are no vdevs, there is nothing to do */
7323         if (l2arc_ndev == 0)
7324                 goto out;
7325 
7326         if (feed_type == L2ARC_FEED_DDT_DEV)
7327                 next = l2arc_ddt_dev_last;
7328         else
7329                 next = l2arc_dev_last;
7330 
7331         /* figure out what the next device we look at should be */
7332         if (next == NULL)
7333                 next = list_head(l2arc_dev_list);
7334         else if (list_next(l2arc_dev_list, next) == NULL)
7335                 next = list_head(l2arc_dev_list);
7336         else
7337                 next = list_next(l2arc_dev_list, next);
7338         ASSERT(next);
7339 
7340         /* loop through L2ARC devs looking for the one we need */
7341         /* LINTED(E_CONSTANT_CONDITION) */
7342         while (1) {
7343                 if (next == NULL) /* reached list end, start from beginning */
7344                         next = list_head(l2arc_dev_list);
7345 
7346                 if (start == NULL) { /* save starting dev */
7347                         start = next;
7348                 } else if (start == next) { /* full loop completed - stop now */
7349                         next = NULL;
7350                         if (feed_type == L2ARC_FEED_DDT_DEV) {
7351                                 l2arc_ddt_dev_last = NULL;
7352                                 goto out;
7353                         } else {
7354                                 break;
7355                         }
7356                 }
7357 
7358                 if (!vdev_is_dead(next->l2ad_vdev) && !next->l2ad_rebuild) {
7359                         if (feed_type == L2ARC_FEED_DDT_DEV) {
7360                                 if (vdev_type_is_ddt(next->l2ad_vdev)) {
7361                                         l2arc_ddt_dev_last = next;
7362                                         goto out;
7363                                 }
7364                         } else if (feed_type == L2ARC_FEED_NON_DDT_DEV) {
7365                                 if (!vdev_type_is_ddt(next->l2ad_vdev)) {
7366                                         break;
7367                                 }
7368                         } else {
7369                                 ASSERT(feed_type == L2ARC_FEED_ALL);
7370                                 break;
7371                         }
7372                 }
7373                 next = list_next(l2arc_dev_list, next);
7374         }
7375         l2arc_dev_last = next;
7376 
7377 out:
7378         mutex_exit(&l2arc_dev_mtx);
7379 
7380         /*
7381          * Grab the config lock to prevent the 'next' device from being
7382          * removed while we are writing to it.
7383          */
7384         if (next != NULL)
7385                 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
7386         mutex_exit(&spa_namespace_lock);
7387 
7388         return (next);
7389 }
7390 
7391 /*
7392  * Free buffers that were tagged for destruction.
7393  */
7394 static void
7395 l2arc_do_free_on_write()
7396 {
7397         list_t *buflist;
7398         l2arc_data_free_t *df, *df_prev;
7399 
7400         mutex_enter(&l2arc_free_on_write_mtx);
7401         buflist = l2arc_free_on_write;
7402 
7403         for (df = list_tail(buflist); df; df = df_prev) {
7404                 df_prev = list_prev(buflist, df);
7405                 ASSERT3P(df->l2df_abd, !=, NULL);
7406                 abd_free(df->l2df_abd);
7407                 list_remove(buflist, df);
7408                 kmem_free(df, sizeof (l2arc_data_free_t));
7409         }
7410 
7411         mutex_exit(&l2arc_free_on_write_mtx);
7412 }
7413 
7414 /*
7415  * A write to a cache device has completed.  Update all headers to allow
7416  * reads from these buffers to begin.
7417  */
7418 static void
7419 l2arc_write_done(zio_t *zio)
7420 {
7421         l2arc_write_callback_t *cb;
7422         l2arc_dev_t *dev;
7423         list_t *buflist;
7424         arc_buf_hdr_t *head, *hdr, *hdr_prev;
7425         kmutex_t *hash_lock;
7426         int64_t bytes_dropped = 0;
7427         l2arc_log_blk_buf_t *lb_buf;
7428 
7429         cb = zio->io_private;
7430         ASSERT3P(cb, !=, NULL);
7431         dev = cb->l2wcb_dev;
7432         ASSERT3P(dev, !=, NULL);
7433         head = cb->l2wcb_head;
7434         ASSERT3P(head, !=, NULL);
7435         buflist = &dev->l2ad_buflist;
7436         ASSERT3P(buflist, !=, NULL);
7437         DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
7438             l2arc_write_callback_t *, cb);
7439 
7440         if (zio->io_error != 0)
7441                 ARCSTAT_BUMP(arcstat_l2_writes_error);
7442 
7443         /*
7444          * All writes completed, or an error was hit.
7445          */
7446 top:
7447         mutex_enter(&dev->l2ad_mtx);
7448         for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) {
7449                 hdr_prev = list_prev(buflist, hdr);
7450 
7451                 hash_lock = HDR_LOCK(hdr);
7452 
7453                 /*
7454                  * We cannot use mutex_enter or else we can deadlock
7455                  * with l2arc_write_buffers (due to swapping the order
7456                  * the hash lock and l2ad_mtx are taken).
7457                  */
7458                 if (!mutex_tryenter(hash_lock)) {
7459                         /*
7460                          * Missed the hash lock. We must retry so we
7461                          * don't leave the ARC_FLAG_L2_WRITING bit set.
7462                          */
7463                         ARCSTAT_BUMP(arcstat_l2_writes_lock_retry);
7464 
7465                         /*
7466                          * We don't want to rescan the headers we've
7467                          * already marked as having been written out, so
7468                          * we reinsert the head node so we can pick up
7469                          * where we left off.
7470                          */
7471                         list_remove(buflist, head);
7472                         list_insert_after(buflist, hdr, head);
7473 
7474                         mutex_exit(&dev->l2ad_mtx);
7475 
7476                         /*
7477                          * We wait for the hash lock to become available
7478                          * to try and prevent busy waiting, and increase
7479                          * the chance we'll be able to acquire the lock
7480                          * the next time around.
7481                          */
7482                         mutex_enter(hash_lock);
7483                         mutex_exit(hash_lock);
7484                         goto top;
7485                 }
7486 
7487                 /*
7488                  * We could not have been moved into the arc_l2c_only
7489                  * state while in-flight due to our ARC_FLAG_L2_WRITING
7490                  * bit being set. Let's just ensure that's being enforced.
7491                  */
7492                 ASSERT(HDR_HAS_L1HDR(hdr));
7493 
7494                 if (zio->io_error != 0) {
7495                         /*
7496                          * Error - drop L2ARC entry.
7497                          */
7498                         list_remove(buflist, hdr);
7499                         arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
7500 
7501                         ARCSTAT_INCR(arcstat_l2_psize, -arc_hdr_size(hdr));
7502                         ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr));
7503 
7504                         bytes_dropped += arc_hdr_size(hdr);
7505                         (void) refcount_remove_many(&dev->l2ad_alloc,
7506                             arc_hdr_size(hdr), hdr);
7507                 }
7508 
7509                 /*
7510                  * Allow ARC to begin reads and ghost list evictions to
7511                  * this L2ARC entry.
7512                  */
7513                 arc_hdr_clear_flags(hdr, ARC_FLAG_L2_WRITING);
7514 
7515                 mutex_exit(hash_lock);
7516         }
7517 
7518         atomic_inc_64(&l2arc_writes_done);
7519         list_remove(buflist, head);
7520         ASSERT(!HDR_HAS_L1HDR(head));
7521         kmem_cache_free(hdr_l2only_cache, head);
7522         mutex_exit(&dev->l2ad_mtx);
7523 
7524         ASSERT(dev->l2ad_vdev != NULL);
7525         vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0);
7526 
7527         l2arc_do_free_on_write();
7528 
7529         while ((lb_buf = list_remove_tail(&cb->l2wcb_log_blk_buflist)) != NULL)
7530                 kmem_free(lb_buf, sizeof (*lb_buf));
7531         list_destroy(&cb->l2wcb_log_blk_buflist);
7532         kmem_free(cb, sizeof (l2arc_write_callback_t));
7533 }
7534 
7535 /*
7536  * A read to a cache device has completed.  Validate buffer contents before
7537  * handing over to the regular ARC routines.
7538  */
7539 static void
7540 l2arc_read_done(zio_t *zio)
7541 {
7542         l2arc_read_callback_t *cb;
7543         arc_buf_hdr_t *hdr;
7544         kmutex_t *hash_lock;
7545         boolean_t valid_cksum;
7546 
7547         ASSERT3P(zio->io_vd, !=, NULL);
7548         ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
7549 
7550         spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
7551 
7552         cb = zio->io_private;
7553         ASSERT3P(cb, !=, NULL);
7554         hdr = cb->l2rcb_hdr;
7555         ASSERT3P(hdr, !=, NULL);
7556 
7557         hash_lock = HDR_LOCK(hdr);
7558         mutex_enter(hash_lock);
7559         ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
7560 
7561         /*
7562          * If the data was read into a temporary buffer,
7563          * move it and free the buffer.
7564          */
7565         if (cb->l2rcb_abd != NULL) {
7566                 ASSERT3U(arc_hdr_size(hdr), <, zio->io_size);
7567                 if (zio->io_error == 0) {
7568                         abd_copy(hdr->b_l1hdr.b_pabd, cb->l2rcb_abd,
7569                             arc_hdr_size(hdr));
7570                 }
7571 
7572                 /*
7573                  * The following must be done regardless of whether
7574                  * there was an error:
7575                  * - free the temporary buffer
7576                  * - point zio to the real ARC buffer
7577                  * - set zio size accordingly
7578                  * These are required because the zio is either re-used to
7579                  * re-read the block from the primary storage in the case
7580                  * of an error, or it is passed to arc_read_done(), which
7581                  * needs the real data.
7582                  */
7583                 abd_free(cb->l2rcb_abd);
7584                 zio->io_size = zio->io_orig_size = arc_hdr_size(hdr);
7585                 zio->io_abd = zio->io_orig_abd = hdr->b_l1hdr.b_pabd;
7586         }
7587 
7588         ASSERT3P(zio->io_abd, !=, NULL);
7589 
7590         /*
7591          * Check this survived the L2ARC journey.
7592          */
7593         ASSERT3P(zio->io_abd, ==, hdr->b_l1hdr.b_pabd);
7594         zio->io_bp_copy = cb->l2rcb_bp;   /* XXX fix in L2ARC 2.0 */
7595         zio->io_bp = &zio->io_bp_copy;        /* XXX fix in L2ARC 2.0 */
7596 
7597         valid_cksum = arc_cksum_is_equal(hdr, zio);
7598         if (valid_cksum && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
7599                 mutex_exit(hash_lock);
7600                 zio->io_private = hdr;
7601                 arc_read_done(zio);
7602         } else {
7603                 mutex_exit(hash_lock);
7604                 /*
7605                  * Buffer didn't survive caching.  Increment stats and
7606                  * reissue to the original storage device.
7607                  */
7608                 if (zio->io_error != 0) {
7609                         ARCSTAT_BUMP(arcstat_l2_io_error);
7610                 } else {
7611                         zio->io_error = SET_ERROR(EIO);
7612                 }
7613                 if (!valid_cksum)
7614                         ARCSTAT_BUMP(arcstat_l2_cksum_bad);
7615 
7616                 /*
7617                  * If there's no waiter, issue an async i/o to the primary
7618                  * storage now.  If there *is* a waiter, the caller must
7619                  * issue the i/o in a context where it's OK to block.
7620                  */
7621                 if (zio->io_waiter == NULL) {
7622                         zio_t *pio = zio_unique_parent(zio);
7623 
7624                         ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
7625 
7626                         zio_nowait(zio_read(pio, zio->io_spa, zio->io_bp,
7627                             hdr->b_l1hdr.b_pabd, zio->io_size, arc_read_done,
7628                             hdr, zio->io_priority, cb->l2rcb_flags,
7629                             &cb->l2rcb_zb));
7630                 }
7631         }
7632 
7633         kmem_free(cb, sizeof (l2arc_read_callback_t));
7634 }
7635 
7636 /*
7637  * This is the list priority from which the L2ARC will search for pages to
7638  * cache.  This is used within loops to cycle through lists in the
7639  * desired order.  This order can have a significant effect on cache
7640  * performance.
7641  *
7642  * Currently the ddt lists are hit first (MFU then MRU),
7643  * followed by metadata then by the data lists.
7644  * This function returns a locked list, and also returns the lock pointer.
7645  */
7646 static multilist_sublist_t *
7647 l2arc_sublist_lock(enum l2arc_priorities prio)
7648 {
7649         multilist_t *ml = NULL;
7650         unsigned int idx;
7651 
7652         ASSERT(prio >= PRIORITY_MFU_DDT);
7653         ASSERT(prio < PRIORITY_NUMTYPES);
7654 
7655         switch (prio) {
7656         case PRIORITY_MFU_DDT:
7657                 ml = arc_mfu->arcs_list[ARC_BUFC_DDT];
7658                 break;
7659         case PRIORITY_MRU_DDT:
7660                 ml = arc_mru->arcs_list[ARC_BUFC_DDT];
7661                 break;
7662         case PRIORITY_MFU_META:
7663                 ml = arc_mfu->arcs_list[ARC_BUFC_METADATA];
7664                 break;
7665         case PRIORITY_MRU_META:
7666                 ml = arc_mru->arcs_list[ARC_BUFC_METADATA];
7667                 break;
7668         case PRIORITY_MFU_DATA:
7669                 ml = arc_mfu->arcs_list[ARC_BUFC_DATA];
7670                 break;
7671         case PRIORITY_MRU_DATA:
7672                 ml = arc_mru->arcs_list[ARC_BUFC_DATA];
7673                 break;
7674         }
7675 
7676         /*
7677          * Return a randomly-selected sublist. This is acceptable
7678          * because the caller feeds only a little bit of data for each
7679          * call (8MB). Subsequent calls will result in different
7680          * sublists being selected.
7681          */
7682         idx = multilist_get_random_index(ml);
7683         return (multilist_sublist_lock(ml, idx));
7684 }
7685 
7686 /*
7687  * Calculates the maximum overhead of L2ARC metadata log blocks for a given
7688  * L2ARC write size. l2arc_evict and l2arc_write_buffers need to include this
7689  * overhead in processing to make sure there is enough headroom available
7690  * when writing buffers.
7691  */
7692 static inline uint64_t
7693 l2arc_log_blk_overhead(uint64_t write_sz)
7694 {
7695         return ((write_sz / SPA_MINBLOCKSIZE / L2ARC_LOG_BLK_ENTRIES) + 1) *
7696             L2ARC_LOG_BLK_SIZE;
7697 }
7698 
7699 /*
7700  * Evict buffers from the device write hand to the distance specified in
7701  * bytes.  This distance may span populated buffers, it may span nothing.
7702  * This is clearing a region on the L2ARC device ready for writing.
7703  * If the 'all' boolean is set, every buffer is evicted.
7704  */
7705 static void
7706 l2arc_evict_impl(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
7707 {
7708         list_t *buflist;
7709         arc_buf_hdr_t *hdr, *hdr_prev;
7710         kmutex_t *hash_lock;
7711         uint64_t taddr;
7712 
7713         buflist = &dev->l2ad_buflist;
7714 
7715         if (!all && dev->l2ad_first) {
7716                 /*
7717                  * This is the first sweep through the device.  There is
7718                  * nothing to evict.
7719                  */
7720                 return;
7721         }
7722 
7723         /*
7724          * We need to add in the worst case scenario of log block overhead.
7725          */
7726         distance += l2arc_log_blk_overhead(distance);
7727         if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
7728                 /*
7729                  * When nearing the end of the device, evict to the end
7730                  * before the device write hand jumps to the start.
7731                  */
7732                 taddr = dev->l2ad_end;
7733         } else {
7734                 taddr = dev->l2ad_hand + distance;
7735         }
7736         DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
7737             uint64_t, taddr, boolean_t, all);
7738 
7739 top:
7740         mutex_enter(&dev->l2ad_mtx);
7741         for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) {
7742                 hdr_prev = list_prev(buflist, hdr);
7743 
7744                 hash_lock = HDR_LOCK(hdr);
7745 
7746                 /*
7747                  * We cannot use mutex_enter or else we can deadlock
7748                  * with l2arc_write_buffers (due to swapping the order
7749                  * the hash lock and l2ad_mtx are taken).
7750                  */
7751                 if (!mutex_tryenter(hash_lock)) {
7752                         /*
7753                          * Missed the hash lock.  Retry.
7754                          */
7755                         ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
7756                         mutex_exit(&dev->l2ad_mtx);
7757                         mutex_enter(hash_lock);
7758                         mutex_exit(hash_lock);
7759                         goto top;
7760                 }
7761 
7762                 /*
7763                  * A header can't be on this list if it doesn't have L2 header.
7764                  */
7765                 ASSERT(HDR_HAS_L2HDR(hdr));
7766 
7767                 /* Ensure this header has finished being written. */
7768                 ASSERT(!HDR_L2_WRITING(hdr));
7769                 ASSERT(!HDR_L2_WRITE_HEAD(hdr));
7770 
7771                 if (!all && (hdr->b_l2hdr.b_daddr >= taddr ||
7772                     hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) {
7773                         /*
7774                          * We've evicted to the target address,
7775                          * or the end of the device.
7776                          */
7777                         mutex_exit(hash_lock);
7778                         break;
7779                 }
7780 
7781                 if (!HDR_HAS_L1HDR(hdr)) {
7782                         ASSERT(!HDR_L2_READING(hdr));
7783                         /*
7784                          * This doesn't exist in the ARC.  Destroy.
7785                          * arc_hdr_destroy() will call list_remove()
7786                          * and decrement arcstat_l2_lsize.
7787                          */
7788                         arc_change_state(arc_anon, hdr, hash_lock);
7789                         arc_hdr_destroy(hdr);
7790                 } else {
7791                         ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only);
7792                         ARCSTAT_BUMP(arcstat_l2_evict_l1cached);
7793                         /*
7794                          * Invalidate issued or about to be issued
7795                          * reads, since we may be about to write
7796                          * over this location.
7797                          */
7798                         if (HDR_L2_READING(hdr)) {
7799                                 ARCSTAT_BUMP(arcstat_l2_evict_reading);
7800                                 arc_hdr_set_flags(hdr, ARC_FLAG_L2_EVICTED);
7801                         }
7802 
7803                         arc_hdr_l2hdr_destroy(hdr);
7804                 }
7805                 mutex_exit(hash_lock);
7806         }
7807         mutex_exit(&dev->l2ad_mtx);
7808 }
7809 
7810 static void
7811 l2arc_evict_task(void *arg)
7812 {
7813         l2arc_dev_t *dev = arg;
7814         ASSERT(dev);
7815 
7816         /*
7817          * Evict l2arc buffers asynchronously; we need to keep the device
7818          * around until we are sure there aren't any buffers referencing it.
7819          * We do not need to hold any config locks, etc. because at this point,
         * we are the only ones who know about this device (the in-core
7821          * structure), so no new buffers can be created (e.g. if the pool is
7822          * re-imported while the asynchronous eviction is in progress) that
7823          * reference this same in-core structure. Also remove the vdev link
7824          * since further use of it as l2arc device is prohibited.
7825          */
7826         dev->l2ad_vdev = NULL;
7827         l2arc_evict_impl(dev, 0LL, B_TRUE);
7828 
7829         /* Same cleanup as in the synchronous path */
7830         list_destroy(&dev->l2ad_buflist);
7831         mutex_destroy(&dev->l2ad_mtx);
7832         refcount_destroy(&dev->l2ad_alloc);
7833         kmem_free(dev->l2ad_dev_hdr, dev->l2ad_dev_hdr_asize);
7834         kmem_free(dev, sizeof (l2arc_dev_t));
7835 }
7836 
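/*
 * Tunable: when set, evicting all buffers (at pool export or cache device
 * removal) is handed off to an asynchronous task by l2arc_evict() below;
 * when cleared, that eviction is always performed synchronously.
 */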
7837 boolean_t zfs_l2arc_async_evict = B_TRUE;
7838 
7839 /*
7840  * Perform l2arc eviction for buffers associated with this device
7841  * If evicting all buffers (done at pool export time), try to evict
7842  * asynchronously, and fall back to synchronous eviction in case of error
7843  * Tell the caller whether to cleanup the device:
7844  *  - B_TRUE means "asynchronous eviction, do not cleanup"
7845  *  - B_FALSE means "synchronous eviction, done, please cleanup"
7846  */
7847 static boolean_t
7848 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
7849 {
7850         /*
7851          *  If we are evicting all the buffers for this device, which happens
7852          *  at pool export time, schedule asynchronous task
7853          */
7854         if (all && zfs_l2arc_async_evict) {
                if (taskq_dispatch(arc_flush_taskq, l2arc_evict_task,
                    dev, TQ_NOSLEEP) == NULL) {
7857                         /*
7858                          * Failed to dispatch asynchronous task
7859                          * cleanup, evict synchronously
7860                          */
7861                         l2arc_evict_impl(dev, distance, all);
7862                 } else {
7863                         /*
7864                          * Successful dispatch, vdev space updated
7865                          */
7866                         return (B_TRUE);
7867                 }
7868         } else {
7869                 /* Evict synchronously */
7870                 l2arc_evict_impl(dev, distance, all);
7871         }
7872 
7873         return (B_FALSE);
7874 }
7875 
7876 /*
7877  * Find and write ARC buffers to the L2ARC device.
7878  *
7879  * An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid
7880  * for reading until they have completed writing.
7883  *
7884  * Returns the number of bytes actually written (which may be smaller than
7885  * the delta by which the device hand has changed due to alignment).
7886  */
7887 static uint64_t
7888 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
7889     l2ad_feed_t feed_type)
7890 {
7891         arc_buf_hdr_t *hdr, *hdr_prev, *head;
7892         /*
7893          * We must carefully track the space we deal with here:
7894          * - write_size: sum of the size of all buffers to be written
7895          *      without compression or inter-buffer alignment applied.
7896          *      This size is added to arcstat_l2_size, because subsequent
7897          *      eviction of buffers decrements this kstat by only the
7898          *      buffer's b_lsize (which doesn't take alignment into account).
7899          * - write_asize: sum of the size of all buffers to be written
7900          *      with inter-buffer alignment applied.
7901          *      This size is used to estimate the maximum number of bytes
7902          *      we could take up on the device and is thus used to gauge how
7903          *      close we are to hitting target_sz.
7904          */
7905         uint64_t write_asize, write_psize, write_lsize, headroom;
7906         boolean_t full;
7907         l2arc_write_callback_t *cb;
7908         zio_t *pio, *wzio;
7909         enum l2arc_priorities try;
7910         uint64_t guid = spa_load_guid(spa);
7911         boolean_t dev_hdr_update = B_FALSE;
7912 
7913         ASSERT3P(dev->l2ad_vdev, !=, NULL);
7914 
7915         pio = NULL;
7916         cb = NULL;
7917         write_lsize = write_asize = write_psize = 0;
7918         full = B_FALSE;
7919         head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE);
7920         arc_hdr_set_flags(head, ARC_FLAG_L2_WRITE_HEAD | ARC_FLAG_HAS_L2HDR);
7921 
7922         /*
7923          * Copy buffers for L2ARC writing.
7924          */
7925         for (try = PRIORITY_MFU_DDT; try < PRIORITY_NUMTYPES; try++) {
7926                 multilist_sublist_t *mls = l2arc_sublist_lock(try);
7927                 uint64_t passed_sz = 0;
7928 
7929                 /*
7930                  * L2ARC fast warmup.
7931                  *
7932                  * Until the ARC is warm and starts to evict, read from the
7933                  * head of the ARC lists rather than the tail.
7934                  */
7935                 if (arc_warm == B_FALSE)
7936                         hdr = multilist_sublist_head(mls);
7937                 else
7938                         hdr = multilist_sublist_tail(mls);
7939 
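                /*
                 * Limit how far down the list we search: scan at most
                 * l2arc_headroom times the target write size, scaled by
                 * l2arc_headroom_boost / 100 when compressed ARC is
                 * enabled.
                 */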
7940                 headroom = target_sz * l2arc_headroom;
7941                 if (zfs_compressed_arc_enabled)
7942                         headroom = (headroom * l2arc_headroom_boost) / 100;
7943 
7944                 for (; hdr; hdr = hdr_prev) {
7945                         kmutex_t *hash_lock;
7946 
7947                         if (arc_warm == B_FALSE)
7948                                 hdr_prev = multilist_sublist_next(mls, hdr);
7949                         else
7950                                 hdr_prev = multilist_sublist_prev(mls, hdr);
7951 
7952                         hash_lock = HDR_LOCK(hdr);
7953                         if (!mutex_tryenter(hash_lock)) {
7954                                 /*
7955                                  * Skip this buffer rather than waiting.
7956                                  */
7957                                 continue;
7958                         }
7959 
7960                         passed_sz += HDR_GET_LSIZE(hdr);
7961                         if (passed_sz > headroom) {
7962                                 /*
7963                                  * Searched too far.
7964                                  */
7965                                 mutex_exit(hash_lock);
7966                                 break;
7967                         }
7968 
7969                         if (!l2arc_write_eligible(guid, hdr)) {
7970                                 mutex_exit(hash_lock);
7971                                 continue;
7972                         }
7973 
7974                         /*
7975                          * We rely on the L1 portion of the header below, so
7976                          * it's invalid for this header to have been evicted out
7977                          * of the ghost cache, prior to being written out. The
7978                          * ARC_FLAG_L2_WRITING bit ensures this won't happen.
7979                          */
7980                         ASSERT(HDR_HAS_L1HDR(hdr));
7981 
7982                         ASSERT3U(HDR_GET_PSIZE(hdr), >, 0);
7983                         ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
7984                         ASSERT3U(arc_hdr_size(hdr), >, 0);
7985                         uint64_t psize = arc_hdr_size(hdr);
7986                         uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev,
7987                             psize);
7988 
7989                         if ((write_asize + asize) > target_sz) {
7990                                 full = B_TRUE;
7991                                 mutex_exit(hash_lock);
7992                                 break;
7993                         }
7994 
7995                         /* make sure buf we select corresponds to feed_type */
7996                         if ((feed_type == L2ARC_FEED_DDT_DEV &&
7997                             arc_buf_type(hdr) != ARC_BUFC_DDT) ||
7998                             (feed_type == L2ARC_FEED_NON_DDT_DEV &&
7999                             arc_buf_type(hdr) == ARC_BUFC_DDT)) {
                                mutex_exit(hash_lock);
                                continue;
8002                         }
8003 
8004                         if (pio == NULL) {
8005                                 /*
8006                                  * Insert a dummy header on the buflist so
8007                                  * l2arc_write_done() can find where the
8008                                  * write buffers begin without searching.
8009                                  */
8010                                 mutex_enter(&dev->l2ad_mtx);
8011                                 list_insert_head(&dev->l2ad_buflist, head);
8012                                 mutex_exit(&dev->l2ad_mtx);
8013 
8014                                 cb = kmem_zalloc(
8015                                     sizeof (l2arc_write_callback_t), KM_SLEEP);
8016                                 cb->l2wcb_dev = dev;
8017                                 cb->l2wcb_head = head;
8018                                 list_create(&cb->l2wcb_log_blk_buflist,
8019                                     sizeof (l2arc_log_blk_buf_t),
8020                                     offsetof(l2arc_log_blk_buf_t, lbb_node));
8021                                 pio = zio_root(spa, l2arc_write_done, cb,
8022                                     ZIO_FLAG_CANFAIL);
8023                         }
8024 
8025                         hdr->b_l2hdr.b_dev = dev;
8026                         hdr->b_l2hdr.b_daddr = dev->l2ad_hand;
8027                         arc_hdr_set_flags(hdr,
8028                             ARC_FLAG_L2_WRITING | ARC_FLAG_HAS_L2HDR);
8029 
8030                         mutex_enter(&dev->l2ad_mtx);
8031                         list_insert_head(&dev->l2ad_buflist, hdr);
8032                         mutex_exit(&dev->l2ad_mtx);
8033 
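                        /*
                         * Charge this header's physical size against the
                         * device's allocated-space refcount (l2ad_alloc).
                         */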
8034                         (void) refcount_add_many(&dev->l2ad_alloc, psize, hdr);
8035 
8036                         /*
8037                          * Normally the L2ARC can use the hdr's data, but if
8038                          * we're sharing data between the hdr and one of its
8039                          * bufs, L2ARC needs its own copy of the data so that
8040                          * the ZIO below can't race with the buf consumer.
8041                          * Another case where we need to create a copy of the
8042                          * data is when the buffer size is not device-aligned
8043                          * and we need to pad the block to make it such.
8044                          * That also keeps the clock hand suitably aligned.
8045                          *
8046                          * To ensure that the copy will be available for the
8047                          * lifetime of the ZIO and be cleaned up afterwards, we
8048                          * add it to the l2arc_free_on_write queue.
8049                          */
8050                         abd_t *to_write;
8051                         if (!HDR_SHARED_DATA(hdr) && psize == asize) {
8052                                 to_write = hdr->b_l1hdr.b_pabd;
8053                         } else {
8054                                 to_write = abd_alloc_for_io(asize,
8055                                     !HDR_ISTYPE_DATA(hdr));
8056                                 abd_copy(to_write, hdr->b_l1hdr.b_pabd, psize);
8057                                 if (asize != psize) {
8058                                         abd_zero_off(to_write, psize,
8059                                             asize - psize);
8060                                 }
8061                                 l2arc_free_abd_on_write(to_write, asize,
8062                                     arc_buf_type(hdr));
8063                         }
8064                         wzio = zio_write_phys(pio, dev->l2ad_vdev,
8065                             hdr->b_l2hdr.b_daddr, asize, to_write,
8066                             ZIO_CHECKSUM_OFF, NULL, hdr,
8067                             ZIO_PRIORITY_ASYNC_WRITE,
8068                             ZIO_FLAG_CANFAIL, B_FALSE);
8069 
8070                         write_lsize += HDR_GET_LSIZE(hdr);
8071                         DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
8072                             zio_t *, wzio);
8073 
8074                         write_psize += psize;
8075                         write_asize += asize;
8076                         dev->l2ad_hand += asize;
8077 
8078                         mutex_exit(hash_lock);
8079 
8080                         (void) zio_nowait(wzio);
8081 
8082                         /*
8083                          * Append buf info to current log and commit if full.
8084                          * arcstat_l2_{size,asize} kstats are updated internally.
8085                          */
8086                         if (l2arc_log_blk_insert(dev, hdr)) {
8087                                 l2arc_log_blk_commit(dev, pio, cb);
8088                                 dev_hdr_update = B_TRUE;
8089                         }
8090                 }
8091 
8092                 multilist_sublist_unlock(mls);
8093 
8094                 if (full == B_TRUE)
8095                         break;
8096         }
8097 
8098         /* No buffers selected for writing? */
8099         if (pio == NULL) {
8100                 ASSERT0(write_lsize);
8101                 ASSERT(!HDR_HAS_L1HDR(head));
8102                 kmem_cache_free(hdr_l2only_cache, head);
8103                 return (0);
8104         }
8105 
8106         /*
8107          * If we wrote any logs as part of this write, update dev hdr
8108          * to point to it.
8109          */
8110         if (dev_hdr_update)
8111                 l2arc_dev_hdr_update(dev, pio);
8112 
8113         ASSERT3U(write_asize, <=, target_sz);
8114         ARCSTAT_BUMP(arcstat_l2_writes_sent);
8115         ARCSTAT_INCR(arcstat_l2_write_bytes, write_psize);
8116         if (feed_type == L2ARC_FEED_DDT_DEV)
8117                 ARCSTAT_INCR(arcstat_l2_ddt_write_bytes, write_psize);
8118         ARCSTAT_INCR(arcstat_l2_lsize, write_lsize);
8119         ARCSTAT_INCR(arcstat_l2_psize, write_psize);
8120         vdev_space_update(dev->l2ad_vdev, write_psize, 0, 0);
8121 
8122         /*
8123          * Bump device hand to the device start if it is approaching the end.
8124          * l2arc_evict() will already have evicted ahead for this case.
8125          */
8126         if (dev->l2ad_hand + target_sz + l2arc_log_blk_overhead(target_sz) >=
8127             dev->l2ad_end) {
8128                 dev->l2ad_hand = dev->l2ad_start;
8129                 dev->l2ad_first = B_FALSE;
8130         }
8131 
8132         dev->l2ad_writing = B_TRUE;
8133         (void) zio_wait(pio);
8134         dev->l2ad_writing = B_FALSE;
8135 
8136         return (write_asize);
8137 }
8138 
8139 static boolean_t
8140 l2arc_feed_dev(l2ad_feed_t feed_type, uint64_t *wrote)
8141 {
8142         spa_t *spa;
8143         l2arc_dev_t *dev;
8144         uint64_t size;
8145 
8146         /*
8147          * This selects the next l2arc device to write to, and in
8148          * doing so the next spa to feed from: dev->l2ad_spa.   This
8149          * will return NULL if there are now no l2arc devices or if
8150          * they are all faulted.
8151          *
8152          * If a device is returned, its spa's config lock is also
8153          * held to prevent device removal.  l2arc_dev_get_next()
8154          * will grab and release l2arc_dev_mtx.
8155          */
8156         if ((dev = l2arc_dev_get_next(feed_type)) == NULL)
8157                 return (B_FALSE);
8158 
8159         spa = dev->l2ad_spa;
8160         ASSERT(spa != NULL);
8161 
8162         /*
8163          * If the pool is read-only - skip it
8164          */
8165         if (!spa_writeable(spa)) {
8166                 spa_config_exit(spa, SCL_L2ARC, dev);
8167                 return (B_FALSE);
8168         }
8169 
8170         ARCSTAT_BUMP(arcstat_l2_feeds);
8171         size = l2arc_write_size();
8172 
8173         /*
8174          * Evict L2ARC buffers that will be overwritten.
8175          * B_FALSE guarantees synchronous eviction.
8176          */
8177         (void) l2arc_evict(dev, size, B_FALSE);
8178 
8179         /*
8180          * Write ARC buffers.
8181          */
8182         *wrote = l2arc_write_buffers(spa, dev, size, feed_type);
8183 
8184         spa_config_exit(spa, SCL_L2ARC, dev);
8185 
8186         return (B_TRUE);
8187 }
8188 
8189 /*
8190  * This thread feeds the L2ARC at regular intervals.  This is the beating
8191  * heart of the L2ARC.
8192  */
8193 /* ARGSUSED */
8194 static void
8195 l2arc_feed_thread(void *unused)
8196 {
8197         callb_cpr_t cpr;
8198         uint64_t size, total_written = 0;
8199         clock_t begin, next = ddi_get_lbolt();
8200         l2ad_feed_t feed_type = L2ARC_FEED_ALL;
8201 
8202         CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
8203 
8204         mutex_enter(&l2arc_feed_thr_lock);
8205 
8206         while (l2arc_thread_exit == 0) {
8207                 CALLB_CPR_SAFE_BEGIN(&cpr);
8208                 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
8209                     next);
8210                 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
8211                 next = ddi_get_lbolt() + hz;
8212 
8213                 /*
8214                  * Quick check for L2ARC devices.
8215                  */
8216                 mutex_enter(&l2arc_dev_mtx);
8217                 if (l2arc_ndev == 0) {
8218                         mutex_exit(&l2arc_dev_mtx);
8219                         continue;
8220                 }
8221                 mutex_exit(&l2arc_dev_mtx);
8222                 begin = ddi_get_lbolt();
8223 
8224                 /*
8225                  * Avoid contributing to memory pressure.
8226                  */
8227                 if (arc_reclaim_needed()) {
8228                         ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
8229                         continue;
8230                 }
8231 
8232                 /* try to write to DDT L2ARC device if any */
8233                 if (l2arc_feed_dev(L2ARC_FEED_DDT_DEV, &size)) {
8234                         total_written += size;
8235                         feed_type = L2ARC_FEED_NON_DDT_DEV;
8236                 }
8237 
8238                 /* try to write to the regular L2ARC device if any */
8239                 if (l2arc_feed_dev(feed_type, &size)) {
8240                         total_written += size;
8241                         if (feed_type == L2ARC_FEED_NON_DDT_DEV)
8242                                 total_written /= 2; /* avg written per device */
8243                 }
8244 
8245                 /*
8246                  * Calculate interval between writes.
8247                  */
8248                 next = l2arc_write_interval(begin, l2arc_write_size(),
8249                     total_written);
8250 
8251                 total_written = 0;
8252         }
8253 
8254         l2arc_thread_exit = 0;
8255         cv_broadcast(&l2arc_feed_thr_cv);
8256         CALLB_CPR_EXIT(&cpr);               /* drops l2arc_feed_thr_lock */
8257         thread_exit();
8258 }
8259 
8260 boolean_t
8261 l2arc_vdev_present(vdev_t *vd)
8262 {
8263         return (l2arc_vdev_get(vd) != NULL);
8264 }
8265 
8266 /*
8267  * Returns the l2arc_dev_t associated with a particular vdev_t or NULL if
8268  * the vdev_t isn't an L2ARC device.
8269  */
8270 static l2arc_dev_t *
8271 l2arc_vdev_get(vdev_t *vd)
8272 {
8273         l2arc_dev_t     *dev;
8274         boolean_t       held = MUTEX_HELD(&l2arc_dev_mtx);
8275 
8276         if (!held)
8277                 mutex_enter(&l2arc_dev_mtx);
8278         for (dev = list_head(l2arc_dev_list); dev != NULL;
8279             dev = list_next(l2arc_dev_list, dev)) {
8280                 if (dev->l2ad_vdev == vd)
8281                         break;
8282         }
8283         if (!held)
8284                 mutex_exit(&l2arc_dev_mtx);
8285 
8286         return (dev);
8287 }
8288 
8289 /*
8290  * Add a vdev for use by the L2ARC.  By this point the spa has already
8291  * validated the vdev and opened it. The `rebuild' flag indicates whether
8292  * we should attempt an L2ARC persistency rebuild.
8293  */
8294 void
8295 l2arc_add_vdev(spa_t *spa, vdev_t *vd, boolean_t rebuild)
8296 {
8297         l2arc_dev_t *adddev;
8298 
8299         ASSERT(!l2arc_vdev_present(vd));
8300 
8301         /*
8302          * Create a new l2arc device entry.
8303          */
8304         adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
8305         adddev->l2ad_spa = spa;
8306         adddev->l2ad_vdev = vd;
8307         /* leave extra size for an l2arc device header */
8308         adddev->l2ad_dev_hdr_asize = MAX(sizeof (*adddev->l2ad_dev_hdr),
8309             1 << vd->vdev_ashift);
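        /*
         * Resulting on-device layout (sketch):
         *
         *      [0, VDEV_LABEL_START_SIZE)            vdev labels
         *      [VDEV_LABEL_START_SIZE, l2ad_start)   device header
         *      [l2ad_start, l2ad_end)                buffer area, through
         *                                            which the write hand
         *                                            cycles
         */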
8310         adddev->l2ad_start = VDEV_LABEL_START_SIZE + adddev->l2ad_dev_hdr_asize;
8311         adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
8312         ASSERT3U(adddev->l2ad_start, <, adddev->l2ad_end);
8313         adddev->l2ad_hand = adddev->l2ad_start;
8314         adddev->l2ad_first = B_TRUE;
8315         adddev->l2ad_writing = B_FALSE;
8316         adddev->l2ad_dev_hdr = kmem_zalloc(adddev->l2ad_dev_hdr_asize,
8317             KM_SLEEP);
8318 
8319         mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL);
8320         /*
8321          * This is a list of all ARC buffers that are still valid on the
8322          * device.
8323          */
8324         list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
8325             offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));
8326 
8327         vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
8328         refcount_create(&adddev->l2ad_alloc);
8329 
8330         /*
8331          * Add device to global list
8332          */
8333         mutex_enter(&l2arc_dev_mtx);
8334         list_insert_head(l2arc_dev_list, adddev);
8335         atomic_inc_64(&l2arc_ndev);
8336         if (rebuild && l2arc_rebuild_enabled &&
8337             adddev->l2ad_end - adddev->l2ad_start > L2ARC_PERSIST_MIN_SIZE) {
8338                 /*
8339                  * Just mark the device as pending for a rebuild. We won't
8340                  * be starting a rebuild in line here as it would block pool
8341                  * import. Instead spa_load_impl will hand that off to an
8342                  * async task which will call l2arc_spa_rebuild_start.
8343                  */
8344                 adddev->l2ad_rebuild = B_TRUE;
8345         }
8346         mutex_exit(&l2arc_dev_mtx);
8347 }
8348 
8349 /*
8350  * Remove a vdev from the L2ARC.
8351  */
8352 void
8353 l2arc_remove_vdev(vdev_t *vd)
8354 {
8355         l2arc_dev_t *dev, *nextdev, *remdev = NULL;
8356 
8357         /*
8358          * Find the device by vdev
8359          */
8360         mutex_enter(&l2arc_dev_mtx);
8361         for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
8362                 nextdev = list_next(l2arc_dev_list, dev);
8363                 if (vd == dev->l2ad_vdev) {
8364                         remdev = dev;
8365                         break;
8366                 }
8367         }
8368         ASSERT3P(remdev, !=, NULL);
8369 
8370         /*
8371          * Cancel any ongoing or scheduled rebuild (race protection with
8372          * l2arc_spa_rebuild_start provided via l2arc_dev_mtx).
8373          */
8374         remdev->l2ad_rebuild_cancel = B_TRUE;
8375         if (remdev->l2ad_rebuild_did != 0) {
8376                 /*
8377                  * N.B. it should be safe to thread_join with the rebuild
8378                  * thread while holding l2arc_dev_mtx because it is not
8379                  * accessed from anywhere in the l2arc rebuild code below
8380                  * (except for l2arc_spa_rebuild_start, which is ok).
8381                  */
8382                 thread_join(remdev->l2ad_rebuild_did);
8383         }
8384 
8385         /*
8386          * Remove device from global list
8387          */
8388         list_remove(l2arc_dev_list, remdev);
8389         l2arc_dev_last = NULL;          /* may have been invalidated */
8390         l2arc_ddt_dev_last = NULL;      /* may have been invalidated */
8391         atomic_dec_64(&l2arc_ndev);
8392         mutex_exit(&l2arc_dev_mtx);
8393 
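        /*
         * If this was a DDT-dedicated cache device, subtract its capacity
         * from the spa's running total of DDT L2ARC device space.
         */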
8394         if (vdev_type_is_ddt(remdev->l2ad_vdev))
8395                 atomic_add_64(&remdev->l2ad_spa->spa_l2arc_ddt_devs_size,
8396                     -(vdev_get_min_asize(remdev->l2ad_vdev)));
8397 
8398         /*
8399          * Clear all buflists and ARC references.  L2ARC device flush.
8400          */
8401         if (l2arc_evict(remdev, 0, B_TRUE) == B_FALSE) {
8402                 /*
8403                  * The eviction was done synchronously, cleanup here
8404                  * Otherwise, the asynchronous task will cleanup
8405                  */
8406                 list_destroy(&remdev->l2ad_buflist);
8407                 mutex_destroy(&remdev->l2ad_mtx);
8408                 kmem_free(remdev->l2ad_dev_hdr, remdev->l2ad_dev_hdr_asize);
8409                 kmem_free(remdev, sizeof (l2arc_dev_t));
8410         }
8411 }
8412 
8413 void
8414 l2arc_init(void)
8415 {
8416         l2arc_thread_exit = 0;
8417         l2arc_ndev = 0;
8418         l2arc_writes_sent = 0;
8419         l2arc_writes_done = 0;
8420 
8421         mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
8422         cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
8423         mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
8424         mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
8425 
8426         l2arc_dev_list = &L2ARC_dev_list;
8427         l2arc_free_on_write = &L2ARC_free_on_write;
8428         list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
8429             offsetof(l2arc_dev_t, l2ad_node));
8430         list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
8431             offsetof(l2arc_data_free_t, l2df_list_node));
8432 }
8433 
8434 void
8435 l2arc_fini(void)
8436 {
8437         /*
8438          * This is called from dmu_fini(), which is called from spa_fini();
8439          * Because of this, we can assume that all l2arc devices have
8440          * already been removed when the pools themselves were removed.
8441          */
8442 
8443         l2arc_do_free_on_write();
8444 
8445         mutex_destroy(&l2arc_feed_thr_lock);
8446         cv_destroy(&l2arc_feed_thr_cv);
8447         mutex_destroy(&l2arc_dev_mtx);
8448         mutex_destroy(&l2arc_free_on_write_mtx);
8449 
8450         list_destroy(l2arc_dev_list);
8451         list_destroy(l2arc_free_on_write);
8452 }
8453 
8454 void
8455 l2arc_start(void)
8456 {
8457         if (!(spa_mode_global & FWRITE))
8458                 return;
8459 
8460         (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
8461             TS_RUN, minclsyspri);
8462 }
8463 
8464 void
8465 l2arc_stop(void)
8466 {
8467         if (!(spa_mode_global & FWRITE))
8468                 return;
8469 
8470         mutex_enter(&l2arc_feed_thr_lock);
8471         cv_signal(&l2arc_feed_thr_cv);      /* kick thread out of startup */
8472         l2arc_thread_exit = 1;
8473         while (l2arc_thread_exit != 0)
8474                 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
8475         mutex_exit(&l2arc_feed_thr_lock);
8476 }
8477 
8478 /*
8479  * Punches out rebuild threads for the L2ARC devices in a spa. This should
8480  * be called after pool import from the spa async thread, since starting
8481  * these threads directly from spa_import() will make them part of the
8482  * "zpool import" context and delay process exit (and thus pool import).
8483  */
8484 void
8485 l2arc_spa_rebuild_start(spa_t *spa)
8486 {
8487         /*
8488          * Locate the spa's l2arc devices and kick off rebuild threads.
8489          */
8490         mutex_enter(&l2arc_dev_mtx);
8491         for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
8492                 l2arc_dev_t *dev =
8493                     l2arc_vdev_get(spa->spa_l2cache.sav_vdevs[i]);
8494                 if (dev == NULL) {
8495                         /* Don't attempt a rebuild if the vdev is UNAVAIL */
8496                         continue;
8497                 }
8498                 if (dev->l2ad_rebuild && !dev->l2ad_rebuild_cancel) {
8499                         VERIFY3U(dev->l2ad_rebuild_did, ==, 0);
8500 #ifdef  _KERNEL
8501                         dev->l2ad_rebuild_did = thread_create(NULL, 0,
8502                             l2arc_dev_rebuild_start, dev, 0, &p0, TS_RUN,
8503                             minclsyspri)->t_did;
8504 #endif
8505                 }
8506         }
8507         mutex_exit(&l2arc_dev_mtx);
8508 }
8509 
8510 /*
8511  * Main entry point for L2ARC rebuilding.
8512  */
8513 static void
8514 l2arc_dev_rebuild_start(l2arc_dev_t *dev)
8515 {
8516         if (!dev->l2ad_rebuild_cancel) {
8517                 VERIFY(dev->l2ad_rebuild);
8518                 (void) l2arc_rebuild(dev);
8519                 dev->l2ad_rebuild = B_FALSE;
8520         }
8521 }
8522 
8523 /*
8524  * This function implements the actual L2ARC metadata rebuild. It:
8525  *
8526  * 1) reads the device's header
8527  * 2) if a good device header is found, starts reading the log block chain
8528  * 3) restores each block's contents to memory (reconstructing arc_buf_hdr_t's)
8529  *
8530  * Operation stops under any of the following conditions:
8531  *
8532  * 1) We reach the end of the log blk chain (the back-reference in the blk is
8533  *    invalid or loops over our starting point).
8534  * 2) We encounter *any* error condition (cksum errors, io errors, looped
8535  *    blocks, etc.).
8536  */
8537 static int
8538 l2arc_rebuild(l2arc_dev_t *dev)
8539 {
8540         vdev_t                  *vd = dev->l2ad_vdev;
8541         spa_t                   *spa = vd->vdev_spa;
8542         int                     err;
8543         l2arc_log_blk_phys_t    *this_lb, *next_lb;
8544         uint8_t                 *this_lb_buf, *next_lb_buf;
8545         zio_t                   *this_io = NULL, *next_io = NULL;
8546         l2arc_log_blkptr_t      lb_ptrs[2];
8547         boolean_t               first_pass, lock_held;
8548         uint64_t                load_guid;
8549 
8550         this_lb = kmem_zalloc(sizeof (*this_lb), KM_SLEEP);
8551         next_lb = kmem_zalloc(sizeof (*next_lb), KM_SLEEP);
8552         this_lb_buf = kmem_zalloc(sizeof (l2arc_log_blk_phys_t), KM_SLEEP);
8553         next_lb_buf = kmem_zalloc(sizeof (l2arc_log_blk_phys_t), KM_SLEEP);
8554 
8555         /*
8556          * We prevent device removal while issuing reads to the device,
8557          * then during the rebuilding phases we drop this lock again so
8558          * that a spa_unload or device remove can be initiated - this is
8559          * safe, because the spa will signal us to stop before removing
8560          * our device and wait for us to stop.
8561          */
8562         spa_config_enter(spa, SCL_L2ARC, vd, RW_READER);
8563         lock_held = B_TRUE;
8564 
8565         load_guid = spa_load_guid(dev->l2ad_vdev->vdev_spa);
8566         /*
8567          * Device header processing phase.
8568          */
8569         if ((err = l2arc_dev_hdr_read(dev)) != 0) {
8570                 /* device header corrupted, start a new one */
8571                 bzero(dev->l2ad_dev_hdr, dev->l2ad_dev_hdr_asize);
8572                 goto out;
8573         }
8574 
8575         /* Retrieve the persistent L2ARC device state */
8576         dev->l2ad_hand = vdev_psize_to_asize(dev->l2ad_vdev,
8577             dev->l2ad_dev_hdr->dh_start_lbps[0].lbp_daddr +
8578             LBP_GET_PSIZE(&dev->l2ad_dev_hdr->dh_start_lbps[0]));
8579         dev->l2ad_first = !!(dev->l2ad_dev_hdr->dh_flags &
8580             L2ARC_DEV_HDR_EVICT_FIRST);
8581 
8582         /* Prepare the rebuild processing state */
8583         bcopy(dev->l2ad_dev_hdr->dh_start_lbps, lb_ptrs, sizeof (lb_ptrs));
8584         first_pass = B_TRUE;
8585 
8586         /* Start the rebuild process */
8587         for (;;) {
8588                 if (!l2arc_log_blkptr_valid(dev, &lb_ptrs[0]))
8589                         /* We hit an invalid block address, end the rebuild. */
8590                         break;
8591 
8592                 if ((err = l2arc_log_blk_read(dev, &lb_ptrs[0], &lb_ptrs[1],
8593                     this_lb, next_lb, this_lb_buf, next_lb_buf,
8594                     this_io, &next_io)) != 0)
8595                         break;
8596 
8597                 spa_config_exit(spa, SCL_L2ARC, vd);
8598                 lock_held = B_FALSE;
8599 
8600                 /* Protection against infinite loops of log blocks. */
8601                 if (l2arc_range_check_overlap(lb_ptrs[1].lbp_daddr,
8602                     lb_ptrs[0].lbp_daddr,
8603                     dev->l2ad_dev_hdr->dh_start_lbps[0].lbp_daddr) &&
8604                     !first_pass) {
8605                         ARCSTAT_BUMP(arcstat_l2_rebuild_abort_loop_errors);
8606                         err = SET_ERROR(ELOOP);
8607                         break;
8608                 }
8609 
8610                 /*
8611                  * Our memory pressure valve. If the system is running low
8612                  * on memory, rather than swamping memory with new ARC buf
8613                  * hdrs, we opt not to rebuild the L2ARC. At this point,
8614                  * however, we have already set up our L2ARC dev to chain in
                 * new metadata log blocks, so the user may choose to re-add
                 * the
8616                  * L2ARC dev at a later time to reconstruct it (when there's
8617                  * less memory pressure).
8618                  */
8619                 if (arc_reclaim_needed()) {
8620                         ARCSTAT_BUMP(arcstat_l2_rebuild_abort_lowmem);
8621                         cmn_err(CE_NOTE, "System running low on memory, "
8622                             "aborting L2ARC rebuild.");
8623                         err = SET_ERROR(ENOMEM);
8624                         break;
8625                 }
8626 
8627                 /*
8628                  * Now that we know that the next_lb checks out alright, we
8629                  * can start reconstruction from this lb - we can be sure
8630                  * that the L2ARC write hand has not yet reached any of our
8631                  * buffers.
8632                  */
8633                 l2arc_log_blk_restore(dev, load_guid, this_lb,
8634                     LBP_GET_PSIZE(&lb_ptrs[0]));
8635 
8636                 /*
8637                  * End of list detection. We can look ahead two steps in the
8638                  * blk chain and if the 2nd blk from this_lb dips below the
8639                  * initial chain starting point, then we know two things:
8640                  *      1) it can't be valid, and
8641                  *      2) the next_lb's ARC entries might have already been
8642                  *      partially overwritten and so we should stop before
8643                  *      we restore it
8644                  */
8645                 if (l2arc_range_check_overlap(
8646                     this_lb->lb_back2_lbp.lbp_daddr, lb_ptrs[0].lbp_daddr,
8647                     dev->l2ad_dev_hdr->dh_start_lbps[0].lbp_daddr) &&
8648                     !first_pass)
8649                         break;
8650 
8651                 /* log blk restored, continue with next one in the list */
8652                 lb_ptrs[0] = lb_ptrs[1];
8653                 lb_ptrs[1] = this_lb->lb_back2_lbp;
8654                 PTR_SWAP(this_lb, next_lb);
8655                 PTR_SWAP(this_lb_buf, next_lb_buf);
8656                 this_io = next_io;
8657                 next_io = NULL;
8658                 first_pass = B_FALSE;
8659 
8660                 for (;;) {
8661                         if (dev->l2ad_rebuild_cancel) {
8662                                 err = SET_ERROR(ECANCELED);
8663                                 goto out;
8664                         }
8665                         if (spa_config_tryenter(spa, SCL_L2ARC, vd,
8666                             RW_READER)) {
8667                                 lock_held = B_TRUE;
8668                                 break;
8669                         }
8670                         /*
8671                          * L2ARC config lock held by somebody in writer,
8672                          * possibly due to them trying to remove us. They'll
8673                          * likely to want us to shut down, so after a little
8674                          * delay, we check l2ad_rebuild_cancel and retry
8675                          * the lock again.
8676                          */
8677                         delay(1);
8678                 }
8679         }
8680 out:
8681         if (next_io != NULL)
8682                 l2arc_log_blk_prefetch_abort(next_io);
8683         kmem_free(this_lb, sizeof (*this_lb));
8684         kmem_free(next_lb, sizeof (*next_lb));
8685         kmem_free(this_lb_buf, sizeof (l2arc_log_blk_phys_t));
8686         kmem_free(next_lb_buf, sizeof (l2arc_log_blk_phys_t));
8687         if (err == 0)
8688                 ARCSTAT_BUMP(arcstat_l2_rebuild_successes);
8689 
8690         if (lock_held)
8691                 spa_config_exit(spa, SCL_L2ARC, vd);
8692 
8693         return (err);
8694 }
8695 
8696 /*
 * Attempts to read the device header on the provided L2ARC device and
 * writes it to dev->l2ad_dev_hdr.  On success, this function returns 0,
 * otherwise the appropriate error code is returned.
8700  */
8701 static int
8702 l2arc_dev_hdr_read(l2arc_dev_t *dev)
8703 {
8704         int                     err;
8705         uint64_t                guid;
8706         zio_cksum_t             cksum;
8707         l2arc_dev_hdr_phys_t    *hdr = dev->l2ad_dev_hdr;
8708         const uint64_t          hdr_asize = dev->l2ad_dev_hdr_asize;
8709         abd_t *abd;
8710 
8711         guid = spa_guid(dev->l2ad_vdev->vdev_spa);
8712 
8713         abd = abd_get_from_buf(hdr, hdr_asize);
8714         err = zio_wait(zio_read_phys(NULL, dev->l2ad_vdev,
8715             VDEV_LABEL_START_SIZE, hdr_asize, abd,
8716             ZIO_CHECKSUM_OFF, NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
8717             ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL |
8718             ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY, B_FALSE));
8719         abd_put(abd);
8720         if (err != 0) {
8721                 ARCSTAT_BUMP(arcstat_l2_rebuild_abort_io_errors);
8722                 return (err);
8723         }
8724 
8725         if (hdr->dh_magic == BSWAP_64(L2ARC_DEV_HDR_MAGIC_V1))
8726                 byteswap_uint64_array(hdr, sizeof (*hdr));
8727 
8728         if (hdr->dh_magic != L2ARC_DEV_HDR_MAGIC_V1 ||
8729             hdr->dh_spa_guid != guid) {
8730                 /*
8731                  * Attempt to rebuild a device containing no actual dev hdr
8732                  * or containing a header from some other pool.
8733                  */
8734                 ARCSTAT_BUMP(arcstat_l2_rebuild_abort_unsupported);
8735                 return (SET_ERROR(ENOTSUP));
8736         }
8737 
8738         l2arc_dev_hdr_checksum(hdr, &cksum);
8739         if (!ZIO_CHECKSUM_EQUAL(hdr->dh_self_cksum, cksum)) {
8740                 ARCSTAT_BUMP(arcstat_l2_rebuild_abort_cksum_errors);
8741                 return (SET_ERROR(EINVAL));
8742         }
8743 
8744         return (0);
8745 }
8746 
8747 /*
8748  * Reads L2ARC log blocks from storage and validates their contents.
8749  *
8750  * This function implements a simple prefetcher to make sure that while
8751  * we're processing one buffer the L2ARC is already prefetching the next
8752  * one in the chain.
8753  *
 * The arguments this_lbp and next_lbp point to the current and next log blk
 * address in the block chain. Similarly, this_lb and next_lb hold the
 * l2arc_log_blk_phys_t's of the current and next L2ARC blk. The this_lb_buf
 * and next_lb_buf must be buffers of appropriate size to hold a raw
 * l2arc_log_blk_phys_t (they are used as catch buffers for read ops prior
 * to buffer decompression).
8760  *
8761  * The `this_io' and `next_io' arguments are used for block prefetching.
8762  * When issuing the first blk IO during rebuild, you should pass NULL for
8763  * `this_io'. This function will then issue a sync IO to read the block and
8764  * also issue an async IO to fetch the next block in the block chain. The
8765  * prefetch IO is returned in `next_io'. On subsequent calls to this
8766  * function, pass the value returned in `next_io' from the previous call
8767  * as `this_io' and a fresh `next_io' pointer to hold the next prefetch IO.
8768  * Prior to the call, you should initialize your `next_io' pointer to be
8769  * NULL. If no prefetch IO was issued, the pointer is left set at NULL.
8770  *
8771  * On success, this function returns 0, otherwise it returns an appropriate
8772  * error code. On error the prefetching IO is aborted and cleared before
8773  * returning from this function. Therefore, if we return `success', the
8774  * caller can assume that we have taken care of cleanup of prefetch IOs.
8775  */
8776 static int
8777 l2arc_log_blk_read(l2arc_dev_t *dev,
8778     const l2arc_log_blkptr_t *this_lbp, const l2arc_log_blkptr_t *next_lbp,
8779     l2arc_log_blk_phys_t *this_lb, l2arc_log_blk_phys_t *next_lb,
8780     uint8_t *this_lb_buf, uint8_t *next_lb_buf,
8781     zio_t *this_io, zio_t **next_io)
8782 {
8783         int             err = 0;
8784         zio_cksum_t     cksum;
8785 
8786         ASSERT(this_lbp != NULL && next_lbp != NULL);
8787         ASSERT(this_lb != NULL && next_lb != NULL);
8788         ASSERT(this_lb_buf != NULL && next_lb_buf != NULL);
8789         ASSERT(next_io != NULL && *next_io == NULL);
8790         ASSERT(l2arc_log_blkptr_valid(dev, this_lbp));
8791 
8792         /*
8793          * Check to see if we have issued the IO for this log blk in a
8794          * previous run. If not, this is the first call, so issue it now.
8795          */
8796         if (this_io == NULL) {
8797                 this_io = l2arc_log_blk_prefetch(dev->l2ad_vdev, this_lbp,
8798                     this_lb_buf);
8799         }
8800 
8801         /*
8802          * Peek to see if we can start issuing the next IO immediately.
8803          */
8804         if (l2arc_log_blkptr_valid(dev, next_lbp)) {
8805                 /*
8806                  * Start issuing IO for the next log blk early - this
8807                  * should help keep the L2ARC device busy while we
8808                  * decompress and restore this log blk.
8809                  */
8810                 *next_io = l2arc_log_blk_prefetch(dev->l2ad_vdev, next_lbp,
8811                     next_lb_buf);
8812         }
8813 
8814         /* Wait for the IO to read this log block to complete */
8815         if ((err = zio_wait(this_io)) != 0) {
8816                 ARCSTAT_BUMP(arcstat_l2_rebuild_abort_io_errors);
8817                 goto cleanup;
8818         }
8819 
8820         /* Make sure the buffer checks out */
8821         fletcher_4_native(this_lb_buf, LBP_GET_PSIZE(this_lbp), NULL, &cksum);
8822         if (!ZIO_CHECKSUM_EQUAL(cksum, this_lbp->lbp_cksum)) {
8823                 ARCSTAT_BUMP(arcstat_l2_rebuild_abort_cksum_errors);
8824                 err = SET_ERROR(EINVAL);
8825                 goto cleanup;
8826         }
8827 
8828         /* Now we can take our time decoding this buffer */
8829         switch (LBP_GET_COMPRESS(this_lbp)) {
8830         case ZIO_COMPRESS_OFF:
8831                 bcopy(this_lb_buf, this_lb, sizeof (*this_lb));
8832                 break;
8833         case ZIO_COMPRESS_LZ4:
8834                 err = zio_decompress_data_buf(LBP_GET_COMPRESS(this_lbp),
8835                     this_lb_buf, this_lb, LBP_GET_PSIZE(this_lbp),
8836                     sizeof (*this_lb));
8837                 if (err != 0) {
8838                         err = SET_ERROR(EINVAL);
8839                         goto cleanup;
8840                 }
8841 
8842                 break;
8843         default:
8844                 err = SET_ERROR(EINVAL);
8845                 break;
8846         }
8847 
8848         if (this_lb->lb_magic == BSWAP_64(L2ARC_LOG_BLK_MAGIC))
8849                 byteswap_uint64_array(this_lb, sizeof (*this_lb));
8850 
8851         if (this_lb->lb_magic != L2ARC_LOG_BLK_MAGIC) {
8852                 err = SET_ERROR(EINVAL);
8853                 goto cleanup;
8854         }
8855 
8856 cleanup:
8857         /* Abort an in-flight prefetch I/O in case of error */
8858         if (err != 0 && *next_io != NULL) {
8859                 l2arc_log_blk_prefetch_abort(*next_io);
8860                 *next_io = NULL;
8861         }
8862         return (err);
8863 }
8864 
8865 /*
8866  * Restores the payload of a log blk to ARC. This creates empty ARC hdr
8867  * entries which only contain an l2arc hdr, essentially restoring the
8868  * buffers to their L2ARC evicted state. This function also updates space
8869  * usage on the L2ARC vdev to make sure it tracks restored buffers.
8870  */
8871 static void
8872 l2arc_log_blk_restore(l2arc_dev_t *dev, uint64_t load_guid,
8873     const l2arc_log_blk_phys_t *lb, uint64_t lb_psize)
8874 {
8875         uint64_t        size = 0, psize = 0;
8876 
8877         for (int i = L2ARC_LOG_BLK_ENTRIES - 1; i >= 0; i--) {
8878                 /*
8879                  * Restore goes in the reverse temporal direction to preserve
8880                  * correct temporal ordering of buffers in the l2ad_buflist.
8881                  * l2arc_hdr_restore also does a list_insert_tail instead of
8882                  * list_insert_head on the l2ad_buflist:
8883                  *
8884                  *              LIST    l2ad_buflist            LIST
8885                  *              HEAD  <------ (time) ------  TAIL
8886                  * direction    +-----+-----+-----+-----+-----+    direction
8887                  * of l2arc <== | buf | buf | buf | buf | buf | ===> of rebuild
8888                  * fill         +-----+-----+-----+-----+-----+
8889                  *              ^                               ^
8890                  *              |                               |
8891                  *              |                               |
8892                  *      l2arc_fill_thread               l2arc_rebuild
8893                  *      places new bufs here            restores bufs here
8894                  *
8895                  * This also works when the restored bufs get evicted at any
8896                  * point during the rebuild.
8897                  */
8898                 l2arc_hdr_restore(&lb->lb_entries[i], dev, load_guid);
8899                 size += LE_GET_LSIZE(&lb->lb_entries[i]);
8900                 psize += LE_GET_PSIZE(&lb->lb_entries[i]);
8901         }
8902 
8903         /*
8904          * Record rebuild stats:
8905          *      size            In-memory size of restored buffer data in ARC
8906          *      psize           Physical size of restored buffers in the L2ARC
8907          *      bufs            # of ARC buffer headers restored
8908          *      log_blks        # of L2ARC log entries processed during restore
8909          */
8910         ARCSTAT_INCR(arcstat_l2_rebuild_size, size);
8911         ARCSTAT_INCR(arcstat_l2_rebuild_psize, psize);
8912         ARCSTAT_INCR(arcstat_l2_rebuild_bufs, L2ARC_LOG_BLK_ENTRIES);
8913         ARCSTAT_BUMP(arcstat_l2_rebuild_log_blks);
8914         ARCSTAT_F_AVG(arcstat_l2_log_blk_avg_size, lb_psize);
8915         ARCSTAT_F_AVG(arcstat_l2_data_to_meta_ratio, psize / lb_psize);
8916         vdev_space_update(dev->l2ad_vdev, psize, 0, 0);
8917 }
8918 
8919 /*
8920  * Restores a single ARC buf hdr from a log block. The ARC buffer is put
8921  * into a state indicating that it has been evicted to L2ARC.
8922  */
8923 static void
8924 l2arc_hdr_restore(const l2arc_log_ent_phys_t *le, l2arc_dev_t *dev,
8925     uint64_t load_guid)
8926 {
8927         arc_buf_hdr_t           *hdr, *exists;
8928         kmutex_t                *hash_lock;
8929         arc_buf_contents_t      type = LE_GET_TYPE(le);
8930 
8931         /*
8932          * Do all the allocation before grabbing any locks, this lets us
8933          * sleep if memory is full and we don't have to deal with failed
8934          * allocations.
8935          */
8936         hdr = arc_buf_alloc_l2only(load_guid, type, dev, le->le_dva,
8937             le->le_daddr, LE_GET_LSIZE(le), LE_GET_PSIZE(le),
8938             le->le_birth, le->le_freeze_cksum, LE_GET_CHECKSUM(le),
8939             LE_GET_COMPRESS(le), LE_GET_ARC_COMPRESS(le));
8940 
8941         ARCSTAT_INCR(arcstat_l2_lsize, HDR_GET_LSIZE(hdr));
8942         ARCSTAT_INCR(arcstat_l2_psize, arc_hdr_size(hdr));
8943 
8944         mutex_enter(&dev->l2ad_mtx);
8945         /*
8946          * We connect the l2hdr to the hdr only after the hdr is in the hash
8947          * table, otherwise the rest of the arc hdr manipulation machinery
8948          * might get confused.
8949          */
8950         list_insert_tail(&dev->l2ad_buflist, hdr);
8951         (void) refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr);
8952         mutex_exit(&dev->l2ad_mtx);
8953 
8954         exists = buf_hash_insert(hdr, &hash_lock);
8955         if (exists) {
8956                 /* Buffer was already cached, no need to restore it. */
8957                 arc_hdr_destroy(hdr);
8958                 mutex_exit(hash_lock);
8959                 ARCSTAT_BUMP(arcstat_l2_rebuild_bufs_precached);
8960                 return;
8961         }
8962 
8963         mutex_exit(hash_lock);
8964 }
8965 
8966 /*
8967  * Used by PL2ARC related functions that do
8968  * async read/write
8969  */
8970 static void
8971 pl2arc_io_done(zio_t *zio)
8972 {
8973         abd_put(zio->io_private);
8974         zio->io_private = NULL;
8975 }
8976 
8977 /*
8978  * Starts an asynchronous read IO to read a log block. This is used in log
8979  * block reconstruction to start reading the next block before we are done
8980  * decoding and reconstructing the current block, to keep the l2arc device
8981  * nice and hot with read IO to process.
8982  * The returned zio will contain a newly allocated memory buffers for the IO
8983  * data which should then be freed by the caller once the zio is no longer
8984  * needed (i.e. due to it having completed). If you wish to abort this
8985  * zio, you should do so using l2arc_log_blk_prefetch_abort, which takes
8986  * care of disposing of the allocated buffers correctly.
8987  */
8988 static zio_t *
8989 l2arc_log_blk_prefetch(vdev_t *vd, const l2arc_log_blkptr_t *lbp,
8990     uint8_t *lb_buf)
8991 {
8992         uint32_t        psize;
8993         zio_t           *pio;
8994         abd_t           *abd;
8995 
8996         psize = LBP_GET_PSIZE(lbp);
8997         ASSERT(psize <= sizeof (l2arc_log_blk_phys_t));
8998         pio = zio_root(vd->vdev_spa, NULL, NULL, ZIO_FLAG_DONT_CACHE |
8999             ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE |
9000             ZIO_FLAG_DONT_RETRY);
9001         abd = abd_get_from_buf(lb_buf, psize);
9002         (void) zio_nowait(zio_read_phys(pio, vd, lbp->lbp_daddr, psize,
9003             abd, ZIO_CHECKSUM_OFF, pl2arc_io_done, abd,
9004             ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL |
9005             ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY, B_FALSE));
9006 
9007         return (pio);
9008 }
9009 
9010 /*
9011  * Aborts a zio returned from l2arc_log_blk_prefetch and frees the data
9012  * buffers allocated for it.
9013  */
9014 static void
9015 l2arc_log_blk_prefetch_abort(zio_t *zio)
9016 {
9017         (void) zio_wait(zio);
9018 }
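
     /*
      * Illustrative only (hypothetical locals and helper, not the in-tree
      * rebuild loop): a caller of l2arc_log_blk_prefetch typically kicks off
      * the read of the next log block, decodes the current one, and aborts
      * the outstanding prefetch if it hits an error:
      *
      *     next_io = l2arc_log_blk_prefetch(vd, &next_lbp, next_buf);
      *     err = process_current_block();      (hypothetical helper)
      *     if (err != 0) {
      *             l2arc_log_blk_prefetch_abort(next_io);
      *             return (err);
      *     }
      */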
9019 
9020 /*
9021  * Creates a zio to update the device header on an l2arc device. The zio is
9022  * initiated as a child of `pio'.
9023  */
9024 static void
9025 l2arc_dev_hdr_update(l2arc_dev_t *dev, zio_t *pio)
9026 {
9027         zio_t                   *wzio;
9028         abd_t                   *abd;
9029         l2arc_dev_hdr_phys_t    *hdr = dev->l2ad_dev_hdr;
9030         const uint64_t          hdr_asize = dev->l2ad_dev_hdr_asize;
9031 
9032         hdr->dh_magic = L2ARC_DEV_HDR_MAGIC_V1;
9033         hdr->dh_spa_guid = spa_guid(dev->l2ad_vdev->vdev_spa);
9034         hdr->dh_alloc_space = refcount_count(&dev->l2ad_alloc);
9035         hdr->dh_flags = 0;
9036         if (dev->l2ad_first)
9037                 hdr->dh_flags |= L2ARC_DEV_HDR_EVICT_FIRST;
9038 
9039         /* checksum operation goes last */
9040         l2arc_dev_hdr_checksum(hdr, &hdr->dh_self_cksum);
9041 
9042         abd = abd_get_from_buf(hdr, hdr_asize);
9043         wzio = zio_write_phys(pio, dev->l2ad_vdev, VDEV_LABEL_START_SIZE,
9044             hdr_asize, abd, ZIO_CHECKSUM_OFF, pl2arc_io_done, abd,
9045             ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, B_FALSE);
9046         DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, zio_t *, wzio);
9047         (void) zio_nowait(wzio);
9048 }
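
     /*
      * Note that l2arc_dev_hdr_update() writes the header at offset
      * VDEV_LABEL_START_SIZE, i.e. just past the space reserved for the
      * front vdev labels, and recomputes the header's self-checksum
      * immediately before the write is issued.
      */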
9049 
9050 /*
9051  * Commits a log block to the L2ARC device. This routine is invoked from
9052  * l2arc_write_buffers when the log block fills up.
9053  * This function allocates some memory to temporarily hold the serialized
9054  * buffer to be written. This is then released in l2arc_write_done.
9055  */
9056 static void
9057 l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio,
9058     l2arc_write_callback_t *cb)
9059 {
9060         l2arc_log_blk_phys_t    *lb = &dev->l2ad_log_blk;
9061         uint64_t                psize, asize;
9062         l2arc_log_blk_buf_t     *lb_buf;
9063         abd_t                   *abd;
9064         zio_t                   *wzio;
9065 
9066         VERIFY(dev->l2ad_log_ent_idx == L2ARC_LOG_BLK_ENTRIES);
9067 
9068         /* link the buffer into the block chain */
9069         lb->lb_back2_lbp = dev->l2ad_dev_hdr->dh_start_lbps[1];
9070         lb->lb_magic = L2ARC_LOG_BLK_MAGIC;
9071 
9072         /* try to compress the buffer */
9073         lb_buf = kmem_zalloc(sizeof (*lb_buf), KM_SLEEP);
9074         list_insert_tail(&cb->l2wcb_log_blk_buflist, lb_buf);
9075         abd = abd_get_from_buf(lb, sizeof (*lb));
9076         psize = zio_compress_data(ZIO_COMPRESS_LZ4, abd, lb_buf->lbb_log_blk,
9077             sizeof (*lb));
9078         abd_put(abd);
9079         /* a log block is never entirely zero */
9080         ASSERT(psize != 0);
9081         asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);
9082         ASSERT(asize <= sizeof (lb_buf->lbb_log_blk));
9083 
9084         /*
9085          * Update the start log blk pointer in the device header to point
9086          * to the log block we're about to write.
9087          */
9088         dev->l2ad_dev_hdr->dh_start_lbps[1] =
9089             dev->l2ad_dev_hdr->dh_start_lbps[0];
9090         dev->l2ad_dev_hdr->dh_start_lbps[0].lbp_daddr = dev->l2ad_hand;
9091         _NOTE(CONSTCOND)
9092         LBP_SET_LSIZE(&dev->l2ad_dev_hdr->dh_start_lbps[0], sizeof (*lb));
9093         LBP_SET_PSIZE(&dev->l2ad_dev_hdr->dh_start_lbps[0], asize);
9094         LBP_SET_CHECKSUM(&dev->l2ad_dev_hdr->dh_start_lbps[0],
9095             ZIO_CHECKSUM_FLETCHER_4);
9096         LBP_SET_TYPE(&dev->l2ad_dev_hdr->dh_start_lbps[0], 0);
9097 
9098         if (asize < sizeof (*lb)) {
9099                 /* compression succeeded */
9100                 bzero(lb_buf->lbb_log_blk + psize, asize - psize);
9101                 LBP_SET_COMPRESS(&dev->l2ad_dev_hdr->dh_start_lbps[0],
9102                     ZIO_COMPRESS_LZ4);
9103         } else {
9104                 /* compression failed */
9105                 bcopy(lb, lb_buf->lbb_log_blk, sizeof (*lb));
9106                 LBP_SET_COMPRESS(&dev->l2ad_dev_hdr->dh_start_lbps[0],
9107                     ZIO_COMPRESS_OFF);
9108         }
9109 
9110         /* checksum what we're about to write */
9111         fletcher_4_native(lb_buf->lbb_log_blk, asize,
9112             NULL, &dev->l2ad_dev_hdr->dh_start_lbps[0].lbp_cksum);
9113 
9114         /* perform the write itself */
9115         CTASSERT(L2ARC_LOG_BLK_SIZE >= SPA_MINBLOCKSIZE &&
9116             L2ARC_LOG_BLK_SIZE <= SPA_MAXBLOCKSIZE);
9117         abd = abd_get_from_buf(lb_buf->lbb_log_blk, asize);
9118         wzio = zio_write_phys(pio, dev->l2ad_vdev, dev->l2ad_hand,
9119             asize, abd, ZIO_CHECKSUM_OFF, pl2arc_io_done, abd,
9120             ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, B_FALSE);
9121         DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, zio_t *, wzio);
9122         (void) zio_nowait(wzio);
9123 
9124         dev->l2ad_hand += asize;
9125         vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
9126 
9127         /* bump the kstats */
9128         ARCSTAT_INCR(arcstat_l2_write_bytes, asize);
9129         ARCSTAT_BUMP(arcstat_l2_log_blk_writes);
9130         ARCSTAT_F_AVG(arcstat_l2_log_blk_avg_size, asize);
9131         ARCSTAT_F_AVG(arcstat_l2_data_to_meta_ratio,
9132             dev->l2ad_log_blk_payload_asize / asize);
9133 
9134         /* start a new log block */
9135         dev->l2ad_log_ent_idx = 0;
9136         dev->l2ad_log_blk_payload_asize = 0;
9137 }
9138 
9139 /*
9140  * Validates an L2ARC log blk address to make sure that it can be read
9141  * from the provided L2ARC device. Returns B_TRUE if the address is
9142  * within the device's bounds, or B_FALSE if not.
9143  */
9144 static boolean_t
9145 l2arc_log_blkptr_valid(l2arc_dev_t *dev, const l2arc_log_blkptr_t *lbp)
9146 {
9147         uint64_t psize = LBP_GET_PSIZE(lbp);
9148         uint64_t end = lbp->lbp_daddr + psize;
9149 
9150         /*
9151          * A log block is valid if all of the following conditions are true:
9152          * - it fits entirely between l2ad_start and l2ad_end
9153          * - its physical size is nonzero and no larger than a log block
9154          */
9155         return (lbp->lbp_daddr >= dev->l2ad_start && end <= dev->l2ad_end &&
9156             psize > 0 && psize <= sizeof (l2arc_log_blk_phys_t));
9157 }
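
     /*
      * For example, on a hypothetical device with l2ad_start = 0x400000 and
      * l2ad_end = 0x10000000, a log blk pointer with lbp_daddr = 0xFFFF000
      * and PSIZE = 0x2000 is rejected, since the block would end at
      * 0x10001000, past l2ad_end; the same pointer at lbp_daddr = 0xFFFD000
      * would satisfy the bounds check.
      */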
9158 
9159 /*
9160  * Computes the checksum of `hdr' and stores it in `cksum'.
9161  */
9162 static void
9163 l2arc_dev_hdr_checksum(const l2arc_dev_hdr_phys_t *hdr, zio_cksum_t *cksum)
9164 {
9165         fletcher_4_native((uint8_t *)hdr +
9166             offsetof(l2arc_dev_hdr_phys_t, dh_spa_guid),
9167             sizeof (*hdr) - offsetof(l2arc_dev_hdr_phys_t, dh_spa_guid),
9168             NULL, cksum);
9169 }
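
     /*
      * Note that the checksummed region deliberately starts at dh_spa_guid
      * and runs to the end of the structure; anything laid out before that
      * offset is excluded from the computation.
      */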
9170 
9171 /*
9172  * Inserts ARC buffer `ab' into the current L2ARC log blk on the device.
9173  * The buffer being inserted must be present in L2ARC.
9174  * Returns B_TRUE if the L2ARC log blk is full and needs to be committed
9175  * to L2ARC, or B_FALSE if it still has room for more ARC buffers.
9176  */
9177 static boolean_t
9178 l2arc_log_blk_insert(l2arc_dev_t *dev, const arc_buf_hdr_t *ab)
9179 {
9180         l2arc_log_blk_phys_t    *lb = &dev->l2ad_log_blk;
9181         l2arc_log_ent_phys_t    *le;
9182         int                     index = dev->l2ad_log_ent_idx++;
9183 
9184         ASSERT(index < L2ARC_LOG_BLK_ENTRIES);
9185 
9186         le = &lb->lb_entries[index];
9187         bzero(le, sizeof (*le));
9188         le->le_dva = ab->b_dva;
9189         le->le_birth = ab->b_birth;
9190         le->le_daddr = ab->b_l2hdr.b_daddr;
9191         LE_SET_LSIZE(le, HDR_GET_LSIZE(ab));
9192         LE_SET_PSIZE(le, HDR_GET_PSIZE(ab));
9193 
9194         if ((ab->b_flags & ARC_FLAG_COMPRESSED_ARC) != 0) {
9195                 LE_SET_ARC_COMPRESS(le, 1);
9196                 LE_SET_COMPRESS(le, HDR_GET_COMPRESS(ab));
9197         } else {
9198                 ASSERT3U(HDR_GET_COMPRESS(ab), ==, ZIO_COMPRESS_OFF);
9199                 LE_SET_ARC_COMPRESS(le, 0);
9200                 LE_SET_COMPRESS(le, ZIO_COMPRESS_OFF);
9201         }
9202 
9203         if (ab->b_freeze_cksum != NULL) {
9204                 le->le_freeze_cksum = *ab->b_freeze_cksum;
9205                 LE_SET_CHECKSUM(le, ZIO_CHECKSUM_FLETCHER_2);
9206         } else {
9207                 LE_SET_CHECKSUM(le, ZIO_CHECKSUM_OFF);
9208         }
9209 
9210         LE_SET_TYPE(le, arc_flags_to_bufc(ab->b_flags));
9211         dev->l2ad_log_blk_payload_asize += arc_hdr_size((arc_buf_hdr_t *)ab);
9212 
9213         return (dev->l2ad_log_ent_idx == L2ARC_LOG_BLK_ENTRIES);
9214 }
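
     /*
      * Illustrative only (hypothetical snippet, not the exact in-tree
      * caller): the write path pairs this with l2arc_log_blk_commit,
      * committing the log block as soon as an insert reports it full:
      *
      *     if (l2arc_log_blk_insert(dev, hdr))
      *             l2arc_log_blk_commit(dev, pio, cb);
      */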
9215 
9216 /*
9217  * Checks whether a given L2ARC device address sits in a time-sequential
9218  * range. The trick here is that the L2ARC is a rotary buffer, so we can't
9219  * just do a range comparison; we need to handle the situation in which the
9220  * range wraps around the end of the L2ARC device. Arguments:
9221  *      bottom  Lower end of the range to check (written to earlier).
9222  *      top     Upper end of the range to check (written to later).
9223  *      check   The address for which we want to determine whether it
9224  *              sits between bottom and top.
9225  *
9226  * The 3-way conditional below represents the following cases:
9227  *
9228  *      bottom < top : Sequentially ordered case:
9229  *        <check>--------+-------------------+
9230  *                       |  (overlap here?)  |
9231  *       L2ARC dev       V                   V
9232  *       |---------------<bottom>============<top>--------------|
9233  *
9234  *      bottom > top: Looped-around case:
9235  *                            <check>--------+------------------+
9236  *                                           |  (overlap here?) |
9237  *       L2ARC dev                           V                  V
9238  *       |===============<top>---------------<bottom>===========|
9239  *       ^               ^
9240  *       |  (or here?)   |
9241  *       +---------------+---------<check>
9242  *
9243  *      top == bottom : Just a single address comparison.
9244  */
9245 static inline boolean_t
9246 l2arc_range_check_overlap(uint64_t bottom, uint64_t top, uint64_t check)
9247 {
9248         if (bottom < top)
9249                 return (bottom <= check && check <= top);
9250         else if (bottom > top)
9251                 return (check <= top || bottom <= check);
9252         else
9253                 return (check == top);
9254 }
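
     /*
      * For example, with bottom = 900 and top = 100 (the looped-around case
      * above), check = 950 and check = 50 both overlap the range while
      * check = 500 does not; with bottom = 100 and top = 900 the opposite
      * holds.  (Purely illustrative offsets, not real device addresses.)
      */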