/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */

#ifndef _SYS_DNODE_H
#define _SYS_DNODE_H

#include <sys/zfs_context.h>
#include <sys/avl.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/zio.h>
#include <sys/refcount.h>
#include <sys/dmu_zfetch.h>
#include <sys/zrlock.h>
#include <sys/multilist.h>

#ifdef  __cplusplus
extern "C" {
#endif

/*
 * dnode_hold() flags.
 */
#define DNODE_MUST_BE_ALLOCATED 1
#define DNODE_MUST_BE_FREE      2

/*
 * dnode_next_offset() flags.
 */
#define DNODE_FIND_HOLE         1
#define DNODE_FIND_BACKWARDS    2
#define DNODE_FIND_HAVELOCK     4

/*
 * Fixed constants.
 */
#define DNODE_SHIFT             9       /* 512 bytes */
#define DN_MIN_INDBLKSHIFT      12      /* 4k */
#define DN_DFL_INDBLKSHIFT      14      /* 16k */
/*
 * If we ever increase this value beyond 20, we need to revisit all logic that
 * does x << level * ebps to handle overflow.  With a 1M indirect block size,
 * 4 levels of indirect blocks would not be able to guarantee addressing an
 * entire object, so 5 levels will be used, but 5 * (20 - 7) = 65.
 */
#define DN_MAX_INDBLKSHIFT      17      /* 128k */
#define DNODE_BLOCK_SHIFT       14      /* 16k */
#define DNODE_CORE_SIZE         64      /* 64 bytes for dnode sans blkptrs */
#define DN_MAX_OBJECT_SHIFT     48      /* 256 trillion (zfs_fid_t limit) */
#define DN_MAX_OFFSET_SHIFT     64      /* 2^64 bytes in a dnode */
/*
 * dnode id flags
 *
 * Note: a file's IDs will never be moved from the bonus buffer to the
 * spill block, and only in a crypto environment would they reside in
 * the spill block at all.
 */
#define DN_ID_CHKED_BONUS       0x1
#define DN_ID_CHKED_SPILL       0x2
#define DN_ID_OLD_EXIST         0x4
#define DN_ID_NEW_EXIST         0x8

/*
 * Derived constants.
 */
#define DNODE_SIZE      (1 << DNODE_SHIFT)
#define DN_MAX_NBLKPTR  ((DNODE_SIZE - DNODE_CORE_SIZE) >> SPA_BLKPTRSHIFT)
#define DN_MAX_BONUSLEN (DNODE_SIZE - DNODE_CORE_SIZE - (1 << SPA_BLKPTRSHIFT))
#define DN_MAX_OBJECT   (1ULL << DN_MAX_OBJECT_SHIFT)
#define DN_ZERO_BONUSLEN        (DN_MAX_BONUSLEN + 1)
#define DN_KILL_SPILLBLK (1)

#define DNODES_PER_BLOCK_SHIFT  (DNODE_BLOCK_SHIFT - DNODE_SHIFT)
#define DNODES_PER_BLOCK        (1ULL << DNODES_PER_BLOCK_SHIFT)
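
/*
 * Worked example, assuming SPA_BLKPTRSHIFT == 7 (i.e. 128-byte block
 * pointers, per spa.h):
 *
 *      DNODE_SIZE              = 1 << 9 = 512 bytes
 *      DN_MAX_NBLKPTR          = (512 - 64) >> 7 = 3 block pointers
 *      DN_MAX_BONUSLEN         = 512 - 64 - 128 = 320 bytes
 *      DNODES_PER_BLOCK        = 1 << (14 - 9) = 32 dnodes per 16k block
 */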

/*
 * This is inaccurate if the indblkshift of the particular object is not the
 * max.  But it's only used by userland to calculate the zvol reservation.
 */
#define DNODES_PER_LEVEL_SHIFT  (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT)
#define DNODES_PER_LEVEL        (1ULL << DNODES_PER_LEVEL_SHIFT)

/* The +2 here is a cheesy way to round up */
#define DN_MAX_LEVELS   (2 + ((DN_MAX_OFFSET_SHIFT - SPA_MINBLOCKSHIFT) / \
        (DN_MIN_INDBLKSHIFT - SPA_BLKPTRSHIFT)))
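/*
 * With the constants above (and SPA_MINBLOCKSHIFT == 9), this works out
 * to DN_MAX_LEVELS = 2 + (64 - 9) / (12 - 7) = 13, enough levels to
 * address a maximal object even at the minimum indirect block size;
 * likewise DNODES_PER_LEVEL = 1 << (17 - 7) = 1024.
 */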

#define DN_BONUS(dnp)   ((void*)((dnp)->dn_bonus + \
        (((dnp)->dn_nblkptr - 1) * sizeof (blkptr_t))))

#define DN_USED_BYTES(dnp) (((dnp)->dn_flags & DNODE_FLAG_USED_BYTES) ? \
        (dnp)->dn_used : (dnp)->dn_used << SPA_MINBLOCKSHIFT)

#define EPB(blkshift, typeshift)        (1 << (blkshift - typeshift))
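/*
 * EPB computes "entries per block": e.g. a 16k dnode block holds
 * EPB(DNODE_BLOCK_SHIFT, DNODE_SHIFT) = 1 << (14 - 9) = 32 dnodes.
 */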

struct dmu_buf_impl;
struct objset;
struct zio;

enum dnode_dirtycontext {
        DN_UNDIRTIED,
        DN_DIRTY_OPEN,
        DN_DIRTY_SYNC
};

/* Is dn_used in bytes?  If not, it's in multiples of SPA_MINBLOCKSIZE. */
#define DNODE_FLAG_USED_BYTES           (1<<0)
#define DNODE_FLAG_USERUSED_ACCOUNTED   (1<<1)

/* Does the dnode have a SA spill blkptr in its bonus buffer? */
#define DNODE_FLAG_SPILL_BLKPTR (1<<2)

/*
 * Smart Compression
 *
 * Smart compression is a simple heuristic algorithm that automatically
 * tries to avoid compressing incompressible objects by continuously
 * monitoring per-object compression performance.
 *
 * The smart compression system has two states for each object:
 *      COMPRESSING: compression is applied to the object and results are
 *              reevaluated at fixed check intervals to see if we should
 *              continue attempting to compress.
 *      DENYING: compression has failed too often, so we give up trying
 *              for a while and retry at a later time.
 *
 * Each time compression succeeds, we decrement the sc_comp_failures
 * counter, down to a minimum of -5. This keeps short incompressible
 * transients from pushing us into the DENYING state. Each time
 * compression fails, we increment the sc_comp_failures counter, up to
 * a maximum of 5 (zfs_smartcomp_interval_shift). If the failure counter
 * is > 0 when compression fails, we transition from the COMPRESSING to
 * the DENYING state and calculate a deny interval by multiplying
 * zfs_smartcomp_interval by (1 << sc_comp_failures). Successive
 * compression failures thus make us retry compression progressively
 * less often, down to approximately 32x less often (by default) than
 * the compression test interval. To keep data patterns from gaming the
 * heuristic, the deny interval is randomized by +-10%.
 */
typedef enum dnode_smartcomp_state {
        DNODE_SMARTCOMP_COMPRESSING = 0,
        DNODE_SMARTCOMP_DENYING
} dnode_smartcomp_state_t;

typedef struct dnode_smartcomp {
        kmutex_t                sc_lock;
        dnode_smartcomp_state_t sc_state;
        uint64_t                sc_size;
        uint64_t                sc_orig_size;
        uint64_t                sc_deny_interval;
        int64_t                 sc_comp_failures;
} dnode_smartcomp_t;
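
/*
 * A minimal sketch of the transitions described above, where `interval`
 * stands for zfs_smartcomp_interval and `max_failures` for
 * zfs_smartcomp_interval_shift; the real update logic lives behind the
 * smart compression callbacks declared later in this file:
 *
 *      if (compressed_well) {
 *              sc->sc_comp_failures = MAX(sc->sc_comp_failures - 1, -5);
 *      } else {
 *              if (sc->sc_comp_failures > 0) {
 *                      sc->sc_state = DNODE_SMARTCOMP_DENYING;
 *                      sc->sc_deny_interval =  // also randomized +-10%
 *                          interval * (1ULL << sc->sc_comp_failures);
 *              }
 *              sc->sc_comp_failures = MIN(sc->sc_comp_failures + 1,
 *                  max_failures);
 *      }
 */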

typedef struct dnode_phys {
        uint8_t dn_type;                /* dmu_object_type_t */
        uint8_t dn_indblkshift;         /* ln2(indirect block size) */
        uint8_t dn_nlevels;             /* 1=dn_blkptr->data blocks */
        uint8_t dn_nblkptr;             /* length of dn_blkptr */
        uint8_t dn_bonustype;           /* type of data in bonus buffer */
        uint8_t dn_checksum;            /* ZIO_CHECKSUM type */
        uint8_t dn_compress;            /* ZIO_COMPRESS type */
        uint8_t dn_flags;               /* DNODE_FLAG_* */
        uint16_t dn_datablkszsec;       /* data block size in 512b sectors */
        uint16_t dn_bonuslen;           /* length of dn_bonus */
        uint8_t dn_pad2[4];

        /* accounting is protected by dn_dirty_mtx */
        uint64_t dn_maxblkid;           /* largest allocated block ID */
        uint64_t dn_used;               /* bytes (or sectors) of disk space */

        uint64_t dn_pad3[4];

        blkptr_t dn_blkptr[1];
        uint8_t dn_bonus[DN_MAX_BONUSLEN - sizeof (blkptr_t)];
        blkptr_t dn_spill;
} dnode_phys_t;
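
/*
 * Layout check (again assuming 128-byte block pointers): 64 bytes of
 * fixed fields + dn_blkptr[1] (128) + dn_bonus (320 - 128 = 192) +
 * dn_spill (128) = 512 bytes = DNODE_SIZE.  When dn_nblkptr > 1, the
 * additional block pointers overlay the start of dn_bonus, which is
 * why DN_BONUS() skips over (dn_nblkptr - 1) blkptrs.
 */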

struct dnode {
        /*
         * Protects the structure of the dnode, including the number of levels
         * of indirection (dn_nlevels), dn_maxblkid, and dn_next_*
         */
        krwlock_t dn_struct_rwlock;

        /* Our link on dn_objset->os_dnodes list; protected by os_lock.  */
        list_node_t dn_link;

        /* immutable: */
        struct objset *dn_objset;
        uint64_t dn_object;
        struct dmu_buf_impl *dn_dbuf;
        struct dnode_handle *dn_handle;
        dnode_phys_t *dn_phys; /* pointer into dn->dn_dbuf->db.db_data */

        /*
         * Copies of stuff in dn_phys.  They're valid in the open
         * context (e.g. even before the dnode is first synced).
         * Where necessary, these are protected by dn_struct_rwlock.
         */
        dmu_object_type_t dn_type;      /* object type */
        uint16_t dn_bonuslen;           /* bonus length */
        uint8_t dn_bonustype;           /* bonus type */
        uint8_t dn_nblkptr;             /* number of blkptrs (immutable) */
        uint8_t dn_checksum;            /* ZIO_CHECKSUM type */
        uint8_t dn_compress;            /* ZIO_COMPRESS type */
        uint8_t dn_nlevels;
        uint8_t dn_indblkshift;
        uint8_t dn_datablkshift;        /* zero if blksz not power of 2! */
        uint8_t dn_moved;               /* Has this dnode been moved? */
        uint16_t dn_datablkszsec;       /* in 512b sectors */
        uint32_t dn_datablksz;          /* in bytes */
        uint64_t dn_maxblkid;
        uint8_t dn_next_type[TXG_SIZE];
        uint8_t dn_next_nblkptr[TXG_SIZE];
        uint8_t dn_next_nlevels[TXG_SIZE];
        uint8_t dn_next_indblkshift[TXG_SIZE];
        uint8_t dn_next_bonustype[TXG_SIZE];
        uint8_t dn_rm_spillblk[TXG_SIZE];       /* for removing spill blk */
        uint16_t dn_next_bonuslen[TXG_SIZE];
        uint32_t dn_next_blksz[TXG_SIZE];       /* next block size in bytes */

        /* protected by dn_dbufs_mtx; declared here to fill 32-bit hole */
        uint32_t dn_dbufs_count;        /* count of dn_dbufs */

        /* protected by os_lock: */
        multilist_node_t dn_dirty_link[TXG_SIZE]; /* next on dataset's dirty */

        /* protected by dn_mtx: */
        kmutex_t dn_mtx;
        list_t dn_dirty_records[TXG_SIZE];
        struct range_tree *dn_free_ranges[TXG_SIZE];
        uint64_t dn_allocated_txg;
        uint64_t dn_free_txg;
        uint64_t dn_assigned_txg;
        kcondvar_t dn_notxholds;
        enum dnode_dirtycontext dn_dirtyctx;
        uint8_t *dn_dirtyctx_firstset;          /* dbg: contents meaningless */

        /* protected by own devices */
        refcount_t dn_tx_holds;
        refcount_t dn_holds;

        kmutex_t dn_dbufs_mtx;
        /*
         * Descendent dbufs, ordered by dbuf_compare. Note that dn_dbufs
         * can contain multiple dbufs of the same (level, blkid) when a
         * dbuf is marked DB_EVICTING without being removed from
         * dn_dbufs. To maintain the avl invariant that there cannot be
         * duplicate entries, we order the dbufs by an arbitrary value -
         * their address in memory. This means that dn_dbufs cannot be used to
         * directly look up a dbuf. Instead, callers must use avl_walk, have
         * a reference to the dbuf, or look up a nonexistent node with
         * db_state = DB_SEARCH (see dbuf_free_range for an example).
         */
        avl_tree_t dn_dbufs;

        /* protected by dn_struct_rwlock */
        struct dmu_buf_impl *dn_bonus;  /* bonus buffer dbuf */

        boolean_t dn_have_spill;        /* have spill or are spilling */

        /* parent IO for current sync write */
        zio_t *dn_zio;

        /* used in syncing context */
        uint64_t dn_oldused;    /* old phys used bytes */
        uint64_t dn_oldflags;   /* old phys dn_flags */
        uint64_t dn_olduid, dn_oldgid;
        uint64_t dn_newuid, dn_newgid;
        int dn_id_flags;

        /* holds prefetch structure */
        struct zfetch   dn_zfetch;

        dnode_smartcomp_t dn_smartcomp; /* smart compression performance */
};

/*
 * Adds a level of indirection between the dbuf and the dnode to avoid
 * iterating descendent dbufs in dnode_move(). Handles are not allocated
 * individually, but as an array of child dnodes in dnode_hold_impl().
 */
typedef struct dnode_handle {
        /* Protects dnh_dnode from modification by dnode_move(). */
        zrlock_t dnh_zrlock;
        dnode_t *dnh_dnode;
} dnode_handle_t;

typedef struct dnode_children {
        dmu_buf_user_t dnc_dbu;         /* User evict data */
        size_t dnc_count;               /* number of children */
        dnode_handle_t dnc_children[];  /* sized dynamically */
} dnode_children_t;
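
/*
 * A sketch (illustrative only) of how dnode_hold_impl() might size and
 * allocate a child array for the `epb` dnodes in one metadnode block:
 *
 *      dnode_children_t *dnc = kmem_zalloc(sizeof (dnode_children_t) +
 *          epb * sizeof (dnode_handle_t), KM_SLEEP);
 *      dnc->dnc_count = epb;
 */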

typedef struct free_range {
        avl_node_t fr_node;
        uint64_t fr_blkid;
        uint64_t fr_nblks;
} free_range_t;

void dnode_special_open(struct objset *dd, dnode_phys_t *dnp,
    uint64_t object, dnode_handle_t *dnh);
void dnode_special_close(dnode_handle_t *dnh);

void dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx);
void dnode_setbonus_type(dnode_t *dn, dmu_object_type_t, dmu_tx_t *tx);
void dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx);

int dnode_hold(struct objset *dd, uint64_t object,
    void *ref, dnode_t **dnp);
int dnode_hold_impl(struct objset *dd, uint64_t object, int flag,
    void *ref, dnode_t **dnp);
boolean_t dnode_add_ref(dnode_t *dn, void *ref);
void dnode_rele(dnode_t *dn, void *ref);
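
/*
 * Typical hold/release pattern (sketch; FTAG is the usual caller tag):
 *
 *      dnode_t *dn;
 *      int err = dnode_hold(os, object, FTAG, &dn);
 *      if (err == 0) {
 *              ... use dn ...
 *              dnode_rele(dn, FTAG);
 *      }
 *
 * dnode_hold() requires the object to be allocated (it holds with
 * DNODE_MUST_BE_ALLOCATED); dnode_hold_impl() with DNODE_MUST_BE_FREE
 * performs the inverse check.
 */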
void dnode_rele_and_unlock(dnode_t *dn, void *tag);
void dnode_setdirty(dnode_t *dn, dmu_tx_t *tx);
void dnode_setdirty_sc(dnode_t *dn, dmu_tx_t *tx, boolean_t usesc);
void dnode_sync(dnode_t *dn, dmu_tx_t *tx);
void dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx);
void dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx);
void dnode_free(dnode_t *dn, dmu_tx_t *tx);
void dnode_byteswap(dnode_phys_t *dnp);
void dnode_buf_byteswap(void *buf, size_t size);
void dnode_verify(dnode_t *dn);
int dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx);
void dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx);
void dnode_diduse_space(dnode_t *dn, int64_t space);
void dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx,
    boolean_t usesc, boolean_t);
uint64_t dnode_block_freed(dnode_t *dn, uint64_t blkid);
void dnode_init(void);
void dnode_fini(void);
int dnode_next_offset(dnode_t *dn, int flags, uint64_t *off,
    int minlvl, uint64_t blkfill, uint64_t txg);
void dnode_evict_dbufs(dnode_t *dn, int level);
void dnode_evict_bonus(dnode_t *dn);
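
/*
 * Example (sketch): scan forward for the first level-0 hole at or
 * after *off, in the style of a dmu_offset_next()-like caller:
 *
 *      uint64_t off = 0;
 *      error = dnode_next_offset(dn, DNODE_FIND_HOLE, &off, 1, 1, 0);
 */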

/* Smart compression callbacks (see zio_smartcomp_info_t). */
extern boolean_t dnode_smartcomp_ask_cb(void *userinfo, const zio_t *zio);
extern void dnode_smartcomp_result_cb(void *userinfo, const zio_t *zio);
extern void dnode_setup_zio_smartcomp(struct dmu_buf_impl *db,
    zio_smartcomp_info_t *sc);

#define DNODE_IS_CACHEABLE(_dn)                                         \
        ((_dn)->dn_objset->os_primary_cache == ZFS_CACHE_ALL ||         \
        (DMU_OT_IS_METADATA((_dn)->dn_type) &&                          \
        (_dn)->dn_objset->os_primary_cache == ZFS_CACHE_METADATA))

#define DNODE_META_IS_CACHEABLE(_dn)                                    \
        ((_dn)->dn_objset->os_primary_cache == ZFS_CACHE_ALL ||         \
        (_dn)->dn_objset->os_primary_cache == ZFS_CACHE_METADATA)

#ifdef ZFS_DEBUG

/*
 * There should be a ## between the string literal and fmt, to make it
 * clear that we're joining two strings together, but gcc does not
 * support that use of the preprocessor token.
 */
#define dprintf_dnode(dn, fmt, ...) do { \
        if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
                char __db_buf[32]; \
                uint64_t __db_obj = (dn)->dn_object; \
                if (__db_obj == DMU_META_DNODE_OBJECT) \
                        (void) strcpy(__db_buf, "mdn"); \
                else \
                        (void) snprintf(__db_buf, sizeof (__db_buf), "%lld", \
                            (u_longlong_t)__db_obj); \
                dprintf_ds((dn)->dn_objset->os_dsl_dataset, "obj=%s " fmt, \
                    __db_buf, __VA_ARGS__); \
        } \
_NOTE(CONSTCOND) } while (0)
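
/*
 * Usage (sketch):
 *      dprintf_dnode(dn, "blksz=%u\n", (dn)->dn_datablksz);
 */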

#define DNODE_VERIFY(dn)                dnode_verify(dn)
#define FREE_VERIFY(db, start, end, tx) free_verify(db, start, end, tx)

#else

#define dprintf_dnode(db, fmt, ...)
#define DNODE_VERIFY(dn)
#define FREE_VERIFY(db, start, end, tx)

#endif

#ifdef  __cplusplus
}
#endif

#endif  /* _SYS_DNODE_H */