Revert "8958 Update Intel ucode to 20180108 release"
This reverts commit 1adc3ffcd976ec0a34010cc7db08037a14c3ea4c.
NEX-15280 New default metadata block size is too large
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Roman Strashkin <roman.strashkin@nexenta.com>
NEX-5058 WBC: Race between the purging of window and opening new one
Reviewed by: Alek Pinchuk <alek.pinchuk@nexenta.com>
Reviewed by: Alex Aizman <alex.aizman@nexenta.com>
NEX-2830 ZFS smart compression
Reviewed by: Alek Pinchuk <alek.pinchuk@nexenta.com>
Reviewed by: Roman Strashkin <roman.strashkin@nexenta.com>
NEX-3266 5630 stale bonus buffer in recycled dnode_t leads to data corruption
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: George Wilson <george@delphix.com>
Reviewed by: Will Andrews <will@freebsd.org>
Approved by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Dan Fields <dan.fields@nexenta.com>
SUP-507 Delete or truncate of large files delayed on datasets with small recordsize
Reviewed by: Albert Lee <trisk@nexenta.com>
Reviewed by: Alek Pinchuk <alek.pinchuk@nexenta.com>
Reviewed by: Ilya Usvyatsky <ilya.usvyatsky@nexenta.com>
Reviewed by: Tony Nguyen <tony.nguyen@nexenta.com>
Fixup merge results
re #12585 rb4049 ZFS++ work port - refactoring to improve separation of open/closed code, bug fixes, performance improvements - open code
Bug 11205: add missing libzfs_closed_stubs.c to fix opensource-only build.
ZFS plus work: special vdevs, cos, cos/vdev properties

          --- old/usr/src/uts/common/fs/zfs/sys/dnode.h
          +++ new/usr/src/uts/common/fs/zfs/sys/dnode.h
[ 12 lines elided ]
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
       23 + * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
  23   24   * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
  24   25   * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
  25   26   */
  26   27  
  27   28  #ifndef _SYS_DNODE_H
  28   29  #define _SYS_DNODE_H
  29   30  
  30   31  #include <sys/zfs_context.h>
  31   32  #include <sys/avl.h>
  32   33  #include <sys/spa.h>
[ 19 lines elided ]
  52   53   */
  53   54  #define DNODE_FIND_HOLE         1
  54   55  #define DNODE_FIND_BACKWARDS    2
  55   56  #define DNODE_FIND_HAVELOCK     4
  56   57  
  57   58  /*
  58   59   * Fixed constants.
  59   60   */
  60   61  #define DNODE_SHIFT             9       /* 512 bytes */
  61   62  #define DN_MIN_INDBLKSHIFT      12      /* 4k */
       63 +#define DN_DFL_INDBLKSHIFT      14      /* 16k */
  62   64  /*
  63   65   * If we ever increase this value beyond 20, we need to revisit all logic that
  64   66   * does x << level * ebps to handle overflow.  With a 1M indirect block size,
  65   67   * 4 levels of indirect blocks would not be able to guarantee addressing an
  66   68   * entire object, so 5 levels will be used, but 5 * (20 - 7) = 65.
  67   69   */
  68   70  #define DN_MAX_INDBLKSHIFT      17      /* 128k */
  69   71  #define DNODE_BLOCK_SHIFT       14      /* 16k */
  70   72  #define DNODE_CORE_SIZE         64      /* 64 bytes for dnode sans blkptrs */
  71   73  #define DN_MAX_OBJECT_SHIFT     48      /* 256 trillion (zfs_fid_t limit) */
[ 53 lines elided ]
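The shift constants above are base-2 exponents. As a quick illustration (not part of this webrev; the SPA_BLKPTRSHIFT value of 7 is inferred only from the "5 * (20 - 7) = 65" arithmetic in the comment), a small standalone sketch that reproduces the block sizes and the overflow concern:

#include <stdio.h>
#include <stdint.h>

#define SPA_BLKPTRSHIFT	7	/* log2(128-byte block pointer), per the "20 - 7" above */

int
main(void)
{
	/* Block sizes implied by the shift constants in this header. */
	printf("DNODE_SHIFT         9 -> %u bytes\n", 1U << 9);	/* 512 */
	printf("DN_MIN_INDBLKSHIFT 12 -> %u bytes\n", 1U << 12);	/* 4k */
	printf("DN_DFL_INDBLKSHIFT 14 -> %u bytes\n", 1U << 14);	/* 16k */
	printf("DN_MAX_INDBLKSHIFT 17 -> %u bytes\n", 1U << 17);	/* 128k */

	/*
	 * The overflow concern from the comment: a 1M indirect block
	 * (shift 20) gives epbs = 20 - 7 = 13, and the 5 indirect levels
	 * needed to address a full object would shift by 5 * 13 = 65 bits,
	 * which no longer fits in a 64-bit quantity.
	 */
	int epbs = 20 - SPA_BLKPTRSHIFT;
	printf("5 levels * epbs %d = %d (must stay below 64)\n", epbs, 5 * epbs);
	return (0);
}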
 125  127          DN_DIRTY_SYNC
 126  128  };
 127  129  
 128  130  /* Is dn_used in bytes?  if not, it's in multiples of SPA_MINBLOCKSIZE */
 129  131  #define DNODE_FLAG_USED_BYTES           (1<<0)
 130  132  #define DNODE_FLAG_USERUSED_ACCOUNTED   (1<<1)
 131  133  
 132  134  /* Does dnode have a SA spill blkptr in bonus? */
 133  135  #define DNODE_FLAG_SPILL_BLKPTR (1<<2)
 134  136  
      137 +/*
      138 + * Smart Compression
      139 + *
      140 + * Smart compression is a simple heuristic algorithm that automatically
      141 + * tries to avoid compression on incompressible objects by continuously
      142 + * monitoring per-object compression performance.
      143 + *
      144 + * The smart compression system has two states for each object:
      145 + *      COMPRESSING: compression is applied to the object and results are
      146 + *              reevaluated at fixed check intervals to see if we should
      147 + *              continue attempting to compress.
      148 + *      DENYING: compression has failed too often, so we give up trying
      149 + *              for a while and retry at a later time.
      150 + *
       151 + * Each time compression succeeds, we decrement the sc_comp_failures
       152 + * counter, down to a minimum of -5. This keeps short incompressible
       153 + * transients from pushing us into the DENYING state. Each time
       154 + * compression fails, we increment the sc_comp_failures counter, up to
       155 + * a maximum of 5 (zfs_smartcomp_interval_shift). If the failure counter
       156 + * is > 0 when compression fails, we transition from the COMPRESSING
       157 + * to the DENYING state and compute a deny interval by multiplying
       158 + * zfs_smartcomp_interval by (1 << sc_comp_failures). Successive
       159 + * compression failures therefore cause us to retry compression
       160 + * progressively less often, down to roughly 32x less often than the
       161 + * compression test interval (by default). To avoid being misled by
       162 + * periodic data patterns, the deny interval is randomized by +-10%.
      163 + */
      164 +typedef enum dnode_smartcomp_state {
      165 +        DNODE_SMARTCOMP_COMPRESSING = 0,
      166 +        DNODE_SMARTCOMP_DENYING
      167 +} dnode_smartcomp_state_t;
      168 +
      169 +typedef struct dnode_smartcomp {
      170 +        kmutex_t                sc_lock;
      171 +        dnode_smartcomp_state_t sc_state;
      172 +        uint64_t                sc_size;
      173 +        uint64_t                sc_orig_size;
      174 +        uint64_t                sc_deny_interval;
      175 +        int64_t                 sc_comp_failures;
      176 +} dnode_smartcomp_t;
      177 +
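As an illustration of the state machine described in the block comment above, here is a minimal standalone sketch of the failure-counter heuristic. It is not the implementation from this webrev: SMARTCOMP_INTERVAL and SMARTCOMP_INTERVAL_SHIFT stand in for zfs_smartcomp_interval and zfs_smartcomp_interval_shift, and smartcomp_record_result() and jitter() are invented for the sketch.

#include <stdint.h>
#include <stdlib.h>
#include <stdbool.h>

/* Assumed tunables, mirroring the comment above (not the real globals). */
#define SMARTCOMP_INTERVAL		32	/* stand-in for zfs_smartcomp_interval */
#define SMARTCOMP_INTERVAL_SHIFT	5	/* stand-in for zfs_smartcomp_interval_shift */

typedef enum {
	SMARTCOMP_COMPRESSING = 0,
	SMARTCOMP_DENYING
} smartcomp_state_t;

typedef struct {
	smartcomp_state_t	state;
	int64_t			failures;	/* clamped to [-5, SMARTCOMP_INTERVAL_SHIFT] */
	uint64_t		deny_interval;	/* how long to skip compression attempts */
} smartcomp_t;

/* Randomize an interval by +-10% so periodic data patterns don't sync up with us. */
static uint64_t
jitter(uint64_t interval)
{
	uint64_t range = interval / 10;
	return (interval - range + (rand() % (2 * range + 1)));
}

/* Record the outcome of one compression attempt and update the object's state. */
static void
smartcomp_record_result(smartcomp_t *sc, bool compressed_well)
{
	if (compressed_well) {
		/* Success: bump the failure counter down, floor of -5. */
		if (sc->failures > -5)
			sc->failures--;
		sc->state = SMARTCOMP_COMPRESSING;
		return;
	}
	/* Failure: bump the failure counter up, cap at SMARTCOMP_INTERVAL_SHIFT. */
	if (sc->failures < SMARTCOMP_INTERVAL_SHIFT)
		sc->failures++;
	if (sc->failures > 0) {
		/* Deny for progressively longer: interval * 2^failures, jittered. */
		sc->state = SMARTCOMP_DENYING;
		sc->deny_interval = jitter(SMARTCOMP_INTERVAL *
		    ((uint64_t)1 << sc->failures));
	}
}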
 135  178  typedef struct dnode_phys {
 136  179          uint8_t dn_type;                /* dmu_object_type_t */
 137  180          uint8_t dn_indblkshift;         /* ln2(indirect block size) */
 138  181          uint8_t dn_nlevels;             /* 1=dn_blkptr->data blocks */
 139  182          uint8_t dn_nblkptr;             /* length of dn_blkptr */
 140  183          uint8_t dn_bonustype;           /* type of data in bonus buffer */
 141  184          uint8_t dn_checksum;            /* ZIO_CHECKSUM type */
 142  185          uint8_t dn_compress;            /* ZIO_COMPRESS type */
 143  186          uint8_t dn_flags;               /* DNODE_FLAG_* */
 144  187          uint16_t dn_datablkszsec;       /* data block size in 512b sectors */
[ 100 lines elided ]
 245  288  
 246  289          /* used in syncing context */
 247  290          uint64_t dn_oldused;    /* old phys used bytes */
 248  291          uint64_t dn_oldflags;   /* old phys dn_flags */
 249  292          uint64_t dn_olduid, dn_oldgid;
 250  293          uint64_t dn_newuid, dn_newgid;
 251  294          int dn_id_flags;
 252  295  
 253  296          /* holds prefetch structure */
 254  297          struct zfetch   dn_zfetch;
      298 +
      299 +        dnode_smartcomp_t dn_smartcomp; /* smart compression performance */
 255  300  };
 256  301  
 257  302  /*
 258  303   * Adds a level of indirection between the dbuf and the dnode to avoid
 259  304   * iterating descendent dbufs in dnode_move(). Handles are not allocated
 260  305   * individually, but as an array of child dnodes in dnode_hold_impl().
 261  306   */
 262  307  typedef struct dnode_handle {
 263  308          /* Protects dnh_dnode from modification by dnode_move(). */
 264  309          zrlock_t dnh_zrlock;
[ 21 lines elided ]
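The indirection described in the dnode_handle comment above can be illustrated with a small userland sketch. This shows only the pattern, not the kernel code: pthread_rwlock_t stands in for zrlock_t, and handle_t, handle_enter(), handle_exit(), and handle_move() are names invented for the example.

#include <pthread.h>

/*
 * Illustrative only: a handle pairs a lock with a pointer so the object
 * can be relocated (as dnode_move() does) without walking every consumer;
 * consumers always reach the object through the handle.
 */
typedef struct handle {
	pthread_rwlock_t h_lock;	/* stand-in for zrlock_t dnh_zrlock */
	void *h_object;			/* stand-in for the embedded dnode pointer */
} handle_t;

/* Consumer side: pin the object for the duration of the access. */
static void *
handle_enter(handle_t *h)
{
	(void) pthread_rwlock_rdlock(&h->h_lock);
	return (h->h_object);
}

static void
handle_exit(handle_t *h)
{
	(void) pthread_rwlock_unlock(&h->h_lock);
}

/* Mover side: swap the object only while no consumer has it pinned. */
static void
handle_move(handle_t *h, void *newobj)
{
	(void) pthread_rwlock_wrlock(&h->h_lock);
	h->h_object = newobj;
	(void) pthread_rwlock_unlock(&h->h_lock);
}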
 286  331  void dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx);
 287  332  
 288  333  int dnode_hold(struct objset *dd, uint64_t object,
 289  334      void *ref, dnode_t **dnp);
 290  335  int dnode_hold_impl(struct objset *dd, uint64_t object, int flag,
 291  336      void *ref, dnode_t **dnp);
 292  337  boolean_t dnode_add_ref(dnode_t *dn, void *ref);
 293  338  void dnode_rele(dnode_t *dn, void *ref);
 294  339  void dnode_rele_and_unlock(dnode_t *dn, void *tag);
 295  340  void dnode_setdirty(dnode_t *dn, dmu_tx_t *tx);
      341 +void dnode_setdirty_sc(dnode_t *dn, dmu_tx_t *tx, boolean_t usesc);
 296  342  void dnode_sync(dnode_t *dn, dmu_tx_t *tx);
 297  343  void dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
 298  344      dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx);
 299  345  void dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
 300  346      dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx);
 301  347  void dnode_free(dnode_t *dn, dmu_tx_t *tx);
 302  348  void dnode_byteswap(dnode_phys_t *dnp);
 303  349  void dnode_buf_byteswap(void *buf, size_t size);
 304  350  void dnode_verify(dnode_t *dn);
 305  351  int dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx);
 306  352  void dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx);
 307  353  void dnode_diduse_space(dnode_t *dn, int64_t space);
 308      -void dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx, boolean_t);
      354 +void dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx,
      355 +    boolean_t usesc, boolean_t);
 309  356  uint64_t dnode_block_freed(dnode_t *dn, uint64_t blkid);
 310  357  void dnode_init(void);
 311  358  void dnode_fini(void);
 312  359  int dnode_next_offset(dnode_t *dn, int flags, uint64_t *off,
 313  360      int minlvl, uint64_t blkfill, uint64_t txg);
 314      -void dnode_evict_dbufs(dnode_t *dn);
      361 +void dnode_evict_dbufs(dnode_t *dn, int level);
 315  362  void dnode_evict_bonus(dnode_t *dn);
 316      -boolean_t dnode_needs_remap(const dnode_t *dn);
 317  363  
      364 +/* Smart compression callbacks (see zio_smartcomp_info_t). */
      365 +extern boolean_t dnode_smartcomp_ask_cb(void *userinfo, const zio_t *zio);
      366 +extern void dnode_smartcomp_result_cb(void *userinfo, const zio_t *zio);
      367 +extern void dnode_setup_zio_smartcomp(struct dmu_buf_impl *db,
      368 +    zio_smartcomp_info_t *sc);
      369 +
 318  370  #define DNODE_IS_CACHEABLE(_dn)                                         \
 319  371          ((_dn)->dn_objset->os_primary_cache == ZFS_CACHE_ALL ||         \
 320  372          (DMU_OT_IS_METADATA((_dn)->dn_type) &&                          \
 321  373          (_dn)->dn_objset->os_primary_cache == ZFS_CACHE_METADATA))
 322  374  
 323  375  #define DNODE_META_IS_CACHEABLE(_dn)                                    \
 324  376          ((_dn)->dn_objset->os_primary_cache == ZFS_CACHE_ALL ||         \
 325  377          (_dn)->dn_objset->os_primary_cache == ZFS_CACHE_METADATA)
 326  378  
 327  379  #ifdef ZFS_DEBUG
[ 36 lines elided ]