Revert "8958 Update Intel ucode to 20180108 release"
This reverts commit 1adc3ffcd976ec0a34010cc7db08037a14c3ea4c.
NEX-15280 New default metadata block size is too large
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Roman Strashkin <roman.strashkin@nexenta.com>
NEX-5058 WBC: Race between the purging of window and opening new one
Reviewed by: Alek Pinchuk <alek.pinchuk@nexenta.com>
Reviewed by: Alex Aizman <alex.aizman@nexenta.com>
NEX-2830 ZFS smart compression
Reviewed by: Alek Pinchuk <alek.pinchuk@nexenta.com>
Reviewed by: Roman Strashkin <roman.strashkin@nexenta.com>
NEX-3266 5630 stale bonus buffer in recycled dnode_t leads to data corruption
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: George Wilson <george@delphix.com>
Reviewed by: Will Andrews <will@freebsd.org>
Approved by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Dan Fields <dan.fields@nexenta.com>
SUP-507 Delete or truncate of large files delayed on datasets with small recordsize
Reviewed by: Albert Lee <trisk@nexenta.com>
Reviewed by: Alek Pinchuk <alek.pinchuk@nexenta.com>
Reviewed by: Ilya Usvyatsky <ilya.usvyatsky@nexenta.com>
Reviewed by: Tony Nguyen <tony.nguyen@nexenta.com>
Fixup merge results
re #12585 rb4049 ZFS++ work port - refactoring to improve separation of open/closed code, bug fixes, performance improvements - open code
Bug 11205: add missing libzfs_closed_stubs.c to fix opensource-only build.
ZFS plus work: special vdevs, cos, cos/vdev properties

@@ -18,10 +18,11 @@
  *
  * CDDL HEADER END
  */
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
  * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
  * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
  */
 
 #ifndef _SYS_DNODE_H

@@ -57,10 +58,11 @@
 /*
  * Fixed constants.
  */
 #define DNODE_SHIFT             9       /* 512 bytes */
 #define DN_MIN_INDBLKSHIFT      12      /* 4k */
+#define DN_DFL_INDBLKSHIFT      14      /* 16k */
 /*
  * If we ever increase this value beyond 20, we need to revisit all logic that
  * does x << level * ebps to handle overflow.  With a 1M indirect block size,
  * 4 levels of indirect blocks would not be able to guarantee addressing an
  * entire object, so 5 levels will be used, but 5 * (20 - 7) = 65.

@@ -130,10 +132,51 @@
 #define DNODE_FLAG_USERUSED_ACCOUNTED   (1<<1)
 
 /* Does dnode have a SA spill blkptr in bonus? */
 #define DNODE_FLAG_SPILL_BLKPTR (1<<2)
 
+/*
+ * Smart Compression
+ *
+ * Smart compression is a simple heuristic algorithm that automatically
+ * tries to avoid compression on incompressible objects by continuously
+ * monitoring per-object compression performance.
+ *
+ * The smart compression system has two states for each object:
+ *      COMPRESSING: compression is applied to the object and results are
+ *              reevaluated at fixed check intervals to see if we should
+ *              continue attempting to compress.
+ *      DENYING: compression has failed too often, so we give up trying
+ *              for a while and retry at a later time.
+ *
+ * Each time compression succeeds, we decrement the sc_comp_failures
+ * counter, down to a minimum of -5. This credit keeps short
+ * incompressible transients from pushing us into the DENYING state.
+ * Each time compression fails, we increment the sc_comp_failures
+ * counter, up to a maximum of 5 (zfs_smartcomp_interval_shift). If the
+ * failure counter is > 0 at the time compression fails, we transition
+ * from the COMPRESSING to the DENYING state and compute a deny interval
+ * as zfs_smartcomp_interval * (1 << sc_comp_failures). Successive
+ * compression failures thus make us retry progressively less often,
+ * out to approximately 32x less often (by default) than the compression
+ * test interval. To keep recurring data patterns from confusing the
+ * heuristic, the deny interval is randomized by +-10%.
+ */
+typedef enum dnode_smartcomp_state {
+        DNODE_SMARTCOMP_COMPRESSING = 0,
+        DNODE_SMARTCOMP_DENYING
+} dnode_smartcomp_state_t;
+
+typedef struct dnode_smartcomp {
+        kmutex_t                sc_lock;
+        dnode_smartcomp_state_t sc_state;
+        uint64_t                sc_size;
+        uint64_t                sc_orig_size;
+        uint64_t                sc_deny_interval;
+        int64_t                 sc_comp_failures;
+} dnode_smartcomp_t;
+
 typedef struct dnode_phys {
         uint8_t dn_type;                /* dmu_object_type_t */
         uint8_t dn_indblkshift;         /* ln2(indirect block size) */
         uint8_t dn_nlevels;             /* 1=dn_blkptr->data blocks */
         uint8_t dn_nblkptr;             /* length of dn_blkptr */

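Below is a minimal userland sketch of the state machine described in the
smart-compression comment above. It is illustrative only: the demo_* names
and the jitter helper are invented for this page, and the real code runs in
the kernel under sc_lock; only the counter bounds, the deny-interval formula,
and the +-10% randomization come from the header.

/*
 * Illustrative model of the smart-compression heuristic. demo_* names
 * are invented; bounds and formula follow the block comment above.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>

#define DEMO_INTERVAL_SHIFT     5       /* failure cap; 1 << 5 = 32x */

typedef enum { DEMO_COMPRESSING, DEMO_DENYING } demo_state_t;

typedef struct {
        demo_state_t    state;
        int64_t         comp_failures;  /* clamped to [-5, 5] */
        uint64_t        deny_interval;
} demo_smartcomp_t;

/* Randomize an interval by +-10% so recurring data patterns can't sync. */
static uint64_t
demo_jitter(uint64_t interval)
{
        int64_t spread = interval / 10;

        return (interval - spread + rand() % (2 * spread + 1));
}

static void
demo_on_result(demo_smartcomp_t *sc, bool compressed, uint64_t base_interval)
{
        if (compressed) {
                /*
                 * Success: build up credit, floored at -5, so short
                 * incompressible transients don't trigger denial.
                 */
                if (sc->comp_failures > -5)
                        sc->comp_failures--;
                return;
        }
        /* Failure: count up, capped at the interval shift (5). */
        if (sc->comp_failures < DEMO_INTERVAL_SHIFT)
                sc->comp_failures++;
        if (sc->comp_failures > 0 && sc->state == DEMO_COMPRESSING) {
                /* Back off exponentially: base * 2^failures, +-10%. */
                sc->deny_interval = demo_jitter(base_interval *
                    (1ULL << sc->comp_failures));
                sc->state = DEMO_DENYING;
        }
}

How DENYING is left again (retrying once the deny interval elapses) is
omitted here; the comment above only says compression is retried later.
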
@@ -250,10 +293,12 @@
         uint64_t dn_newuid, dn_newgid;
         int dn_id_flags;
 
         /* holds prefetch structure */
         struct zfetch   dn_zfetch;
+
+        dnode_smartcomp_t dn_smartcomp; /* smart compression performance */
 };
 
 /*
  * Adds a level of indirection between the dbuf and the dnode to avoid
  * iterating descendent dbufs in dnode_move(). Handles are not allocated

@@ -291,10 +336,11 @@
     void *ref, dnode_t **dnp);
 boolean_t dnode_add_ref(dnode_t *dn, void *ref);
 void dnode_rele(dnode_t *dn, void *ref);
 void dnode_rele_and_unlock(dnode_t *dn, void *tag);
 void dnode_setdirty(dnode_t *dn, dmu_tx_t *tx);
+void dnode_setdirty_sc(dnode_t *dn, dmu_tx_t *tx, boolean_t usesc);
 void dnode_sync(dnode_t *dn, dmu_tx_t *tx);
 void dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
     dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx);
 void dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
     dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx);

@@ -303,20 +349,26 @@
 void dnode_buf_byteswap(void *buf, size_t size);
 void dnode_verify(dnode_t *dn);
 int dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx);
 void dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx);
 void dnode_diduse_space(dnode_t *dn, int64_t space);
-void dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx, boolean_t);
+void dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx,
+    boolean_t usesc, boolean_t);
 uint64_t dnode_block_freed(dnode_t *dn, uint64_t blkid);
 void dnode_init(void);
 void dnode_fini(void);
 int dnode_next_offset(dnode_t *dn, int flags, uint64_t *off,
     int minlvl, uint64_t blkfill, uint64_t txg);
-void dnode_evict_dbufs(dnode_t *dn);
+void dnode_evict_dbufs(dnode_t *dn, int level);
 void dnode_evict_bonus(dnode_t *dn);
-boolean_t dnode_needs_remap(const dnode_t *dn);
 
+/* Smart compression callbacks (see zio_smartcomp_info_t). */
+extern boolean_t dnode_smartcomp_ask_cb(void *userinfo, const zio_t *zio);
+extern void dnode_smartcomp_result_cb(void *userinfo, const zio_t *zio);
+extern void dnode_setup_zio_smartcomp(struct dmu_buf_impl *db,
+    zio_smartcomp_info_t *sc);
+
 #define DNODE_IS_CACHEABLE(_dn)                                         \
         ((_dn)->dn_objset->os_primary_cache == ZFS_CACHE_ALL ||         \
         (DMU_OT_IS_METADATA((_dn)->dn_type) &&                          \
         (_dn)->dn_objset->os_primary_cache == ZFS_CACHE_METADATA))
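
For orientation, the three smart-compression hooks declared above suggest a
descriptor that the writer fills in before handing a zio off. The layout
below is a guess inferred purely from those signatures; the real
zio_smartcomp_info_t is defined with the zio code and may well differ, and
the sci_* field names are invented.

/*
 * Hypothetical shape of zio_smartcomp_info_t, inferred only from the
 * callback declarations in this header.
 */
typedef struct zio_smartcomp_info {
        /* "may this zio attempt compression?" */
        boolean_t       (*sci_ask_cb)(void *userinfo, const zio_t *zio);
        /* report the compression outcome of a completed zio */
        void            (*sci_result_cb)(void *userinfo, const zio_t *zio);
        void            *sci_userinfo;  /* e.g. the dbuf being written */
} zio_smartcomp_info_t;

/* Presumed effect of dnode_setup_zio_smartcomp(): wire up the callbacks. */
static void
demo_setup_zio_smartcomp(struct dmu_buf_impl *db, zio_smartcomp_info_t *sc)
{
        sc->sci_ask_cb = dnode_smartcomp_ask_cb;
        sc->sci_result_cb = dnode_smartcomp_result_cb;
        sc->sci_userinfo = db;
}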