NEX-10069 ZFS_READONLY is a little too strict (fix test lint)
NEX-9553 Move ss_fill gap logic from scan algorithm into range_tree.c
Reviewed by: Roman Strashkin <roman.strashkin@nexenta.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
NEX-9562 Attaching a vdev while resilver/scrub is running causes panic.
Reviewed by: Roman Strashkin <roman.strashkin@nexenta.com>
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
NEX-6088 ZFS scrub/resilver take excessively long due to issuing lots of random IO
Reviewed by: Roman Strashkin <roman.strashkin@nexenta.com>
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
NEX-3508 CLONE - Port NEX-2946 Add UNMAP/TRIM functionality to ZFS and illumos
Reviewed by: Josef Sipek <josef.sipek@nexenta.com>
Reviewed by: Alek Pinchuk <alek.pinchuk@nexenta.com>
Conflicts:
    usr/src/uts/common/io/scsi/targets/sd.c
    usr/src/uts/common/sys/scsi/targets/sddef.h
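
The diff below adds the gap-aware plumbing referenced by NEX-9553 and NEX-6088: a per-tree rt_gap, a per-segment rs_fill, new *_fill/_gap entry points, and a range_tree_create() that now takes the protecting kmutex_t. The following is a rough, illustrative sketch of how a consumer might drive the new declarations; the scan_* names and SCAN_GAP_BYTES are hypothetical, and it assumes range_tree_init() has already run, as it normally does during spa initialization.

	#include <sys/zfs_context.h>
	#include <sys/range_tree.h>

	#define	SCAN_GAP_BYTES	(32ULL << 10)	/* coalesce extents within 32K (hypothetical tuning value) */

	/* Callback handed to range_tree_vacate(); a real consumer would issue I/O here. */
	static void
	scan_issue_extent(void *arg, uint64_t start, uint64_t size)
	{
	}

	static void
	scan_gap_example(void)
	{
		kmutex_t lock;
		range_tree_t *rt;
		range_seg_t *rs;

		mutex_init(&lock, NULL, MUTEX_DEFAULT, NULL);

		/* The tree now takes the protecting lock at creation time. */
		rt = range_tree_create(NULL, NULL, &lock);

		/* Allow segments separated by small gaps to be coalesced. */
		range_tree_set_gap(rt, SCAN_GAP_BYTES);

		mutex_enter(&lock);

		/* Two nearby extents; the last argument records the live bytes. */
		range_tree_add_fill(rt, 0x10000, 0x2000, 0x2000);
		range_tree_add_fill(rt, 0x13000, 0x1000, 0x1000);

		/*
		 * If the 0x1000-byte hole between them is within SCAN_GAP_BYTES,
		 * the tree may hold a single segment whose rs_fill (0x3000) is
		 * smaller than its span (0x4000).
		 */
		rs = range_tree_first(rt);
		if (rs != NULL)
			ASSERT3U(rs->rs_fill, <=, rs->rs_end - rs->rs_start);

		/* Hand each (possibly coalesced) extent to the callback and empty the tree. */
		range_tree_vacate(rt, scan_issue_extent, NULL);
		mutex_exit(&lock);

		range_tree_destroy(rt);
		mutex_destroy(&lock);
	}

Note also that the old header comment requiring purely external locking is dropped by this change; the tree now carries a pointer to the caller's lock (rt_lock, settable via range_tree_set_lock()).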

          --- old/usr/src/uts/common/fs/zfs/sys/range_tree.h
          +++ new/usr/src/uts/common/fs/zfs/sys/range_tree.h
( 16 lines elided )
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  23   23   * Use is subject to license terms.
  24   24   */
  25   25  
  26   26  /*
  27      - * Copyright (c) 2013, 2015 by Delphix. All rights reserved.
       27 + * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
  28   28   */
  29   29  
  30   30  #ifndef _SYS_RANGE_TREE_H
  31   31  #define _SYS_RANGE_TREE_H
  32   32  
  33   33  #include <sys/avl.h>
  34   34  #include <sys/dmu.h>
  35   35  
  36   36  #ifdef  __cplusplus
  37   37  extern "C" {
  38   38  #endif
  39   39  
  40   40  #define RANGE_TREE_HISTOGRAM_SIZE       64
  41   41  
  42   42  typedef struct range_tree_ops range_tree_ops_t;
  43   43  
  44      -/*
  45      - * Note: the range_tree may not be accessed concurrently; consumers
  46      - * must provide external locking if required.
  47      - */
  48   44  typedef struct range_tree {
  49   45          avl_tree_t      rt_root;        /* offset-ordered segment AVL tree */
  50   46          uint64_t        rt_space;       /* sum of all segments in the map */
  51   47          range_tree_ops_t *rt_ops;
  52   48          void            *rt_arg;
  53   49  
       50 +        uint64_t        rt_gap; /* allowable inter-seg gap */
       51 +        kmem_cache_t    *rt_seg_cache;
       52 +
  54   53          /*
  55   54           * The rt_histogram maintains a histogram of ranges. Each bucket,
  56   55           * rt_histogram[i], contains the number of ranges whose size is:
  57   56           * 2^i <= size of range in bytes < 2^(i+1)
  58   57           */
  59   58          uint64_t        rt_histogram[RANGE_TREE_HISTOGRAM_SIZE];
       59 +        kmutex_t        *rt_lock;       /* pointer to lock that protects map */
  60   60  } range_tree_t;
  61   61  
  62   62  typedef struct range_seg {
  63   63          avl_node_t      rs_node;        /* AVL node */
  64   64          avl_node_t      rs_pp_node;     /* AVL picker-private node */
  65   65          uint64_t        rs_start;       /* starting offset of this segment */
  66   66          uint64_t        rs_end;         /* ending offset (non-inclusive) */
       67 +        uint64_t        rs_fill;        /* actual fill if gap mode is on */
  67   68  } range_seg_t;
  68   69  
  69   70  struct range_tree_ops {
  70   71          void    (*rtop_create)(range_tree_t *rt, void *arg);
  71   72          void    (*rtop_destroy)(range_tree_t *rt, void *arg);
  72   73          void    (*rtop_add)(range_tree_t *rt, range_seg_t *rs, void *arg);
  73   74          void    (*rtop_remove)(range_tree_t *rt, range_seg_t *rs, void *arg);
  74   75          void    (*rtop_vacate)(range_tree_t *rt, void *arg);
  75   76  };
  76   77  
  77   78  typedef void range_tree_func_t(void *arg, uint64_t start, uint64_t size);
  78   79  
  79   80  void range_tree_init(void);
  80   81  void range_tree_fini(void);
  81      -range_tree_t *range_tree_create(range_tree_ops_t *ops, void *arg);
       82 +range_tree_t *range_tree_create(range_tree_ops_t *ops, void *arg, kmutex_t *lp);
  82   83  void range_tree_destroy(range_tree_t *rt);
  83   84  boolean_t range_tree_contains(range_tree_t *rt, uint64_t start, uint64_t size);
       85 +void *range_tree_find(range_tree_t *rt, uint64_t start, uint64_t size);
       86 +uint64_t range_tree_find_gap(range_tree_t *rt, uint64_t start, uint64_t size);
  84   87  uint64_t range_tree_space(range_tree_t *rt);
  85   88  void range_tree_verify(range_tree_t *rt, uint64_t start, uint64_t size);
  86   89  void range_tree_swap(range_tree_t **rtsrc, range_tree_t **rtdst);
  87   90  void range_tree_stat_verify(range_tree_t *rt);
       91 +void range_tree_set_gap(range_tree_t *rt, uint64_t gap);
       92 +void range_tree_set_lock(range_tree_t *rt, kmutex_t *lp);
  88   93  
  89   94  void range_tree_add(void *arg, uint64_t start, uint64_t size);
       95 +void range_tree_add_fill(void *arg, uint64_t start, uint64_t size,
       96 +    uint64_t fill);
  90   97  void range_tree_remove(void *arg, uint64_t start, uint64_t size);
       98 +void range_tree_remove_overlap(void *arg, uint64_t start, uint64_t size);
       99 +void range_tree_remove_fill(void *arg, uint64_t start, uint64_t size,
      100 +    uint64_t fill, uint64_t fill_left);
  91  101  void range_tree_clear(range_tree_t *rt, uint64_t start, uint64_t size);
  92  102  
  93  103  void range_tree_vacate(range_tree_t *rt, range_tree_func_t *func, void *arg);
  94  104  void range_tree_walk(range_tree_t *rt, range_tree_func_t *func, void *arg);
      105 +range_seg_t *range_tree_first(range_tree_t *rt);
  95  106  
  96  107  #ifdef  __cplusplus
  97  108  }
  98  109  #endif
  99  110  
 100  111  #endif  /* _SYS_RANGE_TREE_H */
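
For reference, the rt_histogram comment in the struct above puts a segment of size s into bucket floor(log2(s)), since bucket i counts segments with 2^i <= s < 2^(i+1). Below is a minimal sketch of that index computation, assuming the illumos highbit64() helper from <sys/sysmacros.h>; the function name itself is hypothetical.

	#include <sys/sysmacros.h>	/* highbit64() */
	#include <sys/range_tree.h>

	/*
	 * Map a segment to its rt_histogram bucket: highbit64(s) returns the
	 * 1-based index of the highest set bit, so highbit64(s) - 1 is
	 * floor(log2(s)).  Example: a 12 KB (12288-byte) segment lands in
	 * bucket 13, since 2^13 = 8192 <= 12288 < 16384 = 2^14.
	 */
	static int
	range_seg_histogram_bucket(const range_seg_t *rs)
	{
		uint64_t size = rs->rs_end - rs->rs_start;

		return (highbit64(size) - 1);
	}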
    