#include <sys/zfs_context.h>
#include <sys/avl.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/zio.h>
#include <sys/refcount.h>
#include <sys/dmu_zfetch.h>
#include <sys/zrlock.h>
#include <sys/multilist.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * dnode_hold() flags.
 */
#define	DNODE_MUST_BE_ALLOCATED	1
#define	DNODE_MUST_BE_FREE	2
#define	DNODE_DRY_RUN		4

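/*
 * Illustrative sketch (not part of the interface): these flags are
 * passed to dnode_hold_impl(), declared below.  For example, a caller
 * can probe whether a run of dnode slots is free without actually
 * taking a hold by pairing DNODE_MUST_BE_FREE with DNODE_DRY_RUN:
 *
 *	error = dnode_hold_impl(os, object,
 *	    DNODE_MUST_BE_FREE | DNODE_DRY_RUN, slots, NULL, NULL);
 *
 * This mirrors the pattern behind dnode_try_claim(), declared below.
 */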
/*
 * dnode_next_offset() flags.
 */
#define	DNODE_FIND_HOLE		1
#define	DNODE_FIND_BACKWARDS	2
#define	DNODE_FIND_HAVELOCK	4

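/*
 * Illustrative use of the find flags (dnode_next_offset() itself is
 * declared elsewhere in this header; this sketch assumes its usual
 * signature).  Scanning forward from offset 0 for the first hole at
 * the data (level-0) block level:
 *
 *	uint64_t off = 0;
 *	error = dnode_next_offset(dn, DNODE_FIND_HOLE, &off, 1, 1, 0);
 *
 * DNODE_FIND_BACKWARDS reverses the scan direction, and
 * DNODE_FIND_HAVELOCK indicates the caller already holds
 * dn_struct_rwlock.
 */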
/*
 * Fixed constants.
 */
#define	DNODE_SHIFT		9	/* 512 bytes */
#define	DN_MIN_INDBLKSHIFT	12	/* 4k */
/*
 * If we ever increase this value beyond 20, we need to revisit all logic that
 * does x << level * epbs to handle overflow.  With a 1M indirect block size,
 * 4 levels of indirect blocks would not be able to guarantee addressing an
 * entire object, so 5 levels will be used, but 5 * (20 - 7) = 65.
 */
#define	DN_MAX_INDBLKSHIFT	17	/* 128k */
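/*
 * Worked example of the limit above: a block pointer is 128 bytes
 * (2^7), so an indirect block holds 2^(indblkshift - 7) pointers.
 * At indblkshift = 20 that is epbs = 13 entries-per-block-shift, and
 * addressing through 5 indirect levels would shift by 5 * 13 = 65
 * bits -- more than a 64-bit quantity can represent.
 */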

/* ... */

typedef struct free_range {
	avl_node_t fr_node;
	uint64_t fr_blkid;
	uint64_t fr_nblks;
} free_range_t;
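/*
 * Each free_range_t records one run of freed blocks: the range covers
 * block IDs [fr_blkid, fr_blkid + fr_nblks), and fr_node links the
 * entry into an AVL tree (presumably ordered by starting block ID --
 * an assumption based on the avl_node_t member; the tree itself lives
 * outside this struct).
 */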

void dnode_special_open(struct objset *dd, dnode_phys_t *dnp,
    uint64_t object, dnode_handle_t *dnh);
void dnode_special_close(dnode_handle_t *dnh);

void dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx);
void dnode_setbonus_type(dnode_t *dn, dmu_object_type_t, dmu_tx_t *tx);
void dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx);

int dnode_hold(struct objset *dd, uint64_t object,
    void *ref, dnode_t **dnp);
int dnode_hold_impl(struct objset *dd, uint64_t object, int flag, int dn_slots,
    void *ref, dnode_t **dnp);
boolean_t dnode_add_ref(dnode_t *dn, void *ref);
void dnode_rele(dnode_t *dn, void *ref);
void dnode_rele_and_unlock(dnode_t *dn, void *tag, boolean_t evicting);
int dnode_try_claim(objset_t *os, uint64_t object, int slots);
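/*
 * Typical hold/release pattern (illustrative; assumes the FTAG-style
 * tag commonly used throughout ZFS):
 *
 *	dnode_t *dn;
 *	int error = dnode_hold(os, object, FTAG, &dn);
 *	if (error == 0) {
 *		... operate on dn ...
 *		dnode_rele(dn, FTAG);
 *	}
 *
 * dnode_add_ref() takes an additional hold on a dnode that already has
 * at least one hold; every hold must be balanced by a dnode_rele()
 * with the same tag.
 */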
void dnode_setdirty(dnode_t *dn, dmu_tx_t *tx);
void dnode_sync(dnode_t *dn, dmu_tx_t *tx);
void dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
    dmu_object_type_t bonustype, int bonuslen, int dn_slots, dmu_tx_t *tx);
void dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
    dmu_object_type_t bonustype, int bonuslen, int dn_slots,
    boolean_t keep_spill, dmu_tx_t *tx);
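/*
 * Illustrative call with hypothetical values: initialize a freshly
 * held dnode as a plain-file object with 128K data blocks, the
 * default indirect block size (ibs = 0), a 320-byte SA bonus buffer,
 * and a single dnode slot, within an assigned transaction:
 *
 *	dnode_allocate(dn, DMU_OT_PLAIN_FILE_CONTENTS, 128 << 10, 0,
 *	    DMU_OT_SA, 320, 1, tx);
 */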
void dnode_free(dnode_t *dn, dmu_tx_t *tx);
void dnode_byteswap(dnode_phys_t *dnp);
void dnode_buf_byteswap(void *buf, size_t size);
void dnode_verify(dnode_t *dn);
int dnode_set_nlevels(dnode_t *dn, int nlevels, dmu_tx_t *tx);
int dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx);
void dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx);
void dnode_diduse_space(dnode_t *dn, int64_t space);
void dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx,
    boolean_t have_read, boolean_t force);
uint64_t dnode_block_freed(dnode_t *dn, uint64_t blkid);
void dnode_init(void);
void dnode_fini(void);

/* ... */

/*
 * Number of times dnode_hold(..., DNODE_MUST_BE_FREE) was not
 * able to hold the requested range of free dnode slots because
 * after acquiring the zrl lock at least one slot was allocated.
 */
kstat_named_t dnode_hold_free_lock_misses;
/*
 * Number of times dnode_hold(..., DNODE_MUST_BE_FREE) needed
 * to retry acquiring slot zrl locks due to contention.
 */
kstat_named_t dnode_hold_free_lock_retry;
/*
 * Number of times dnode_hold(..., DNODE_MUST_BE_FREE) requested
 * a range of dnode slots which were held by another thread.
 */
kstat_named_t dnode_hold_free_refcount;
/*
 * Number of times dnode_hold(..., DNODE_MUST_BE_FREE) requested
 * a range of dnode slots which would overflow the dnode_phys_t.
 */
kstat_named_t dnode_hold_free_overflow;
/*
 * Number of times dnode_free_interior_slots() needed to retry
 * acquiring a slot zrl lock due to contention.
 */
kstat_named_t dnode_free_interior_lock_retry;
/*
 * Number of new dnodes allocated by dnode_allocate().
 */
kstat_named_t dnode_allocate;
/*
 * Number of dnodes re-allocated by dnode_reallocate().
 */
kstat_named_t dnode_reallocate;
/*
 * Number of meta dnode dbufs evicted.
 */
kstat_named_t dnode_buf_evict;
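/*
 * Illustrative update pattern (assumes a DNODE_STAT_BUMP-style macro
 * defined alongside this struct): callers bump a counter at the point
 * the corresponding event occurs, e.g.
 *
 *	DNODE_STAT_BUMP(dnode_hold_free_lock_retry);
 *
 * On Linux the aggregated counters are exported via the dnodestats
 * kstat (/proc/spl/kstat/zfs/dnodestats).
 */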
/*
 * Number of times dmu_object_alloc*() reached the end of the existing
 * object ID chunk and advanced to a new one.
 */