NEX-9200 Improve the scalability of attribute locking in zfs_zget
Reviewed by: Joyce McIntosh <joyce.mcintosh@nexenta.com>
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
NEX-9436 Rate limiting controls (was QoS) per ZFS dataset, updates from demo
Reviewed by: Gordon Ross <gordon.ross@nexenta.com>
Reviewed by: Rob Gittins <rob.gittins@nexenta.com>
NEX-8972 Async-delete side-effect that may cause unmount EBUSY
Reviewed by: Alek Pinchuk <alek@nexenta.com>
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
NEX-8852 Quality-of-Service (QoS) controls per NFS share
Reviewed by: Rob Gittins <rob.gittins@nexenta.com>
Reviewed by: Evan Layton <evan.layton@nexenta.com>
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
NEX-3762 Appliance crashes with a NULL pointer dereference during a zpool export when a zfs_vn_rele_taskq thread attempts to check a bogus rwlock from rw_write_held
Reviewed by: Josef 'Jeff' Sipek <josef.sipek@nexenta.com>
OS-80 support for vdev and CoS properties for the new I/O scheduler
OS-95 lint warning introduced by OS-61
Fixup merge results
re #13204 rb4280 zfs receive/rollback deadlock
re #6815 rb1758 need WORM in nza-kernel (4.0)

*** 18,27 ****
--- 18,28 ----
   *
   * CDDL HEADER END
   */
  
  /*
   * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+  * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
   */
  
  #ifndef	_SYS_FS_ZFS_VFSOPS_H
  #define	_SYS_FS_ZFS_VFSOPS_H
*** 39,48 ****
--- 40,72 ----
  #endif
  
  typedef struct zfsvfs zfsvfs_t;
  struct znode;
  
+ /*
+  * ZFS Quality of Service (QoS) I/O throttling state,
+  * per file system. Limits the I/O rate in this FS.
+  * See "Token Bucket" on Wikipedia
+  */
+ typedef struct zfs_rate_state {
+ 	uint64_t	rate_cap;		/* zero means no cap */
+ 	int64_t		rate_token_bucket;	/* bytes I/O allowed without waiting */
+ 	hrtime_t	rate_last_update;
+ 	kmutex_t	rate_lock;
+ 	kcondvar_t	rate_wait_cv;
+ 	int		rate_waiters;
+ } zfs_rate_state_t;
+ 
+ /*
+  * Status of the zfs_unlinked_drain thread.
+  */
+ typedef enum drain_state {
+ 	ZFS_DRAIN_SHUTDOWN = 0,
+ 	ZFS_DRAIN_RUNNING,
+ 	ZFS_DRAIN_SHUTDOWN_REQ
+ } drain_state_t;
+ 
  struct zfsvfs {
  	vfs_t		*z_vfs;		/* generic fs struct */
  	zfsvfs_t	*z_parent;	/* parent fs */
  	objset_t	*z_os;		/* objset reference */
  	uint64_t	z_root;		/* id of root znode */
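
The zfs_rate_state_t fields above map directly onto the classic token-bucket algorithm the comment cites: rate_cap is the refill rate, rate_token_bucket the current balance, and rate_last_update the time of the last refill. Below is a minimal userland sketch of how an I/O would be charged against such a bucket, with pthread primitives standing in for kmutex_t/kcondvar_t and gethrtime(); the helper names rate_throttle()/rate_refill() and the ~1 ms re-check interval are illustrative assumptions, not the actual Nexenta implementation.

#include <pthread.h>
#include <stdint.h>
#include <time.h>

#define	NANOSEC	1000000000LL

typedef struct rate_state {
	uint64_t	rate_cap;		/* bytes/sec; zero means no cap */
	int64_t		rate_token_bucket;	/* bytes allowed without waiting */
	int64_t		rate_last_update;	/* ns timestamp of last refill */
	pthread_mutex_t	rate_lock;
	pthread_cond_t	rate_wait_cv;
	int		rate_waiters;
} rate_state_t;

static int64_t
gethrtime_ns(void)
{
	struct timespec ts;

	(void) clock_gettime(CLOCK_MONOTONIC, &ts);
	return ((int64_t)ts.tv_sec * NANOSEC + ts.tv_nsec);
}

/* Add tokens earned since the last refill; never bank more than 1s worth. */
static void
rate_refill(rate_state_t *rs)
{
	int64_t now = gethrtime_ns();

	rs->rate_token_bucket +=
	    (now - rs->rate_last_update) * (int64_t)rs->rate_cap / NANOSEC;
	if (rs->rate_token_bucket > (int64_t)rs->rate_cap)
		rs->rate_token_bucket = (int64_t)rs->rate_cap;
	rs->rate_last_update = now;
}

/* Charge "len" bytes against the bucket, sleeping off any deficit. */
void
rate_throttle(rate_state_t *rs, uint64_t len)
{
	if (rs->rate_cap == 0)
		return;				/* zero means no cap */

	(void) pthread_mutex_lock(&rs->rate_lock);
	rate_refill(rs);
	rs->rate_token_bucket -= (int64_t)len;	/* may go negative */

	while (rs->rate_token_bucket < 0) {
		struct timespec deadline;

		/* Wake up in ~1 ms to refill and re-check the balance. */
		(void) clock_gettime(CLOCK_REALTIME, &deadline);
		deadline.tv_nsec += 1000000;
		if (deadline.tv_nsec >= NANOSEC) {
			deadline.tv_sec += 1;
			deadline.tv_nsec -= NANOSEC;
		}
		rs->rate_waiters++;
		(void) pthread_cond_timedwait(&rs->rate_wait_cv,
		    &rs->rate_lock, &deadline);
		rs->rate_waiters--;
		rate_refill(rs);
	}
	(void) pthread_mutex_unlock(&rs->rate_lock);
}

Because the bucket is signed, a request larger than the banked allowance simply drives the balance negative and the caller sleeps off the deficit, so long-run throughput converges to rate_cap without ever blocking a large I/O forever.
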
*** 66,75 ****
--- 90,100 ----
  	boolean_t	z_unmounted;	/* unmounted */
  	rrmlock_t	z_teardown_lock;
  	krwlock_t	z_teardown_inactive_lock;
  	list_t		z_all_znodes;	/* all vnodes in the fs */
  	kmutex_t	z_znodes_lock;	/* lock for z_all_znodes */
+ 	uint_t		z_znodes_freeing_cnt;	/* number of znodes to be freed */
  	vnode_t		*z_ctldir;	/* .zfs directory pointer */
  	boolean_t	z_show_ctldir;	/* expose .zfs in the root dir */
  	boolean_t	z_issnap;	/* true if this is a snapshot */
  	boolean_t	z_vscan;	/* virus scan on/off */
  	boolean_t	z_use_fuids;	/* version allows fuids */
*** 80,91 ****
  	kmutex_t	z_lock;
  	uint64_t	z_userquota_obj;
  	uint64_t	z_groupquota_obj;
  	uint64_t	z_replay_eof;	/* New end of file - replay only */
  	sa_attr_type_t	*z_attr_table;	/* SA attr mapping->id */
! #define	ZFS_OBJ_MTX_SZ	64
! 	kmutex_t	z_hold_mtx[ZFS_OBJ_MTX_SZ];	/* znode hold locks */
  };
  
  /*
   * Normal filesystems (those not under .zfs/snapshot) have a total
   * file ID size limited to 12 bytes (including the length field) due to
--- 105,124 ----
  	kmutex_t	z_lock;
  	uint64_t	z_userquota_obj;
  	uint64_t	z_groupquota_obj;
  	uint64_t	z_replay_eof;	/* New end of file - replay only */
  	sa_attr_type_t	*z_attr_table;	/* SA attr mapping->id */
! 	boolean_t	z_isworm;	/* true if this is a WORM FS */
! 	/* true if suspend-resume cycle is in progress */
! 	boolean_t	z_busy;
! 	int		z_hold_mtx_sz;	/* the size of z_hold_mtx array */
! 	kmutex_t	*z_hold_mtx;	/* znode hold locks */
! 	/* for controlling async zfs_unlinked_drain */
! 	kmutex_t	z_drain_lock;
! 	kcondvar_t	z_drain_cv;
! 	drain_state_t	z_drain_state;
! 	zfs_rate_state_t z_rate;
  };
  
  /*
   * Normal filesystems (those not under .zfs/snapshot) have a total
   * file ID size limited to 12 bytes (including the length field) due to
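
The last hunk replaces the fixed 64-entry z_hold_mtx array with a pointer plus a per-filesystem z_hold_mtx_sz, which is what lets NEX-9200 scale the znode hold locks used by zfs_zget() with the machine instead of the old ZFS_OBJ_MTX_SZ constant. Here is a sketch of how the indexing and mount-time setup could look, assuming the table length is kept a power of two so upstream's mask-based ZFS_OBJ_HASH idiom still applies; the tunable name zfs_object_mutex_size and these exact macro spellings are hypothetical, not necessarily what the patch uses.

/*
 * Hypothetical sketch: index a znode hold lock in the dynamically
 * sized z_hold_mtx array.  Assumes z_hold_mtx_sz is a power of two.
 */
#define	ZFS_OBJ_HASH(zfsvfs, obj_num) \
	((obj_num) & ((zfsvfs)->z_hold_mtx_sz - 1))
#define	ZFS_OBJ_MUTEX(zfsvfs, obj_num) \
	(&(zfsvfs)->z_hold_mtx[ZFS_OBJ_HASH(zfsvfs, obj_num)])
#define	ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num) \
	mutex_enter(ZFS_OBJ_MUTEX(zfsvfs, obj_num))
#define	ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num) \
	mutex_exit(ZFS_OBJ_MUTEX(zfsvfs, obj_num))

	/* At mount: allocate and initialize the per-fs lock table. */
	zfsvfs->z_hold_mtx_sz = zfs_object_mutex_size;	/* assumed tunable */
	zfsvfs->z_hold_mtx = kmem_zalloc(
	    zfsvfs->z_hold_mtx_sz * sizeof (kmutex_t), KM_SLEEP);
	for (i = 0; i < zfsvfs->z_hold_mtx_sz; i++)
		mutex_init(&zfsvfs->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);

A larger table means concurrent zfs_zget() callers working on different object numbers rarely collide on the same mutex, which is the scalability win the first commit message describes.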