usr/src/uts/common/io/scsi/targets/sd.c

*** 20,256 ****
  */
  /*
  * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
  */
  /*
  * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
  * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
  * Copyright 2012 DEY Storage Systems, Inc. All rights reserved.
! * Copyright 2017 Nexenta Systems, Inc.
  */
- /*
- * Copyright 2011 cyril.galibern@opensvc.com
- */
  
  /*
  * SCSI disk target driver.
  */
! #include <sys/scsi/scsi.h>
  #include <sys/dkbad.h>
- #include <sys/dklabel.h>
  #include <sys/dkio.h>
! #include <sys/fdio.h>
! #include <sys/cdio.h>
! #include <sys/mhd.h>
! #include <sys/vtoc.h>
  #include <sys/dktp/fdisk.h>
  #include <sys/kstat.h>
! #include <sys/vtrace.h>
! #include <sys/note.h>
! #include <sys/thread.h>
  #include <sys/proc.h>
! #include <sys/efi_partition.h>
! #include <sys/var.h>
! #include <sys/aio_req.h>
! 
! #ifdef __lock_lint
! #define _LP64
! #define __amd64
! #endif
! 
! #if (defined(__fibre))
! /* Note: is there a leadville version of the following? */
! #include <sys/fc4/fcal_linkapp.h>
! #endif
  #include <sys/taskq.h>
  #include <sys/uuid.h>
! #include <sys/byteorder.h>
! #include <sys/sdt.h>
  
  #include "sd_xbuf.h"
  
- #include <sys/scsi/targets/sddef.h>
- #include <sys/cmlb.h>
- #include <sys/sysevent/eventdefs.h>
- #include <sys/sysevent/dev.h>
- 
- #include <sys/fm/protocol.h>
- 
- /*
- * Loadable module info.
- */
- #if (defined(__fibre))
- #define SD_MODULE_NAME "SCSI SSA/FCAL Disk Driver"
- #else /* !__fibre */
  #define SD_MODULE_NAME "SCSI Disk Driver"
! #endif /* !__fibre */
  
  /*
- * Define the interconnect type, to allow the driver to distinguish
- * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
- *
- * This is really for backward compatibility. In the future, the driver
- * should actually check the "interconnect-type" property as reported by
- * the HBA; however at present this property is not defined by all HBAs,
- * so we will use this #define (1) to permit the driver to run in
- * backward-compatibility mode; and (2) to print a notification message
- * if an FC HBA does not support the "interconnect-type" property. The
- * behavior of the driver will be to assume parallel SCSI behaviors unless
- * the "interconnect-type" property is defined by the HBA **AND** has a
- * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
- * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
- * Channel behaviors (as per the old ssd). (Note that the
- * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
- * will result in the driver assuming parallel SCSI behaviors.)
- *
- * (see common/sys/scsi/impl/services.h)
- *
- * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
- * since some FC HBAs may already support that, and there is some code in
- * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
- * default would confuse that code, and besides things should work fine
- * anyways if the FC HBA already reports INTERCONNECT_FABRIC for the
- * "interconnect_type" property.
- *
- */
- #if (defined(__fibre))
- #define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_FIBRE
- #else
- #define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_PARALLEL
- #endif
- 
- /*
- * The name of the driver, established from the module name in _init.
- */
- static char *sd_label = NULL;
- 
- /*
- * Driver name is unfortunately prefixed on some driver.conf properties.
- */
- #if (defined(__fibre))
- #define sd_max_xfer_size ssd_max_xfer_size
- #define sd_config_list ssd_config_list
- static char *sd_max_xfer_size = "ssd_max_xfer_size";
- static char *sd_config_list = "ssd-config-list";
- #else
- static char *sd_max_xfer_size = "sd_max_xfer_size";
- static char *sd_config_list = "sd-config-list";
- #endif
- 
- /*
  * Driver global variables
  */
- #if (defined(__fibre))
- /*
- * These #defines are to avoid namespace collisions that occur because this
- * code is currently used to compile two separate driver modules: sd and ssd.
- * All global variables need to be treated this way (even if declared static)
- * in order to allow the debugger to resolve the names properly.
- * It is anticipated that in the near future the ssd module will be obsoleted,
- * at which time this namespace issue should go away.
- */
- #define sd_state ssd_state
- #define sd_io_time ssd_io_time
- #define sd_failfast_enable ssd_failfast_enable
- #define sd_ua_retry_count ssd_ua_retry_count
- #define sd_report_pfa ssd_report_pfa
- #define sd_max_throttle ssd_max_throttle
- #define sd_min_throttle ssd_min_throttle
- #define sd_rot_delay ssd_rot_delay
- 
- #define sd_retry_on_reservation_conflict \
-     ssd_retry_on_reservation_conflict
- #define sd_reinstate_resv_delay ssd_reinstate_resv_delay
- #define sd_resv_conflict_name ssd_resv_conflict_name
- 
- #define sd_component_mask ssd_component_mask
- #define sd_level_mask ssd_level_mask
- #define sd_debug_un ssd_debug_un
- #define sd_error_level ssd_error_level
- 
- #define sd_xbuf_active_limit ssd_xbuf_active_limit
- #define sd_xbuf_reserve_limit ssd_xbuf_reserve_limit
- 
- #define sd_tr ssd_tr
- #define sd_reset_throttle_timeout ssd_reset_throttle_timeout
- #define sd_qfull_throttle_timeout ssd_qfull_throttle_timeout
- #define sd_qfull_throttle_enable ssd_qfull_throttle_enable
- #define sd_check_media_time ssd_check_media_time
- #define sd_wait_cmds_complete ssd_wait_cmds_complete
- #define sd_label_mutex ssd_label_mutex
- #define sd_detach_mutex ssd_detach_mutex
- #define sd_log_buf ssd_log_buf
- #define sd_log_mutex ssd_log_mutex
- 
- #define sd_disk_table ssd_disk_table
- #define sd_disk_table_size ssd_disk_table_size
- #define sd_sense_mutex ssd_sense_mutex
- #define sd_cdbtab ssd_cdbtab
- 
- #define sd_cb_ops ssd_cb_ops
- #define sd_ops ssd_ops
- #define sd_additional_codes ssd_additional_codes
- #define sd_tgops ssd_tgops
- 
- #define sd_minor_data ssd_minor_data
- #define sd_minor_data_efi ssd_minor_data_efi
- 
- #define sd_tq ssd_tq
- #define sd_wmr_tq ssd_wmr_tq
- #define sd_taskq_name ssd_taskq_name
- #define sd_wmr_taskq_name ssd_wmr_taskq_name
- #define sd_taskq_minalloc ssd_taskq_minalloc
- #define sd_taskq_maxalloc ssd_taskq_maxalloc
- 
- #define sd_dump_format_string ssd_dump_format_string
- 
- #define sd_iostart_chain ssd_iostart_chain
- #define sd_iodone_chain ssd_iodone_chain
- 
- #define sd_pm_idletime ssd_pm_idletime
- 
- #define sd_force_pm_supported ssd_force_pm_supported
- 
- #define sd_dtype_optical_bind ssd_dtype_optical_bind
- 
- #define sd_ssc_init ssd_ssc_init
- #define sd_ssc_send ssd_ssc_send
- #define sd_ssc_fini ssd_ssc_fini
- #define sd_ssc_assessment ssd_ssc_assessment
- #define sd_ssc_post ssd_ssc_post
- #define sd_ssc_print ssd_ssc_print
- #define sd_ssc_ereport_post ssd_ssc_ereport_post
- #define sd_ssc_set_info ssd_ssc_set_info
- #define sd_ssc_extract_info ssd_ssc_extract_info
- 
- #endif
- 
  #ifdef SDDEBUG
  int sd_force_pm_supported = 0;
  #endif /* SDDEBUG */
  
  void *sd_state = NULL;
  int sd_io_time = SD_IO_TIME;
- int sd_failfast_enable = 1;
  int sd_ua_retry_count = SD_UA_RETRY_COUNT;
  int sd_report_pfa = 1;
  int sd_max_throttle = SD_MAX_THROTTLE;
  int sd_min_throttle = SD_MIN_THROTTLE;
  int sd_rot_delay = 4; /* Default 4ms Rotation delay */
  int sd_qfull_throttle_enable = TRUE;
  
  int sd_retry_on_reservation_conflict = 1;
  int sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
! _NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))
! 
! static int sd_dtype_optical_bind = -1;
- 
- /* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
- static char *sd_resv_conflict_name = "sd_retry_on_reservation_conflict";
- 
  /*
  * Global data for debug logging. To enable debug printing, sd_component_mask
  * and sd_level_mask should be set to the desired bit patterns as outlined in
  * sddef.h.
  */
--- 20,102 ----
  */
  /*
  * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
  */
+ /*
+ * Copyright 2011 cyril.galibern@opensvc.com
  * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
  * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
  * Copyright 2012 DEY Storage Systems, Inc. All rights reserved.
! * Copyright 2019 Nexenta Systems, Inc.
  */
  
  /*
  * SCSI disk target driver.
  */
! #include <sys/aio_req.h>
! #include <sys/byteorder.h>
! #include <sys/cdio.h>
! #include <sys/cmlb.h>
! #include <sys/debug.h>
  #include <sys/dkbad.h>
  #include <sys/dkio.h>
! #include <sys/dkioc_free_util.h>
! #include <sys/dklabel.h>
  #include <sys/dktp/fdisk.h>
+ #include <sys/efi_partition.h>
+ #include <sys/fdio.h>
+ #include <sys/fm/protocol.h>
+ #include <sys/fs/dv_node.h>
  #include <sys/kstat.h>
! #include <sys/mhd.h>
  #include <sys/proc.h>
! #include <sys/scsi/scsi.h>
! #include <sys/scsi/targets/sddef.h>
! #include <sys/sdt.h>
! #include <sys/sysevent/dev.h>
! #include <sys/sysevent/eventdefs.h>
  #include <sys/taskq.h>
+ #include <sys/thread.h>
  #include <sys/uuid.h>
! #include <sys/var.h>
! #include <sys/vtoc.h>
! #include <sys/vtrace.h>
  
  #include "sd_xbuf.h"
  
  #define SD_MODULE_NAME "SCSI Disk Driver"
  
! static char *sd_label = "sd";
  
  /*
  * Driver global variables
  */
  #ifdef SDDEBUG
  int sd_force_pm_supported = 0;
  #endif /* SDDEBUG */
  
  void *sd_state = NULL;
  int sd_io_time = SD_IO_TIME;
  int sd_ua_retry_count = SD_UA_RETRY_COUNT;
  int sd_report_pfa = 1;
  int sd_max_throttle = SD_MAX_THROTTLE;
  int sd_min_throttle = SD_MIN_THROTTLE;
  int sd_rot_delay = 4; /* Default 4ms Rotation delay */
  int sd_qfull_throttle_enable = TRUE;
  
  int sd_retry_on_reservation_conflict = 1;
  int sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
! int sd_enable_lun_reset = FALSE;
! 
! /*
! * Default safe I/O delay threshold of 30s for all devices.
! * Can be overriden for vendor/device id in sd.conf
! */
! hrtime_t sd_slow_io_threshold = 30LL * NANOSEC;
  
  /*
  * Global data for debug logging. To enable debug printing, sd_component_mask
  * and sd_level_mask should be set to the desired bit patterns as outlined in
  * sddef.h.
  */
*** 292,304 ****
  * sd_detach_mutex protects un_layer_count, un_detach_count, and
  * un_opens_in_progress in the sd_lun structure.
  */
  static kmutex_t sd_detach_mutex;
  
- _NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
-     sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))
- 
  /*
  * Global buffer and mutex for debug logging
  */
  static char sd_log_buf[1024];
  static kmutex_t sd_log_mutex;
--- 138,147 ----
*** 323,338 ****
  #define SD_SCSI_LUN_DETACH 1
  
  static kmutex_t sd_scsi_target_lun_mutex;
  static struct sd_scsi_hba_tgt_lun *sd_scsi_target_lun_head = NULL;
  
- _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
-     sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))
- 
- _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
-     sd_scsi_target_lun_head))
- 
  /*
  * "Smart" Probe Caching structs, globals, #defines, etc.
  * For parallel scsi and non-self-identify device only.
  */
--- 166,175 ----
*** 349,368 ****
  static kmutex_t sd_scsi_probe_cache_mutex;
  static struct sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;
  
  /*
! * Really we only need protection on the head of the linked list, but
! * better safe than sorry.
  */
! _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
!     sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))
! 
! _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
!     sd_scsi_probe_cache_head))
  
  /*
  * Power attribute table
  */
  static sd_power_attr_ss sd_pwr_ss = {
  { "NAME=spindle-motor", "0=off", "1=on", NULL },
  {0, 100},
--- 186,231 ----
  static kmutex_t sd_scsi_probe_cache_mutex;
  static struct sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;
  
  /*
! * Create taskq for all targets in the system. This is created at
! * _init(9E) and destroyed at _fini(9E).
! *
! * Note: here we set the minalloc to a reasonably high number to ensure that
! * we will have an adequate supply of task entries available at interrupt time.
! * This is used in conjunction with the TASKQ_PREPOPULATE flag in
! * sd_create_taskq(). Since we do not want to sleep for allocations at
! * interrupt time, set maxalloc equal to minalloc. That way we will just fail
! * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
! * requests any one instant in time.
  */
! #define SD_TASKQ_NUMTHREADS 8
! #define SD_TASKQ_MINALLOC 256
! #define SD_TASKQ_MAXALLOC 256
! 
! static taskq_t *sd_tq = NULL;
+ static int sd_taskq_minalloc = SD_TASKQ_MINALLOC;
+ static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;
+ 
+ #define SD_BAIL_CHECK(a) if ((a)->un_detach_count != 0) { \
+     mutex_exit(SD_MUTEX((a))); \
+     return (ENXIO); \
+ }
  
  /*
+ * The following task queue is being created for the write part of
+ * read-modify-write of non-512 block size devices.
+ * Limit the number of threads to 1 for now. This number has been chosen
+ * considering the fact that it applies only to dvd ram drives/MO drives
+ * currently. Performance for which is not main criteria at this stage.
+ * Note: It needs to be explored if we can use a single taskq in future
+ */
+ #define SD_WMR_TASKQ_NUMTHREADS 1
+ static taskq_t *sd_wmr_tq = NULL;
+ 
+ /*
  * Power attribute table
  */
  static sd_power_attr_ss sd_pwr_ss = {
  { "NAME=spindle-motor", "0=off", "1=on", NULL },
  {0, 100},
--- 253,262 ----
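[Note on the taskq hunk above] The comment carried over to the new side explains the minalloc == maxalloc choice: together with TASKQ_PREPOPULATE the task-entry pool is filled at creation and never grows, so a dispatch at interrupt time never sleeps and simply fails once all 256 entries are in use. A minimal sketch of the corresponding creation call, assuming the standard illumos taskq_create(9F) signature; the queue name and priority shown are illustrative, since the real call lives in sd_taskq_create(), which is not part of this hunk:

    /* Prepopulate the entry pool; TQ_NOSLEEP dispatches then fail, never block. */
    sd_tq = taskq_create("sd_tq", SD_TASKQ_NUMTHREADS, maxclsyspri,
        sd_taskq_minalloc, sd_taskq_maxalloc, TASKQ_PREPOPULATE);

With the pool capped this way, a taskq_dispatch() issued with TQ_NOSLEEP that finds no free entry returns an invalid task id and the caller fails the command, which is exactly the behaviour the comment describes.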
*** 390,401 ****
  /*
  * Vendor specific data name property declarations
  */
  
- #if defined(__fibre) || defined(__i386) ||defined(__amd64)
- 
  static sd_tunables seagate_properties = {
  SEAGATE_THROTTLE_VALUE,
  0,
  0,
  0,
--- 253,262 ----
*** 404,414 ****
  0,
  0,
  0
  };
  
- 
  static sd_tunables fujitsu_properties = {
  FUJITSU_THROTTLE_VALUE,
  0,
  0,
  0,
--- 265,274 ----
*** 477,492 ****
  PIRUS_MIN_THROTTLE_VALUE,
  PIRUS_DISKSORT_DISABLED_FLAG,
  PIRUS_LUN_RESET_ENABLED_FLAG
  };
  
- #endif
- 
- #if (defined(__sparc) && !defined(__fibre)) || \
-     (defined(__i386) || defined(__amd64))
- 
- 
  static sd_tunables elite_properties = {
  ELITE_THROTTLE_VALUE,
  0,
  0,
  0,
--- 337,346 ----
*** 507,518 ****
  0,
  0,
  0
  };
  
- #endif /* Fibre or not */
- 
  static sd_tunables lsi_properties_scsi = {
  LSI_THROTTLE_VALUE,
  0,
  LSI_NOTREADY_RETRIES,
  0,
--- 361,370 ----
*** 558,578 ****
  0,
  0,
  1
  };
  
- 
- #if (defined(SD_PROP_TST))
- 
  #define SD_TST_CTYPE_VAL CTYPE_CDROM
  #define SD_TST_THROTTLE_VAL 16
  #define SD_TST_NOTREADY_VAL 12
  #define SD_TST_BUSY_VAL 60
  #define SD_TST_RST_RETRY_VAL 36
  #define SD_TST_RSV_REL_TIME 60
  
- 
  static sd_tunables tst_properties = {
  SD_TST_THROTTLE_VAL,
  SD_TST_CTYPE_VAL,
  SD_TST_NOTREADY_VAL,
  SD_TST_BUSY_VAL,
--- 410,426 ----
*** 613,623 ****
  * ST318202F is a Legacy device
  * MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
  * made with an FC connection. The entries here are a legacy.
  */
  static sd_disk_config_t sd_disk_table[] = {
- #if defined(__fibre) || defined(__i386) || defined(__amd64)
  { "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
  { "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
  { "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
  { "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
  { "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
--- 461,470 ----
*** 663,739 ****
  { "DELL MD3000i", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
  { "LSI INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
  { "ENGENIO INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
  { "SGI TP", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
  { "SGI IS", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
! { "*CSM100_*", SD_CONF_BSET_NRR_COUNT |
!     SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
! { "*CSM200_*", SD_CONF_BSET_NRR_COUNT |
!     SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
  { "Fujitsu SX300", SD_CONF_BSET_THROTTLE, &lsi_oem_properties },
  { "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties },
! { "SUN T3", SD_CONF_BSET_THROTTLE |
  SD_CONF_BSET_BSY_RETRY_COUNT|
  SD_CONF_BSET_RST_RETRIES|
  SD_CONF_BSET_RSV_REL_TIME,
  &purple_properties },
! { "SUN SESS01", SD_CONF_BSET_THROTTLE |
  SD_CONF_BSET_BSY_RETRY_COUNT|
  SD_CONF_BSET_RST_RETRIES|
  SD_CONF_BSET_RSV_REL_TIME|
  SD_CONF_BSET_MIN_THROTTLE|
  SD_CONF_BSET_DISKSORT_DISABLED,
  &sve_properties },
! { "SUN T4", SD_CONF_BSET_THROTTLE |
  SD_CONF_BSET_BSY_RETRY_COUNT|
  SD_CONF_BSET_RST_RETRIES|
  SD_CONF_BSET_RSV_REL_TIME,
  &purple_properties },
! { "SUN SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
  SD_CONF_BSET_LUN_RESET_ENABLED,
  &maserati_properties },
! { "SUN SE6920", SD_CONF_BSET_THROTTLE |
  SD_CONF_BSET_NRR_COUNT|
  SD_CONF_BSET_BSY_RETRY_COUNT|
  SD_CONF_BSET_RST_RETRIES|
  SD_CONF_BSET_MIN_THROTTLE|
  SD_CONF_BSET_DISKSORT_DISABLED|
  SD_CONF_BSET_LUN_RESET_ENABLED,
  &pirus_properties },
! { "SUN SE6940", SD_CONF_BSET_THROTTLE |
  SD_CONF_BSET_NRR_COUNT|
  SD_CONF_BSET_BSY_RETRY_COUNT|
  SD_CONF_BSET_RST_RETRIES|
  SD_CONF_BSET_MIN_THROTTLE|
  SD_CONF_BSET_DISKSORT_DISABLED|
  SD_CONF_BSET_LUN_RESET_ENABLED,
  &pirus_properties },
! { "SUN StorageTek 6920", SD_CONF_BSET_THROTTLE |
  SD_CONF_BSET_NRR_COUNT|
  SD_CONF_BSET_BSY_RETRY_COUNT|
  SD_CONF_BSET_RST_RETRIES|
  SD_CONF_BSET_MIN_THROTTLE|
  SD_CONF_BSET_DISKSORT_DISABLED|
  SD_CONF_BSET_LUN_RESET_ENABLED,
  &pirus_properties },
! { "SUN StorageTek 6940", SD_CONF_BSET_THROTTLE |
  SD_CONF_BSET_NRR_COUNT|
  SD_CONF_BSET_BSY_RETRY_COUNT|
  SD_CONF_BSET_RST_RETRIES|
  SD_CONF_BSET_MIN_THROTTLE|
  SD_CONF_BSET_DISKSORT_DISABLED|
  SD_CONF_BSET_LUN_RESET_ENABLED,
  &pirus_properties },
! { "SUN PSX1000", SD_CONF_BSET_THROTTLE |
  SD_CONF_BSET_NRR_COUNT|
  SD_CONF_BSET_BSY_RETRY_COUNT|
  SD_CONF_BSET_RST_RETRIES|
  SD_CONF_BSET_MIN_THROTTLE|
  SD_CONF_BSET_DISKSORT_DISABLED|
  SD_CONF_BSET_LUN_RESET_ENABLED,
  &pirus_properties },
! { "SUN SE6330", SD_CONF_BSET_THROTTLE |
  SD_CONF_BSET_NRR_COUNT|
  SD_CONF_BSET_BSY_RETRY_COUNT|
  SD_CONF_BSET_RST_RETRIES|
  SD_CONF_BSET_MIN_THROTTLE|
  SD_CONF_BSET_DISKSORT_DISABLED|
--- 510,588 ----
  { "DELL MD3000i", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
  { "LSI INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
  { "ENGENIO INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
  { "SGI TP", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
  { "SGI IS", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
! { "*CSM100_*", SD_CONF_BSET_NRR_COUNT|
!     SD_CONF_BSET_CACHE_IS_NV,
!     &lsi_oem_properties },
! { "*CSM200_*", SD_CONF_BSET_NRR_COUNT|
!     SD_CONF_BSET_CACHE_IS_NV,
!     &lsi_oem_properties },
  { "Fujitsu SX300", SD_CONF_BSET_THROTTLE, &lsi_oem_properties },
  { "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties },
! { "SUN T3", SD_CONF_BSET_THROTTLE|
  SD_CONF_BSET_BSY_RETRY_COUNT|
  SD_CONF_BSET_RST_RETRIES|
  SD_CONF_BSET_RSV_REL_TIME,
  &purple_properties },
! { "SUN SESS01", SD_CONF_BSET_THROTTLE|
  SD_CONF_BSET_BSY_RETRY_COUNT|
  SD_CONF_BSET_RST_RETRIES|
  SD_CONF_BSET_RSV_REL_TIME|
  SD_CONF_BSET_MIN_THROTTLE|
  SD_CONF_BSET_DISKSORT_DISABLED,
  &sve_properties },
! { "SUN T4", SD_CONF_BSET_THROTTLE|
  SD_CONF_BSET_BSY_RETRY_COUNT|
  SD_CONF_BSET_RST_RETRIES|
  SD_CONF_BSET_RSV_REL_TIME,
  &purple_properties },
! { "SUN SVE01", SD_CONF_BSET_DISKSORT_DISABLED|
  SD_CONF_BSET_LUN_RESET_ENABLED,
  &maserati_properties },
! { "SUN SE6920", SD_CONF_BSET_THROTTLE|
  SD_CONF_BSET_NRR_COUNT|
  SD_CONF_BSET_BSY_RETRY_COUNT|
  SD_CONF_BSET_RST_RETRIES|
  SD_CONF_BSET_MIN_THROTTLE|
  SD_CONF_BSET_DISKSORT_DISABLED|
  SD_CONF_BSET_LUN_RESET_ENABLED,
  &pirus_properties },
! { "SUN SE6940", SD_CONF_BSET_THROTTLE|
  SD_CONF_BSET_NRR_COUNT|
  SD_CONF_BSET_BSY_RETRY_COUNT|
  SD_CONF_BSET_RST_RETRIES|
  SD_CONF_BSET_MIN_THROTTLE|
  SD_CONF_BSET_DISKSORT_DISABLED|
  SD_CONF_BSET_LUN_RESET_ENABLED,
  &pirus_properties },
! { "SUN StorageTek 6920", SD_CONF_BSET_THROTTLE|
  SD_CONF_BSET_NRR_COUNT|
  SD_CONF_BSET_BSY_RETRY_COUNT|
  SD_CONF_BSET_RST_RETRIES|
  SD_CONF_BSET_MIN_THROTTLE|
  SD_CONF_BSET_DISKSORT_DISABLED|
  SD_CONF_BSET_LUN_RESET_ENABLED,
  &pirus_properties },
! { "SUN StorageTek 6940", SD_CONF_BSET_THROTTLE|
  SD_CONF_BSET_NRR_COUNT|
  SD_CONF_BSET_BSY_RETRY_COUNT|
  SD_CONF_BSET_RST_RETRIES|
  SD_CONF_BSET_MIN_THROTTLE|
  SD_CONF_BSET_DISKSORT_DISABLED|
  SD_CONF_BSET_LUN_RESET_ENABLED,
  &pirus_properties },
! { "SUN PSX1000", SD_CONF_BSET_THROTTLE|
  SD_CONF_BSET_NRR_COUNT|
  SD_CONF_BSET_BSY_RETRY_COUNT|
  SD_CONF_BSET_RST_RETRIES|
  SD_CONF_BSET_MIN_THROTTLE|
  SD_CONF_BSET_DISKSORT_DISABLED|
  SD_CONF_BSET_LUN_RESET_ENABLED,
  &pirus_properties },
! { "SUN SE6330", SD_CONF_BSET_THROTTLE|
  SD_CONF_BSET_NRR_COUNT|
  SD_CONF_BSET_BSY_RETRY_COUNT|
  SD_CONF_BSET_RST_RETRIES|
  SD_CONF_BSET_MIN_THROTTLE|
  SD_CONF_BSET_DISKSORT_DISABLED|
*** 744,756 ****
  { "STK OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
  { "STK OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
  { "STK BladeCtlr", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
  { "STK FLEXLINE", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
  { "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
- #endif /* fibre or NON-sparc platforms */
- #if ((defined(__sparc) && !defined(__fibre)) ||\
-     (defined(__i386) || defined(__amd64)))
  { "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
  { "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
  { "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
  { "CONNER CP30540", SD_CONF_BSET_NOCACHE, NULL },
  { "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
--- 593,602 ----
*** 758,803 ****
  { "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
  { "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
  { "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
  { "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
  { "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
! { "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
! { "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
!     &symbios_properties },
! { "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
!     &lsi_properties_scsi },
! #if defined(__i386) || defined(__amd64)
! { " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
!     | SD_CONF_BSET_READSUB_BCD
!     | SD_CONF_BSET_READ_TOC_ADDR_BCD
!     | SD_CONF_BSET_NO_READ_HEADER
!     | SD_CONF_BSET_READ_CD_XD4), NULL },
! 
! { " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
!     | SD_CONF_BSET_READSUB_BCD
!     | SD_CONF_BSET_READ_TOC_ADDR_BCD
!     | SD_CONF_BSET_NO_READ_HEADER
!     | SD_CONF_BSET_READ_CD_XD4), NULL },
! #endif /* __i386 || __amd64 */
! #endif /* sparc NON-fibre or NON-sparc platforms */
! #if (defined(SD_PROP_TST))
! { "VENDOR PRODUCT ", (SD_CONF_BSET_THROTTLE
!     | SD_CONF_BSET_CTYPE
!     | SD_CONF_BSET_NRR_COUNT
!     | SD_CONF_BSET_FAB_DEVID
!     | SD_CONF_BSET_NOCACHE
!     | SD_CONF_BSET_BSY_RETRY_COUNT
!     | SD_CONF_BSET_PLAYMSF_BCD
!     | SD_CONF_BSET_READSUB_BCD
!     | SD_CONF_BSET_READ_TOC_TRK_BCD
!     | SD_CONF_BSET_READ_TOC_ADDR_BCD
!     | SD_CONF_BSET_NO_READ_HEADER
!     | SD_CONF_BSET_READ_CD_XD4
!     | SD_CONF_BSET_RST_RETRIES
!     | SD_CONF_BSET_RSV_REL_TIME
!     | SD_CONF_BSET_TUR_CHECK), &tst_properties},
  #endif
  };
  
  static const int sd_disk_table_size =
  sizeof (sd_disk_table)/ sizeof (sd_disk_config_t);
--- 604,649 ----
  { "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
  { "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
  { "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
  { "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
  { "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
! { "SYMBIOS INF-01-00", SD_CONF_BSET_FAB_DEVID, NULL },
! { "SYMBIOS", SD_CONF_BSET_THROTTLE|
!     SD_CONF_BSET_NRR_COUNT, &symbios_properties },
! { "LSI", SD_CONF_BSET_THROTTLE|
!     SD_CONF_BSET_NRR_COUNT, &lsi_properties_scsi },
! { " NEC CD-ROM DRIVE:260 ", SD_CONF_BSET_PLAYMSF_BCD|
!     SD_CONF_BSET_READSUB_BCD|
!     SD_CONF_BSET_READ_TOC_ADDR_BCD|
!     SD_CONF_BSET_NO_READ_HEADER|
!     SD_CONF_BSET_READ_CD_XD4,
!     NULL },
! { " NEC CD-ROM DRIVE:270 ", SD_CONF_BSET_PLAYMSF_BCD|
!     SD_CONF_BSET_READSUB_BCD|
!     SD_CONF_BSET_READ_TOC_ADDR_BCD|
!     SD_CONF_BSET_NO_READ_HEADER|
!     SD_CONF_BSET_READ_CD_XD4,
!     NULL },
  #if (defined(SD_PROP_TST))
! { "VENDOR PRODUCT ", SD_CONF_BSET_THROTTLE|
!     SD_CONF_BSET_CTYPE|
!     SD_CONF_BSET_NRR_COUNT|
!     SD_CONF_BSET_FAB_DEVID|
!     SD_CONF_BSET_NOCACHE|
!     SD_CONF_BSET_BSY_RETRY_COUNT|
!     SD_CONF_BSET_PLAYMSF_BCD|
!     SD_CONF_BSET_READSUB_BCD|
!     SD_CONF_BSET_READ_TOC_TRK_BCD|
!     SD_CONF_BSET_READ_TOC_ADDR_BCD|
!     SD_CONF_BSET_NO_READ_HEADER|
!     SD_CONF_BSET_READ_CD_XD4|
!     SD_CONF_BSET_RST_RETRIES|
!     SD_CONF_BSET_RSV_REL_TIME|
!     SD_CONF_BSET_TUR_CHECK,
!     &tst_properties},
  #endif
  };
  
  static const int sd_disk_table_size =
  sizeof (sd_disk_table)/ sizeof (sd_disk_config_t);
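[Note on sd_disk_table] The table above is searched first-match at attach time: each entry pairs an INQUIRY vendor/product string (entries containing '*' act as wildcard patterns) with a bitmask naming which fields of the referenced sd_tunables are valid. A hedged sketch of the scan, using sd_disk_config_t field names as an assumption; the real loop and comparison rules live in sd_process_sdconf_table() and sd_sdconf_id_match(), which are not part of this hunk:

    int i;

    for (i = 0; i < sd_disk_table_size; i++) {
        if (sd_sdconf_id_match(un, sd_disk_table[i].device_id,
            strlen(sd_disk_table[i].device_id)) == SD_SUCCESS) {
            /* apply only the sd_tunables fields selected by the flag bits */
            sd_set_vers1_properties(un, sd_disk_table[i].flags,
                sd_disk_table[i].properties);
            break;
        }
    }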
*** 858,1159 ****
  static int sd_pm_idletime = 1;
  
  /*
  * Internal function prototypes
  */
  
! #if (defined(__fibre))
! /*
! * These #defines are to avoid namespace collisions that occur because this
! * code is currently used to compile two separate driver modules: sd and ssd.
! * All function names need to be treated this way (even if declared static)
! * in order to allow the debugger to resolve the names properly.
! * It is anticipated that in the near future the ssd module will be obsoleted,
! * at which time this ugliness should go away.
! */
! #define sd_log_trace ssd_log_trace
! #define sd_log_info ssd_log_info
! #define sd_log_err ssd_log_err
! #define sdprobe ssdprobe
! #define sdinfo ssdinfo
! #define sd_prop_op ssd_prop_op
! #define sd_scsi_probe_cache_init ssd_scsi_probe_cache_init
! #define sd_scsi_probe_cache_fini ssd_scsi_probe_cache_fini
! #define sd_scsi_clear_probe_cache ssd_scsi_clear_probe_cache
! #define sd_scsi_probe_with_cache ssd_scsi_probe_with_cache
! #define sd_scsi_target_lun_init ssd_scsi_target_lun_init
! #define sd_scsi_target_lun_fini ssd_scsi_target_lun_fini
! #define sd_scsi_get_target_lun_count ssd_scsi_get_target_lun_count
! #define sd_scsi_update_lun_on_target ssd_scsi_update_lun_on_target
! #define sd_spin_up_unit ssd_spin_up_unit
! #define sd_enable_descr_sense ssd_enable_descr_sense
! #define sd_reenable_dsense_task ssd_reenable_dsense_task
! #define sd_set_mmc_caps ssd_set_mmc_caps
! #define sd_read_unit_properties ssd_read_unit_properties
! #define sd_process_sdconf_file ssd_process_sdconf_file
! #define sd_process_sdconf_table ssd_process_sdconf_table
! #define sd_sdconf_id_match ssd_sdconf_id_match
! #define sd_blank_cmp ssd_blank_cmp
! #define sd_chk_vers1_data ssd_chk_vers1_data
! #define sd_set_vers1_properties ssd_set_vers1_properties
! #define sd_check_bdc_vpd ssd_check_bdc_vpd
! #define sd_check_emulation_mode ssd_check_emulation_mode
! #define sd_get_physical_geometry ssd_get_physical_geometry
! #define sd_get_virtual_geometry ssd_get_virtual_geometry
! #define sd_update_block_info ssd_update_block_info
! #define sd_register_devid ssd_register_devid
! #define sd_get_devid ssd_get_devid
! #define sd_create_devid ssd_create_devid
! #define sd_write_deviceid ssd_write_deviceid
! #define sd_check_vpd_page_support ssd_check_vpd_page_support
! #define sd_setup_pm ssd_setup_pm
! #define sd_create_pm_components ssd_create_pm_components
! #define sd_ddi_suspend ssd_ddi_suspend
! #define sd_ddi_resume ssd_ddi_resume
! #define sd_pm_state_change ssd_pm_state_change
! #define sdpower ssdpower
! #define sdattach ssdattach
! #define sddetach ssddetach
! #define sd_unit_attach ssd_unit_attach
! #define sd_unit_detach ssd_unit_detach
! #define sd_set_unit_attributes ssd_set_unit_attributes
! #define sd_create_errstats ssd_create_errstats
! #define sd_set_errstats ssd_set_errstats
! #define sd_set_pstats ssd_set_pstats
! #define sddump ssddump
! #define sd_scsi_poll ssd_scsi_poll
! #define sd_send_polled_RQS ssd_send_polled_RQS
! #define sd_ddi_scsi_poll ssd_ddi_scsi_poll
! #define sd_init_event_callbacks ssd_init_event_callbacks
! #define sd_event_callback ssd_event_callback
! #define sd_cache_control ssd_cache_control
! #define sd_get_write_cache_enabled ssd_get_write_cache_enabled
! #define sd_get_write_cache_changeable ssd_get_write_cache_changeable
! #define sd_get_nv_sup ssd_get_nv_sup
! #define sd_make_device ssd_make_device
! #define sdopen ssdopen
! #define sdclose ssdclose
! #define sd_ready_and_valid ssd_ready_and_valid
! #define sdmin ssdmin
! #define sdread ssdread
! #define sdwrite ssdwrite
! #define sdaread ssdaread
! #define sdawrite ssdawrite
! #define sdstrategy ssdstrategy
! #define sdioctl ssdioctl
! #define sd_mapblockaddr_iostart ssd_mapblockaddr_iostart
! #define sd_mapblocksize_iostart ssd_mapblocksize_iostart
! #define sd_checksum_iostart ssd_checksum_iostart
! #define sd_checksum_uscsi_iostart ssd_checksum_uscsi_iostart
! #define sd_pm_iostart ssd_pm_iostart
! #define sd_core_iostart ssd_core_iostart
! #define sd_mapblockaddr_iodone ssd_mapblockaddr_iodone
! #define sd_mapblocksize_iodone ssd_mapblocksize_iodone
! #define sd_checksum_iodone ssd_checksum_iodone
! #define sd_checksum_uscsi_iodone ssd_checksum_uscsi_iodone
! #define sd_pm_iodone ssd_pm_iodone
! #define sd_initpkt_for_buf ssd_initpkt_for_buf
! #define sd_destroypkt_for_buf ssd_destroypkt_for_buf
! #define sd_setup_rw_pkt ssd_setup_rw_pkt
! #define sd_setup_next_rw_pkt ssd_setup_next_rw_pkt
! #define sd_buf_iodone ssd_buf_iodone
! #define sd_uscsi_strategy ssd_uscsi_strategy
! #define sd_initpkt_for_uscsi ssd_initpkt_for_uscsi
! #define sd_destroypkt_for_uscsi ssd_destroypkt_for_uscsi
! #define sd_uscsi_iodone ssd_uscsi_iodone
! #define sd_xbuf_strategy ssd_xbuf_strategy
! #define sd_xbuf_init ssd_xbuf_init
! #define sd_pm_entry ssd_pm_entry
! #define sd_pm_exit ssd_pm_exit
- #define sd_pm_idletimeout_handler ssd_pm_idletimeout_handler
- #define sd_pm_timeout_handler ssd_pm_timeout_handler
- 
- #define sd_add_buf_to_waitq ssd_add_buf_to_waitq
- #define sdintr ssdintr
- #define sd_start_cmds ssd_start_cmds
- #define sd_send_scsi_cmd ssd_send_scsi_cmd
- #define sd_bioclone_alloc ssd_bioclone_alloc
- #define sd_bioclone_free ssd_bioclone_free
- #define sd_shadow_buf_alloc ssd_shadow_buf_alloc
- #define sd_shadow_buf_free ssd_shadow_buf_free
- #define sd_print_transport_rejected_message \
-     ssd_print_transport_rejected_message
- #define sd_retry_command ssd_retry_command
- #define sd_set_retry_bp ssd_set_retry_bp
- #define sd_send_request_sense_command ssd_send_request_sense_command
- #define sd_start_retry_command ssd_start_retry_command
- #define sd_start_direct_priority_command \
-     ssd_start_direct_priority_command
- #define sd_return_failed_command ssd_return_failed_command
- #define sd_return_failed_command_no_restart \
-     ssd_return_failed_command_no_restart
- #define sd_return_command ssd_return_command
- #define sd_sync_with_callback ssd_sync_with_callback
- #define sdrunout ssdrunout
- #define sd_mark_rqs_busy ssd_mark_rqs_busy
- #define sd_mark_rqs_idle ssd_mark_rqs_idle
- #define sd_reduce_throttle ssd_reduce_throttle
- #define sd_restore_throttle ssd_restore_throttle
- #define sd_print_incomplete_msg ssd_print_incomplete_msg
- #define sd_init_cdb_limits ssd_init_cdb_limits
- #define sd_pkt_status_good ssd_pkt_status_good
- #define sd_pkt_status_check_condition ssd_pkt_status_check_condition
- #define sd_pkt_status_busy ssd_pkt_status_busy
- #define sd_pkt_status_reservation_conflict \
-     ssd_pkt_status_reservation_conflict
- #define sd_pkt_status_qfull ssd_pkt_status_qfull
- #define sd_handle_request_sense ssd_handle_request_sense
- #define sd_handle_auto_request_sense ssd_handle_auto_request_sense
- #define sd_print_sense_failed_msg ssd_print_sense_failed_msg
- #define sd_validate_sense_data ssd_validate_sense_data
- #define sd_decode_sense ssd_decode_sense
- #define sd_print_sense_msg ssd_print_sense_msg
- #define sd_sense_key_no_sense ssd_sense_key_no_sense
- #define sd_sense_key_recoverable_error ssd_sense_key_recoverable_error
- #define sd_sense_key_not_ready ssd_sense_key_not_ready
- #define sd_sense_key_medium_or_hardware_error \
-     ssd_sense_key_medium_or_hardware_error
- #define sd_sense_key_illegal_request ssd_sense_key_illegal_request
- #define sd_sense_key_unit_attention ssd_sense_key_unit_attention
- #define sd_sense_key_fail_command ssd_sense_key_fail_command
- #define sd_sense_key_blank_check ssd_sense_key_blank_check
- #define sd_sense_key_aborted_command ssd_sense_key_aborted_command
- #define sd_sense_key_default ssd_sense_key_default
- #define sd_print_retry_msg ssd_print_retry_msg
- #define sd_print_cmd_incomplete_msg ssd_print_cmd_incomplete_msg
- #define sd_pkt_reason_cmd_incomplete ssd_pkt_reason_cmd_incomplete
- #define sd_pkt_reason_cmd_tran_err ssd_pkt_reason_cmd_tran_err
- #define sd_pkt_reason_cmd_reset ssd_pkt_reason_cmd_reset
- #define sd_pkt_reason_cmd_aborted ssd_pkt_reason_cmd_aborted
- #define sd_pkt_reason_cmd_timeout ssd_pkt_reason_cmd_timeout
- #define sd_pkt_reason_cmd_unx_bus_free ssd_pkt_reason_cmd_unx_bus_free
- #define sd_pkt_reason_cmd_tag_reject ssd_pkt_reason_cmd_tag_reject
- #define sd_pkt_reason_default ssd_pkt_reason_default
- #define sd_reset_target ssd_reset_target
- #define sd_start_stop_unit_callback ssd_start_stop_unit_callback
- #define sd_start_stop_unit_task ssd_start_stop_unit_task
- #define sd_taskq_create ssd_taskq_create
- #define sd_taskq_delete ssd_taskq_delete
- #define sd_target_change_task ssd_target_change_task
- #define sd_log_dev_status_event ssd_log_dev_status_event
- #define sd_log_lun_expansion_event ssd_log_lun_expansion_event
- #define sd_log_eject_request_event ssd_log_eject_request_event
- #define sd_media_change_task ssd_media_change_task
- #define sd_handle_mchange ssd_handle_mchange
- #define sd_send_scsi_DOORLOCK ssd_send_scsi_DOORLOCK
- #define sd_send_scsi_READ_CAPACITY ssd_send_scsi_READ_CAPACITY
- #define sd_send_scsi_READ_CAPACITY_16 ssd_send_scsi_READ_CAPACITY_16
- #define sd_send_scsi_GET_CONFIGURATION ssd_send_scsi_GET_CONFIGURATION
- #define sd_send_scsi_feature_GET_CONFIGURATION \
-     sd_send_scsi_feature_GET_CONFIGURATION
- #define sd_send_scsi_START_STOP_UNIT ssd_send_scsi_START_STOP_UNIT
- #define sd_send_scsi_INQUIRY ssd_send_scsi_INQUIRY
- #define sd_send_scsi_TEST_UNIT_READY ssd_send_scsi_TEST_UNIT_READY
- #define sd_send_scsi_PERSISTENT_RESERVE_IN \
-     ssd_send_scsi_PERSISTENT_RESERVE_IN
- #define sd_send_scsi_PERSISTENT_RESERVE_OUT \
-     ssd_send_scsi_PERSISTENT_RESERVE_OUT
- #define sd_send_scsi_SYNCHRONIZE_CACHE ssd_send_scsi_SYNCHRONIZE_CACHE
- #define sd_send_scsi_SYNCHRONIZE_CACHE_biodone \
-     ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
- #define sd_send_scsi_MODE_SENSE ssd_send_scsi_MODE_SENSE
- #define sd_send_scsi_MODE_SELECT ssd_send_scsi_MODE_SELECT
- #define sd_send_scsi_RDWR ssd_send_scsi_RDWR
- #define sd_send_scsi_LOG_SENSE ssd_send_scsi_LOG_SENSE
- #define sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION \
-     ssd_send_scsi_GET_EVENT_STATUS_NOTIFICATION
- #define sd_gesn_media_data_valid ssd_gesn_media_data_valid
- #define sd_alloc_rqs ssd_alloc_rqs
- #define sd_free_rqs ssd_free_rqs
- #define sd_dump_memory ssd_dump_memory
- #define sd_get_media_info_com ssd_get_media_info_com
- #define sd_get_media_info ssd_get_media_info
- #define sd_get_media_info_ext ssd_get_media_info_ext
- #define sd_dkio_ctrl_info ssd_dkio_ctrl_info
- #define sd_nvpair_str_decode ssd_nvpair_str_decode
- #define sd_strtok_r ssd_strtok_r
- #define sd_set_properties ssd_set_properties
- #define sd_get_tunables_from_conf ssd_get_tunables_from_conf
- #define sd_setup_next_xfer ssd_setup_next_xfer
- #define sd_dkio_get_temp ssd_dkio_get_temp
- #define sd_check_mhd ssd_check_mhd
- #define sd_mhd_watch_cb ssd_mhd_watch_cb
- #define sd_mhd_watch_incomplete ssd_mhd_watch_incomplete
- #define sd_sname ssd_sname
- #define sd_mhd_resvd_recover ssd_mhd_resvd_recover
- #define sd_resv_reclaim_thread ssd_resv_reclaim_thread
- #define sd_take_ownership ssd_take_ownership
- #define sd_reserve_release ssd_reserve_release
- #define sd_rmv_resv_reclaim_req ssd_rmv_resv_reclaim_req
- #define sd_mhd_reset_notify_cb ssd_mhd_reset_notify_cb
- #define sd_persistent_reservation_in_read_keys \
-     ssd_persistent_reservation_in_read_keys
- #define sd_persistent_reservation_in_read_resv \
-     ssd_persistent_reservation_in_read_resv
- #define sd_mhdioc_takeown ssd_mhdioc_takeown
- #define sd_mhdioc_failfast ssd_mhdioc_failfast
- #define sd_mhdioc_release ssd_mhdioc_release
- #define sd_mhdioc_register_devid ssd_mhdioc_register_devid
- #define sd_mhdioc_inkeys ssd_mhdioc_inkeys
- #define sd_mhdioc_inresv ssd_mhdioc_inresv
- #define sr_change_blkmode ssr_change_blkmode
- #define sr_change_speed ssr_change_speed
- #define sr_atapi_change_speed ssr_atapi_change_speed
- #define sr_pause_resume ssr_pause_resume
- #define sr_play_msf ssr_play_msf
- #define sr_play_trkind ssr_play_trkind
- #define sr_read_all_subcodes ssr_read_all_subcodes
- #define sr_read_subchannel ssr_read_subchannel
- #define sr_read_tocentry ssr_read_tocentry
- #define sr_read_tochdr ssr_read_tochdr
- #define sr_read_cdda ssr_read_cdda
- #define sr_read_cdxa ssr_read_cdxa
- #define sr_read_mode1 ssr_read_mode1
- #define sr_read_mode2 ssr_read_mode2
- #define sr_read_cd_mode2 ssr_read_cd_mode2
- #define sr_sector_mode ssr_sector_mode
- #define sr_eject ssr_eject
- #define sr_ejected ssr_ejected
- #define sr_check_wp ssr_check_wp
- #define sd_watch_request_submit ssd_watch_request_submit
- #define sd_check_media ssd_check_media
- #define sd_media_watch_cb ssd_media_watch_cb
- #define sd_delayed_cv_broadcast ssd_delayed_cv_broadcast
- #define sr_volume_ctrl ssr_volume_ctrl
- #define sr_read_sony_session_offset ssr_read_sony_session_offset
- #define sd_log_page_supported ssd_log_page_supported
- #define sd_check_for_writable_cd ssd_check_for_writable_cd
- #define sd_wm_cache_constructor ssd_wm_cache_constructor
- #define sd_wm_cache_destructor ssd_wm_cache_destructor
- #define sd_range_lock ssd_range_lock
- #define sd_get_range ssd_get_range
- #define sd_free_inlist_wmap ssd_free_inlist_wmap
- #define sd_range_unlock ssd_range_unlock
- #define sd_read_modify_write_task ssd_read_modify_write_task
- #define sddump_do_read_of_rmw ssddump_do_read_of_rmw
- 
- #define sd_iostart_chain ssd_iostart_chain
- #define sd_iodone_chain ssd_iodone_chain
- #define sd_initpkt_map ssd_initpkt_map
- #define sd_destroypkt_map ssd_destroypkt_map
- #define sd_chain_type_map ssd_chain_type_map
- #define sd_chain_index_map ssd_chain_index_map
- 
- #define sd_failfast_flushctl ssd_failfast_flushctl
- #define sd_failfast_flushq ssd_failfast_flushq
- #define sd_failfast_flushq_callback ssd_failfast_flushq_callback
- 
- #define sd_is_lsi ssd_is_lsi
- #define sd_tg_rdwr ssd_tg_rdwr
- #define sd_tg_getinfo ssd_tg_getinfo
- #define sd_rmw_msg_print_handler ssd_rmw_msg_print_handler
- 
- #endif /* #if (defined(__fibre)) */
- 
  int _init(void);
  int _fini(void);
  int _info(struct modinfo *modinfop);
  
  /*PRINTFLIKE3*/
--- 704,732 ----
  static int sd_pm_idletime = 1;
  
  /*
  * Internal function prototypes
  */
  
+ typedef struct unmap_param_hdr_s {
+     uint16_t uph_data_len;
+     uint16_t uph_descr_data_len;
+     uint32_t uph_reserved;
+ } unmap_param_hdr_t;
+ 
! typedef struct unmap_blk_descr_s {
!     uint64_t ubd_lba;
!     uint32_t ubd_lba_cnt;
!     uint32_t ubd_reserved;
! } unmap_blk_descr_t;
! 
! /* Max number of block descriptors in UNMAP command */
! #define SD_UNMAP_MAX_DESCR \
!     ((UINT16_MAX - sizeof (unmap_param_hdr_t)) / sizeof (unmap_blk_descr_t))
! /* Max size of the UNMAP parameter list in bytes */
! #define SD_UNMAP_PARAM_LIST_MAXSZ (sizeof (unmap_param_hdr_t) + \
!     SD_UNMAP_MAX_DESCR * sizeof (unmap_blk_descr_t))
  
  int _init(void);
  int _fini(void);
  int _info(struct modinfo *modinfop);
  
  /*PRINTFLIKE3*/
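[Note on the UNMAP typedefs above] The two new structs mirror the SBC-3 UNMAP parameter list wire format: an 8-byte header followed by 16-byte block descriptors. The macros therefore work out to (65535 - 8) / 16 = 4095 descriptors, and 8 + 4095 * 16 = 65528 bytes, the largest parameter list whose size still fits the 16-bit allocation-length field of the UNMAP CDB. A compile-time sketch of those invariants using CTASSERT from the newly included <sys/debug.h> (placement is illustrative; these asserts are not part of the change itself):

    CTASSERT(sizeof (unmap_param_hdr_t) == 8);
    CTASSERT(sizeof (unmap_blk_descr_t) == 16);
    CTASSERT(SD_UNMAP_PARAM_LIST_MAXSZ <= UINT16_MAX);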
*** 1199,1209 ****
  /*
  * Using sd_ssc_assessment to set correct type-of-assessment
  * Using sd_ssc_post to post ereport & system log
  * sd_ssc_post will call sd_ssc_print to print system log
! * sd_ssc_post will call sd_ssd_ereport_post to post ereport
  */
  static void sd_ssc_assessment(sd_ssc_t *ssc,
  enum sd_type_assessment tp_assess);
  
  static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
--- 772,782 ----
  /*
  * Using sd_ssc_assessment to set correct type-of-assessment
  * Using sd_ssc_post to post ereport & system log
  * sd_ssc_post will call sd_ssc_print to print system log
! * sd_ssc_post will call sd_ssc_ereport_post to post ereport
  */
  static void sd_ssc_assessment(sd_ssc_t *ssc,
  enum sd_type_assessment tp_assess);
  
  static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
*** 1251,1271 ****
  static int sd_get_devid(sd_ssc_t *ssc);
  static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
  static int sd_write_deviceid(sd_ssc_t *ssc);
  static int sd_check_vpd_page_support(sd_ssc_t *ssc);
  
  static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
  static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);
  
  static int sd_ddi_suspend(dev_info_t *devi);
  static int sd_ddi_resume(dev_info_t *devi);
  static int sd_pm_state_change(struct sd_lun *un, int level, int flag);
  static int sdpower(dev_info_t *devi, int component, int level);
  
  static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
  static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
! static int sd_unit_attach(dev_info_t *devi);
  static int sd_unit_detach(dev_info_t *devi);
  
  static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
  static void sd_create_errstats(struct sd_lun *un, int instance);
  static void sd_set_errstats(struct sd_lun *un);
--- 824,846 ----
  static int sd_get_devid(sd_ssc_t *ssc);
  static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
  static int sd_write_deviceid(sd_ssc_t *ssc);
  static int sd_check_vpd_page_support(sd_ssc_t *ssc);
  
+ #ifdef notyet
  static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
  static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);
+ #endif
  
  static int sd_ddi_suspend(dev_info_t *devi);
  static int sd_ddi_resume(dev_info_t *devi);
  static int sd_pm_state_change(struct sd_lun *un, int level, int flag);
  static int sdpower(dev_info_t *devi, int component, int level);
  
  static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
  static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
! static void sd_unit_attach(void *arg);
  static int sd_unit_detach(dev_info_t *devi);
  
  static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
  static void sd_create_errstats(struct sd_lun *un, int instance);
  static void sd_set_errstats(struct sd_lun *un);
*** 1274,1295 ****
  static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
  static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
  static int sd_send_polled_RQS(struct sd_lun *un);
  static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);
  
- #if (defined(__fibre))
  /*
- * Event callbacks (photon)
- */
- static void sd_init_event_callbacks(struct sd_lun *un);
- static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
- #endif
- 
- /*
  * Defines for sd_cache_control
  */
- 
  #define SD_CACHE_ENABLE 1
  #define SD_CACHE_DISABLE 0
  #define SD_CACHE_NOCHANGE -1
  
  static int sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag);
--- 849,861 ----
*** 1408,1418 ****
  static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
  clock_t retry_delay, void (*statp)(kstat_io_t *));
  
  static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
! struct scsi_pkt *pktp);
  static void sd_start_retry_command(void *arg);
  static void sd_start_direct_priority_command(void *arg);
  static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
  int errcode);
  static void sd_return_failed_command_no_restart(struct sd_lun *un,
--- 974,984 ----
  static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
  clock_t retry_delay, void (*statp)(kstat_io_t *));
  
  static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
! int retry_check_flag, struct scsi_pkt *pktp);
  static void sd_start_retry_command(void *arg);
  static void sd_start_direct_priority_command(void *arg);
  static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
  int errcode);
  static void sd_return_failed_command_no_restart(struct sd_lun *un,
*** 1531,1540 ****
--- 1097,1108 ----
  static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd,
  uchar_t *usr_bufp);
  
  static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
  struct dk_callback *dkc);
  
  static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
  
+ static int sd_send_scsi_UNMAP(dev_t dev, sd_ssc_t *ssc, dkioc_free_list_t *dfl,
+     int flag);
+ 
  static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc,
  struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
  uchar_t *bufaddr, uint_t buflen, int path_flag);
  
  static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc,
  struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
*** 1625,1635 ****
--- 1193,1205 ----
  static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
  static void sd_delayed_cv_broadcast(void *arg);
  static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag);
  static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag);
  
+ #ifdef notyet
  static int sd_log_page_supported(sd_ssc_t *ssc, int log_page);
+ #endif
  
  /*
  * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions.
  */
  static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag);
*** 1648,1658 ****
  
  /*
  * Function prototypes for failfast support.
  */
! static void sd_failfast_flushq(struct sd_lun *un);
  static int sd_failfast_flushq_callback(struct buf *bp);
  
  /*
  * Function prototypes to check for lsi devices
  */
--- 1218,1228 ----
  
  /*
  * Function prototypes for failfast support.
  */
! static void sd_failfast_flushq(struct sd_lun *un, boolean_t flush_all);
  static int sd_failfast_flushq_callback(struct buf *bp);
  
  /*
  * Function prototypes to check for lsi devices
  */
*** 1688,1697 ****
--- 1258,1299 ----
  #define SD_FAILFAST_INACTIVE 0
  #define SD_FAILFAST_ACTIVE 1
  
  /*
+ * Bitmask to control behaviour in failfast active state:
+ *
+ * SD_FAILFAST_ENABLE_FORCE_INACTIVE: When set, allow retries without
+ * SD_RETRIES_FAILFAST to cause transition to failfast inactive state.
+ *
+ * SD_FAILFAST_ENABLE_FAIL_RETRIES: When set, cause retries with the flag
+ * SD_RETRIES_FAILFAST set (following a timeout) to fail when in failfast
+ * active state.
+ *
+ * SD_FAILFAST_ENABLE_FAIL_ALL_RETRIES: When set, cause ALL retries,
+ * regardless of reason, to fail when in failfast active state. This takes
+ * precedence over SD_FAILFAST_FAIL_RETRIES.
+ *
+ * SD_FAILFAST_ENABLE_FAIL_USCSI: When set, discard all commands in the USCSI
+ * chain (sdioctl or driver generated) when in failfast active state.
+ * To prevent problems with sdopen, this is limited to when there are
+ * multiple pending commands.
+ */
+ 
+ #define SD_FAILFAST_ENABLE_FORCE_INACTIVE 0x01
+ #define SD_FAILFAST_ENABLE_FAIL_RETRIES 0x02
+ #define SD_FAILFAST_ENABLE_FAIL_ALL_RETRIES 0x04
+ #define SD_FAILFAST_ENABLE_FAIL_USCSI 0x08
+ 
+ /*
+ * The default behaviour is to fail all retries due to timeout when in failfast
+ * active state, and not allow other retries to transition to inactive.
+ */
+ static int sd_failfast_enable = SD_FAILFAST_ENABLE_FAIL_RETRIES |
+     SD_FAILFAST_ENABLE_FAIL_USCSI;
+ 
+ /*
  * Bitmask to control behavior of buf(9S) flushes when a transition to
  * the failfast state occurs. Optional bits include:
  *
  * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that
  * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will
*** 1703,1724 ****
  */
  #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01
  #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02
  
  /*
! * The default behavior is to only flush bufs that have B_FAILFAST set, but
! * to flush all queues within the driver.
  */
! static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES;
  
  /*
  * SD Testing Fault Injection
  */
  #ifdef SD_FAULT_INJECTION
! static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un);
  static void sd_faultinjection(struct scsi_pkt *pktp);
  static void sd_injection_log(char *buf, struct sd_lun *un);
  #endif
  
  /*
  * Device driver ops vector
--- 1305,1330 ----
  */
  #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01
  #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02
  
  /*
! * The default behavior is to flush all bufs in all queues within the driver.
  */
! static int sd_failfast_flushctl =
!     SD_FAILFAST_FLUSH_ALL_BUFS | SD_FAILFAST_FLUSH_ALL_QUEUES;
  
+ #ifdef SD_FAULT_INJECTION
+ static uint_t sd_fault_injection_on = 0;
+ #endif
+ 
  /*
  * SD Testing Fault Injection
  */
  #ifdef SD_FAULT_INJECTION
! static int sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un);
  static void sd_faultinjection(struct scsi_pkt *pktp);
+ static void sd_prefaultinjection(struct scsi_pkt *pktp);
  static void sd_injection_log(char *buf, struct sd_lun *un);
  #endif
  
  /*
  * Device driver ops vector
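[Note on the failfast masks above] The default added in the previous hunk is SD_FAILFAST_ENABLE_FAIL_RETRIES | SD_FAILFAST_ENABLE_FAIL_USCSI, i.e. 0x02 | 0x08 = 0x0a: while failfast is active, timed-out retries and queued USCSI commands are discarded, but ordinary retries do not force a transition back to the inactive state (0x01 is clear). A hedged sketch of how such a mask would be consulted on the retry path; this is illustrative only, since the real checks belong in sd_retry_command(), which this hunk does not show:

    if (un->un_failfast_state == SD_FAILFAST_ACTIVE &&
        ((sd_failfast_enable & SD_FAILFAST_ENABLE_FAIL_ALL_RETRIES) ||
        ((retry_check_flag & SD_RETRIES_FAILFAST) &&
        (sd_failfast_enable & SD_FAILFAST_ENABLE_FAIL_RETRIES)))) {
            /* discard the command instead of retrying it */
            sd_return_failed_command(un, bp, EIO);
            return;
    }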
*** 2370,2383 ****
  #define MAX_INQUIRY_SIZE 0xF0
  
  /*
  * Macros used by functions to pass a given buf(9S) struct along to the
  * next function in the layering chain for further processing.
- *
- * In the following macros, passing more than three arguments to the called
- * routines causes the optimizer for the SPARC compiler to stop doing tail
- * call elimination which results in significant performance degradation.
  */
  #define SD_BEGIN_IOSTART(index, un, bp) \
  ((*(sd_iostart_chain[index]))(index, un, bp))
  
  #define SD_BEGIN_IODONE(index, un, bp) \
--- 1976,1985 ----
*** 2566,2576 ****
  va_end(ap);
  scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
  mutex_exit(&sd_log_mutex);
  }
  #ifdef SD_FAULT_INJECTION
- _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
  if (un->sd_injection_mask & comp) {
  mutex_enter(&sd_log_mutex);
  va_start(ap, fmt);
  (void) vsprintf(sd_log_buf, fmt, ap);
  va_end(ap);
--- 2168,2177 ----
*** 2616,2626 ****
  va_end(ap);
  scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
  mutex_exit(&sd_log_mutex);
  }
  #ifdef SD_FAULT_INJECTION
- _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
  if (un->sd_injection_mask & component) {
  mutex_enter(&sd_log_mutex);
  va_start(ap, fmt);
  (void) vsprintf(sd_log_buf, fmt, ap);
  va_end(ap);
--- 2217,2226 ----
*** 2666,2676 ****
  va_end(ap);
  scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
  mutex_exit(&sd_log_mutex);
  }
  #ifdef SD_FAULT_INJECTION
- _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
  if (un->sd_injection_mask & component) {
  mutex_enter(&sd_log_mutex);
  va_start(ap, fmt);
  (void) vsprintf(sd_log_buf, fmt, ap);
  va_end(ap);
--- 2266,2275 ----
*** 2699,2712 ****
  {
  struct scsi_device *devp;
  int rval;
  int instance = ddi_get_instance(devi);
  
- /*
- * if it wasn't for pln, sdprobe could actually be nulldev
- * in the "__fibre" case.
- */
  if (ddi_dev_is_sid(devi) == DDI_SUCCESS) {
  return (DDI_PROBE_DONTCARE);
  }
  
  devp = ddi_get_driver_private(devi);
--- 2298,2307 ----
*** 2729,2768 ****
  switch (devp->sd_inq->inq_dtype) {
  case DTYPE_DIRECT:
  rval = DDI_PROBE_SUCCESS;
  break;
  case DTYPE_RODIRECT:
! /* CDs etc. Can be removable media */
  rval = DDI_PROBE_SUCCESS;
  break;
  case DTYPE_OPTICAL:
  /*
! * Rewritable optical driver HP115AA
! * Can also be removable media
  */
- 
- /*
- * Do not attempt to bind to DTYPE_OPTICAL if
- * pre solaris 9 sparc sd behavior is required
- *
- * If first time through and sd_dtype_optical_bind
- * has not been set in /etc/system check properties
- */
- 
- if (sd_dtype_optical_bind < 0) {
-     sd_dtype_optical_bind = ddi_prop_get_int
-         (DDI_DEV_T_ANY, devi, 0,
-         "optical-device-bind", 1);
- }
- 
- if (sd_dtype_optical_bind == 0) {
-     rval = DDI_PROBE_FAILURE;
- } else {
  rval = DDI_PROBE_SUCCESS;
- }
  break;
- 
  case DTYPE_NOTPRESENT:
  default:
  rval = DDI_PROBE_FAILURE;
  break;
  }
--- 2324,2343 ----
  switch (devp->sd_inq->inq_dtype) {
  case DTYPE_DIRECT:
  rval = DDI_PROBE_SUCCESS;
  break;
  case DTYPE_RODIRECT:
! /* CDs etc. Can be removable media. */
  rval = DDI_PROBE_SUCCESS;
  break;
  case DTYPE_OPTICAL:
  /*
! * Rewritable optical driver HP115AA.
! * Can also be removable media.
  */
  rval = DDI_PROBE_SUCCESS;
  break;
  case DTYPE_NOTPRESENT:
  default:
  rval = DDI_PROBE_FAILURE;
  break;
  }
*** 2858,2873 ****
  char *name, caddr_t valuep, int *lengthp)
  {
  struct sd_lun *un;
  
  if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL)
! return (ddi_prop_op(dev, dip, prop_op, mod_flags,
!     name, valuep, lengthp));
  
  return (cmlb_prop_op(un->un_cmlbhandle, dev, dip, prop_op, mod_flags,
  name, valuep, lengthp, SDPART(dev), (void *)SD_PATH_DIRECT));
  }
  
  /*
  * The following functions are for smart probing:
  * sd_scsi_probe_cache_init()
--- 2433,2461 ----
  char *name, caddr_t valuep, int *lengthp)
  {
  struct sd_lun *un;
  
  if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL)
! goto fallback;
  
+ mutex_enter(SD_MUTEX(un));
+ while ((un->un_state == SD_STATE_ATTACHING))
+     cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
+ 
+ if (un->un_state == SD_STATE_ATTACH_FAILED) {
+     mutex_exit(SD_MUTEX(un));
+     goto fallback;
+ }
+ mutex_exit(SD_MUTEX(un));
+ 
  return (cmlb_prop_op(un->un_cmlbhandle, dev, dip, prop_op, mod_flags,
  name, valuep, lengthp, SDPART(dev), (void *)SD_PATH_DIRECT));
+ 
+ fallback:
+ return (ddi_prop_op(dev, dip, prop_op, mod_flags, name, valuep,
+     lengthp));
  }
  
  /*
  * The following functions are for smart probing:
  * sd_scsi_probe_cache_init()
--- 2799,2815 ----
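[Note on the sd_prop_op() rewrite] The new wait loop is the standard condition-variable idiom: the state is re-tested in a while loop because cv_wait(9F) may return spuriously, and the attaching thread is expected to signal the same condvar once it leaves SD_STATE_ATTACHING. A sketch of the matching wakeup on the attach side, under the assumption that sd_unit_attach() performs the transition (the actual site is elsewhere in this changeset):

    mutex_enter(SD_MUTEX(un));
    un->un_state = failed ? SD_STATE_ATTACH_FAILED : SD_STATE_NORMAL;
    cv_broadcast(&un->un_suspend_cv);   /* wake sd_prop_op() waiters */
    mutex_exit(SD_MUTEX(un));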
*** 3211,3223 ****
  * this stage, use START STOP bit.
  */
  status = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
  SD_TARGET_START, SD_PATH_DIRECT);
! if (status != 0) {
! if (status == EACCES)
  has_conflict = TRUE;
  sd_ssc_assessment(ssc, SD_FMT_IGNORE);
  }
  
  /*
  * Send another INQUIRY command to the target. This is necessary for
--- 2799,2815 ----
  * this stage, use START STOP bit.
  */
  status = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
  SD_TARGET_START, SD_PATH_DIRECT);
! switch (status) {
! case EIO:
!     sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
!     return (status);
! case EACCES:
  has_conflict = TRUE;
+ default: /*FALLTHROUGH*/
  sd_ssc_assessment(ssc, SD_FMT_IGNORE);
  }
  
  /*
  * Send another INQUIRY command to the target. This is necessary for
*** 3832,3842 ****
  ASSERT(un != NULL);
  
  /* Obtain the configuration list associated with the .conf file */
  if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un),
! DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list,
  &config_list, &nelements) != DDI_PROP_SUCCESS) {
  return (SD_FAILURE);
  }
  
  /*
--- 3424,3434 ----
  ASSERT(un != NULL);
  
  /* Obtain the configuration list associated with the .conf file */
  if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un),
! DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sd-config-list",
  &config_list, &nelements) != DDI_PROP_SUCCESS) {
  return (SD_FAILURE);
  }
  
  /*
*** 4196,4212 ****
--- 3788,3829 ----
  if (strcasecmp(name, "physical-block-size") == 0) {
  if (ddi_strtol(value, &endptr, 0, &val) == 0 &&
  ISP2(val) && val >= un->un_tgt_blocksize &&
  val >= un->un_sys_blocksize) {
  un->un_phy_blocksize = val;
+ un->un_f_sdconf_phy_blocksize = TRUE;
  } else {
  goto value_invalid;
  }
  SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
  "physical block size set to %d\n", un->un_phy_blocksize);
  }
  
+ if (strcasecmp(name, "slow-io-threshold") == 0) {
+ if (ddi_strtol(value, &endptr, 0, &val) == 0) {
+     un->un_slow_io_threshold = (hrtime_t)val * NANOSEC;
+ } else {
+     un->un_slow_io_threshold =
+         (hrtime_t)sd_slow_io_threshold;
+     goto value_invalid;
+ }
+ SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
+     "slow IO threshold set to %llu\n",
+     un->un_slow_io_threshold);
+ }
+ 
+ if (strcasecmp(name, "io-time") == 0) {
+ if (ddi_strtol(value, &endptr, 0, &val) == 0) {
+     un->un_io_time = val;
+ } else {
+     un->un_io_time = sd_io_time;
+     goto value_invalid;
+ }
+ SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
+     "IO time set to %llu\n", un->un_io_time);
+ }
+ 
  if (strcasecmp(name, "retries-victim") == 0) {
  if (ddi_strtol(value, &endptr, 0, &val) == 0) {
  un->un_victim_retry_count = val;
  } else {
  goto value_invalid;
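[Note on the new tunables] Both properties arrive through the same sd-config-list parsing as the existing ones, so they can be set per vendor/product from sd.conf. A hypothetical entry (the device string and values are illustrative, not from this change):

    sd-config-list = "ACME    SuperDisk", "slow-io-threshold:10, io-time:60";

Per the code above, that would arm a 10-second slow-I/O threshold (stored internally in nanoseconds) and a 60-second per-command I/O time for matching targets; a malformed value falls back to the global default and is reported through the value_invalid path.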
*** 4964,4974 ****
  *
  * These pages are also reserved in SBC-2 and later. We assume SBC-2
  * or later for a direct-attached block device if the SCSI version is
  * at least SPC-3.
  */
- 
  if (ISCD(un) ||
  un->un_interconnect_type == SD_INTERCONNECT_SATA ||
  (un->un_ctype == CTYPE_CCS && SD_INQUIRY(un)->inq_ansi >= 5))
  return (ret);
  
--- 4581,4590 ----
*** 5306,5317 ****
--- 4922,5015 ----
  stp->sd_capacity.value.ui64 = capacity;
  }
  }
  }
  
+ /*
+ * Parses the SCSI Block Limits VPD page (0xB0). It's legal to pass NULL for
+ * vpd_pg, in which case all the block limits will be reset to the defaults.
+ */
+ static void
+ sd_parse_blk_limits_vpd(struct sd_lun *un, uchar_t *vpd_pg)
+ {
+     sd_blk_limits_t *lim = &un->un_blk_lim;
+     unsigned pg_len;
+ 
+     if (vpd_pg != NULL)
+         pg_len = BE_IN16(&vpd_pg[2]);
+     else
+         pg_len = 0;
+ 
+     /* Block Limits VPD can be 16 bytes or 64 bytes long - support both */
+     if (pg_len >= 0x10) {
+         lim->lim_opt_xfer_len_gran = BE_IN16(&vpd_pg[6]);
+         lim->lim_max_xfer_len = BE_IN32(&vpd_pg[8]);
+         lim->lim_opt_xfer_len = BE_IN32(&vpd_pg[12]);
+     } else {
+         lim->lim_opt_xfer_len_gran = 0;
+         lim->lim_max_xfer_len = UINT32_MAX;
+         lim->lim_opt_xfer_len = UINT32_MAX;
+     }
+     if (pg_len >= 0x3c) {
+         lim->lim_max_pfetch_len = BE_IN32(&vpd_pg[16]);
+         /*
+          * A zero in either of the following two fields indicates lack
+          * of UNMAP support.
+          */
+         lim->lim_max_unmap_lba_cnt = BE_IN32(&vpd_pg[20]);
+         lim->lim_max_unmap_descr_cnt = BE_IN32(&vpd_pg[24]);
+         lim->lim_opt_unmap_gran = BE_IN32(&vpd_pg[28]);
+         if ((vpd_pg[32] >> 7) == 1) {
+             /* left-most bit on each byte is a flag */
+             lim->lim_unmap_gran_align =
+                 ((vpd_pg[32] & 0x7f) << 24) | (vpd_pg[33] << 16) |
+                 (vpd_pg[34] << 8) | vpd_pg[35];
+         } else {
+             lim->lim_unmap_gran_align = 0;
+         }
+         lim->lim_max_write_same_len = BE_IN64(&vpd_pg[36]);
+     } else {
+         lim->lim_max_pfetch_len = UINT32_MAX;
+         lim->lim_max_unmap_lba_cnt = UINT32_MAX;
+         lim->lim_max_unmap_descr_cnt = SD_UNMAP_MAX_DESCR;
+         lim->lim_opt_unmap_gran = 0;
+         lim->lim_unmap_gran_align = 0;
+         lim->lim_max_write_same_len = UINT64_MAX;
+     }
+ }
+ 
+ /*
+ * Collects VPD page B0 data if available (block limits). If the data is
+ * not available or querying the device failed, we revert to the defaults.
+ */
+ static void
+ sd_setup_blk_limits(sd_ssc_t *ssc)
+ {
+     struct sd_lun *un = ssc->ssc_un;
+     uchar_t *inqB0 = NULL;
+     size_t inqB0_resid = 0;
+     int rval;
+ 
+     if (un->un_vpd_page_mask & SD_VPD_BLK_LIMITS_PG) {
+         inqB0 = kmem_zalloc(MAX_INQUIRY_SIZE, KM_SLEEP);
+         rval = sd_send_scsi_INQUIRY(ssc, inqB0, MAX_INQUIRY_SIZE, 0x01,
+             0xB0, &inqB0_resid);
+         if (rval != 0) {
+             sd_ssc_assessment(ssc, SD_FMT_IGNORE);
+             kmem_free(inqB0, MAX_INQUIRY_SIZE);
+             inqB0 = NULL;
+         }
+     }
+     /* passing NULL inqB0 will reset to defaults */
+     sd_parse_blk_limits_vpd(ssc->ssc_un, inqB0);
+     if (inqB0)
+         kmem_free(inqB0, MAX_INQUIRY_SIZE);
+ }
+ 
+ #define DEVID_IF_KNOWN(d) "devid", DATA_TYPE_STRING, (d) ? (d) : "unknown"
+ 
  /*
  * Function: sd_register_devid
  *
  * Description: This routine will obtain the device id information from the
  * target, obtain the serial number, and register the device
  * id with the ddi framework.
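[Note on sd_parse_blk_limits_vpd()] Bytes 32-35 of the page carry UNMAP GRANULARITY ALIGNMENT, whose most significant bit (UGAVALID in SBC-3) only flags whether the field is valid, so the code strips it from the top byte before assembling the value. The open-coded shifts are equivalent to this one-line form, shown only for illustration:

    lim->lim_unmap_gran_align = BE_IN32(&vpd_pg[32]) & 0x7fffffff;

The defaults used when the page is short or absent (UINT32_MAX transfer caps, SD_UNMAP_MAX_DESCR descriptors) deliberately mean "no device-imposed limit", so callers can clamp against them unconditionally.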
*** 5438,5461 **** * FAB_DEVID property set in the disk_table. These drives * manage the devid's by storing them in last 2 available sectors * on the drive and have them fabricated by the ddi layer by calling * ddi_devid_init and passing the DEVID_FAB flag. */ ! if (un->un_f_opt_fab_devid == TRUE) { /* - * Depending on EINVAL isn't reliable, since a reserved disk - * may result in invalid geometry, so check to make sure a - * reservation conflict did not occur during attach. - */ - if ((sd_get_devid(ssc) == EINVAL) && - (reservation_flag != SD_TARGET_IS_RESERVED)) { - /* * The devid is invalid AND there is no reservation * conflict. Fabricate a new devid. */ (void) sd_create_devid(ssc); - } /* Register the devid if it exists */ if (un->un_devid != NULL) { (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid); --- 5136,5153 ---- * FAB_DEVID property set in the disk_table. These drives * manage the devid's by storing them in last 2 available sectors * on the drive and have them fabricated by the ddi layer by calling * ddi_devid_init and passing the DEVID_FAB flag. */ ! if (un->un_f_opt_fab_devid == TRUE && ! reservation_flag != SD_TARGET_IS_RESERVED) { ! if (sd_get_devid(ssc) == EINVAL) /* * The devid is invalid AND there is no reservation * conflict. Fabricate a new devid. */ (void) sd_create_devid(ssc); /* Register the devid if it exists */ if (un->un_devid != NULL) { (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);
*** 5473,5483 **** inq83_resid, &un->un_devid) == DDI_SUCCESS) { /* devid successfully encoded, register devid */ (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid); ! } else { /* * Unable to encode a devid based on data available. * This is not a Sun qualified disk. Older Sun disk * drives that have the SD_FAB_DEVID property * set in the disk_table and non Sun qualified --- 5165,5175 ---- inq83_resid, &un->un_devid) == DDI_SUCCESS) { /* devid successfully encoded, register devid */ (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid); ! } else if (reservation_flag != SD_TARGET_IS_RESERVED) { /* * Unable to encode a devid based on data available. * This is not a Sun qualified disk. Older Sun disk * drives that have the SD_FAB_DEVID property * set in the disk_table and non Sun qualified
*** 5835,5844 **** --- 5527,5539 ---- un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; break; case 0x86: un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG; break; + case 0xB0: + un->un_vpd_page_mask |= SD_VPD_BLK_LIMITS_PG; + break; case 0xB1: un->un_vpd_page_mask |= SD_VPD_DEV_CHARACTER_PG; break; } counter++;
*** 5863,5873 **** * * Description: Initialize Power Management on the device * * Context: Kernel Thread */ ! static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi) { uint_t log_page_size; uchar_t *log_page_data; --- 5558,5568 ---- * * Description: Initialize Power Management on the device * * Context: Kernel Thread */ ! #ifdef notyet static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi) { uint_t log_page_size; uchar_t *log_page_data;
*** 5918,5927 **** --- 5613,5623 ---- SD_PATH_DIRECT); if (rval != 0) { un->un_f_power_condition_supported = FALSE; } } + /* Note: this can fail for optical drives with no media present */ if (!un->un_f_power_condition_supported) { rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, SD_TARGET_START, SD_PATH_DIRECT); } if (rval != 0) {
*** 6119,6130 **** un->un_pm_count = -1; } mutex_exit(&un->un_pm_mutex); mutex_exit(SD_MUTEX(un)); } - /* * Function: sd_ddi_suspend * * Description: Performs system power-down operations. This includes * setting the drive state to indicate its suspended so --- 5815,5826 ---- un->un_pm_count = -1; } mutex_exit(&un->un_pm_mutex); mutex_exit(SD_MUTEX(un)); } + #endif /* * Function: sd_ddi_suspend * * Description: Performs system power-down operations. This includes * setting the drive state to indicate its suspended so
*** 6293,6323 **** mutex_exit(SD_MUTEX(un)); (void) untimeout(temp_id); mutex_enter(SD_MUTEX(un)); } - if (un->un_f_is_fibre == TRUE) { - /* - * Remove callbacks for insert and remove events - */ - if (un->un_insert_event != NULL) { mutex_exit(SD_MUTEX(un)); - (void) ddi_remove_event_handler(un->un_insert_cb_id); - mutex_enter(SD_MUTEX(un)); - un->un_insert_event = NULL; - } - if (un->un_remove_event != NULL) { - mutex_exit(SD_MUTEX(un)); - (void) ddi_remove_event_handler(un->un_remove_cb_id); - mutex_enter(SD_MUTEX(un)); - un->un_remove_event = NULL; - } - } - - mutex_exit(SD_MUTEX(un)); - SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); return (DDI_SUCCESS); } --- 5989,6000 ----
*** 6393,6414 **** /* restart thread */ if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { scsi_watch_resume(un->un_swr_token); } - #if (defined(__fibre)) - if (un->un_f_is_fibre == TRUE) { /* - * Add callbacks for insert and remove events - */ - if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { - sd_init_event_callbacks(un); - } - } - #endif - - /* * Transport any pending commands to the target. * * If this is a low-activity device commands in queue will have to wait * until new commands come in, which may take awhile. Also, we * specifically don't check un_ncmds_in_transport because we know that --- 6070,6080 ----
*** 7090,7208 **** */ static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) { - switch (cmd) { - case DDI_ATTACH: - return (sd_unit_attach(devi)); - case DDI_RESUME: - return (sd_ddi_resume(devi)); - default: - break; - } - return (DDI_FAILURE); - } - - - /* - * Function: sddetach - * - * Description: Driver's detach(9E) entry point function. - * - * Arguments: devi - opaque device info handle - * cmd - detach type - * - * Return Code: DDI_SUCCESS - * DDI_FAILURE - * - * Context: Kernel thread context - */ - - static int - sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) - { - switch (cmd) { - case DDI_DETACH: - return (sd_unit_detach(devi)); - case DDI_SUSPEND: - return (sd_ddi_suspend(devi)); - default: - break; - } - return (DDI_FAILURE); - } - - - /* - * Function: sd_sync_with_callback - * - * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft - * state while the callback routine is active. - * - * Arguments: un: softstate structure for the instance - * - * Context: Kernel thread context - */ - - static void - sd_sync_with_callback(struct sd_lun *un) - { - ASSERT(un != NULL); - - mutex_enter(SD_MUTEX(un)); - - ASSERT(un->un_in_callback >= 0); - - while (un->un_in_callback > 0) { - mutex_exit(SD_MUTEX(un)); - delay(2); - mutex_enter(SD_MUTEX(un)); - } - - mutex_exit(SD_MUTEX(un)); - } - - /* - * Function: sd_unit_attach - * - * Description: Performs DDI_ATTACH processing for sdattach(). Allocates - * the soft state structure for the device and performs - * all necessary structure and device initializations. - * - * Arguments: devi: the system's dev_info_t for the device. - * - * Return Code: DDI_SUCCESS if attach is successful. - * DDI_FAILURE if any part of the attach fails. - * - * Context: Called at attach(9e) time for the DDI_ATTACH flag. - * Kernel thread context only. Can sleep. - */ - - static int - sd_unit_attach(dev_info_t *devi) - { struct scsi_device *devp; struct sd_lun *un; char *variantp; - char name_str[48]; - int reservation_flag = SD_TARGET_IS_UNRESERVED; int instance; - int rval; - int wc_enabled; - int wc_changeable; int tgt; - uint64_t capacity; - uint_t lbasize = 0; dev_info_t *pdip = ddi_get_parent(devi); ! int offbyone = 0; ! int geom_label_valid = 0; sd_ssc_t *ssc; - int status; struct sd_fm_internal *sfip = NULL; - int max_xfer_size; /* * Retrieve the target driver's private data area. This was set * up by the HBA. */ devp = ddi_get_driver_private(devi); --- 6756,6784 ---- */ static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) { struct scsi_device *devp; struct sd_lun *un; char *variantp; int instance; int tgt; dev_info_t *pdip = ddi_get_parent(devi); ! int max_xfer_size; sd_ssc_t *ssc; struct sd_fm_internal *sfip = NULL; + switch (cmd) { + case DDI_ATTACH: + break; + case DDI_RESUME: + return (sd_ddi_resume(devi)); + default: + return (DDI_FAILURE); + } + /* * Retrieve the target driver's private data area. This was set * up by the HBA. */ devp = ddi_get_driver_private(devi);
*** 7293,7303 **** * was successful, unless something has gone horribly wrong and the * ddi's soft state internals are corrupt (in which case it is * probably better to halt here than just fail the attach....) */ if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { ! panic("sd_unit_attach: NULL soft state on instance:0x%x", instance); /*NOTREACHED*/ } /* --- 6869,6879 ---- * was successful, unless something has gone horribly wrong and the * ddi's soft state internals are corrupt (in which case it is * probably better to halt here than just fail the attach....) */ if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { ! panic("sdattach: NULL soft state on instance:0x%x", instance); /*NOTREACHED*/ } /*
*** 7339,7434 **** un->un_node_type = DDI_NT_BLOCK_CHAN; un->un_ctype = CTYPE_CCS; break; } ! /* ! * Try to read the interconnect type from the HBA. ! * ! * Note: This driver is currently compiled as two binaries, a parallel ! * scsi version (sd) and a fibre channel version (ssd). All functional ! * differences are determined at compile time. In the future a single ! * binary will be provided and the interconnect type will be used to ! * differentiate between fibre and parallel scsi behaviors. At that time ! * it will be necessary for all fibre channel HBAs to support this ! * property. ! * ! * set un_f_is_fiber to TRUE ( default fiber ) ! */ ! un->un_f_is_fibre = TRUE; switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { case INTERCONNECT_SSA: un->un_interconnect_type = SD_INTERCONNECT_SSA; SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un); break; case INTERCONNECT_PARALLEL: - un->un_f_is_fibre = FALSE; un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); break; case INTERCONNECT_SAS: - un->un_f_is_fibre = FALSE; un->un_interconnect_type = SD_INTERCONNECT_SAS; un->un_node_type = DDI_NT_BLOCK_SAS; SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sd_unit_attach: un:0x%p SD_INTERCONNECT_SAS\n", un); break; case INTERCONNECT_SATA: - un->un_f_is_fibre = FALSE; un->un_interconnect_type = SD_INTERCONNECT_SATA; SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un); break; case INTERCONNECT_FIBRE: un->un_interconnect_type = SD_INTERCONNECT_FIBRE; SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); break; case INTERCONNECT_FABRIC: un->un_interconnect_type = SD_INTERCONNECT_FABRIC; un->un_node_type = DDI_NT_BLOCK_FABRIC; SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); break; default: - #ifdef SD_DEFAULT_INTERCONNECT_TYPE /* ! * The HBA does not support the "interconnect-type" property ! * (or did not provide a recognized type). ! * ! * Note: This will be obsoleted when a single fibre channel ! * and parallel scsi driver is delivered. In the meantime the ! * interconnect type will be set to the platform default.If that ! * type is not parallel SCSI, it means that we should be ! * assuming "ssd" semantics. However, here this also means that ! * the FC HBA is not supporting the "interconnect-type" property ! * like we expect it to, so log this occurrence. */ - un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE; - if (!SD_IS_PARALLEL_SCSI(un)) { - SD_INFO(SD_LOG_ATTACH_DETACH, un, - "sd_unit_attach: un:0x%p Assuming " - "INTERCONNECT_FIBRE\n", un); - } else { - SD_INFO(SD_LOG_ATTACH_DETACH, un, - "sd_unit_attach: un:0x%p Assuming " - "INTERCONNECT_PARALLEL\n", un); - un->un_f_is_fibre = FALSE; - } - #else - /* - * Note: This source will be implemented when a single fibre - * channel and parallel scsi driver is delivered. The default - * will be to assume that if a device does not support the - * "interconnect-type" property it is a parallel SCSI HBA and - * we will set the interconnect type for parallel scsi. - */ un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; - un->un_f_is_fibre = FALSE; - #endif break; } if (un->un_f_is_fibre == TRUE) { if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == --- 6915,6969 ---- un->un_node_type = DDI_NT_BLOCK_CHAN; un->un_ctype = CTYPE_CCS; break; } ! /* Try to read the interconnect type from the HBA */ ! 
un->un_f_is_fibre = FALSE; switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { case INTERCONNECT_SSA: + un->un_f_is_fibre = TRUE; un->un_interconnect_type = SD_INTERCONNECT_SSA; SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sdattach: un:0x%p SD_INTERCONNECT_SSA\n", un); break; case INTERCONNECT_PARALLEL: un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sdattach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); break; case INTERCONNECT_SAS: un->un_interconnect_type = SD_INTERCONNECT_SAS; un->un_node_type = DDI_NT_BLOCK_SAS; SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sdattach: un:0x%p SD_INTERCONNECT_SAS\n", un); break; case INTERCONNECT_SATA: un->un_interconnect_type = SD_INTERCONNECT_SATA; SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sdattach: un:0x%p SD_INTERCONNECT_SATA\n", un); break; case INTERCONNECT_FIBRE: + un->un_f_is_fibre = TRUE; un->un_interconnect_type = SD_INTERCONNECT_FIBRE; SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sdattach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); break; case INTERCONNECT_FABRIC: + un->un_f_is_fibre = TRUE; un->un_interconnect_type = SD_INTERCONNECT_FABRIC; un->un_node_type = DDI_NT_BLOCK_FABRIC; SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sdattach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); break; default: /* ! * The default is to assume that if a device does not support ! * the "interconnect-type" property it is a parallel SCSI HBA ! * and set the interconnect type for parallel SCSI. */ un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; break; } if (un->un_f_is_fibre == TRUE) { if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
*** 7442,7474 **** break; } } } /* * Initialize the Request Sense command for the target */ if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { goto alloc_rqs_failed; } ! /* ! * Set un_retry_count with SD_RETRY_COUNT, this is ok for Sparc ! * with separate binary for sd and ssd. ! * ! * x86 has 1 binary, un_retry_count is set base on connection type. ! * The hardcoded values will go away when Sparc uses 1 binary ! * for sd and ssd. This hardcoded values need to match ! * SD_RETRY_COUNT in sddef.h ! * The value used is base on interconnect type. ! * fibre = 3, parallel = 5 ! */ ! #if defined(__i386) || defined(__amd64) un->un_retry_count = un->un_f_is_fibre ? 3 : 5; - #else - un->un_retry_count = SD_RETRY_COUNT; - #endif /* * Set the per disk retry count to the default number of retries * for disks and CDROMs. This value can be overridden by the * disk property list or an entry in sd.conf. --- 6977,6998 ---- break; } } } + (void) ddi_prop_update_int(DDI_DEV_T_NONE, devi, + "allow-unconstrained-retire", 1); + /* * Initialize the Request Sense command for the target */ if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { goto alloc_rqs_failed; } ! /* The value used is based on the interconnect type */ un->un_retry_count = un->un_f_is_fibre ? 3 : 5; /* * Set the per disk retry count to the default number of retries * for disks and CDROMs. This value can be overridden by the * disk property list or an entry in sd.conf.
*** 7491,7522 **** * in sd.conf or the device config table. */ un->un_reset_retry_count = (un->un_retry_count / 2); /* ! * Set the victim_retry_count to the default un_retry_count */ ! un->un_victim_retry_count = (2 * un->un_retry_count); /* * Set the reservation release timeout to the default value of ! * 5 seconds. This can be overridden by entries in ssd.conf or the * device config table. */ un->un_reserve_release_time = 5; /* * Set up the default maximum transfer size. Note that this may * get updated later in the attach, when setting up default wide * operations for disks. */ - #if defined(__i386) || defined(__amd64) un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; un->un_partial_dma_supported = 1; - #else - un->un_max_xfer_size = (uint_t)maxphys; - #endif /* * Get "allow bus device reset" property (defaults to "enabled" if * the property was not defined). This is to disable bus resets for * certain kinds of error recovery. Note: In the future when a run-time --- 7015,7051 ---- * in sd.conf or the device config table. */ un->un_reset_retry_count = (un->un_retry_count / 2); /* ! * Set the victim_retry_count to the default un_retry_count. ! * This value is used in addition to the standard retry count. ! * This can be overridden by entries in sd.conf or the device ! * config table. */ ! un->un_victim_retry_count = un->un_retry_count; /* * Set the reservation release timeout to the default value of ! * 5 seconds. This can be overridden by entries in sd.conf or the * device config table. */ un->un_reserve_release_time = 5; + un->un_io_time = sd_io_time; + + un->un_slow_io_threshold = sd_slow_io_threshold; + + un->un_f_lun_reset_enabled = sd_enable_lun_reset; + /* * Set up the default maximum transfer size. Note that this may * get updated later in the attach, when setting up default wide * operations for disks. */ un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; un->un_partial_dma_supported = 1; /* * Get "allow bus device reset" property (defaults to "enabled" if * the property was not defined). This is to disable bus resets for * certain kinds of error recovery. Note: In the future when a run-time
*** 7528,7543 **** } else { if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, "allow-bus-device-reset", 1) != 0) { un->un_f_allow_bus_device_reset = TRUE; SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sd_unit_attach: un:0x%p Bus device reset " "enabled\n", un); } else { un->un_f_allow_bus_device_reset = FALSE; SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sd_unit_attach: un:0x%p Bus device reset " "disabled\n", un); } } /* --- 7057,7072 ---- } else { if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, "allow-bus-device-reset", 1) != 0) { un->un_f_allow_bus_device_reset = TRUE; SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sdattach: un:0x%p Bus device reset " "enabled\n", un); } else { un->un_f_allow_bus_device_reset = FALSE; SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sdattach: un:0x%p Bus device reset " "disabled\n", un); } } /*
*** 7552,7579 **** * new property. */ if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { un->un_f_cfg_is_atapi = TRUE; SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sd_unit_attach: un:0x%p Atapi device\n", un); } if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", &variantp) == DDI_PROP_SUCCESS) { if (strcmp(variantp, "atapi") == 0) { un->un_f_cfg_is_atapi = TRUE; SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sd_unit_attach: un:0x%p Atapi device\n", un); } ddi_prop_free(variantp); } ! un->un_cmd_timeout = SD_IO_TIME; ! un->un_busy_timeout = SD_BSY_TIMEOUT; ! /* Info on current states, statuses, etc. (Updated frequently) */ ! un->un_state = SD_STATE_NORMAL; un->un_last_state = SD_STATE_NORMAL; /* Control & status info for command throttling */ un->un_throttle = sd_max_throttle; un->un_saved_throttle = sd_max_throttle; --- 7081,7114 ---- * new property. */ if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { un->un_f_cfg_is_atapi = TRUE; SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sdattach: un:0x%p Atapi device\n", un); } if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", &variantp) == DDI_PROP_SUCCESS) { if (strcmp(variantp, "atapi") == 0) { un->un_f_cfg_is_atapi = TRUE; SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sdattach: un:0x%p Atapi device\n", un); } ddi_prop_free(variantp); } ! un->un_cmd_timeout = ((ISCD(un)) ? 2 : 1) * (ushort_t)un->un_io_time; ! un->un_uscsi_timeout = un->un_cmd_timeout; un->un_busy_timeout = SD_BSY_TIMEOUT; ! /* ! * Info on current states, statuses, etc. (Updated frequently) ! * ! * Current state is ATTACHING until sd_unit_attach finishes. ! * Last state is NORMAL so that sd_unit_attach can Restore_state() ! * when it finishes successfully. ! */ ! un->un_state = SD_STATE_ATTACHING; un->un_last_state = SD_STATE_NORMAL; /* Control & status info for command throttling */ un->un_throttle = sd_max_throttle; un->un_saved_throttle = sd_max_throttle;
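Review note: the timeout derivation changed here from the fixed SD_IO_TIME to the per-unit un_io_time (overridable through the new "io-time" property), doubled for CD/DVD media. As a worked example with an assumed un_io_time of 60 seconds (illustrative only; the real default comes from sd_io_time): un_cmd_timeout = 2 * 60 = 120 seconds for a CD device, 1 * 60 = 60 seconds otherwise, and un_uscsi_timeout inherits the same value.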
*** 7583,7592 **** --- 7118,7131 ---- un->un_f_use_adaptive_throttle = TRUE; } else { un->un_f_use_adaptive_throttle = FALSE; } + /* Unit detach has to pause until outstanding commands abort */ + un->un_f_detach_waiting = 0; + cv_init(&un->un_detach_cv, NULL, CV_DRIVER, NULL); + /* Removable media support. */ cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); un->un_mediastate = DKIO_NONE; un->un_specified_mediastate = DKIO_NONE;
*** 7625,7634 **** --- 7164,7174 ---- * physical sector size defaults to DEV_BSIZE currently. We can * override this value via the driver configuration file so we must * set it before calling sd_read_unit_properties(). */ un->un_phy_blocksize = DEV_BSIZE; + un->un_f_sdconf_phy_blocksize = FALSE; /* * Retrieve the properties from the static driver table or the driver * configuration file (.conf) for this unit and update the soft state * for the device as needed for the indicated properties.
*** 7636,7646 **** * following routines may have dependencies on soft state flags set * as part of the driver property configuration. */ sd_read_unit_properties(un); SD_TRACE(SD_LOG_ATTACH_DETACH, un, ! "sd_unit_attach: un:0x%p property configuration complete.\n", un); /* * Only if a device has "hotpluggable" property, it is * treated as hotpluggable device. Otherwise, it is * regarded as non-hotpluggable one. --- 7176,7186 ---- * following routines may have dependencies on soft state flags set * as part of the driver property configuration. */ sd_read_unit_properties(un); SD_TRACE(SD_LOG_ATTACH_DETACH, un, ! "sdattach: un:0x%p property configuration complete.\n", un); /* * Only if a device has "hotpluggable" property, it is * treated as hotpluggable device. Otherwise, it is * regarded as non-hotpluggable one.
*** 7722,7739 **** if (un->un_stats != NULL) { un->un_stats->ks_lock = SD_MUTEX(un); kstat_install(un->un_stats); } SD_TRACE(SD_LOG_ATTACH_DETACH, un, ! "sd_unit_attach: un:0x%p un_stats created\n", un); sd_create_errstats(un, instance); if (un->un_errstats == NULL) { goto create_errstats_failed; } SD_TRACE(SD_LOG_ATTACH_DETACH, un, ! "sd_unit_attach: un:0x%p errstats created\n", un); /* * The following if/else code was relocated here from below as part * of the fix for bug (4430280). However with the default setup added * on entry to this routine, it's no longer absolutely necessary for --- 7262,7312 ---- if (un->un_stats != NULL) { un->un_stats->ks_lock = SD_MUTEX(un); kstat_install(un->un_stats); } SD_TRACE(SD_LOG_ATTACH_DETACH, un, ! "sdattach: un:0x%p un_stats created\n", un); + un->un_unmapstats_ks = kstat_create(sd_label, instance, "unmapstats", + "misc", KSTAT_TYPE_NAMED, sizeof (*un->un_unmapstats) / + sizeof (kstat_named_t), 0); + if (un->un_unmapstats_ks) { + un->un_unmapstats = un->un_unmapstats_ks->ks_data; + + kstat_named_init(&un->un_unmapstats->us_cmds, + "commands", KSTAT_DATA_UINT64); + kstat_named_init(&un->un_unmapstats->us_errs, + "errors", KSTAT_DATA_UINT64); + kstat_named_init(&un->un_unmapstats->us_extents, + "extents", KSTAT_DATA_UINT64); + kstat_named_init(&un->un_unmapstats->us_bytes, + "bytes", KSTAT_DATA_UINT64); + + kstat_install(un->un_unmapstats_ks); + } else { + cmn_err(CE_NOTE, "!Cannot create unmap kstats for disk %d", + instance); + } + + un->un_lat_ksp = kstat_create(sd_label, instance, "io_latency", + "io_latency", KSTAT_TYPE_RAW, sizeof (un_lat_stat_t), + KSTAT_FLAG_PERSISTENT); + + if (un->un_lat_ksp != NULL) { + un->un_lat_ksp->ks_lock = SD_MUTEX(un); + un->un_lat_stats = (un_lat_stat_t *)un->un_lat_ksp->ks_data; + kstat_install(un->un_lat_ksp); + } else { + un->un_lat_stats = NULL; + } + sd_create_errstats(un, instance); if (un->un_errstats == NULL) { goto create_errstats_failed; } SD_TRACE(SD_LOG_ATTACH_DETACH, un, ! "sdattach: un:0x%p errstats created\n", un); /* * The following if/else code was relocated here from below as part * of the fix for bug (4430280). However with the default setup added * on entry to this routine, it's no longer absolutely necessary for
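Review note: "unmapstats" is a KSTAT_TYPE_NAMED kstat exposing the four counters initialized above (commands, errors, extents, bytes); "io_latency" is a KSTAT_TYPE_RAW kstat backed by un_lat_stat_t. Assuming the kstat module name resolves to "sd" (sd_label) and using a placeholder instance of 0, they could be inspected from userland with kstat(1M) roughly as:

	# kstat -m sd -i 0 -n unmapstats
	# kstat -m sd -i 0 -n io_latency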
*** 7753,7763 **** (un->un_f_arq_enabled == TRUE)) { if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 1, 1) == 1) { un->un_tagflags = FLAG_STAG; SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sd_unit_attach: un:0x%p tag queueing " "enabled\n", un); } else if (scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) == 1) { un->un_f_opt_queueing = TRUE; un->un_saved_throttle = un->un_throttle = --- 7326,7336 ---- (un->un_f_arq_enabled == TRUE)) { if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 1, 1) == 1) { un->un_tagflags = FLAG_STAG; SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sdattach: un:0x%p tag queueing " "enabled\n", un); } else if (scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) == 1) { un->un_f_opt_queueing = TRUE; un->un_saved_throttle = un->un_throttle =
*** 7774,7815 **** min(un->un_throttle, 3); } else { un->un_f_opt_queueing = FALSE; un->un_saved_throttle = un->un_throttle = 1; SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sd_unit_attach: un:0x%p no tag queueing\n", un); } /* * Enable large transfers for SATA/SAS drives */ if (SD_IS_SERIAL(un)) { un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, devi, 0, ! sd_max_xfer_size, SD_MAX_XFER_SIZE); SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sd_unit_attach: un:0x%p max transfer " "size=0x%x\n", un, un->un_max_xfer_size); } /* Setup or tear down default wide operations for disks */ - - /* - * Note: Legacy: it may be possible for both "sd_max_xfer_size" - * and "ssd_max_xfer_size" to exist simultaneously on the same - * system and be set to different values. In the future this - * code may need to be updated when the ssd module is - * obsoleted and removed from the system. (4299588) - */ if (SD_IS_PARALLEL_SCSI(un) && (devp->sd_inq->inq_rdf == RDF_SCSI2) && (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 1, 1) == 1) { SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sd_unit_attach: un:0x%p Wide Transfer " "enabled\n", un); } /* * If tagged queuing has also been enabled, then --- 7347,7380 ---- min(un->un_throttle, 3); } else { un->un_f_opt_queueing = FALSE; un->un_saved_throttle = un->un_throttle = 1; SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sdattach: un:0x%p no tag queueing\n", un); } /* * Enable large transfers for SATA/SAS drives */ if (SD_IS_SERIAL(un)) { un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, devi, 0, ! "sd_max_xfer_size", SD_MAX_XFER_SIZE); SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sdattach: un:0x%p max transfer " "size=0x%x\n", un, un->un_max_xfer_size); } /* Setup or tear down default wide operations for disks */ if (SD_IS_PARALLEL_SCSI(un) && (devp->sd_inq->inq_rdf == RDF_SCSI2) && (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 1, 1) == 1) { SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sdattach: un:0x%p Wide Transfer " "enabled\n", un); } /* * If tagged queuing has also been enabled, then
*** 7816,7878 **** * enable large xfers */ if (un->un_saved_throttle == sd_max_throttle) { un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, devi, 0, ! sd_max_xfer_size, SD_MAX_XFER_SIZE); SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sd_unit_attach: un:0x%p max transfer " "size=0x%x\n", un, un->un_max_xfer_size); } } else { if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1) == 1) { SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sd_unit_attach: un:0x%p " "Wide Transfer disabled\n", un); } } } else { un->un_tagflags = FLAG_STAG; un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, ! devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); } /* * If this target supports LUN reset, try to enable it. */ if (un->un_f_lun_reset_enabled) { if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { ! SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " "un:0x%p lun_reset capability set\n", un); } else { ! SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " "un:0x%p lun-reset capability not set\n", un); } } /* ! * Adjust the maximum transfer size. This is to fix * the problem of partial DMA support on SPARC. Some * HBA driver, like aac, has very small dma_attr_maxxfer * size, which requires partial DMA support on SPARC. - * In the future the SPARC pci nexus driver may solve - * the problem instead of this fix. */ max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1); if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) { - /* We need DMA partial even on sparc to ensure sddump() works */ un->un_max_xfer_size = max_xfer_size; if (un->un_partial_dma_supported == 0) un->un_partial_dma_supported = 1; } if (ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), DDI_PROP_DONTPASS, "buf_break", 0) == 1) { if (ddi_xbuf_attr_setup_brk(un->un_xbuf_attr, un->un_max_xfer_size) == 1) { un->un_buf_breakup_supported = 1; ! SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " "un:0x%p Buf breakup enabled\n", un); } } /* --- 7381,7440 ---- * enable large xfers */ if (un->un_saved_throttle == sd_max_throttle) { un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, devi, 0, ! "sd_max_xfer_size", SD_MAX_XFER_SIZE); SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sdattach: un:0x%p max transfer " "size=0x%x\n", un, un->un_max_xfer_size); } } else { if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1) == 1) { SD_INFO(SD_LOG_ATTACH_DETACH, un, ! "sdattach: un:0x%p " "Wide Transfer disabled\n", un); } } } else { un->un_tagflags = FLAG_STAG; un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, ! devi, 0, "sd_max_xfer_size", SD_MAX_XFER_SIZE); } /* * If this target supports LUN reset, try to enable it. */ if (un->un_f_lun_reset_enabled) { if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { ! SD_INFO(SD_LOG_ATTACH_DETACH, un, "sdattach: " "un:0x%p lun_reset capability set\n", un); } else { ! SD_INFO(SD_LOG_ATTACH_DETACH, un, "sdattach: " "un:0x%p lun-reset capability not set\n", un); } } /* ! * XXX Adjust the maximum transfer size. This was to fix * the problem of partial DMA support on SPARC. Some * HBA driver, like aac, has very small dma_attr_maxxfer * size, which requires partial DMA support on SPARC. */ max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1); if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) { un->un_max_xfer_size = max_xfer_size; if (un->un_partial_dma_supported == 0) un->un_partial_dma_supported = 1; } if (ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), DDI_PROP_DONTPASS, "buf_break", 0) == 1) { if (ddi_xbuf_attr_setup_brk(un->un_xbuf_attr, un->un_max_xfer_size) == 1) { un->un_buf_breakup_supported = 1; ! 
SD_INFO(SD_LOG_ATTACH_DETACH, un, "sdattach: " "un:0x%p Buf breakup enabled\n", un); } } /*
*** 7882,7897 **** un->un_pkt_flags = PKT_DMA_PARTIAL; } else { un->un_pkt_flags = 0; } - /* Initialize sd_ssc_t for internal uscsi commands */ - ssc = sd_ssc_init(un); scsi_fm_init(devp); /* ! * Allocate memory for SCSI FMA stuffs. */ un->un_fm_private = kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP); sfip = (struct sd_fm_internal *)un->un_fm_private; sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd; --- 7444,7457 ---- un->un_pkt_flags = PKT_DMA_PARTIAL; } else { un->un_pkt_flags = 0; } scsi_fm_init(devp); /* ! * Allocate memory for SCSI FMA stuff. */ un->un_fm_private = kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP); sfip = (struct sd_fm_internal *)un->un_fm_private; sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd;
*** 7922,7940 **** --- 7482,7768 ---- sfip->fm_log_level = SD_FM_LOG_EREPORT; else sfip->fm_log_level = SD_FM_LOG_SILENT; } + /* Initialize sd_ssc_t for internal uscsi commands */ + ssc = sd_ssc_init(un); + + mutex_enter(SD_MUTEX(un)); /* + * Initialize the devid for the unit. Indicate target reservation so + * that no real I/O is done for devices that need devid fabrication. + * We will try again in sd_unit_attach() if necessary. + */ + if (un->un_f_devid_supported) { + sd_register_devid(ssc, devi, SD_TARGET_IS_RESERVED); + } + mutex_exit(SD_MUTEX(un)); + + /* Uninitialize sd_ssc_t pointer */ + sd_ssc_fini(ssc); + + cmlb_alloc_handle(&un->un_cmlbhandle); + + if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, + VOID2BOOLEAN(un->un_f_has_removable_media != 0), + VOID2BOOLEAN(un->un_f_is_hotpluggable != 0), + un->un_node_type, 0, un->un_cmlbhandle, + (void *)SD_PATH_DIRECT) != 0) { + goto cmlb_attach_failed; + } + + /* * At this point in the attach, we have enough info in the * soft state to be able to issue commands to the target. * + * Schedule a taskq to finish attach to avoid holding the + * device tree lock for too long. If this fails, rollback + * and fail the attach. + */ + + if (taskq_dispatch(sd_tq, sd_unit_attach, devi, KM_PUSHPAGE) != NULL) + return (DDI_SUCCESS); + + cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); + cmlb_free_handle(&un->un_cmlbhandle); + + cmlb_attach_failed: + mutex_enter(SD_MUTEX(un)); + + /* Deallocate SCSI FMA memory spaces */ + kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); + + /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. restart */ + if (un->un_direct_priority_timeid != NULL) { + timeout_id_t temp_id = un->un_direct_priority_timeid; + un->un_direct_priority_timeid = NULL; + mutex_exit(SD_MUTEX(un)); + (void) untimeout(temp_id); + mutex_enter(SD_MUTEX(un)); + } + + /* Cancel any pending start/stop timeouts */ + if (un->un_startstop_timeid != NULL) { + timeout_id_t temp_id = un->un_startstop_timeid; + un->un_startstop_timeid = NULL; + mutex_exit(SD_MUTEX(un)); + (void) untimeout(temp_id); + mutex_enter(SD_MUTEX(un)); + } + + /* Cancel any pending reset-throttle timeouts */ + if (un->un_reset_throttle_timeid != NULL) { + timeout_id_t temp_id = un->un_reset_throttle_timeid; + un->un_reset_throttle_timeid = NULL; + mutex_exit(SD_MUTEX(un)); + (void) untimeout(temp_id); + mutex_enter(SD_MUTEX(un)); + } + + /* Cancel rmw warning message timeouts */ + if (un->un_rmw_msg_timeid != NULL) { + timeout_id_t temp_id = un->un_rmw_msg_timeid; + un->un_rmw_msg_timeid = NULL; + mutex_exit(SD_MUTEX(un)); + (void) untimeout(temp_id); + mutex_enter(SD_MUTEX(un)); + } + + /* Cancel any pending retry timeouts */ + if (un->un_retry_timeid != NULL) { + timeout_id_t temp_id = un->un_retry_timeid; + un->un_retry_timeid = NULL; + mutex_exit(SD_MUTEX(un)); + (void) untimeout(temp_id); + mutex_enter(SD_MUTEX(un)); + } + + /* Cancel any pending delayed cv broadcast timeouts */ + if (un->un_dcvb_timeid != NULL) { + timeout_id_t temp_id = un->un_dcvb_timeid; + un->un_dcvb_timeid = NULL; + mutex_exit(SD_MUTEX(un)); + (void) untimeout(temp_id); + mutex_enter(SD_MUTEX(un)); + } + + mutex_exit(SD_MUTEX(un)); + + /* There should not be any in-progress I/O so ASSERT this check */ + ASSERT(un->un_ncmds_in_transport == 0); + ASSERT(un->un_ncmds_in_driver == 0); + + /* Do not free the softstate if the callback routine is active */ + sd_sync_with_callback(un); + + /* + * Partition stats apparently are not used with removables. 
These would + * not have been created during attach, so no need to clean them up... + */ + if (un->un_errstats != NULL) { + kstat_delete(un->un_errstats); + un->un_errstats = NULL; + } + + create_errstats_failed: + + if (un->un_stats != NULL) { + kstat_delete(un->un_stats); + un->un_stats = NULL; + } + + if (un->un_unmapstats != NULL) { + kstat_delete(un->un_unmapstats_ks); + un->un_unmapstats_ks = NULL; + un->un_unmapstats = NULL; + } + + if (un->un_lat_ksp != NULL) { + kstat_delete(un->un_lat_ksp); + un->un_lat_ksp = NULL; + un->un_lat_stats = NULL; + } + + ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); + ddi_xbuf_attr_destroy(un->un_xbuf_attr); + + ddi_prop_remove_all(devi); + sema_destroy(&un->un_semoclose); + cv_destroy(&un->un_state_cv); + cv_destroy(&un->un_detach_cv); + sd_free_rqs(un); + + alloc_rqs_failed: + + devp->sd_private = NULL; + bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ + + /* + * Note: the man pages are unclear as to whether or not doing a + * ddi_soft_state_free(sd_state, instance) is the right way to + * clean up after the ddi_soft_state_zalloc() if the subsequent + * ddi_get_soft_state() fails. The implication seems to be + * that the get_soft_state cannot fail if the zalloc succeeds. + */ + ddi_soft_state_free(sd_state, instance); + + probe_failed: + scsi_unprobe(devp); + + return (DDI_FAILURE); + } + + + /* + * Function: sddetach + * + * Description: Driver's detach(9E) entry point function. + * + * Arguments: devi - opaque device info handle + * cmd - detach type + * + * Return Code: DDI_SUCCESS + * DDI_FAILURE + * + * Context: Kernel thread context + */ + + static int + sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) + { + switch (cmd) { + case DDI_DETACH: + return (sd_unit_detach(devi)); + case DDI_SUSPEND: + return (sd_ddi_suspend(devi)); + default: + break; + } + return (DDI_FAILURE); + } + + + /* + * Function: sd_sync_with_callback + * + * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft + * state while the callback routine is active. + * + * Arguments: un: softstate structure for the instance + * + * Context: Kernel thread context + */ + + static void + sd_sync_with_callback(struct sd_lun *un) + { + ASSERT(un != NULL); + + mutex_enter(SD_MUTEX(un)); + + ASSERT(un->un_in_callback >= 0); + + while (un->un_in_callback > 0) { + mutex_exit(SD_MUTEX(un)); + delay(2); + mutex_enter(SD_MUTEX(un)); + } + + mutex_exit(SD_MUTEX(un)); + } + + /* + * Function: sd_unit_attach + * + * Description: Performs DDI_ATTACH processing for sdattach(). Allocates + * the soft state structure for the device and performs + * all necessary structure and device initializations. + * + * Arguments: devi: the system's dev_info_t for the device. + * + * Return Code: DDI_SUCCESS if attach is successful. + * DDI_FAILURE if any part of the attach fails. + * + * Context: Called at attach(9e) time for the DDI_ATTACH flag. + * Kernel thread context only. Can sleep. + */ + void + sd_unit_attach(void *arg) + { + dev_info_t *devi = arg; + struct scsi_device *devp = ddi_get_driver_private(devi); + struct sd_lun *un = (struct sd_lun *)devp->sd_private; + char name_str[48]; + int reservation_flag = SD_TARGET_IS_UNRESERVED; + int rval; + int wc_enabled; + int wc_changeable; + int tgt; + uint64_t capacity; + uint_t lbasize = 0; + dev_info_t *pdip = ddi_get_parent(devi); + int geom_label_valid = 0; + sd_ssc_t *ssc; + int status; + char *devid; + + /* + * Retrieve the target ID of the device. 
+ */ + tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, + SCSI_ADDR_PROP_TARGET, -1); + + /* * All command paths used below MUST issue their commands as * SD_PATH_DIRECT. This is important as intermediate layers * are not all initialized yet (such as PM). */ + /* Initialize sd_ssc_t for internal uscsi commands */ + ssc = sd_ssc_init(un); + /* * Send a TEST UNIT READY command to the device. This should clear * any outstanding UNIT ATTENTION that may be present. * * Note: Don't check for success, just track if there is a reservation,
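Review note: the rollback path added above repeats a timeout-cancellation idiom that is easy to misread: the timeout id is snapshotted and cleared while SD_MUTEX is held, then the mutex is dropped around untimeout(9F), since untimeout() may have to wait for a handler that is already firing, and the handlers here take SD_MUTEX themselves. A minimal sketch of the idiom, with un_some_timeid as a hypothetical field name:

	if (un->un_some_timeid != NULL) {
		timeout_id_t temp_id = un->un_some_timeid;

		un->un_some_timeid = NULL;	/* claim the pending timeout */
		mutex_exit(SD_MUTEX(un));	/* let a firing handler proceed */
		(void) untimeout(temp_id);	/* may block until handler exits */
		mutex_enter(SD_MUTEX(un));
	}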
*** 7994,8013 **** sd_label, CE_WARN, "disk has %llu blocks, which " "is too large for a 32-bit " "kernel", capacity); - #if defined(__i386) || defined(__amd64) /* * 1TB disk was treated as (1T - 512)B * in the past, so that it might have * valid VTOC and solaris partitions, * we have to allow it to continue to * work. */ ! if (capacity -1 > DK_MAX_BLOCKS) ! #endif goto spinup_failed; #endif } /* --- 7822,7839 ---- sd_label, CE_WARN, "disk has %llu blocks, which " "is too large for a 32-bit " "kernel", capacity); /* * 1TB disk was treated as (1T - 512)B * in the past, so that it might have * valid VTOC and solaris partitions, * we have to allow it to continue to * work. */ ! if (capacity - 1 > DK_MAX_BLOCKS) goto spinup_failed; #endif } /*
*** 8068,8083 **** /* * Likewise, should never get here if the * spin-up succeeded. Just continue with * the attach... */ ! if (status == EIO) sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); ! else sd_ssc_assessment(ssc, SD_FMT_IGNORE); break; } break; case EACCES: /* --- 7894,7911 ---- /* * Likewise, should never get here if the * spin-up succeeded. Just continue with * the attach... */ ! if (status == EIO) { sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); ! goto spinup_failed; ! } else { sd_ssc_assessment(ssc, SD_FMT_IGNORE); + } break; } break; case EACCES: /*
*** 8123,8133 **** --- 7951,7963 ---- /* * Initialize power management */ mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); + #ifdef notyet sd_setup_pm(ssc, devi); + #endif if (un->un_f_pm_is_enabled == FALSE) { /* * For performance, point to a jump table that does * not include pm. * The direct and priority chains don't change with PM.
*** 8158,8168 **** sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, "retry-on-reservation-conflict", sd_retry_on_reservation_conflict); if (sd_retry_on_reservation_conflict != 0) { sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, ! devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, sd_retry_on_reservation_conflict); } /* Set up options for QFULL handling. */ if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, --- 7988,7998 ---- sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, "retry-on-reservation-conflict", sd_retry_on_reservation_conflict); if (sd_retry_on_reservation_conflict != 0) { sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, ! devi, DDI_PROP_DONTPASS, "sd_retry_on_reservation_conflict", sd_retry_on_reservation_conflict); } /* Set up options for QFULL handling. */ if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
*** 8194,8223 **** /* * Check whether the drive is in emulation mode. */ sd_check_emulation_mode(ssc); ! cmlb_alloc_handle(&un->un_cmlbhandle); ! ! #if defined(__i386) || defined(__amd64) ! /* ! * On x86, compensate for off-by-1 legacy error ! */ if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && (lbasize == un->un_sys_blocksize)) ! offbyone = CMLB_OFF_BY_ONE; ! #endif - if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, - VOID2BOOLEAN(un->un_f_has_removable_media != 0), - VOID2BOOLEAN(un->un_f_is_hotpluggable != 0), - un->un_node_type, offbyone, un->un_cmlbhandle, - (void *)SD_PATH_DIRECT) != 0) { - goto cmlb_attach_failed; - } - - /* * Read and validate the device's geometry (ie, disk label) * A new unformatted drive will not have a valid geometry, but * the driver needs to successfully attach to this device so * the drive can be formatted via ioctls. --- 8024,8038 ---- /* * Check whether the drive is in emulation mode. */ sd_check_emulation_mode(ssc); ! /* Compensate for off-by-1 legacy error */ if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && (lbasize == un->un_sys_blocksize)) ! cmlb_workaround_off_by_one(un->un_cmlbhandle); /* * Read and validate the device's geometry (ie, disk label) * A new unformatted drive will not have a valid geometry, but * the driver needs to successfully attach to this device so * the drive can be formatted via ioctls.
*** 8226,8259 **** (void *)SD_PATH_DIRECT) == 0) ? 1: 0; mutex_enter(SD_MUTEX(un)); /* ! * Read and initialize the devid for the unit. */ ! if (un->un_f_devid_supported) { sd_register_devid(ssc, devi, reservation_flag); } mutex_exit(SD_MUTEX(un)); - #if (defined(__fibre)) - /* - * Register callbacks for fibre only. You can't do this solely - * on the basis of the devid_type because this is hba specific. - * We need to query our hba capabilities to find out whether to - * register or not. - */ - if (un->un_f_is_fibre) { - if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { - sd_init_event_callbacks(un); - SD_TRACE(SD_LOG_ATTACH_DETACH, un, - "sd_unit_attach: un:0x%p event callbacks inserted", - un); - } - } - #endif - if (un->un_f_opt_disable_cache == TRUE) { /* * Disable both read cache and write cache. This is * the historic behavior of the keywords in the config file. */ --- 8041,8057 ---- (void *)SD_PATH_DIRECT) == 0) ? 1: 0; mutex_enter(SD_MUTEX(un)); /* ! * Read and initialize the devid for the unit if not done already. */ ! if (un->un_f_devid_supported && un->un_devid == NULL) { sd_register_devid(ssc, devi, reservation_flag); } mutex_exit(SD_MUTEX(un)); if (un->un_f_opt_disable_cache == TRUE) { /* * Disable both read cache and write cache. This is * the historic behavior of the keywords in the config file. */
*** 8361,8370 **** --- 8159,8169 ---- sd_set_errstats(un); SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: un:0x%p errstats set\n", un); + sd_setup_blk_limits(ssc); /* * After successfully attaching an instance, we record the information * of how many luns have been attached on the relative target and * controller for parallel SCSI. This information is used when sd tries
*** 8378,8397 **** "sd_unit_attach: un:0x%p exit success\n", un); /* Uninitialize sd_ssc_t pointer */ sd_ssc_fini(ssc); ! return (DDI_SUCCESS); /* * An error occurred during the attach; clean up & return failure. */ wm_cache_failed: devid_failed: - ddi_remove_minor_node(devi, NULL); - - cmlb_attach_failed: /* * Cleanup from the scsi_ifsetcap() calls (437868) */ (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); --- 8177,8200 ---- "sd_unit_attach: un:0x%p exit success\n", un); /* Uninitialize sd_ssc_t pointer */ sd_ssc_fini(ssc); ! /* attach finished, switch to SD_STATE_NORMAL */ ! mutex_enter(SD_MUTEX(un)); ! New_state(un, SD_STATE_NORMAL); ! cv_broadcast(&un->un_suspend_cv); ! mutex_exit(SD_MUTEX(un)); + return; + /* * An error occurred during the attach; clean up & return failure. */ + wm_cache_failed: devid_failed: /* * Cleanup from the scsi_ifsetcap() calls (437868) */ (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
*** 8408,8534 **** if (un->un_f_is_fibre == FALSE) { (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); } spinup_failed: ! ! /* Uninitialize sd_ssc_t pointer */ ! sd_ssc_fini(ssc); ! mutex_enter(SD_MUTEX(un)); ! ! /* Deallocate SCSI FMA memory spaces */ ! kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); ! ! /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. restart */ ! if (un->un_direct_priority_timeid != NULL) { ! timeout_id_t temp_id = un->un_direct_priority_timeid; ! un->un_direct_priority_timeid = NULL; mutex_exit(SD_MUTEX(un)); - (void) untimeout(temp_id); - mutex_enter(SD_MUTEX(un)); - } ! /* Cancel any pending start/stop timeouts */ ! if (un->un_startstop_timeid != NULL) { ! timeout_id_t temp_id = un->un_startstop_timeid; ! un->un_startstop_timeid = NULL; ! mutex_exit(SD_MUTEX(un)); ! (void) untimeout(temp_id); ! mutex_enter(SD_MUTEX(un)); ! } ! /* Cancel any pending reset-throttle timeouts */ ! if (un->un_reset_throttle_timeid != NULL) { ! timeout_id_t temp_id = un->un_reset_throttle_timeid; ! un->un_reset_throttle_timeid = NULL; ! mutex_exit(SD_MUTEX(un)); ! (void) untimeout(temp_id); ! mutex_enter(SD_MUTEX(un)); ! } ! ! /* Cancel rmw warning message timeouts */ ! if (un->un_rmw_msg_timeid != NULL) { ! timeout_id_t temp_id = un->un_rmw_msg_timeid; ! un->un_rmw_msg_timeid = NULL; ! mutex_exit(SD_MUTEX(un)); ! (void) untimeout(temp_id); ! mutex_enter(SD_MUTEX(un)); ! } ! ! /* Cancel any pending retry timeouts */ ! if (un->un_retry_timeid != NULL) { ! timeout_id_t temp_id = un->un_retry_timeid; ! un->un_retry_timeid = NULL; ! mutex_exit(SD_MUTEX(un)); ! (void) untimeout(temp_id); ! mutex_enter(SD_MUTEX(un)); ! } ! ! /* Cancel any pending delayed cv broadcast timeouts */ ! if (un->un_dcvb_timeid != NULL) { ! timeout_id_t temp_id = un->un_dcvb_timeid; ! un->un_dcvb_timeid = NULL; ! mutex_exit(SD_MUTEX(un)); ! (void) untimeout(temp_id); ! mutex_enter(SD_MUTEX(un)); ! } ! ! mutex_exit(SD_MUTEX(un)); ! ! /* There should not be any in-progress I/O so ASSERT this check */ ! ASSERT(un->un_ncmds_in_transport == 0); ! ASSERT(un->un_ncmds_in_driver == 0); ! ! /* Do not free the softstate if the callback routine is active */ ! sd_sync_with_callback(un); ! ! /* ! * Partition stats apparently are not used with removables. These would ! * not have been created during attach, so no need to clean them up... ! */ ! if (un->un_errstats != NULL) { ! kstat_delete(un->un_errstats); ! un->un_errstats = NULL; ! } ! ! create_errstats_failed: ! ! if (un->un_stats != NULL) { ! kstat_delete(un->un_stats); ! un->un_stats = NULL; ! } ! ! ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); ! ddi_xbuf_attr_destroy(un->un_xbuf_attr); ! ! ddi_prop_remove_all(devi); ! sema_destroy(&un->un_semoclose); ! cv_destroy(&un->un_state_cv); ! ! sd_free_rqs(un); ! ! alloc_rqs_failed: ! ! devp->sd_private = NULL; ! bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ ! ! /* ! * Note: the man pages are unclear as to whether or not doing a ! * ddi_soft_state_free(sd_state, instance) is the right way to ! * clean up after the ddi_soft_state_zalloc() if the subsequent ! * ddi_get_soft_state() fails. The implication seems to be ! * that the get_soft_state cannot fail if the zalloc succeeds. ! */ ! #ifndef XPV_HVM_DRIVER ! ddi_soft_state_free(sd_state, instance); ! #endif /* !XPV_HVM_DRIVER */ ! ! probe_failed: ! scsi_unprobe(devp); ! ! 
return (DDI_FAILURE); } /* * Function: sd_unit_detach --- 8211,8237 ---- if (un->un_f_is_fibre == FALSE) { (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); } spinup_failed: ! /* attach failed, switch to SD_STATE_ATTACH_FAILED */ mutex_enter(SD_MUTEX(un)); ! New_state(un, SD_STATE_ATTACH_FAILED); ! cv_broadcast(&un->un_suspend_cv); mutex_exit(SD_MUTEX(un)); ! devid = DEVI(devi)->devi_devid_str; ! scsi_fm_ereport_post(un->un_sd, 0, ! NULL, "disk.attach-failure", ssc->ssc_uscsi_info->ui_ena, ! devid, NULL, DDI_NOSLEEP, NULL, ! FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, ! DEVID_IF_KNOWN(devid)); ! /* Uninitialize sd_ssc_t pointer */ ! sd_ssc_fini(ssc); ! SD_ERROR(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach failed: un: %p", ! (void *)un); } /* * Function: sd_unit_detach
*** 8555,8578 **** mutex_enter(&sd_detach_mutex); /* * Fail the detach for any of the following: * - Unable to get the sd_lun struct for the instance ! * - A layered driver has an outstanding open on the instance * - Another thread is already detaching this instance * - Another thread is currently performing an open */ devp = ddi_get_driver_private(devi); ! if ((devp == NULL) || ! ((un = (struct sd_lun *)devp->sd_private) == NULL) || ! (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || ! (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { mutex_exit(&sd_detach_mutex); return (DDI_FAILURE); } ! SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); /* * Mark this instance as currently in a detach, to inhibit any * opens from a layered driver. */ --- 8258,8288 ---- mutex_enter(&sd_detach_mutex); /* * Fail the detach for any of the following: * - Unable to get the sd_lun struct for the instance ! * - The instance is still attaching * - Another thread is already detaching this instance * - Another thread is currently performing an open * + * Additionally, if the "device gone" flag is not set: + * - There are outstanding commands in the driver + * - There are outstanding commands in the transport */ devp = ddi_get_driver_private(devi); ! if (devp == NULL || (un = (struct sd_lun *)devp->sd_private) == NULL || ! un->un_detach_count != 0 || un->un_opens_in_progress != 0 || ! (!DEVI_IS_GONE(devi) && ! (un->un_state == SD_STATE_RWAIT || ! un->un_state == SD_STATE_ATTACHING || ! un->un_ncmds_in_driver != 0 || ! un->un_ncmds_in_transport != 0))) { mutex_exit(&sd_detach_mutex); return (DDI_FAILURE); } ! SD_TRACE(SD_LOG_ATTACH_DETACH, un, "%s: entry 0x%p\n", __func__, un); /* * Mark this instance as currently in a detach, to inhibit any * opens from a layered driver. */
*** 8582,8595 **** tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, -1); dev = sd_make_device(SD_DEVINFO(un)); - #ifndef lint - _NOTE(COMPETING_THREADS_NOW); - #endif - mutex_enter(SD_MUTEX(un)); /* * Fail the detach if there are any outstanding layered * opens on this device. --- 8292,8301 ----
*** 8599,8635 **** goto err_notclosed; } } /* ! * Verify there are NO outstanding commands issued to this device. ! * ie, un_ncmds_in_transport == 0. ! * It's possible to have outstanding commands through the physio ! * code path, even though everything's closed. */ ! if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || ! (un->un_direct_priority_timeid != NULL) || ! (un->un_state == SD_STATE_RWAIT)) { mutex_exit(SD_MUTEX(un)); ! SD_ERROR(SD_LOG_ATTACH_DETACH, un, ! "sd_dr_detach: Detach failure due to outstanding cmds\n"); ! goto err_stillbusy; } /* * If we have the device reserved, release the reservation. */ ! if ((un->un_resvd_status & SD_RESERVE) && !(un->un_resvd_status & SD_LOST_RESERVE)) { mutex_exit(SD_MUTEX(un)); /* * Note: sd_reserve_release sends a command to the device * via the sd_ioctlcmd() path, and can sleep. */ if (sd_reserve_release(dev, SD_RELEASE) != 0) { SD_ERROR(SD_LOG_ATTACH_DETACH, un, ! "sd_dr_detach: Cannot release reservation \n"); } } else { mutex_exit(SD_MUTEX(un)); } --- 8305,8336 ---- goto err_notclosed; } } /* ! * If the attach wasn't successful, some normal cleanup work must not ! * be done. */ ! if (un->un_state == SD_STATE_ATTACH_FAILED) { mutex_exit(SD_MUTEX(un)); ! goto no_attach_cleanup; } /* * If we have the device reserved, release the reservation. */ ! if (!DEVI_IS_GONE(devi) && ! (un->un_resvd_status & SD_RESERVE) && !(un->un_resvd_status & SD_LOST_RESERVE)) { mutex_exit(SD_MUTEX(un)); /* * Note: sd_reserve_release sends a command to the device * via the sd_ioctlcmd() path, and can sleep. */ if (sd_reserve_release(dev, SD_RELEASE) != 0) { SD_ERROR(SD_LOG_ATTACH_DETACH, un, ! "%s: cannot release reservation\n", __func__); } } else { mutex_exit(SD_MUTEX(un)); }
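Review note: in the DEVI_IS_GONE branch added below, scsi_abort(9F) is invoked with a NULL pkt, which requests that the HBA abort all outstanding packets for this target rather than one specific command; the failfast flush then errors out everything still queued inside the driver. Requeueing un_retry_bp at the head of the wait queue beforehand ensures the pending retry buf is flushed together with the rest.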
*** 8682,8692 **** --- 8383,8418 ---- /* Remove any pending reservation reclaim requests for this device */ sd_rmv_resv_reclaim_req(dev); mutex_enter(SD_MUTEX(un)); + if (un->un_retry_timeid != NULL) { + timeout_id_t temp_id = un->un_retry_timeid; + un->un_retry_timeid = NULL; + mutex_exit(SD_MUTEX(un)); + (void) untimeout(temp_id); + mutex_enter(SD_MUTEX(un)); + if (un->un_retry_bp != NULL) { + un->un_retry_bp->av_forw = un->un_waitq_headp; + un->un_waitq_headp = un->un_retry_bp; + if (un->un_waitq_tailp == NULL) + un->un_waitq_tailp = un->un_retry_bp; + un->un_retry_bp = NULL; + un->un_retry_statp = NULL; + } + } + + if (DEVI_IS_GONE(SD_DEVINFO(un))) { + /* abort in-flight IO */ + (void) scsi_abort(SD_ADDRESS(un), NULL); + /* abort pending IO */ + un->un_failfast_state = SD_FAILFAST_ACTIVE; + un->un_failfast_bp = NULL; + sd_failfast_flushq(un, B_TRUE); + } + /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ if (un->un_direct_priority_timeid != NULL) { timeout_id_t temp_id = un->un_direct_priority_timeid; un->un_direct_priority_timeid = NULL; mutex_exit(SD_MUTEX(un));
*** 8695,8757 **** } /* Cancel any active multi-host disk watch thread requests */ if (un->un_mhd_token != NULL) { mutex_exit(SD_MUTEX(un)); - _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); if (scsi_watch_request_terminate(un->un_mhd_token, SCSI_WATCH_TERMINATE_NOWAIT)) { SD_ERROR(SD_LOG_ATTACH_DETACH, un, ! "sd_dr_detach: Cannot cancel mhd watch request\n"); /* * Note: We are returning here after having removed * some driver timeouts above. This is consistent with * the legacy implementation but perhaps the watch * terminate call should be made with the wait flag set. */ ! goto err_stillbusy; } mutex_enter(SD_MUTEX(un)); un->un_mhd_token = NULL; } if (un->un_swr_token != NULL) { mutex_exit(SD_MUTEX(un)); - _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); if (scsi_watch_request_terminate(un->un_swr_token, SCSI_WATCH_TERMINATE_NOWAIT)) { SD_ERROR(SD_LOG_ATTACH_DETACH, un, ! "sd_dr_detach: Cannot cancel swr watch request\n"); /* * Note: We are returning here after having removed * some driver timeouts above. This is consistent with * the legacy implementation but perhaps the watch * terminate call should be made with the wait flag set. */ ! goto err_stillbusy; } mutex_enter(SD_MUTEX(un)); un->un_swr_token = NULL; } - mutex_exit(SD_MUTEX(un)); - /* * Clear any scsi_reset_notifies. We clear the reset notifies * if we have not registered one. * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! */ (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, sd_mhd_reset_notify_cb, (caddr_t)un); - /* - * protect the timeout pointers from getting nulled by - * their callback functions during the cancellation process. - * In such a scenario untimeout can be invoked with a null value. - */ - _NOTE(NO_COMPETING_THREADS_NOW); - mutex_enter(&un->un_pm_mutex); if (un->un_pm_idle_timeid != NULL) { timeout_id_t temp_id = un->un_pm_idle_timeid; un->un_pm_idle_timeid = NULL; mutex_exit(&un->un_pm_mutex); --- 8421,8473 ---- } /* Cancel any active multi-host disk watch thread requests */ if (un->un_mhd_token != NULL) { mutex_exit(SD_MUTEX(un)); if (scsi_watch_request_terminate(un->un_mhd_token, SCSI_WATCH_TERMINATE_NOWAIT)) { SD_ERROR(SD_LOG_ATTACH_DETACH, un, ! "%s: cannot cancel mhd watch request\n", __func__); /* * Note: We are returning here after having removed * some driver timeouts above. This is consistent with * the legacy implementation but perhaps the watch * terminate call should be made with the wait flag set. */ ! goto err_remove_event; } mutex_enter(SD_MUTEX(un)); un->un_mhd_token = NULL; } if (un->un_swr_token != NULL) { mutex_exit(SD_MUTEX(un)); if (scsi_watch_request_terminate(un->un_swr_token, SCSI_WATCH_TERMINATE_NOWAIT)) { SD_ERROR(SD_LOG_ATTACH_DETACH, un, ! "%s: cannot cancel swr watch request\n", __func__); /* * Note: We are returning here after having removed * some driver timeouts above. This is consistent with * the legacy implementation but perhaps the watch * terminate call should be made with the wait flag set. */ ! goto err_remove_event; } mutex_enter(SD_MUTEX(un)); un->un_swr_token = NULL; } /* * Clear any scsi_reset_notifies. We clear the reset notifies * if we have not registered one. * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 
*/ + mutex_exit(SD_MUTEX(un)); (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, sd_mhd_reset_notify_cb, (caddr_t)un); mutex_enter(&un->un_pm_mutex); if (un->un_pm_idle_timeid != NULL) { timeout_id_t temp_id = un->un_pm_idle_timeid; un->un_pm_idle_timeid = NULL; mutex_exit(&un->un_pm_mutex);
*** 8789,8801 **** mutex_exit(&un->un_pm_mutex); if ((un->un_f_pm_is_enabled == TRUE) && (pm_lower_power(SD_DEVINFO(un), 0, SD_PM_STATE_STOPPED(un)) != DDI_SUCCESS)) { SD_ERROR(SD_LOG_ATTACH_DETACH, un, ! "sd_dr_detach: Lower power request failed, ignoring.\n"); /* - * Fix for bug: 4297749, item # 13 * The above test now includes a check to see if PM is * supported by this device before calling * pm_lower_power(). * Note, the following is not dead code. The call to * pm_lower_power above will generate a call back into --- 8505,8517 ---- mutex_exit(&un->un_pm_mutex); if ((un->un_f_pm_is_enabled == TRUE) && (pm_lower_power(SD_DEVINFO(un), 0, SD_PM_STATE_STOPPED(un)) != DDI_SUCCESS)) { SD_ERROR(SD_LOG_ATTACH_DETACH, un, ! "%s: lower power request failed, ignoring\n", ! __func__); /* * The above test now includes a check to see if PM is * supported by this device before calling * pm_lower_power(). * Note, the following is not dead code. The call to * pm_lower_power above will generate a call back into
*** 8854,8864 **** * substantial cleanup above. This is consistent * with the legacy implementation but this may not * be the right thing to do. */ SD_ERROR(SD_LOG_ATTACH_DETACH, un, ! "sd_dr_detach: Cannot cancel insert event\n"); goto err_remove_event; } un->un_insert_event = NULL; if ((un->un_remove_event != NULL) && --- 8570,8580 ---- * substantial cleanup above. This is consistent * with the legacy implementation but this may not * be the right thing to do. */ SD_ERROR(SD_LOG_ATTACH_DETACH, un, ! "%s: cannot cancel insert event\n", __func__); goto err_remove_event; } un->un_insert_event = NULL; if ((un->un_remove_event != NULL) &&
*** 8869,8887 **** * substantial cleanup above. This is consistent * with the legacy implementation but this may not * be the right thing to do. */ SD_ERROR(SD_LOG_ATTACH_DETACH, un, ! "sd_dr_detach: Cannot cancel remove event\n"); goto err_remove_event; } un->un_remove_event = NULL; } /* Do not free the softstate if the callback routine is active */ sd_sync_with_callback(un); cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); cmlb_free_handle(&un->un_cmlbhandle); /* * Hold the detach mutex here, to make sure that no other threads ever --- 8585,8622 ---- * substantial cleanup above. This is consistent * with the legacy implementation but this may not * be the right thing to do. */ SD_ERROR(SD_LOG_ATTACH_DETACH, un, ! "%s: cannot cancel remove event\n", __func__); goto err_remove_event; } un->un_remove_event = NULL; } /* Do not free the softstate if the callback routine is active */ sd_sync_with_callback(un); + no_attach_cleanup: + /* + * The driver must wait, or at least attempt to wait, for any + * commands still in the driver to complete. + */ + mutex_enter(SD_MUTEX(un)); + + while (un->un_ncmds_in_driver != 0) { + clock_t max_delay = ddi_get_lbolt() + SEC_TO_TICK(30); + un->un_f_detach_waiting = 1; + if (cv_timedwait(&un->un_detach_cv, SD_MUTEX(un), + max_delay) == -1) { + break; + } + } + + un->un_f_detach_waiting = 0; + mutex_exit(SD_MUTEX(un)); + cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); cmlb_free_handle(&un->un_cmlbhandle); /* * Hold the detach mutex here, to make sure that no other threads ever
*** 8934,8943 **** --- 8669,8688 ---- */ if (un->un_stats != NULL) { kstat_delete(un->un_stats); un->un_stats = NULL; } + if (un->un_unmapstats != NULL) { + kstat_delete(un->un_unmapstats_ks); + un->un_unmapstats_ks = NULL; + un->un_unmapstats = NULL; + } + if (un->un_lat_ksp != NULL) { + kstat_delete(un->un_lat_ksp); + un->un_lat_stats = NULL; + un->un_lat_ksp = NULL; + } if (un->un_errstats != NULL) { kstat_delete(un->un_errstats); un->un_errstats = NULL; }
*** 8964,8973 **** --- 8709,8721 ---- cv_destroy(&un->un_wcc_cv); /* Open/close semaphore */ sema_destroy(&un->un_semoclose); + /* Used to wait for outstanding commands */ + cv_destroy(&un->un_detach_cv); + /* Removable media condvar. */ cv_destroy(&un->un_state_cv); /* Suspend/resume condvar. */ cv_destroy(&un->un_suspend_cv);
*** 8999,9022 **** */ if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); } return (DDI_SUCCESS); err_notclosed: mutex_exit(SD_MUTEX(un)); - err_stillbusy: - _NOTE(NO_COMPETING_THREADS_NOW); - err_remove_event: mutex_enter(&sd_detach_mutex); un->un_detach_count--; mutex_exit(&sd_detach_mutex); ! SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); return (DDI_FAILURE); } /* --- 8747,8770 ---- */ if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); } + ddi_remove_minor_node(devi, NULL); + (void) devfs_clean(devi, NULL, DV_CLEAN_FORCE); + return (DDI_SUCCESS); err_notclosed: mutex_exit(SD_MUTEX(un)); err_remove_event: mutex_enter(&sd_detach_mutex); un->un_detach_count--; mutex_exit(&sd_detach_mutex); ! SD_TRACE(SD_LOG_ATTACH_DETACH, un, "%s: exit failure\n", __func__); return (DDI_FAILURE); } /*
*** 9219,9231 **** ASSERT(un != NULL); instance = ddi_get_instance(SD_DEVINFO(un)); ! /* Note:x86: is this a VTOC8/VTOC16 difference? */ for (i = 0; i < NSDMAP; i++) { - if (cmlb_partinfo(un->un_cmlbhandle, i, &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0) continue; mutex_enter(SD_MUTEX(un)); --- 8967,8978 ---- ASSERT(un != NULL); instance = ddi_get_instance(SD_DEVINFO(un)); ! /* XXX is this a VTOC8/VTOC16 difference? */ for (i = 0; i < NSDMAP; i++) { if (cmlb_partinfo(un->un_cmlbhandle, i, &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0) continue; mutex_enter(SD_MUTEX(un));
*** 9246,9357 **** } mutex_exit(SD_MUTEX(un)); } } - - #if (defined(__fibre)) /* - * Function: sd_init_event_callbacks - * - * Description: This routine initializes the insertion and removal event - * callbacks. (fibre only) - * - * Arguments: un - driver soft state (unit) structure - * - * Context: Kernel thread context - */ - - static void - sd_init_event_callbacks(struct sd_lun *un) - { - ASSERT(un != NULL); - - if ((un->un_insert_event == NULL) && - (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT, - &un->un_insert_event) == DDI_SUCCESS)) { - /* - * Add the callback for an insertion event - */ - (void) ddi_add_event_handler(SD_DEVINFO(un), - un->un_insert_event, sd_event_callback, (void *)un, - &(un->un_insert_cb_id)); - } - - if ((un->un_remove_event == NULL) && - (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT, - &un->un_remove_event) == DDI_SUCCESS)) { - /* - * Add the callback for a removal event - */ - (void) ddi_add_event_handler(SD_DEVINFO(un), - un->un_remove_event, sd_event_callback, (void *)un, - &(un->un_remove_cb_id)); - } - } - - - /* - * Function: sd_event_callback - * - * Description: This routine handles insert/remove events (photon). The - * state is changed to OFFLINE which can be used to supress - * error msgs. (fibre only) - * - * Arguments: un - driver soft state (unit) structure - * - * Context: Callout thread context - */ - /* ARGSUSED */ - static void - sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg, - void *bus_impldata) - { - struct sd_lun *un = (struct sd_lun *)arg; - - _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event)); - if (event == un->un_insert_event) { - SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event"); - mutex_enter(SD_MUTEX(un)); - if (un->un_state == SD_STATE_OFFLINE) { - if (un->un_last_state != SD_STATE_SUSPENDED) { - un->un_state = un->un_last_state; - } else { - /* - * We have gone through SUSPEND/RESUME while - * we were offline. Restore the last state - */ - un->un_state = un->un_save_state; - } - } - mutex_exit(SD_MUTEX(un)); - - _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event)); - } else if (event == un->un_remove_event) { - SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event"); - mutex_enter(SD_MUTEX(un)); - /* - * We need to handle an event callback that occurs during - * the suspend operation, since we don't prevent it. - */ - if (un->un_state != SD_STATE_OFFLINE) { - if (un->un_state != SD_STATE_SUSPENDED) { - New_state(un, SD_STATE_OFFLINE); - } else { - un->un_last_state = SD_STATE_OFFLINE; - } - } - mutex_exit(SD_MUTEX(un)); - } else { - scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, - "!Unknown event\n"); - } - - } - #endif - - /* * Values related to caching mode page depending on whether the unit is ATAPI. */ #define SDC_CDB_GROUP(un) ((un->un_f_cfg_is_atapi == TRUE) ? \ CDB_GROUP1 : CDB_GROUP0) #define SDC_HDRLEN(un) ((un->un_f_cfg_is_atapi == TRUE) ? \ --- 8993,9003 ----
*** 9750,9762 **** rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len, 0x01, 0x86, &inq86_resid); if (rval == 0 && (inq86_len - inq86_resid > 6)) { SD_TRACE(SD_LOG_COMMON, un, ! "sd_get_nv_sup: \ ! successfully get VPD page: %x \ ! PAGE LENGTH: %x BYTE 6: %x\n", inq86[1], inq86[3], inq86[6]); mutex_enter(SD_MUTEX(un)); /* * check the value of NV_SUP bit: only if the device --- 9396,9407 ---- rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len, 0x01, 0x86, &inq86_resid); if (rval == 0 && (inq86_len - inq86_resid > 6)) { SD_TRACE(SD_LOG_COMMON, un, ! "sd_get_nv_sup: successfully get VPD page: %x " ! "PAGE LENGTH: %x BYTE 6: %x\n", inq86[1], inq86[3], inq86[6]); mutex_enter(SD_MUTEX(un)); /* * check the value of NV_SUP bit: only if the device
*** 10094,10115 **** /* * Fail the open if there is no softstate for the instance, or * if another thread somewhere is trying to detach the instance. */ if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || ! (un->un_detach_count != 0)) { mutex_exit(&sd_detach_mutex); /* ! * The probe cache only needs to be cleared when open (9e) fails ! * with ENXIO (4238046). */ - /* - * un-conditionally clearing probe cache is ok with - * separate sd/ssd binaries - * x86 platform can be an issue with both parallel - * and fibre in 1 binary - */ sd_scsi_clear_probe_cache(); return (ENXIO); } /* --- 9739,9754 ---- /* * Fail the open if there is no softstate for the instance, or * if another thread somewhere is trying to detach the instance. */ if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || ! un->un_detach_count != 0 || DEVI_IS_GONE(SD_DEVINFO(un))) { mutex_exit(&sd_detach_mutex); /* ! * The probe cache only needs to be cleared when open (9E) fails ! * with ENXIO. */ sd_scsi_clear_probe_cache(); return (ENXIO); } /*
*** 10156,10177 **** * status. */ if (!nodelay) { while ((un->un_state == SD_STATE_SUSPENDED) || ! (un->un_state == SD_STATE_PM_CHANGING)) { cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); } - mutex_exit(SD_MUTEX(un)); if (sd_pm_entry(un) != DDI_SUCCESS) { rval = EIO; SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: sd_pm_entry failed\n"); goto open_failed_with_pm; } mutex_enter(SD_MUTEX(un)); } /* check for previous exclusive open */ SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); SD_TRACE(SD_LOG_OPEN_CLOSE, un, --- 9795,9822 ---- * status. */ if (!nodelay) { while ((un->un_state == SD_STATE_SUSPENDED) || ! (un->un_state == SD_STATE_PM_CHANGING) || ! (un->un_state == SD_STATE_ATTACHING)) { cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); } mutex_exit(SD_MUTEX(un)); if (sd_pm_entry(un) != DDI_SUCCESS) { rval = EIO; SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: sd_pm_entry failed\n"); goto open_failed_with_pm; } mutex_enter(SD_MUTEX(un)); + } else if (un->un_state == SD_STATE_ATTACH_FAILED) { + mutex_exit(SD_MUTEX(un)); + rval = EIO; + SD_ERROR(SD_LOG_OPEN_CLOSE, un, + "sdopen: attach failed, can't open\n"); + goto open_failed_not_attached; } /* check for previous exclusive open */ SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); SD_TRACE(SD_LOG_OPEN_CLOSE, un,
*** 10258,10268 **** rval = un->un_f_has_removable_media ? ENXIO : EIO; SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " "device not ready or invalid disk block value\n"); goto open_fail; } - #if defined(__i386) || defined(__amd64) } else { uchar_t *cp; /* * x86 requires special nodelay handling, so that p0 is * always defined and accessible. --- 9903,9912 ----
*** 10279,10290 **** mutex_exit(SD_MUTEX(un)); cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); mutex_enter(SD_MUTEX(un)); } - - #endif } if (otyp == OTYP_LYR) { un->un_ocmap.lyropen[part]++; } else { --- 9923,9932 ----
*** 10324,10333 **** --- 9966,9981 ---- } SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " "open of part %d type %d\n", part, otyp); + /* + * If we made it here, the disk is alive. + * Make sure it is set to normal state. + */ + New_state(un, SD_STATE_NORMAL); + mutex_exit(SD_MUTEX(un)); if (!nodelay) { sd_pm_exit(un); }
*** 10352,10361 **** --- 10000,10010 ---- */ if (!nodelay) { sd_pm_exit(un); } open_failed_with_pm: + open_failed_not_attached: sema_v(&un->un_semoclose); mutex_enter(&sd_detach_mutex); un->un_opens_in_progress--; if (otyp == OTYP_LYR) {
*** 10394,10404 **** --- 10043,10057 ---- /* Validate the open type */ if (otyp >= OTYPCNT) { return (ENXIO); } + /* Hold the detach mutex to allow close to complete */ + mutex_enter(&sd_detach_mutex); + if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { + mutex_exit(&sd_detach_mutex); return (ENXIO); } part = SDPART(dev); nodelay = flag & (FNDELAY | FNONBLOCK);
*** 10412,10423 **** */ sema_p(&un->un_semoclose); mutex_enter(SD_MUTEX(un)); ! /* Don't proceed if power is being changed. */ ! while (un->un_state == SD_STATE_PM_CHANGING) { cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); } if (un->un_exclopen & (1 << part)) { un->un_exclopen &= ~(1 << part); --- 10065,10077 ---- */ sema_p(&un->un_semoclose); mutex_enter(SD_MUTEX(un)); ! /* Don't proceed if power is being changed or we're still attaching. */ ! while ((un->un_state == SD_STATE_PM_CHANGING) || ! (un->un_state == SD_STATE_ATTACHING)) { cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); } if (un->un_exclopen & (1 << part)) { un->un_exclopen &= ~(1 << part);
*** 10454,10465 **** } mutex_exit(SD_MUTEX(un)); cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); mutex_enter(SD_MUTEX(un)); ! ! } else { /* * Flush any outstanding writes in NVRAM cache. * Note: SYNCHRONIZE CACHE is an optional SCSI-2 * cmd, it may not work for non-Pluto devices. * SYNCHRONIZE CACHE is not required for removables, --- 10108,10118 ---- } mutex_exit(SD_MUTEX(un)); cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); mutex_enter(SD_MUTEX(un)); ! } else if (un->un_state != SD_STATE_ATTACH_FAILED) { /* * Flush any outstanding writes in NVRAM cache. * Note: SYNCHRONIZE CACHE is an optional SCSI-2 * cmd, it may not work for non-Pluto devices. * SYNCHRONIZE CACHE is not required for removables,
*** 10469,10490 **** * the only command issued here that requires the * drive be powered up, only do the power up before * sending the Sync Cache command. If additional * commands are added which require a powered up * drive, the following sequence may have to change. - * - * And finally, note that parallel SCSI on SPARC - * only issues a Sync Cache to DVD-RAM, a newly - * supported device. */ ! #if defined(__i386) || defined(__amd64) ! if ((un->un_f_sync_cache_supported && un->un_f_sync_cache_required) || ! un->un_f_dvdram_writable_device == TRUE) { ! #else ! if (un->un_f_dvdram_writable_device == TRUE) { ! #endif mutex_exit(SD_MUTEX(un)); if (sd_pm_entry(un) == DDI_SUCCESS) { rval = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); --- 10122,10136 ---- * the only command issued here that requires the * drive be powered up, only do the power up before * sending the Sync Cache command. If additional * commands are added which require a powered up * drive, the following sequence may have to change. */ ! if (!DEVI_IS_GONE(SD_DEVINFO(un)) && ! ((un->un_f_sync_cache_supported && un->un_f_sync_cache_required) || ! un->un_f_dvdram_writable_device == TRUE)) { mutex_exit(SD_MUTEX(un)); if (sd_pm_entry(un) == DDI_SUCCESS) { rval = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL);
*** 10530,10539 **** --- 10176,10191 ---- } mutex_enter(SD_MUTEX(un)); } /* + * Pardon a device that is currently in the failfast + * active state, so as not to bias a future open. + */ + un->un_failfast_state = SD_FAILFAST_INACTIVE; + + /* * If a device has removable media, invalidate all * parameters related to media, such as geometry, * blocksize, and blockcount. */ if (un->un_f_has_removable_media) {
*** 10566,10584 **** } mutex_exit(SD_MUTEX(un)); sema_v(&un->un_semoclose); ! if (otyp == OTYP_LYR) { ! mutex_enter(&sd_detach_mutex); ! /* ! * The detach routine may run when the layer count ! * drops to zero. ! */ un->un_layer_count--; mutex_exit(&sd_detach_mutex); - } return (rval); } --- 10218,10231 ---- } mutex_exit(SD_MUTEX(un)); sema_v(&un->un_semoclose); ! if (otyp == OTYP_LYR) un->un_layer_count--; + mutex_exit(&sd_detach_mutex); return (rval); }
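The attach-state gate introduced in sdopen() and sdclose() above recurs in each I/O entry point below (sdread, sdwrite, sdaread, sdawrite, sdstrategy). Condensed to its essentials, the gate looks like this (a sketch, not verbatim driver code):

    mutex_enter(SD_MUTEX(un));
    /* Block while attach is still in progress ... */
    while (un->un_state == SD_STATE_ATTACHING)
        cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
    /* ... and fail outright if attach never completed. */
    if (un->un_state == SD_STATE_ATTACH_FAILED) {
        mutex_exit(SD_MUTEX(un));
        return (EIO);
    }
    mutex_exit(SD_MUTEX(un));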
*** 10671,10681 **** * Do a test unit ready to clear any unit attention from non-cd * devices. */ mutex_exit(SD_MUTEX(un)); ! status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); if (status != 0) { sd_ssc_assessment(ssc, SD_FMT_IGNORE); } mutex_enter(SD_MUTEX(un)); --- 10318,10328 ---- * Do a test unit ready to clear any unit attention from non-cd * devices. */ mutex_exit(SD_MUTEX(un)); ! status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR); if (status != 0) { sd_ssc_assessment(ssc, SD_FMT_IGNORE); } mutex_enter(SD_MUTEX(un));
*** 10877,10893 **** struct sd_lun *un = NULL; int secmask; int err = 0; sd_ssc_t *ssc; ! if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { return (ENXIO); - } ASSERT(!mutex_owned(SD_MUTEX(un))); if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { mutex_enter(SD_MUTEX(un)); /* * Because the call to sd_ready_and_valid will issue I/O we * must wait here if either the device is suspended or --- 10524,10550 ---- struct sd_lun *un = NULL; int secmask; int err = 0; sd_ssc_t *ssc; ! if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || ! DEVI_IS_GONE(SD_DEVINFO(un))) return (ENXIO); ASSERT(!mutex_owned(SD_MUTEX(un))); + mutex_enter(SD_MUTEX(un)); + while (un->un_state == SD_STATE_ATTACHING) + cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); + if (un->un_state == SD_STATE_ATTACH_FAILED) { + mutex_exit(SD_MUTEX(un)); + SD_ERROR(SD_LOG_READ_WRITE, un, "sdread: attach failed\n"); + return (EIO); + } + mutex_exit(SD_MUTEX(un)); + if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { mutex_enter(SD_MUTEX(un)); /* * Because the call to sd_ready_and_valid will issue I/O we * must wait here if either the device is suspended or
*** 10895,10904 **** --- 10552,10563 ---- */ while ((un->un_state == SD_STATE_SUSPENDED) || (un->un_state == SD_STATE_PM_CHANGING)) { cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); } + + SD_BAIL_CHECK(un); un->un_ncmds_in_driver++; mutex_exit(SD_MUTEX(un)); /* Initialize sd_ssc_t for internal uscsi commands */ ssc = sd_ssc_init(un);
*** 10909,10918 **** --- 10568,10579 ---- } sd_ssc_fini(ssc); mutex_enter(SD_MUTEX(un)); un->un_ncmds_in_driver--; + if (un->un_f_detach_waiting) + cv_signal(&un->un_detach_cv); ASSERT(un->un_ncmds_in_driver >= 0); mutex_exit(SD_MUTEX(un)); if (err != 0) return (err); }
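The bounded drain added to the detach path earlier only works because every I/O completion path now signals un_detach_cv when un_f_detach_waiting is set, as in this hunk and the matching sdwrite/sdaread/sdawrite/iodone hunks. A self-contained userland model of that handshake, with POSIX threads standing in for the kernel condvar interfaces and all names purely illustrative:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static pthread_mutex_t un_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t un_detach_cv = PTHREAD_COND_INITIALIZER;
    static int un_ncmds_in_driver = 3;      /* pretend 3 commands in flight */
    static int un_f_detach_waiting = 0;

    /* Models the completion side: sd_buf_iodone() and friends. */
    static void *
    cmd_completion(void *arg)
    {
        (void) arg;
        for (;;) {
            sleep(1);                       /* simulated I/O latency */
            pthread_mutex_lock(&un_mutex);
            if (un_ncmds_in_driver == 0) {
                pthread_mutex_unlock(&un_mutex);
                return (NULL);
            }
            un_ncmds_in_driver--;
            if (un_f_detach_waiting)        /* wake the detach thread */
                pthread_cond_signal(&un_detach_cv);
            pthread_mutex_unlock(&un_mutex);
        }
    }

    /* Models the detach side: wait up to 30s for the driver to drain. */
    int
    main(void)
    {
        pthread_t tid;
        struct timespec deadline;

        (void) pthread_create(&tid, NULL, cmd_completion, NULL);

        (void) clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += 30;              /* cf. SEC_TO_TICK(30) above */

        pthread_mutex_lock(&un_mutex);
        un_f_detach_waiting = 1;
        while (un_ncmds_in_driver != 0) {
            if (pthread_cond_timedwait(&un_detach_cv, &un_mutex,
                &deadline) == ETIMEDOUT)
                break;                      /* detach proceeds anyway */
        }
        un_f_detach_waiting = 0;
        printf("drained with %d command(s) left\n", un_ncmds_in_driver);
        pthread_mutex_unlock(&un_mutex);
        (void) pthread_join(tid, NULL);
        return (0);
    }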
*** 10968,10983 **** struct sd_lun *un = NULL; int secmask; int err = 0; sd_ssc_t *ssc; ! if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { return (ENXIO); - } ASSERT(!mutex_owned(SD_MUTEX(un))); if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { mutex_enter(SD_MUTEX(un)); /* * Because the call to sd_ready_and_valid will issue I/O we * must wait here if either the device is suspended or --- 10629,10655 ---- struct sd_lun *un = NULL; int secmask; int err = 0; sd_ssc_t *ssc; ! if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || ! DEVI_IS_GONE(SD_DEVINFO(un))) return (ENXIO); ASSERT(!mutex_owned(SD_MUTEX(un))); + mutex_enter(SD_MUTEX(un)); + while (un->un_state == SD_STATE_ATTACHING) + cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); + + if (un->un_state == SD_STATE_ATTACH_FAILED) { + mutex_exit(SD_MUTEX(un)); + SD_ERROR(SD_LOG_READ_WRITE, un, "sdwrite: attach failed\n"); + return (EIO); + } + mutex_exit(SD_MUTEX(un)); + if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { mutex_enter(SD_MUTEX(un)); /* * Because the call to sd_ready_and_valid will issue I/O we * must wait here if either the device is suspended or
*** 10985,10994 **** --- 10657,10668 ---- */ while ((un->un_state == SD_STATE_SUSPENDED) || (un->un_state == SD_STATE_PM_CHANGING)) { cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); } + + SD_BAIL_CHECK(un); un->un_ncmds_in_driver++; mutex_exit(SD_MUTEX(un)); /* Initialize sd_ssc_t for internal uscsi commands */ ssc = sd_ssc_init(un);
*** 11000,11009 **** --- 10674,10685 ---- sd_ssc_fini(ssc); mutex_enter(SD_MUTEX(un)); un->un_ncmds_in_driver--; ASSERT(un->un_ncmds_in_driver >= 0); + if (un->un_f_detach_waiting) + cv_signal(&un->un_detach_cv); mutex_exit(SD_MUTEX(un)); if (err != 0) return (err); }
*** 11058,11073 **** struct uio *uio = aio->aio_uio; int secmask; int err = 0; sd_ssc_t *ssc; ! if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { return (ENXIO); - } ASSERT(!mutex_owned(SD_MUTEX(un))); if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { mutex_enter(SD_MUTEX(un)); /* * Because the call to sd_ready_and_valid will issue I/O we * must wait here if either the device is suspended or --- 10734,10760 ---- struct uio *uio = aio->aio_uio; int secmask; int err = 0; sd_ssc_t *ssc; ! if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || ! DEVI_IS_GONE(SD_DEVINFO(un))) return (ENXIO); ASSERT(!mutex_owned(SD_MUTEX(un))); + mutex_enter(SD_MUTEX(un)); + while (un->un_state == SD_STATE_ATTACHING) + cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); + + if (un->un_state == SD_STATE_ATTACH_FAILED) { + mutex_exit(SD_MUTEX(un)); + SD_ERROR(SD_LOG_READ_WRITE, un, "sdaread: attach failed\n"); + return (EIO); + } + mutex_exit(SD_MUTEX(un)); + if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { mutex_enter(SD_MUTEX(un)); /* * Because the call to sd_ready_and_valid will issue I/O we * must wait here if either the device is suspended or
*** 11075,11084 **** --- 10762,10773 ---- */ while ((un->un_state == SD_STATE_SUSPENDED) || (un->un_state == SD_STATE_PM_CHANGING)) { cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); } + + SD_BAIL_CHECK(un); un->un_ncmds_in_driver++; mutex_exit(SD_MUTEX(un)); /* Initialize sd_ssc_t for internal uscsi commands */ ssc = sd_ssc_init(un);
*** 11090,11099 **** --- 10779,10790 ---- sd_ssc_fini(ssc); mutex_enter(SD_MUTEX(un)); un->un_ncmds_in_driver--; ASSERT(un->un_ncmds_in_driver >= 0); + if (un->un_f_detach_waiting) + cv_signal(&un->un_detach_cv); mutex_exit(SD_MUTEX(un)); if (err != 0) return (err); }
*** 11148,11163 **** struct uio *uio = aio->aio_uio; int secmask; int err = 0; sd_ssc_t *ssc; ! if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { return (ENXIO); - } ASSERT(!mutex_owned(SD_MUTEX(un))); if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { mutex_enter(SD_MUTEX(un)); /* * Because the call to sd_ready_and_valid will issue I/O we * must wait here if either the device is suspended or --- 10839,10866 ---- struct uio *uio = aio->aio_uio; int secmask; int err = 0; sd_ssc_t *ssc; ! if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || ! DEVI_IS_GONE(SD_DEVINFO(un))) return (ENXIO); ASSERT(!mutex_owned(SD_MUTEX(un))); + mutex_enter(SD_MUTEX(un)); + while (un->un_state == SD_STATE_ATTACHING) + cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); + + if (un->un_state == SD_STATE_ATTACH_FAILED) { + mutex_exit(SD_MUTEX(un)); + SD_ERROR(SD_LOG_READ_WRITE, un, + "sdawrite: attach failed\n"); + return (EIO); + } + mutex_exit(SD_MUTEX(un)); + if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { mutex_enter(SD_MUTEX(un)); /* * Because the call to sd_ready_and_valid will issue I/O we * must wait here if either the device is suspended or
*** 11165,11174 **** --- 10868,10879 ---- */ while ((un->un_state == SD_STATE_SUSPENDED) || (un->un_state == SD_STATE_PM_CHANGING)) { cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); } + + SD_BAIL_CHECK(un); un->un_ncmds_in_driver++; mutex_exit(SD_MUTEX(un)); /* Initialize sd_ssc_t for internal uscsi commands */ ssc = sd_ssc_init(un);
*** 11180,11189 **** --- 10885,10896 ---- sd_ssc_fini(ssc); mutex_enter(SD_MUTEX(un)); un->un_ncmds_in_driver--; ASSERT(un->un_ncmds_in_driver >= 0); + if (un->un_f_detach_waiting) + cv_signal(&un->un_detach_cv); mutex_exit(SD_MUTEX(un)); if (err != 0) return (err); }
*** 11309,11352 **** * layer must acquire & release the SD_MUTEX(un) as needed. */ /* - * Create taskq for all targets in the system. This is created at - * _init(9E) and destroyed at _fini(9E). - * - * Note: here we set the minalloc to a reasonably high number to ensure that - * we will have an adequate supply of task entries available at interrupt time. - * This is used in conjunction with the TASKQ_PREPOPULATE flag in - * sd_create_taskq(). Since we do not want to sleep for allocations at - * interrupt time, set maxalloc equal to minalloc. That way we will just fail - * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq - * requests any one instant in time. - */ - #define SD_TASKQ_NUMTHREADS 8 - #define SD_TASKQ_MINALLOC 256 - #define SD_TASKQ_MAXALLOC 256 - - static taskq_t *sd_tq = NULL; - _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq)) - - static int sd_taskq_minalloc = SD_TASKQ_MINALLOC; - static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC; - - /* - * The following task queue is being created for the write part of - * read-modify-write of non-512 block size devices. - * Limit the number of threads to 1 for now. This number has been chosen - * considering the fact that it applies only to dvd ram drives/MO drives - * currently. Performance for which is not main criteria at this stage. - * Note: It needs to be explored if we can use a single taskq in future - */ - #define SD_WMR_TASKQ_NUMTHREADS 1 - static taskq_t *sd_wmr_tq = NULL; - _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq)) - - /* * Function: sd_taskq_create * * Description: Create taskq thread(s) and preallocate task entries * * Return Code: Returns a pointer to the allocated taskq_t. --- 11016,11025 ----
*** 11420,11444 **** static int sdstrategy(struct buf *bp) { struct sd_lun *un; ! un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); ! if (un == NULL) { ! bioerror(bp, EIO); ! bp->b_resid = bp->b_bcount; ! biodone(bp); ! return (0); ! } ! /* As was done in the past, fail new cmds. if state is dumping. */ ! if (un->un_state == SD_STATE_DUMPING) { ! bioerror(bp, ENXIO); ! bp->b_resid = bp->b_bcount; ! biodone(bp); ! return (0); } ASSERT(!mutex_owned(SD_MUTEX(un))); /* --- 11093,11113 ---- static int sdstrategy(struct buf *bp) { struct sd_lun *un; + int error = EIO; ! if ((un = ddi_get_soft_state(sd_state, ! SD_GET_INSTANCE_FROM_BUF(bp))) == NULL) ! goto fail; ! /* Fail new cmds if state is dumping or device is gone */ ! if (un->un_state == SD_STATE_DUMPING || ! DEVI_IS_GONE(SD_DEVINFO(un))) { ! error = ENXIO; ! goto fail; } ASSERT(!mutex_owned(SD_MUTEX(un))); /*
*** 11451,11464 **** /* * Must wait here if either the device is suspended or * if its power level is changing. */ while ((un->un_state == SD_STATE_SUSPENDED) || ! (un->un_state == SD_STATE_PM_CHANGING)) { cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); } un->un_ncmds_in_driver++; /* * atapi: Since we are running the CD for now in PIO mode we need to * call bp_mapin here to avoid bp_mapin called in interrupt context under --- 11120,11145 ---- /* * Must wait here if either the device is suspended or * if its power level is changing. */ while ((un->un_state == SD_STATE_SUSPENDED) || ! (un->un_state == SD_STATE_PM_CHANGING) || ! (un->un_state == SD_STATE_ATTACHING)) { cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); } + if (un->un_state == SD_STATE_ATTACH_FAILED) { + mutex_exit(SD_MUTEX(un)); + SD_ERROR(SD_LOG_READ_WRITE, un, + "sdstrategy: attach failed\n"); + goto fail; + } + if (un->un_detach_count != 0) { + mutex_exit(SD_MUTEX(un)); + goto fail; + } + un->un_ncmds_in_driver++; /* * atapi: Since we are running the CD for now in PIO mode we need to * call bp_mapin here to avoid bp_mapin called in interrupt context under
*** 11482,11494 **** * call sd_xbuf_strategy(). We just want to return the * result of ddi_xbuf_qstrategy so that we have an opt- * imized tail call which saves us a stack frame. */ return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); } - /* * Function: sd_xbuf_strategy * * Description: Function for initiating IO operations via the * ddi_xbuf_qstrategy() mechanism. --- 11163,11180 ---- * call sd_xbuf_strategy(). We just want to return the * result of ddi_xbuf_qstrategy so that we have an opt- * imized tail call which saves us a stack frame. */ return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); + + fail: + bioerror(bp, error); + bp->b_resid = bp->b_bcount; + biodone(bp); + return (0); } /* * Function: sd_xbuf_strategy * * Description: Function for initiating IO operations via the * ddi_xbuf_qstrategy() mechanism.
*** 11667,11677 **** uchar_t cmd; ASSERT(bp != NULL); un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); ! if (un == NULL) { bioerror(bp, EIO); bp->b_resid = bp->b_bcount; biodone(bp); return (0); } --- 11353,11363 ---- uchar_t cmd; ASSERT(bp != NULL); un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); ! if (un == NULL || DEVI_IS_GONE(SD_DEVINFO(un))) { bioerror(bp, EIO); bp->b_resid = bp->b_bcount; biodone(bp); return (0); }
*** 11704,11713 **** --- 11390,11408 ---- if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) && (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1)) un->un_f_sync_cache_required = TRUE; + if (sd_failfast_enable & SD_FAILFAST_ENABLE_FAIL_USCSI) { + /* + * If there are outstanding commands, treat all + * USCSI commands as if they have B_FAILFAST set. + */ + if (un->un_ncmds_in_driver != 1) + bp->b_flags |= B_FAILFAST; + } + mutex_exit(SD_MUTEX(un)); switch (uip->ui_flags) { case SD_PATH_DIRECT: chain_type = SD_CHAIN_DIRECT;
*** 11778,11790 **** struct sd_lun *un; sd_ssc_t *ssc; int rval; un = ddi_get_soft_state(sd_state, SDUNIT(dev)); ! if (un == NULL) { return (ENXIO); - } /* * Using sd_ssc_send to handle uscsi cmd */ ssc = sd_ssc_init(un); --- 11473,11484 ---- struct sd_lun *un; sd_ssc_t *ssc; int rval; un = ddi_get_soft_state(sd_state, SDUNIT(dev)); ! if (un == NULL || DEVI_IS_GONE(SD_DEVINFO(un))) return (ENXIO); /* * Using sd_ssc_send to handle uscsi cmd */ ssc = sd_ssc_init(un);
*** 11947,11956 **** --- 11641,11651 ---- { struct sd_uscsi_info *uip; struct uscsi_cmd *uscmd; struct sd_lun *un; dev_t dev; + dev_info_t *dip = SD_DEVINFO(ssc->ssc_un); int format = 0; int rval; ASSERT(ssc != NULL);
*** 11990,11999 **** --- 11685,11695 ---- * if USCSI_PMFAILFAST is set and un is in low power, fail the * command immediately. */ mutex_enter(SD_MUTEX(un)); mutex_enter(&un->un_pm_mutex); + if ((uscmd->uscsi_flags & USCSI_PMFAILFAST) && SD_DEVICE_IS_IN_LOW_POWER(un)) { SD_TRACE(SD_LOG_IO, un, "sd_ssc_send:" "un:0x%p is in low power\n", un); mutex_exit(&un->un_pm_mutex);
*** 12058,12067 **** --- 11754,11767 ---- uscmd->uscsi_flags &= ~USCSI_NOINTR; dev = SD_GET_DEV(un); rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, sd_uscsi_strategy, NULL, uip); + if (DEVI_IS_GONE(dip)) { + cmn_err(CE_WARN, "%s-%d: device is gone!", __func__, __LINE__); + return (ENXIO); + } /* * mark ssc_flags right after handle_cmd to make sure * the uscsi has been sent */
*** 12426,12435 **** --- 12126,12137 ---- */ un->un_pm_idle_time = gethrtime(); un->un_ncmds_in_driver--; ASSERT(un->un_ncmds_in_driver >= 0); + if (un->un_f_detach_waiting) + cv_signal(&un->un_detach_cv); SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n", un->un_ncmds_in_driver); mutex_exit(SD_MUTEX(un));
*** 12476,12485 **** --- 12178,12189 ---- */ un->un_pm_idle_time = gethrtime(); un->un_ncmds_in_driver--; ASSERT(un->un_ncmds_in_driver >= 0); + if (un->un_f_detach_waiting) + cv_signal(&un->un_detach_cv); SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", un->un_ncmds_in_driver); mutex_exit(SD_MUTEX(un));
*** 12873,12886 **** if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) || (bp->b_bcount == 0)) { goto done; } - #if defined(__i386) || defined(__amd64) /* We do not support non-block-aligned transfers for ROD devices */ ASSERT(!ISROD(un)); - #endif xp = SD_GET_XBUF(bp); ASSERT(xp != NULL); SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " --- 12577,12588 ----
*** 13575,13590 **** * Use CDB_GROUP1 commands for most devices except for * parallel SCSI fixed drives in which case we get better * performance using CDB_GROUP0 commands (where applicable). */ un->un_mincdb = SD_CDB_GROUP1; - #if !defined(__fibre) if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && !un->un_f_has_removable_media) { un->un_mincdb = SD_CDB_GROUP0; } - #endif /* * Try to read the max-cdb-length supported by HBA. */ un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); --- 13277,13290 ----
*** 13618,13628 **** #endif un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) ? sizeof (struct scsi_arq_status) : 1); if (!ISCD(un)) ! un->un_cmd_timeout = (ushort_t)sd_io_time; un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout; } /* --- 13318,13328 ---- #endif un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) ? sizeof (struct scsi_arq_status) : 1); if (!ISCD(un)) ! un->un_cmd_timeout = (ushort_t)un->un_io_time; un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout; } /*
*** 13670,13680 **** SD_TRACE(SD_LOG_IO_CORE, un, "sd_initpkt_for_buf: entry: buf:0x%p\n", bp); mutex_exit(SD_MUTEX(un)); - #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ if (xp->xb_pkt_flags & SD_XB_DMA_FREED) { /* * Already have a scsi_pkt -- just need DMA resources. * We must recompute the CDB in case the mapping returns * a nonzero pkt_resid. --- 13370,13379 ----
*** 13686,13696 **** ASSERT(xp->xb_pktp != NULL); pktp = xp->xb_pktp; } else { pktp = NULL; } - #endif /* __i386 || __amd64 */ startblock = xp->xb_blkno; /* Absolute block num. */ blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK); --- 13385,13394 ----
*** 13734,13746 **** *pktpp = pktp; SD_TRACE(SD_LOG_IO_CORE, un, "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); - #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; - #endif mutex_enter(SD_MUTEX(un)); return (SD_PKT_ALLOC_SUCCESS); } --- 13432,13442 ----
*** 14400,14419 **** if (bp->b_flags & (B_PAGEIO | B_PHYS)) { bp_mapin(bp); } bflags &= (B_READ | B_WRITE); - #if defined(__i386) || defined(__amd64) new_bp = getrbuf(KM_SLEEP); new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); new_bp->b_bcount = datalen; new_bp->b_flags = bflags | (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); - #else - new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, - datalen, bflags, SLEEP_FUNC, NULL); - #endif new_bp->av_forw = NULL; new_bp->av_back = NULL; new_bp->b_dev = bp->b_dev; new_bp->b_blkno = blkno; new_bp->b_iodone = func; --- 14096,14110 ----
*** 14498,14529 **** ASSERT(bp != NULL); xp = SD_GET_XBUF(bp); ASSERT(xp != NULL); - #if defined(__sparc) /* - * Call bp_mapout() before freeing the buf, in case a lower - * layer or HBA had done a bp_mapin(). we must do this here - * as we are the "originator" of the shadow buf. - */ - bp_mapout(bp); - #endif - - /* * Null out b_iodone before freeing the bp, to ensure that the driver * never gets confused by a stale value in this field. (Just a little * extra defensiveness here.) */ bp->b_iodone = NULL; - #if defined(__i386) || defined(__amd64) kmem_free(bp->b_un.b_addr, bp->b_bcount); freerbuf(bp); - #else - scsi_free_consistent_buf(bp); - #endif kmem_free(xp, sizeof (struct sd_xbuf)); } --- 14189,14207 ----
*** 14745,14757 **** sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) { struct sd_xbuf *xp; struct buf *bp; void (*statp)(kstat_io_t *); - #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ void (*saved_statp)(kstat_io_t *); - #endif int rval; struct sd_fm_internal *sfip = NULL; ASSERT(un != NULL); ASSERT(mutex_owned(SD_MUTEX(un))); --- 14423,14433 ----
*** 14758,14771 **** ASSERT(un->un_ncmds_in_transport >= 0); ASSERT(un->un_throttle >= 0); SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); do { - #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ saved_statp = NULL; - #endif /* * If we are syncing or dumping, fail the command to * avoid recursively calling back into scsi_transport(). * The dump I/O itself uses a separate code path so this --- 14434,14463 ---- ASSERT(un->un_ncmds_in_transport >= 0); ASSERT(un->un_throttle >= 0); SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); + /* + * If the device is currently retired, abort all pending I/O. + */ + if (DEVI(un->un_sd->sd_dev)->devi_flags & DEVI_RETIRED) { + if (immed_bp) { + immed_bp->b_resid = immed_bp->b_bcount; + bioerror(immed_bp, ENXIO); + biodone(immed_bp); + } + /* abort in-flight IO */ + (void) scsi_abort(SD_ADDRESS(un), NULL); + /* abort pending IO */ + un->un_failfast_state = SD_FAILFAST_ACTIVE; + un->un_failfast_bp = NULL; + sd_failfast_flushq(un, B_TRUE); + return; + } + do { saved_statp = NULL; /* * If we are syncing or dumping, fail the command to * avoid recursively calling back into scsi_transport(). * The dump I/O itself uses a separate code path so this
*** 14818,14830 **** if ((un->un_retry_statp == kstat_waitq_enter) || (un->un_retry_statp == kstat_runq_back_to_waitq)) { statp = kstat_waitq_to_runq; } - #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ saved_statp = un->un_retry_statp; - #endif un->un_retry_statp = NULL; SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " "un_throttle:%d un_ncmds_in_transport:%d\n", --- 14510,14520 ----
*** 14897,14927 **** * If state is SD_STATE_PM_CHANGING then this command is * part of the device power control and the state must * not be put back to normal. Doing so would * allow new commands to proceed when they shouldn't; * the device may be going off. */ if ((un->un_state != SD_STATE_SUSPENDED) && ! (un->un_state != SD_STATE_PM_CHANGING)) { New_state(un, SD_STATE_NORMAL); } xp = SD_GET_XBUF(bp); ASSERT(xp != NULL); - #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ /* * Allocate the scsi_pkt if we need one, or attach DMA * resources if we have a scsi_pkt that needs them. The * latter should only occur for commands that are being * retried. */ if ((xp->xb_pktp == NULL) || ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { - #else - if (xp->xb_pktp == NULL) { - #endif /* * There is no scsi_pkt allocated for this buf. Call * the initpkt function to allocate & init one. * * The scsi_init_pkt runout callback functionality is --- 14587,14617 ---- * If state is SD_STATE_PM_CHANGING then this command is * part of the device power control and the state must * not be put back to normal. Doing so would * allow new commands to proceed when they shouldn't; * the device may be going off. + * + * Similarly, if the state is SD_STATE_ATTACHING we should + * not set it to SD_STATE_NORMAL to avoid corruption. */ if ((un->un_state != SD_STATE_SUSPENDED) && ! (un->un_state != SD_STATE_PM_CHANGING) && ! (un->un_state != SD_STATE_ATTACHING)) { New_state(un, SD_STATE_NORMAL); } xp = SD_GET_XBUF(bp); ASSERT(xp != NULL); /* * Allocate the scsi_pkt if we need one, or attach DMA * resources if we have a scsi_pkt that needs them. The * latter should only occur for commands that are being * retried. */ if ((xp->xb_pktp == NULL) || ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { /* * There is no scsi_pkt allocated for this buf. Call * the initpkt function to allocate & init one. * * The scsi_init_pkt runout callback functionality is
*** 14985,14996 **** * sent as an immed_bp (which we just fail). */ SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); - #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ - if (bp == immed_bp) { /* * If SD_XB_DMA_FREED is clear, then * this is a failure to allocate a * scsi_pkt, and we must fail the --- 14675,14684 ----
*** 15058,15073 **** un, SD_RESTART_TIMEOUT); } goto exit; } - #else - if (bp == immed_bp) { - break; /* Just fail the command */ - } - #endif - /* Add the buf back to the head of the waitq */ bp->av_forw = un->un_waitq_headp; un->un_waitq_headp = bp; if (un->un_waitq_tailp == NULL) { un->un_waitq_tailp = bp; --- 14746,14755 ----
*** 15084,15106 **** "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); break; case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: /* ! * Note:x86: Partial DMA mapping not supported ! * for USCSI commands, and all the needed DMA ! * resources were not allocated. */ SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: " "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); break; case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: /* ! * Note:x86: Request cannot fit into CDB based ! * on lba and len. */ SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: " "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); break; --- 14766,14788 ---- "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); break; case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: /* ! * Partial DMA mapping not supported for USCSI ! * commands, and all the needed DMA resources ! * were not allocated. */ SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: " "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); break; case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: /* ! * Request cannot fit into CDB based on lba ! * and len. */ SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: " "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); break;
*** 15135,15144 **** --- 14817,14829 ---- xp->xb_pktp->pkt_flags |= FLAG_HEAD; } un->un_ncmds_in_transport++; SD_UPDATE_KSTATS(un, statp, bp); + /* The start time MAY be overridden by the HBA driver. */ + xp->xb_pktp->pkt_start = gethrtime(); + xp->xb_pktp->pkt_stop = 0; /* * Call scsi_transport() to send the command to the target. * According to SCSA architecture, we must drop the mutex here * before calling scsi_transport() in order to avoid deadlock.
*** 15148,15157 **** --- 14833,14850 ---- */ SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: calling scsi_transport()\n"); DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); + #ifdef SD_FAULT_INJECTION + /* + * Packet is ready for submission to the HBA. Perform HBA-based + * fault-injection. + */ + sd_prefaultinjection(xp->xb_pktp); + #endif /* SD_FAULT_INJECTION */ + mutex_exit(SD_MUTEX(un)); rval = scsi_transport(xp->xb_pktp); mutex_enter(SD_MUTEX(un)); SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
*** 15165,15190 **** case TRAN_BUSY: un->un_ncmds_in_transport--; ASSERT(un->un_ncmds_in_transport >= 0); /* * Don't retry request sense, the sense data * is lost when another request is sent. * Free up the rqs buf and retry * the original failed cmd. Update kstat. */ ! if (bp == un->un_rqs_bp) { SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); bp = sd_mark_rqs_idle(un, xp); sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); goto exit; } - #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ /* * Free the DMA resources for the scsi_pkt. This will * allow mpxio to select another path the next time * we call scsi_transport() with this scsi_pkt. * See sdintr() for the rationalization behind this. --- 14858,14905 ---- case TRAN_BUSY: un->un_ncmds_in_transport--; ASSERT(un->un_ncmds_in_transport >= 0); + #ifdef SD_FAULT_INJECTION /* + * If the packet was rejected during an active fault- + * injection session, move to the next fault slot + * and reset the packet flag related to rejection. + */ + if (sd_fault_injection_on) { + uint_t i = un->sd_fi_fifo_start; + + if (un->sd_fi_fifo_tran[i] != NULL) { + kmem_free(un->sd_fi_fifo_tran[i], + sizeof (struct sd_fi_tran)); + un->sd_fi_fifo_tran[i] = NULL; + } + un->sd_fi_fifo_start++; + } + + if (xp->xb_pktp->pkt_flags & FLAG_PKT_BUSY) { + xp->xb_pktp->pkt_flags &= ~FLAG_PKT_BUSY; + } + #endif /* SD_FAULT_INJECTION */ + + /* * Don't retry request sense, the sense data * is lost when another request is sent. * Free up the rqs buf and retry * the original failed cmd. Update kstat. */ ! if ((un->un_ncmds_in_transport > 0) && ! (bp == un->un_rqs_bp)) { SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); bp = sd_mark_rqs_idle(un, xp); sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); goto exit; } /* * Free the DMA resources for the scsi_pkt. This will * allow mpxio to select another path the next time * we call scsi_transport() with this scsi_pkt. * See sdintr() for the rationalization behind this.
*** 15193,15203 **** ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { scsi_dmafree(xp->xb_pktp); xp->xb_pkt_flags |= SD_XB_DMA_FREED; } - #endif if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { /* * Commands that are SD_PATH_DIRECT_PRIORITY * are for error recovery situations. These do --- 14908,14917 ----
*** 15240,15250 **** sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); } /* * Set up the bp to be tried again 10 ms later. ! * Note:x86: Is there a timeout value in the sd_lun * for this condition? */ sd_set_retry_bp(un, bp, un->un_busy_timeout / 500, kstat_runq_back_to_waitq); goto exit; --- 14954,14964 ---- sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); } /* * Set up the bp to be tried again 10 ms later. ! * XXX Is there a timeout value in the sd_lun * for this condition? */ sd_set_retry_bp(un, bp, un->un_busy_timeout / 500, kstat_runq_back_to_waitq); goto exit;
*** 15369,15379 **** * transfer, try sending it */ sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, (clock_t)0, NULL); sd_start_cmds(un, NULL); ! return; /* Note:x86: need a return here? */ } } /* * If this is the failfast bp, clear it from un_failfast_bp. This --- 15083,15093 ---- * transfer, try sending it */ sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, (clock_t)0, NULL); sd_start_cmds(un, NULL); ! return; /* XXX need a return here? */ } } /* * If this is the failfast bp, clear it from un_failfast_bp. This
*** 15585,15594 **** --- 15299,15314 ---- * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE * if the check should be made to see if FLAG_ISOLATE is set * in the pkt. If FLAG_ISOLATE is set, then the command is * not retried; it is simply failed. * + * Optionally may be bitwise-OR'ed with SD_RETRIES_FAILFAST + * to indicate a retry following a command timeout, and to check + * whether the target should transition to failfast pending or + * failfast active. If the buf has B_FAILFAST set, the + * command should be failed when failfast is active. + * * user_funcp - Ptr to function to call before dispatching the * command. May be NULL if no action needs to be performed. * (Primarily intended for printing messages.) * * user_arg - Optional argument to be passed along to
*** 15689,15698 **** --- 15409,15435 ---- if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) { goto fail_command; } } + if (sd_failfast_enable & (SD_FAILFAST_ENABLE_FAIL_RETRIES | + SD_FAILFAST_ENABLE_FAIL_ALL_RETRIES)) { + if (sd_failfast_enable & SD_FAILFAST_ENABLE_FAIL_ALL_RETRIES) { + /* + * Fail ALL retries when in active failfast state, + * regardless of reason. + */ + if (un->un_failfast_state == SD_FAILFAST_ACTIVE) { + goto fail_command; + } + } + /* + * Treat bufs being retried as if they have the + * B_FAILFAST flag set. + */ + bp->b_flags |= B_FAILFAST; + } /* * If SD_RETRIES_FAILFAST is set, it indicates that either a * command timeout or a selection timeout has occurred. This means * that we were unable to establish any kind of communication with
*** 15740,15750 **** * so enter active failfast state & flush * queues as appropriate. */ un->un_failfast_state = SD_FAILFAST_ACTIVE; un->un_failfast_bp = NULL; ! sd_failfast_flushq(un); /* * Fail this bp now if B_FAILFAST set; * otherwise continue with retries. (It would * be pretty ironic if this bp succeeded on a --- 15477,15487 ---- * so enter active failfast state & flush * queues as appropriate. */ un->un_failfast_state = SD_FAILFAST_ACTIVE; un->un_failfast_bp = NULL; ! sd_failfast_flushq(un, B_FALSE); /* * Fail this bp now if B_FAILFAST set; * otherwise continue with retries. (It would * be pretty ironic if this bp succeeded on a
*** 15781,15791 **** --- 15518,15534 ---- * communication with the target and subsequent commands * and/or retries are likely to get through to the target, * In this case we want to be aggressive about clearing * the failfast state. Note that this does not affect * the "failfast pending" condition. + * + * We limit this to retries that are not a side effect of an + * unrelated event, as it would be unwise to clear failfast + * active state when we see retries due to a reset. */ + if ((sd_failfast_enable & SD_FAILFAST_ENABLE_FORCE_INACTIVE) && + (retry_check_flag & SD_RETRIES_MASK) != SD_RETRIES_VICTIM) un->un_failfast_state = SD_FAILFAST_INACTIVE; } /*
*** 16223,16235 **** * Description: Sends a REQUEST SENSE command to the target * * Context: May be called from interrupt context. */ ! static void ! sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, ! struct scsi_pkt *pktp) { ASSERT(bp != NULL); ASSERT(un != NULL); ASSERT(mutex_owned(SD_MUTEX(un))); --- 15966,15977 ---- * Description: Sends a REQUEST SENSE command to the target * * Context: May be called from interrupt context. */ ! static void ! sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, ! int retry_check_flag, struct scsi_pkt *pktp) { ASSERT(bp != NULL); ASSERT(un != NULL); ASSERT(mutex_owned(SD_MUTEX(un)));
*** 16252,16269 **** /* * Retry the failed command and don't issue the request sense if: * 1) the sense buf is busy * 2) we have 1 or more outstanding commands on the target * (the sense data will be cleared or invalidated any way) - * - * Note: There could be an issue with not checking a retry limit here, - * the problem is determining which retry limit to check. */ if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { /* Don't retry if the command is flagged as non-retryable */ if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { ! sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, un->un_busy_timeout, kstat_waitq_enter); SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_send_request_sense_command: " "at full throttle, retrying exit\n"); --- 15994,16008 ---- /* * Retry the failed command and don't issue the request sense if: * 1) the sense buf is busy * 2) we have 1 or more outstanding commands on the target * (the sense data will be cleared or invalidated any way) */ if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { /* Don't retry if the command is flagged as non-retryable */ if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { ! sd_retry_command(un, bp, retry_check_flag, NULL, NULL, 0, un->un_busy_timeout, kstat_waitq_enter); SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_send_request_sense_command: " "at full throttle, retrying exit\n");
*** 16431,16443 **** SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); /* Set up the other needed members in the ARQ scsi_pkt. */ un->un_rqs_pktp->pkt_comp = sdintr; ! un->un_rqs_pktp->pkt_time = sd_io_time; ! un->un_rqs_pktp->pkt_flags |= ! (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ /* * Allocate & init the sd_xbuf struct for the RQS command. Do not * provide any intpkt, destroypkt routines as we take care of * scsi_pkt allocation/freeing here and in sd_free_rqs(). --- 16170,16182 ---- SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); /* Set up the other needed members in the ARQ scsi_pkt. */ un->un_rqs_pktp->pkt_comp = sdintr; ! un->un_rqs_pktp->pkt_time = ((ISCD(un)) ? 2 : 1) * ! (ushort_t)un->un_io_time; ! un->un_rqs_pktp->pkt_flags |= (FLAG_SENSING | FLAG_HEAD); /* * Allocate & init the sd_xbuf struct for the RQS command. Do not * provide any intpkt, destroypkt routines as we take care of * scsi_pkt allocation/freeing here and in sd_free_rqs().
*** 16468,16490 **** * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). * * The 3rd case is the HBA (adp) always returns enabled on * scsi_ifgetcap() even when it's not enabled; the best approach * is to issue a scsi_ifsetcap() then a scsi_ifgetcap() - * Note: this case is to circumvent the Adaptec bug. (x86 only) */ if (un->un_f_is_fibre == TRUE) { un->un_f_arq_enabled = TRUE; } else { - #if defined(__i386) || defined(__amd64) /* ! * Circumvent the Adaptec bug, remove this code when ! * the bug is fixed */ (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); - #endif switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { case 0: SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_alloc_rqs: HBA supports ARQ\n"); /* --- 16207,16226 ---- * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). * * The 3rd case is the HBA (adp) always returns enabled on * scsi_ifgetcap() even when it's not enabled; the best approach * is to issue a scsi_ifsetcap() then a scsi_ifgetcap() */ if (un->un_f_is_fibre == TRUE) { un->un_f_arq_enabled = TRUE; } else { /* ! * XXX Circumvent the Adaptec bug, remove this code when ! * the bug is fixed. */ (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { case 0: SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_alloc_rqs: HBA supports ARQ\n"); /*
*** 16775,16785 **** --- 16511,16562 ---- */ SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); return (1); } + static void + sd_slow_io_ereport(struct scsi_pkt *pktp) + { + struct buf *bp; + struct sd_lun *un; + char *devid; + ASSERT(pktp != NULL); + bp = (struct buf *)pktp->pkt_private; + ASSERT(bp != NULL); + un = SD_GET_UN(bp); + ASSERT(un != NULL); + + SD_ERROR(SD_LOG_IO_CORE | SD_LOG_ERROR, un, + "Slow IO detected SD: 0x%p delta in nsec: %llu", + (void *)un, pktp->pkt_stop - pktp->pkt_start); + + devid = DEVI(un->un_sd->sd_dev)->devi_devid_str; + scsi_fm_ereport_post(un->un_sd, 0, NULL, "cmd.disk.slow-io", + fm_ena_generate(0, FM_ENA_FMT1), devid, NULL, DDI_NOSLEEP, NULL, + FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, + "start", DATA_TYPE_UINT64, pktp->pkt_start, + "stop", DATA_TYPE_UINT64, pktp->pkt_stop, + "delta", DATA_TYPE_UINT64, pktp->pkt_stop - pktp->pkt_start, + "threshold", DATA_TYPE_UINT64, un->un_slow_io_threshold, + "pkt-reason", DATA_TYPE_UINT32, pktp->pkt_reason, + NULL); + } + + /* Clamp the value between 0..max using min as the offset */ + static int + clamp_lat(int bucket, int min, int max) + { + + if (max < bucket) + bucket = max; + if (min > bucket) + bucket = min; + + return (bucket - min); + } + /* * Function: sdintr * * Description: Completion callback routine for scsi_pkt(9S) structs * sent to the HBA driver via scsi_transport(9F).
*** 16793,16802 **** --- 16570,16581 ---- struct buf *bp; struct sd_xbuf *xp; struct sd_lun *un; size_t actual_len; sd_ssc_t *sscp; + hrtime_t io_delta = 0LL; + int bucket; ASSERT(pktp != NULL); bp = (struct buf *)pktp->pkt_private; ASSERT(bp != NULL); xp = SD_GET_XBUF(bp);
*** 16829,16839 **** --- 16608,16641 ---- /* Increment counter to indicate that the callback routine is active */ un->un_in_callback++; SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); + /* If the HBA driver did not set the stop time, set it now. */ + if (pktp->pkt_stop == 0) + pktp->pkt_stop = gethrtime(); + /* + * If there are HBA drivers or layered drivers which do not participate + * in slow-io diagnosis, the start time set above may be overwritten + * with zero. If pkt_start is zero, the delta should also be zero. + */ + if (pktp->pkt_start != 0) + io_delta = pktp->pkt_stop - pktp->pkt_start; + if (un->un_slow_io_threshold > 0 && io_delta > un->un_slow_io_threshold) + sd_slow_io_ereport(pktp); + if (un->un_lat_stats) { + un->un_lat_stats->l_nrequest++; + un->un_lat_stats->l_sum += io_delta; + /* Track the latency in usec and quantize by power of 2 */ + bucket = clamp_lat(ddi_fls(io_delta / 1000), + SD_LAT_MIN_USEC_SHIFT, SD_LAT_MAX_USEC_SHIFT - 1); + ASSERT3S(bucket, >=, 0); + ASSERT3S(bucket, <, ARRAY_SIZE(un->un_lat_stats->l_histogram)); + un->un_lat_stats->l_histogram[bucket]++; + } + #ifdef SDDEBUG if (bp == un->un_retry_bp) { SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", un, un->un_retry_bp, un->un_ncmds_in_transport);
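The histogram update above quantizes each I/O's latency into power-of-two microsecond buckets via ddi_fls(9F) and the clamp_lat() helper. A self-contained userland model of that arithmetic (the SD_LAT_*_SHIFT values below are assumptions for illustration, not the patch's real values, and fls_ull() stands in for ddi_fls()):

    #include <stdio.h>

    #define SD_LAT_MIN_USEC_SHIFT 3    /* assumed floor: 8 usec */
    #define SD_LAT_MAX_USEC_SHIFT 23   /* assumed ceiling: ~8.4 sec */

    /* Highest set bit, 1-indexed (0 for v == 0); models ddi_fls(9F). */
    static int
    fls_ull(unsigned long long v)
    {
        int bit = 0;

        while (v != 0) {
            bit++;
            v >>= 1;
        }
        return (bit);
    }

    /* Same body as the driver's clamp_lat(): clamp, then rebase at min. */
    static int
    clamp_lat(int bucket, int min, int max)
    {
        if (max < bucket)
            bucket = max;
        if (min > bucket)
            bucket = min;
        return (bucket - min);
    }

    int
    main(void)
    {
        /* An 8 ms I/O: 8000000 ns -> 8000 usec -> fls_ull(8000) == 13. */
        unsigned long long io_delta = 8000000ULL;
        int bucket = clamp_lat(fls_ull(io_delta / 1000),
            SD_LAT_MIN_USEC_SHIFT, SD_LAT_MAX_USEC_SHIFT - 1);

        printf("histogram bucket = %d\n", bucket);  /* prints 10 (13 - 3) */
        return (0);
    }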
*** 16928,16938 **** "sdintr: arq done and FLAG_DIAGNOSE set\n"); sd_return_failed_command(un, bp, EIO); goto exit; } - #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ /* * We want to either retry or fail this command, so free * the DMA resources here. If we retry the command then * the DMA resources will be reallocated in sd_start_cmds(). * Note that when PKT_DMA_PARTIAL is used, this reallocation --- 16730,16739 ----
*** 16944,16954 **** ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && ((pktp->pkt_flags & FLAG_SENSING) == 0)) { scsi_dmafree(pktp); xp->xb_pkt_flags |= SD_XB_DMA_FREED; } - #endif SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: arq done, sd_handle_auto_request_sense\n"); sd_handle_auto_request_sense(un, bp, xp, pktp); --- 16745,16754 ----
*** 17020,17031 **** return; } not_successful: - - #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ /* * The following is based upon knowledge of the underlying transport * and its use of DMA resources. This code should be removed when * PKT_DMA_PARTIAL support is taken out of the disk driver in favor * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() --- 16820,16829 ----
*** 17049,17059 **** ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && ((pktp->pkt_flags & FLAG_SENSING) == 0)) { scsi_dmafree(pktp); xp->xb_pkt_flags |= SD_XB_DMA_FREED; } - #endif /* * The command did not successfully complete as requested so check * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal * driver command that should not be retried so just return. If --- 16847,16856 ----
*** 17067,17077 **** * (we handle the auto request sense case above), otherwise * just fail the command. */ if ((pktp->pkt_reason == CMD_CMPLT) && (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { ! sd_send_request_sense_command(un, bp, pktp); } else { sd_return_failed_command(un, bp, EIO); } goto exit; } --- 16864,16875 ---- * (we handle the auto request sense case above), otherwise * just fail the command. */ if ((pktp->pkt_reason == CMD_CMPLT) && (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { ! sd_send_request_sense_command(un, bp, ! SD_RETRIES_STANDARD, pktp); } else { sd_return_failed_command(un, bp, EIO); } goto exit; }
*** 17711,17735 **** sense_failed: /* * If the request sense failed (for whatever reason), attempt * to retry the original command. */ - #if defined(__i386) || defined(__amd64) - /* - * SD_RETRY_DELAY is conditionally compile (#if fibre) in - * sddef.h for Sparc platform, and x86 uses 1 binary - * for both SCSI/FC. - * The SD_RETRY_DELAY value need to be adjusted here - * when SD_RETRY_DELAY change in sddef.h - */ sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_failed_msg, msgp, EIO, un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); - #else - sd_retry_command(un, bp, SD_RETRIES_STANDARD, - sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); - #endif return (SD_SENSE_DATA_IS_INVALID); } /* --- 17509,17521 ----
*** 19171,19181 **** SD_UPDATE_RESERVATION_STATUS(un, pktp); funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? sd_print_retry_msg : NULL; ! sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); } /* --- 18957,18967 ---- SD_UPDATE_RESERVATION_STATUS(un, pktp); funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? sd_print_retry_msg : NULL; ! sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); } /*
*** 19274,19299 **** * retry limit may have been reached for the failed command. */ if (un->un_f_arq_enabled == FALSE) { SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " "no ARQ, sending request sense command\n"); ! sd_send_request_sense_command(un, bp, pktp); } else { SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " "ARQ,retrying request sense command\n"); - #if defined(__i386) || defined(__amd64) - /* - * The SD_RETRY_DELAY value need to be adjusted here - * when SD_RETRY_DELAY change in sddef.h - */ sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, ! un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); - #else - sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, - EIO, SD_RETRY_DELAY, NULL); - #endif } SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n"); } --- 19060,19077 ---- * retry limit may have been reached for the failed command. */ if (un->un_f_arq_enabled == FALSE) { SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " "no ARQ, sending request sense command\n"); ! sd_send_request_sense_command(un, bp, SD_RETRIES_STANDARD, ! pktp); } else { SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " "ARQ,retrying request sense command\n"); sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, ! un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, NULL); } SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n"); }
*** 20045,20055 **** ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; ucmd_buf.uscsi_rqlen = sizeof (sense_buf); ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; ! ucmd_buf.uscsi_timeout = 60; status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE, path_flag); switch (status) { --- 19823,19833 ---- ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; ucmd_buf.uscsi_rqlen = sizeof (sense_buf); ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; ! ucmd_buf.uscsi_timeout = un->un_uscsi_timeout; status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE, path_flag); switch (status) {
*** 20261,20271 **** ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf; ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE; ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; ucmd_buf.uscsi_rqlen = sizeof (sense_buf); ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; ! ucmd_buf.uscsi_timeout = 60; /* * Read Capacity (16) is a Service Action In command. One * command byte (0x9E) is overloaded for multiple operations, * with the second CDB byte specifying the desired operation --- 20039,20049 ---- ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf; ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE; ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; ucmd_buf.uscsi_rqlen = sizeof (sense_buf); ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; ! ucmd_buf.uscsi_timeout = un->un_uscsi_timeout; /* * Read Capacity (16) is a Service Action In command. One * command byte (0x9E) is overloaded for multiple operations, * with the second CDB byte specifying the desired operation
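For reference, the service-action encoding described in that comment gives READ CAPACITY(16) the following CDB shape per SBC-3: opcode 0x9E, service action 0x10 in the low five bits of byte 1, and a big-endian allocation length in bytes 10 through 13. The two #defines mirror the names illumos uses in sys/scsi/generic/commands.h but are defined locally so the sketch stands alone:

#include <stdint.h>
#include <string.h>

#define	SCMD_SVC_ACTION_IN_G4		0x9e	/* SERVICE ACTION IN(16) */
#define	SSVC_ACTION_READ_CAPACITY_G4	0x10

/* Fill a 16-byte CDB for READ CAPACITY(16). */
static void
build_read_cap16_cdb(uint8_t cdb[16], uint32_t alloc_len)
{
	(void) memset(cdb, 0, 16);
	cdb[0] = SCMD_SVC_ACTION_IN_G4;
	cdb[1] = SSVC_ACTION_READ_CAPACITY_G4;	/* bits 4:0 of byte 1 */
	/* ALLOCATION LENGTH, big-endian, bytes 10..13 */
	cdb[10] = (alloc_len >> 24) & 0xff;
	cdb[11] = (alloc_len >> 16) & 0xff;
	cdb[12] = (alloc_len >> 8) & 0xff;
	cdb[13] = alloc_len & 0xff;
}

int
main(void)
{
	uint8_t cdb[16];

	build_read_cap16_cdb(cdb, 32);
	return (cdb[0] == SCMD_SVC_ACTION_IN_G4 ? 0 : 1);
}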
*** 20305,20319 **** --- 20083,20107 ---- * * bytes 8-11: Block length in bytes * (MSB in byte:8 & LSB in byte:11) * * byte 13: LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT + * + * byte 14: + * bit 7: Thin-Provisioning Enabled + * bit 6: Thin-Provisioning Read Zeros */ capacity = BE_64(capacity16_buf[0]); lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]); lbpb_exp = (BE_64(capacity16_buf[1]) >> 16) & 0x0f; + un->un_thin_flags = 0; + if (((uint8_t *)capacity16_buf)[14] & (1 << 7)) + un->un_thin_flags |= SD_THIN_PROV_ENABLED; + if (((uint8_t *)capacity16_buf)[14] & (1 << 6)) + un->un_thin_flags |= SD_THIN_PROV_READ_ZEROS; + pbsize = lbasize << lbpb_exp; /* * Done with capacity16_buf */
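A standalone decoding sketch for the 32-byte READ CAPACITY(16) payload, following the byte offsets documented in the comment above: last LBA in bytes 0-7, block length in bytes 8-11, the logical-blocks-per-physical-block exponent in the low nibble of byte 13, and the thin-provisioning LBPME/LBPRZ bits in byte 14. The sample buffer contents are fabricated:

#include <stdint.h>
#include <stdio.h>

static uint64_t
be64(const uint8_t *p)
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return (v);
}

static uint32_t
be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
	    (uint32_t)p[2] << 8 | p[3]);
}

int
main(void)
{
	uint8_t buf[32] = { 0 };

	buf[7] = 0xff;		/* returned (last) LBA = 255 */
	buf[10] = 0x02;		/* logical block length = 512 */
	buf[13] = 0x03;		/* 2^3 logical blocks per physical block */
	buf[14] = 0xc0;		/* LBPME | LBPRZ */

	uint64_t last_lba = be64(&buf[0]);
	uint32_t lbasize = be32(&buf[8]);
	int lbpb_exp = buf[13] & 0x0f;
	int lbpme = (buf[14] >> 7) & 1;	/* thin provisioning enabled */
	int lbprz = (buf[14] >> 6) & 1;	/* unmapped reads return zeros */
	uint32_t pbsize = lbasize << lbpb_exp;

	printf("last LBA %llu, lbs %u, pbs %u, LBPME %d, LBPRZ %d\n",
	    (unsigned long long)last_lba, lbasize, pbsize, lbpme, lbprz);
	return (0);
}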
*** 20483,20493 **** ucmd_buf.uscsi_bufaddr = NULL; ucmd_buf.uscsi_buflen = 0; ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; ! ucmd_buf.uscsi_timeout = 200; status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE, path_flag); switch (status) { --- 20271,20281 ---- ucmd_buf.uscsi_bufaddr = NULL; ucmd_buf.uscsi_buflen = 0; ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; ! ucmd_buf.uscsi_timeout = 3 * un->un_uscsi_timeout; status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE, path_flag); switch (status) {
*** 20702,20712 **** ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; ucmd_buf.uscsi_buflen = buflen; ucmd_buf.uscsi_rqbuf = NULL; ucmd_buf.uscsi_rqlen = 0; ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; ! ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */ status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE, SD_PATH_DIRECT); /* --- 20490,20500 ---- ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; ucmd_buf.uscsi_buflen = buflen; ucmd_buf.uscsi_rqbuf = NULL; ucmd_buf.uscsi_rqlen = 0; ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; ! ucmd_buf.uscsi_timeout = 2 * un->un_uscsi_timeout; status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE, SD_PATH_DIRECT); /*
*** 20805,20815 **** /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ if ((flag & SD_DONT_RETRY_TUR) != 0) { ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; } ! ucmd_buf.uscsi_timeout = 60; status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : SD_PATH_STANDARD)); --- 20593,20603 ---- /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ if ((flag & SD_DONT_RETRY_TUR) != 0) { ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; } ! ucmd_buf.uscsi_timeout = un->un_uscsi_timeout; status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : SD_PATH_STANDARD));
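The same TEST UNIT READY that sd issues internally can be sent from userland through the USCSICMD ioctl, which is what ultimately feeds sd_ssc_send() for external callers. A sketch with minimal error handling; the device path is illustrative, and the fixed 60-second timeout merely stands in for the tunable un_uscsi_timeout now used above:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/scsi/impl/uscsi.h>

int
main(void)
{
	struct uscsi_cmd ucmd;
	char cdb[6] = { 0 };		/* opcode 0x00 == TEST UNIT READY */
	char rqbuf[255];
	int fd, rv;

	fd = open("/dev/rdsk/c0t0d0s0", O_RDONLY);	/* illustrative path */
	if (fd < 0) {
		perror("open");
		return (1);
	}

	(void) memset(&ucmd, 0, sizeof (ucmd));
	ucmd.uscsi_cdb = cdb;
	ucmd.uscsi_cdblen = sizeof (cdb);	/* CDB_GROUP0 */
	ucmd.uscsi_rqbuf = rqbuf;
	ucmd.uscsi_rqlen = sizeof (rqbuf);
	ucmd.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
	ucmd.uscsi_timeout = 60;

	rv = ioctl(fd, USCSICMD, &ucmd);
	printf("TUR: rv %d, scsi status 0x%x\n", rv, ucmd.uscsi_status);
	(void) close(fd);
	return (rv == 0 ? 0 : 1);
}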
*** 20902,20912 **** ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; ucmd_buf.uscsi_buflen = data_len; ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; ! ucmd_buf.uscsi_timeout = 60; status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE, SD_PATH_STANDARD); switch (status) { --- 20690,20700 ---- ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; ucmd_buf.uscsi_buflen = data_len; ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; ! ucmd_buf.uscsi_timeout = un->un_uscsi_timeout; status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE, SD_PATH_STANDARD); switch (status) {
*** 21008,21018 **** ucmd_buf.uscsi_bufaddr = (caddr_t)prp; ucmd_buf.uscsi_buflen = data_len; ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; ! ucmd_buf.uscsi_timeout = 60; switch (usr_cmd) { case SD_SCSI3_REGISTER: { mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; --- 20796,20806 ---- ucmd_buf.uscsi_bufaddr = (caddr_t)prp; ucmd_buf.uscsi_buflen = data_len; ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; ! ucmd_buf.uscsi_timeout = un->un_uscsi_timeout; switch (usr_cmd) { case SD_SCSI3_REGISTER: { mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp;
*** 21203,21213 **** uscmd->uscsi_buflen = 0; uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); uscmd->uscsi_rqlen = SENSE_LENGTH; uscmd->uscsi_rqresid = SENSE_LENGTH; uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; ! uscmd->uscsi_timeout = sd_io_time; /* * Allocate an sd_uscsi_info struct and fill it with the info * needed by sd_initpkt_for_uscsi(). Then put the pointer into * b_private in the buf for sd_initpkt_for_uscsi(). Note that --- 20991,21001 ---- uscmd->uscsi_buflen = 0; uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); uscmd->uscsi_rqlen = SENSE_LENGTH; uscmd->uscsi_rqresid = SENSE_LENGTH; uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; ! uscmd->uscsi_timeout = un->un_cmd_timeout; /* * Allocate an sd_uscsi_info struct and fill it with the info * needed by sd_initpkt_for_uscsi(). Then put the pointer into * b_private in the buf for sd_initpkt_for_uscsi(). Note that
*** 21350,21362 **** } break; } done: ! if (uip->ui_dkc.dkc_callback != NULL) { (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status); - } ASSERT((bp->b_flags & B_REMAPPED) == 0); freerbuf(bp); kmem_free(uip, sizeof (struct sd_uscsi_info)); kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); --- 21138,21149 ---- } break; } done: ! if (uip->ui_dkc.dkc_callback != NULL) (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status); ASSERT((bp->b_flags & B_REMAPPED) == 0); freerbuf(bp); kmem_free(uip, sizeof (struct sd_uscsi_info)); kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH);
*** 21364,21375 ****
--- 21151,21380 ----
  	kmem_free(uscmd, sizeof (struct uscsi_cmd));
  	return (status);
  }
+ 
+ /*
+  * Issues a single SCSI UNMAP command with a prepared UNMAP parameter list.
+  * Returns zero on success, or the non-zero command error code on failure.
+  */
+ static int
+ sd_send_scsi_UNMAP_issue_one(sd_ssc_t *ssc, unmap_param_hdr_t *uph,
+     uint64_t num_descr, uint64_t bytes)
+ {
+ 	struct sd_lun *un = ssc->ssc_un;
+ 	struct scsi_extended_sense sense_buf;
+ 	union scsi_cdb cdb;
+ 	struct uscsi_cmd ucmd_buf;
+ 	int status;
+ 	const uint64_t param_size = sizeof (unmap_param_hdr_t) +
+ 	    num_descr * sizeof (unmap_blk_descr_t);
+ 
+ 	uph->uph_data_len = BE_16(param_size - 2);
+ 	uph->uph_descr_data_len = BE_16(param_size - 8);
+ 
+ 	bzero(&cdb, sizeof (cdb));
+ 	bzero(&ucmd_buf, sizeof (ucmd_buf));
+ 	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
+ 
+ 	cdb.scc_cmd = SCMD_UNMAP;
+ 	FORMG1COUNT(&cdb, param_size);
+ 
+ 	ucmd_buf.uscsi_cdb = (char *)&cdb;
+ 	ucmd_buf.uscsi_cdblen = (uchar_t)CDB_GROUP1;
+ 	ucmd_buf.uscsi_bufaddr = (caddr_t)uph;
+ 	ucmd_buf.uscsi_buflen = param_size;
+ 	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
+ 	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
+ 	ucmd_buf.uscsi_flags = USCSI_WRITE | USCSI_RQENABLE | USCSI_SILENT;
+ 	ucmd_buf.uscsi_timeout = un->un_cmd_timeout;
+ 
+ 	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE,
+ 	    SD_PATH_STANDARD);
+ 
+ 	switch (status) {
+ 	case 0:
+ 		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
+ 
+ 		if (un->un_unmapstats) {
+ 			atomic_inc_64(&un->un_unmapstats->us_cmds.value.ui64);
+ 			atomic_add_64(&un->un_unmapstats->us_extents.value.ui64,
+ 			    num_descr);
+ 			atomic_add_64(&un->un_unmapstats->us_bytes.value.ui64,
+ 			    bytes);
+ 		}
+ 		break;	/* Success! */
+ 	case EIO:
+ 		if (un->un_unmapstats)
+ 			atomic_inc_64(&un->un_unmapstats->us_errs.value.ui64);
+ 		switch (ucmd_buf.uscsi_status) {
+ 		case STATUS_RESERVATION_CONFLICT:
+ 			status = EACCES;
+ 			break;
+ 		default:
+ 			break;
+ 		}
+ 		break;
+ 	default:
+ 		if (un->un_unmapstats)
+ 			atomic_inc_64(&un->un_unmapstats->us_errs.value.ui64);
+ 		break;
+ 	}
+ 
+ 	return (status);
+ }
+ 
+ /*
+  * Returns a pointer to the i'th block descriptor inside an UNMAP param list.
+  */
+ static inline unmap_blk_descr_t *
+ UNMAP_blk_descr_i(void *buf, uint64_t i)
+ {
+ 	return ((unmap_blk_descr_t *)((uint8_t *)buf +
+ 	    sizeof (unmap_param_hdr_t) + (i * sizeof (unmap_blk_descr_t))));
+ }
+ 
+ /*
+  * Takes the list of extents from sd_send_scsi_UNMAP, chops it up, prepares
+  * UNMAP block descriptors and issues individual SCSI UNMAP commands. While
+  * doing so, we consult the block limits to determine at most how many
+  * extents and LBAs we can UNMAP in one command.
+  * If a command fails for whatever reason, extent list processing is aborted
+  * and the failed command's status is returned. Otherwise returns 0 on
+  * success.
+  */
+ static int
+ sd_send_scsi_UNMAP_issue(dev_t dev, sd_ssc_t *ssc,
+     const dkioc_free_list_t *dfl)
+ {
+ 	struct sd_lun *un = ssc->ssc_un;
+ 	unmap_param_hdr_t *uph;
+ 	sd_blk_limits_t *lim = &un->un_blk_lim;
+ 	int rval = 0;
+ 	int partition;
+ 	/* partition offset & length in system blocks */
+ 	diskaddr_t part_off_sysblks = 0, part_len_sysblks = 0;
+ 	uint64_t part_off, part_len;
+ 	uint64_t descr_cnt_lim, byte_cnt_lim;
+ 	uint64_t descr_issued = 0, bytes_issued = 0;
+ 
+ 	uph = kmem_zalloc(SD_UNMAP_PARAM_LIST_MAXSZ, KM_SLEEP);
+ 
+ 	partition = SDPART(dev);
+ 	(void) cmlb_partinfo(un->un_cmlbhandle, partition, &part_len_sysblks,
+ 	    &part_off_sysblks, NULL, NULL, (void *)SD_PATH_DIRECT);
+ 	part_off = SD_SYSBLOCKS2BYTES(part_off_sysblks);
+ 	part_len = SD_SYSBLOCKS2BYTES(part_len_sysblks);
+ 
+ 	ASSERT(un->un_blk_lim.lim_max_unmap_lba_cnt != 0);
+ 	ASSERT(un->un_blk_lim.lim_max_unmap_descr_cnt != 0);
+ 
+ 	/* Spec says 0xffffffff are special values, so compute maximums. */
+ 	byte_cnt_lim = lim->lim_max_unmap_lba_cnt < UINT32_MAX ?
+ 	    (uint64_t)lim->lim_max_unmap_lba_cnt * un->un_tgt_blocksize :
+ 	    UINT64_MAX;
+ 	descr_cnt_lim = MIN(lim->lim_max_unmap_descr_cnt, SD_UNMAP_MAX_DESCR);
+ 
+ 	for (size_t i = 0; i < dfl->dfl_num_exts; i++) {
+ 		const dkioc_free_list_ext_t *ext = &dfl->dfl_exts[i];
+ 		uint64_t ext_start = ext->dfle_start;
+ 		uint64_t ext_length = ext->dfle_length;
+ 
+ 		while (ext_length > 0) {
+ 			unmap_blk_descr_t *ubd;
+ 			/* Respect device limit on LBA count per command */
+ 			uint64_t len = MIN(MIN(ext_length, byte_cnt_lim -
+ 			    bytes_issued), SD_TGTBLOCKS2BYTES(un, UINT32_MAX));
+ 
+ 			/* check partition limits */
+ 			if (ext_start + len > part_len) {
+ 				rval = SET_ERROR(EINVAL);
+ 				goto out;
+ 			}
+ #ifdef DEBUG
+ 			if (dfl->dfl_ck_func)
+ 				dfl->dfl_ck_func(dfl->dfl_offset + ext_start,
+ 				    len, dfl->dfl_ck_arg);
+ #endif
+ 			ASSERT3U(descr_issued, <, descr_cnt_lim);
+ 			ASSERT3U(bytes_issued, <, byte_cnt_lim);
+ 			ubd = UNMAP_blk_descr_i(uph, descr_issued);
+ 
+ 			/* adjust in-partition addresses to be device-global */
+ 			ubd->ubd_lba = BE_64(SD_BYTES2TGTBLOCKS(un,
+ 			    dfl->dfl_offset + ext_start + part_off));
+ 			ubd->ubd_lba_cnt = BE_32(SD_BYTES2TGTBLOCKS(un, len));
+ 
+ 			descr_issued++;
+ 			bytes_issued += len;
+ 
+ 			/* Issue command when device limits reached */
+ 			if (descr_issued == descr_cnt_lim ||
+ 			    bytes_issued == byte_cnt_lim) {
+ 				rval = sd_send_scsi_UNMAP_issue_one(ssc, uph,
+ 				    descr_issued, bytes_issued);
+ 				if (rval != 0)
+ 					goto out;
+ 				descr_issued = 0;
+ 				bytes_issued = 0;
+ 			}
+ 
+ 			ext_start += len;
+ 			ext_length -= len;
+ 		}
+ 	}
+ 
+ 	if (descr_issued > 0) {
+ 		/* issue last command */
+ 		rval = sd_send_scsi_UNMAP_issue_one(ssc, uph, descr_issued,
+ 		    bytes_issued);
+ 	}
+ 
+ out:
+ 	kmem_free(uph, SD_UNMAP_PARAM_LIST_MAXSZ);
+ 	return (rval);
+ }
+ 
+ /*
+  * Issues one or several UNMAP commands based on a list of extents to be
+  * unmapped. The internal multi-command processing is hidden, as the exact
+  * number of commands and extents per command is limited by both SCSI
+  * command syntax and device limits (as expressed in the SCSI Block Limits
+  * VPD page and un_blk_lim in struct sd_lun).
+  * Returns zero on success, or the error code of the first failed SCSI UNMAP
+  * command.
+  */
+ static int
+ sd_send_scsi_UNMAP(dev_t dev, sd_ssc_t *ssc, dkioc_free_list_t *dfl, int flag)
+ {
+ 	struct sd_lun *un = ssc->ssc_un;
+ 	int rval = 0;
+ 
+ 	ASSERT(!mutex_owned(SD_MUTEX(un)));
+ 	ASSERT(dfl != NULL);
+ 
+ 	/* Per spec, any of these conditions signals lack of UNMAP support.
*/ + if (!(un->un_thin_flags & SD_THIN_PROV_ENABLED) || + un->un_blk_lim.lim_max_unmap_descr_cnt == 0 || + un->un_blk_lim.lim_max_unmap_lba_cnt == 0) { + return (SET_ERROR(ENOTSUP)); + } + + /* For userspace calls we must copy in. */ + if (!(flag & FKIOCTL) && (dfl = dfl_copyin(dfl, flag, KM_SLEEP)) == + NULL) + return (SET_ERROR(EFAULT)); + + rval = sd_send_scsi_UNMAP_issue(dev, ssc, dfl); + + if (!(flag & FKIOCTL)) { + dfl_free(dfl); + dfl = NULL; + } + + return (rval); + } + + /* * Function: sd_send_scsi_GET_CONFIGURATION * * Description: Issues the get configuration command to the device. * Called from sd_check_for_writable_cd & sd_get_media_info * caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN
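The uph_data_len / uph_descr_data_len assignments at the top of sd_send_scsi_UNMAP_issue_one() follow the SBC-3 UNMAP parameter list layout: an 8-byte header whose UNMAP DATA LENGTH excludes its own first two bytes and whose BLOCK DESCRIPTOR DATA LENGTH excludes the entire header, followed by 16-byte descriptors (8-byte big-endian LBA, 4-byte block count, 4 reserved bytes). A standalone serializer sketch of that wire format:

#include <stdint.h>
#include <string.h>

#define	UNMAP_HDR_LEN	8
#define	UNMAP_DESCR_LEN	16

static void
put_be16(uint8_t *p, uint16_t v)
{
	p[0] = v >> 8; p[1] = v & 0xff;
}

static void
put_be32(uint8_t *p, uint32_t v)
{
	p[0] = v >> 24; p[1] = (v >> 16) & 0xff;
	p[2] = (v >> 8) & 0xff; p[3] = v & 0xff;
}

static void
put_be64(uint8_t *p, uint64_t v)
{
	put_be32(p, v >> 32);
	put_be32(p + 4, v & 0xffffffffULL);
}

/* Build an UNMAP parameter list with ndescr (lba, nblks) pairs. */
static size_t
build_unmap_list(uint8_t *buf, const uint64_t *lba, const uint32_t *nblks,
    uint16_t ndescr)
{
	size_t len = UNMAP_HDR_LEN + (size_t)ndescr * UNMAP_DESCR_LEN;

	(void) memset(buf, 0, len);
	put_be16(&buf[0], len - 2);	/* UNMAP DATA LENGTH */
	put_be16(&buf[2], len - 8);	/* BLOCK DESCRIPTOR DATA LENGTH */
	for (uint16_t i = 0; i < ndescr; i++) {
		uint8_t *d = &buf[UNMAP_HDR_LEN + i * UNMAP_DESCR_LEN];

		put_be64(&d[0], lba[i]);
		put_be32(&d[8], nblks[i]);
	}
	return (len);
}

int
main(void)
{
	uint8_t buf[UNMAP_HDR_LEN + UNMAP_DESCR_LEN];
	uint64_t lba = 2048;
	uint32_t nblks = 256;

	return (build_unmap_list(buf, &lba, &nblks, 1) ==
	    sizeof (buf) ? 0 : 1);
}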
*** 21421,21431 **** cdb[8] = SD_PROFILE_HEADER_LEN; ucmdbuf->uscsi_cdb = cdb; ucmdbuf->uscsi_cdblen = CDB_GROUP1; ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; ucmdbuf->uscsi_buflen = buflen; ! ucmdbuf->uscsi_timeout = sd_io_time; ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; ucmdbuf->uscsi_rqlen = rqbuflen; ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, --- 21426,21436 ---- cdb[8] = SD_PROFILE_HEADER_LEN; ucmdbuf->uscsi_cdb = cdb; ucmdbuf->uscsi_cdblen = CDB_GROUP1; ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; ucmdbuf->uscsi_buflen = buflen; ! ucmdbuf->uscsi_timeout = un->un_uscsi_timeout; ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; ucmdbuf->uscsi_rqlen = rqbuflen; ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL,
*** 21514,21524 **** cdb[8] = buflen; ucmdbuf->uscsi_cdb = cdb; ucmdbuf->uscsi_cdblen = CDB_GROUP1; ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; ucmdbuf->uscsi_buflen = buflen; ! ucmdbuf->uscsi_timeout = sd_io_time; ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; ucmdbuf->uscsi_rqlen = rqbuflen; ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, --- 21519,21529 ---- cdb[8] = buflen; ucmdbuf->uscsi_cdb = cdb; ucmdbuf->uscsi_cdblen = CDB_GROUP1; ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; ucmdbuf->uscsi_buflen = buflen; ! ucmdbuf->uscsi_timeout = un->un_uscsi_timeout; ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; ucmdbuf->uscsi_rqlen = rqbuflen; ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL,
*** 21627,21637 **** ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; ucmd_buf.uscsi_buflen = buflen; ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; ! ucmd_buf.uscsi_timeout = 60; status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE, path_flag); switch (status) { --- 21632,21642 ---- ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; ucmd_buf.uscsi_buflen = buflen; ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; ! ucmd_buf.uscsi_timeout = un->un_uscsi_timeout; status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE, path_flag); switch (status) {
*** 21746,21756 **** ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; ucmd_buf.uscsi_buflen = buflen; ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; ! ucmd_buf.uscsi_timeout = 60; status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE, path_flag); switch (status) { --- 21751,21761 ---- ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; ucmd_buf.uscsi_buflen = buflen; ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; ! ucmd_buf.uscsi_timeout = un->un_uscsi_timeout; status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE, path_flag); switch (status) {
*** 21884,21894 **** ucmd_buf.uscsi_bufaddr = bufaddr; ucmd_buf.uscsi_buflen = buflen; ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; ! ucmd_buf.uscsi_timeout = 60; status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE, path_flag); switch (status) { case 0: --- 21889,21899 ---- ucmd_buf.uscsi_bufaddr = bufaddr; ucmd_buf.uscsi_buflen = buflen; ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; ! ucmd_buf.uscsi_timeout = un->un_cmd_timeout; status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE, path_flag); switch (status) { case 0:
*** 21964,21974 **** ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; ucmd_buf.uscsi_buflen = buflen; ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; ! ucmd_buf.uscsi_timeout = 60; status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE, path_flag); switch (status) { --- 21969,21979 ---- ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; ucmd_buf.uscsi_buflen = buflen; ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; ! ucmd_buf.uscsi_timeout = un->un_uscsi_timeout; status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE, path_flag); switch (status) {
*** 22096,22106 **** ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; ucmd_buf.uscsi_buflen = buflen; ucmd_buf.uscsi_rqbuf = NULL; ucmd_buf.uscsi_rqlen = 0; ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; ! ucmd_buf.uscsi_timeout = 60; status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE, SD_PATH_DIRECT); /* --- 22101,22111 ---- ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; ucmd_buf.uscsi_buflen = buflen; ucmd_buf.uscsi_rqbuf = NULL; ucmd_buf.uscsi_rqlen = 0; ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; ! ucmd_buf.uscsi_timeout = un->un_uscsi_timeout; status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE, SD_PATH_DIRECT); /*
*** 22178,22213 ****
  		return (ENXIO);
  	}
  
  	ASSERT(!mutex_owned(SD_MUTEX(un)));
  
- 	/* Initialize sd_ssc_t for internal uscsi commands */
- 	ssc = sd_ssc_init(un);
- 
- 	is_valid = SD_IS_VALID_LABEL(un);
- 
  	/*
  	 * Moved this wait from sd_uscsi_strategy to here for
  	 * reasons of deadlock prevention. Internal driver commands,
  	 * specifically those to change a device's power level, result
  	 * in a call to sd_uscsi_strategy.
  	 */
  	mutex_enter(SD_MUTEX(un));
  	while ((un->un_state == SD_STATE_SUSPENDED) ||
! 	    (un->un_state == SD_STATE_PM_CHANGING)) {
  		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
  	}
  	/*
  	 * Twiddling the counter here protects commands from now
  	 * through to the top of sd_uscsi_strategy. Without the
  	 * counter increment, a power down, for example, could get in
  	 * after the above check for state is made and before
  	 * execution gets to the top of sd_uscsi_strategy.
  	 * That would cause problems.
  	 */
  	un->un_ncmds_in_driver++;
  
  	if (!is_valid && (flag & (FNDELAY | FNONBLOCK))) {
  		switch (cmd) {
  		case DKIOCGGEOM:	/* SD_PATH_DIRECT */
  		case DKIOCGVTOC:
--- 22183,22230 ----
  		return (ENXIO);
  	}
  
  	ASSERT(!mutex_owned(SD_MUTEX(un)));
  
  	/*
  	 * Moved this wait from sd_uscsi_strategy to here for
  	 * reasons of deadlock prevention. Internal driver commands,
  	 * specifically those to change a device's power level, result
  	 * in a call to sd_uscsi_strategy.
  	 */
  	mutex_enter(SD_MUTEX(un));
  	while ((un->un_state == SD_STATE_SUSPENDED) ||
! 	    (un->un_state == SD_STATE_PM_CHANGING) ||
! 	    (un->un_state == SD_STATE_ATTACHING)) {
  		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
  	}
+ 
+ 	if (un->un_state == SD_STATE_ATTACH_FAILED) {
+ 		mutex_exit(SD_MUTEX(un));
+ 		SD_ERROR(SD_LOG_READ_WRITE, un,
+ 		    "sdioctl: attach failed\n");
+ 		return (EIO);
+ 	}
+ 
  	/*
  	 * Twiddling the counter here protects commands from now
  	 * through to the top of sd_uscsi_strategy. Without the
  	 * counter increment, a power down, for example, could get in
  	 * after the above check for state is made and before
  	 * execution gets to the top of sd_uscsi_strategy.
  	 * That would cause problems.
  	 */
  	un->un_ncmds_in_driver++;
+ 	mutex_exit(SD_MUTEX(un));
+ 
+ 	/* Initialize sd_ssc_t for internal uscsi commands */
+ 	ssc = sd_ssc_init(un);
+ 
+ 	is_valid = SD_IS_VALID_LABEL(un);
+ 
+ 	mutex_enter(SD_MUTEX(un));
+ 
  	if (!is_valid && (flag & (FNDELAY | FNONBLOCK))) {
  		switch (cmd) {
  		case DKIOCGGEOM:	/* SD_PATH_DIRECT */
  		case DKIOCGVTOC:
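The reworked sdioctl() entry is a standard condition-variable gate: block while the unit is in a transitional state (suspended, changing power state, still attaching), fail fast if attach ultimately failed, and bump the in-driver command count so a power-down cannot sneak in afterwards. A generic pthreads rendition of the pattern; the state and field names are illustrative, not the driver's:

#include <pthread.h>
#include <errno.h>

enum dev_state { ATTACHING, ATTACH_FAILED, SUSPENDED, PM_CHANGING, READY };

struct dev {
	pthread_mutex_t	lock;
	pthread_cond_t	cv;
	enum dev_state	state;
	int		ncmds;
};

/* Returns 0 once the device is usable, EIO if attach failed. */
static int
dev_enter(struct dev *d)
{
	(void) pthread_mutex_lock(&d->lock);
	while (d->state == SUSPENDED || d->state == PM_CHANGING ||
	    d->state == ATTACHING)
		(void) pthread_cond_wait(&d->cv, &d->lock);

	if (d->state == ATTACH_FAILED) {
		(void) pthread_mutex_unlock(&d->lock);
		return (EIO);
	}
	d->ncmds++;	/* holds off power-down until the command exits */
	(void) pthread_mutex_unlock(&d->lock);
	return (0);
}

int
main(void)
{
	struct dev d = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
		READY, 0
	};

	return (dev_enter(&d));
}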
*** 22224,22239 **** case DKIOCSETEFI: case DKIOCGMBOOT: case DKIOCSMBOOT: case DKIOCG_PHYGEOM: case DKIOCG_VIRTGEOM: - #if defined(__i386) || defined(__amd64) case DKIOCSETEXTPART: - #endif /* let cmlb handle it */ goto skip_ready_valid; - case CDROMPAUSE: case CDROMRESUME: case CDROMPLAYMSF: case CDROMPLAYTRKIND: case CDROMREADTOCHDR: --- 22241,22253 ----
*** 22253,22262 **** --- 22267,22278 ---- case CDROMCDXA: case CDROMSUBCODE: if (!ISCD(un)) { un->un_ncmds_in_driver--; ASSERT(un->un_ncmds_in_driver >= 0); + if (un->un_f_detach_waiting) + cv_signal(&un->un_detach_cv); mutex_exit(SD_MUTEX(un)); err = ENOTTY; goto done_without_assess; } break;
*** 22264,22273 **** --- 22280,22291 ---- case DKIOCEJECT: case CDROMEJECT: if (!un->un_f_eject_media_supported) { un->un_ncmds_in_driver--; ASSERT(un->un_ncmds_in_driver >= 0); + if (un->un_f_detach_waiting) + cv_signal(&un->un_detach_cv); mutex_exit(SD_MUTEX(un)); err = ENOTTY; goto done_without_assess; } break;
*** 22276,22285 **** --- 22294,22305 ---- err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); if (err != 0) { mutex_enter(SD_MUTEX(un)); un->un_ncmds_in_driver--; ASSERT(un->un_ncmds_in_driver >= 0); + if (un->un_f_detach_waiting) + cv_signal(&un->un_detach_cv); mutex_exit(SD_MUTEX(un)); err = EIO; goto done_quick_assess; } mutex_enter(SD_MUTEX(un));
*** 22334,22343 **** --- 22354,22365 ---- err = EIO; } } un->un_ncmds_in_driver--; ASSERT(un->un_ncmds_in_driver >= 0); + if (un->un_f_detach_waiting) + cv_signal(&un->un_detach_cv); mutex_exit(SD_MUTEX(un)); goto done_without_assess; } }
*** 22377,22389 **** case DKIOCSETEFI: case DKIOCGMBOOT: case DKIOCSMBOOT: case DKIOCG_PHYGEOM: case DKIOCG_VIRTGEOM: - #if defined(__i386) || defined(__amd64) case DKIOCSETEXTPART: - #endif SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); /* TUR should spin up */ if (un->un_f_has_removable_media) --- 22399,22409 ----
*** 22758,22775 **** } break; case CDROMPLAYTRKIND: SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); - #if defined(__i386) || defined(__amd64) /* * not supported on ATAPI CD drives, use CDROMPLAYMSF instead */ if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { - #else - if (!ISCD(un)) { - #endif err = ENOTTY; } else { err = sr_play_trkind(dev, (caddr_t)arg, flag); } break; --- 22778,22791 ----
*** 23056,23070 **** case SDIOCINSERTUN: case SDIOCINSERTARQ: case SDIOCPUSH: case SDIOCRETRIEVE: case SDIOCRUN: SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" "SDIOC detected cmd:0x%X:\n", cmd); /* call error generator */ ! sd_faultinjection_ioctl(cmd, arg, un); ! err = 0; break; #endif /* SD_FAULT_INJECTION */ case DKIOCFLUSHWRITECACHE: --- 23072,23086 ---- case SDIOCINSERTUN: case SDIOCINSERTARQ: case SDIOCPUSH: case SDIOCRETRIEVE: case SDIOCRUN: + case SDIOCINSERTTRAN: SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" "SDIOC detected cmd:0x%X:\n", cmd); /* call error generator */ ! err = sd_faultinjection_ioctl(cmd, arg, un); break; #endif /* SD_FAULT_INJECTION */ case DKIOCFLUSHWRITECACHE:
*** 23101,23110 **** --- 23117,23140 ---- err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); } } break; + case DKIOCFREE: + { + dkioc_free_list_t *dfl = (dkioc_free_list_t *)arg; + + /* bad userspace ioctls shouldn't panic */ + if (dfl == NULL && !(flag & FKIOCTL)) { + err = SET_ERROR(EINVAL); + break; + } + /* synchronous UNMAP request */ + err = sd_send_scsi_UNMAP(dev, ssc, dfl, flag); + } + break; + case DKIOCGETWCE: { int wce; if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) {
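From userland, DKIOCFREE hands the driver a dkioc_free_list_t describing byte extents to free, which sd now translates into SCSI UNMAP commands via sd_send_scsi_UNMAP(). A hedged single-extent sketch: the extent field names match the hunks above, but the header layout, flag values, and exact sizing macro live in <sys/dkio.h>, so treat the allocation arithmetic as illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/dkio.h>

int
main(void)
{
	dkioc_free_list_t *dfl;
	int fd, rv = 0;

	fd = open("/dev/rdsk/c0t0d0s0", O_RDWR);	/* illustrative path */
	if (fd < 0) {
		perror("open");
		return (1);
	}

	/*
	 * One extent; dfl_exts[] trails the header, so this assumes the
	 * struct embeds room for a single extent (check DFL_SZ in dkio.h).
	 */
	dfl = calloc(1, sizeof (dkioc_free_list_t));
	dfl->dfl_flags = 0;		/* assumed: no special behavior */
	dfl->dfl_num_exts = 1;
	dfl->dfl_offset = 0;		/* partition-relative base */
	dfl->dfl_exts[0].dfle_start = 1024 * 1024;	/* byte offset */
	dfl->dfl_exts[0].dfle_length = 1024 * 1024;	/* byte length */

	if (ioctl(fd, DKIOCFREE, dfl) != 0) {
		perror("DKIOCFREE");
		rv = 1;
	}

	free(dfl);
	(void) close(fd);
	return (rv);
}

A kernel caller passes FKIOCTL, which is why sd_send_scsi_UNMAP() only runs dfl_copyin()/dfl_free() for userspace requests.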
*** 23246,23255 **** --- 23276,23287 ---- break; } mutex_enter(SD_MUTEX(un)); un->un_ncmds_in_driver--; ASSERT(un->un_ncmds_in_driver >= 0); + if (un->un_f_detach_waiting) + cv_signal(&un->un_detach_cv); mutex_exit(SD_MUTEX(un)); done_without_assess: sd_ssc_fini(ssc);
*** 23259,23268 **** --- 23291,23302 ---- done_with_assess: mutex_enter(SD_MUTEX(un)); un->un_ncmds_in_driver--; ASSERT(un->un_ncmds_in_driver >= 0); + if (un->un_f_detach_waiting) + cv_signal(&un->un_detach_cv); mutex_exit(SD_MUTEX(un)); done_quick_assess: if (err != 0) sd_ssc_assessment(ssc, SD_FMT_IGNORE);
*** 23363,23373 **** * dki_lbsize - logical block size * dki_capacity - capacity in blocks * dki_pbsize - physical block size (if requested) * * Return Code: 0 ! * EACCESS * EFAULT * ENXIO * EIO */ static int --- 23397,23407 ---- * dki_lbsize - logical block size * dki_capacity - capacity in blocks * dki_pbsize - physical block size (if requested) * * Return Code: 0 ! * EACCES * EFAULT * ENXIO * EIO */ static int
*** 23481,23494 **** */ if (dki_pbsize && un->un_f_descr_format_supported) { rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize, &pbsize, SD_PATH_DIRECT); ! /* ! * Override the physical blocksize if the instance already ! * has a larger value. ! */ pbsize = MAX(pbsize, un->un_phy_blocksize); } if (dki_pbsize == NULL || rval != 0 || !un->un_f_descr_format_supported) { --- 23515,23527 ---- */ if (dki_pbsize && un->un_f_descr_format_supported) { rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize, &pbsize, SD_PATH_DIRECT); ! if (un->un_f_sdconf_phy_blocksize) /* keep sd.conf's pbs */ ! pbsize = un->un_phy_blocksize; ! else /* override the pbs if the instance has a larger value */ pbsize = MAX(pbsize, un->un_phy_blocksize); } if (dki_pbsize == NULL || rval != 0 || !un->un_f_descr_format_supported) {
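The physical-block-size handling above reduces to a small precedence rule: a value supplied in sd.conf wins outright, otherwise the value reported by READ CAPACITY(16) may only ever grow the cached one. Condensed into a sketch, with types per illumos <sys/types.h>:

#include <sys/types.h>

#define	MAX(a, b)	((a) > (b) ? (a) : (b))

/* Pick the physical block size to expose, mirroring the hunk above. */
static uint_t
pick_pbsize(uint_t reported, uint_t cached, boolean_t from_sdconf)
{
	if (from_sdconf)
		return (cached);		/* keep sd.conf's pbs */
	return (MAX(reported, cached));		/* only ever grow */
}

int
main(void)
{
	return (pick_pbsize(4096, 512, B_FALSE) == 4096 ? 0 : 1);
}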
*** 23745,23754 **** --- 23778,23789 ---- * If the count isn't decremented the device can't * be powered down. */ un->un_ncmds_in_driver--; ASSERT(un->un_ncmds_in_driver >= 0); + if (un->un_f_detach_waiting) + cv_signal(&un->un_detach_cv); /* * if a prior request had been made, this will be the same * token, as scsi_watch was designed that way. */
*** 24238,24248 **** * * Return Code: -1 - on error (log sense is optional and may not be supported). * 0 - log page not found. * 1 - log page found. */ ! static int sd_log_page_supported(sd_ssc_t *ssc, int log_page) { uchar_t *log_page_data; int i; --- 24273,24283 ---- * * Return Code: -1 - on error (log sense is optional and may not be supported). * 0 - log page not found. * 1 - log page found. */ ! #ifdef notyet static int sd_log_page_supported(sd_ssc_t *ssc, int log_page) { uchar_t *log_page_data; int i;
*** 24303,24314 **** } } kmem_free(log_page_data, 0xFF); return (match); } - /* * Function: sd_mhdioc_failfast * * Description: This routine is the driver entry point for handling ioctl * requests to enable/disable the multihost failfast option. --- 24338,24349 ---- } } kmem_free(log_page_data, 0xFF); return (match); } + #endif /* * Function: sd_mhdioc_failfast * * Description: This routine is the driver entry point for handling ioctl * requests to enable/disable the multihost failfast option.
*** 25740,25755 **** diskaddr_t nblks = 0; diskaddr_t start_block; instance = SDUNIT(dev); if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || !SD_IS_VALID_LABEL(un) || ISCD(un)) { return (ENXIO); } - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) - SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); partition = SDPART(dev); SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); --- 25775,25790 ---- diskaddr_t nblks = 0; diskaddr_t start_block; instance = SDUNIT(dev); if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || + (un->un_state == SD_STATE_ATTACHING) || + (un->un_state == SD_STATE_ATTACH_FAILED) || !SD_IS_VALID_LABEL(un) || ISCD(un)) { return (ENXIO); } SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); partition = SDPART(dev); SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);
*** 27989,27999 **** com = kmem_zalloc(sizeof (*com), KM_SLEEP); com->uscsi_cdb = cdb; com->uscsi_cdblen = CDB_GROUP1; com->uscsi_bufaddr = buffer; com->uscsi_buflen = 0x04; ! com->uscsi_timeout = 300; com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, SD_PATH_STANDARD); if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { --- 28024,28034 ---- com = kmem_zalloc(sizeof (*com), KM_SLEEP); com->uscsi_cdb = cdb; com->uscsi_cdblen = CDB_GROUP1; com->uscsi_bufaddr = buffer; com->uscsi_buflen = 0x04; ! com->uscsi_timeout = 3 * un->un_cmd_timeout; com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, SD_PATH_STANDARD); if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
*** 29338,29349 **** if (wmp == NULL) wmp = kmem_cache_alloc(un->un_wm_cache, KM_NOSLEEP); if (wmp == NULL) { mutex_exit(SD_MUTEX(un)); - _NOTE(DATA_READABLE_WITHOUT_LOCK - (sd_lun::un_wm_cache)) wmp = kmem_cache_alloc(un->un_wm_cache, KM_SLEEP); mutex_enter(SD_MUTEX(un)); /* * we released the mutex so recheck and go to --- 29373,29382 ----
*** 29738,29748 **** * * Context: may execute in interrupt context. */ static void ! sd_failfast_flushq(struct sd_lun *un) { struct buf *bp; struct buf *next_waitq_bp; struct buf *prev_waitq_bp = NULL; --- 29771,29781 ---- * * Context: may execute in interrupt context. */ static void ! sd_failfast_flushq(struct sd_lun *un, boolean_t flush_all) { struct buf *bp; struct buf *next_waitq_bp; struct buf *prev_waitq_bp = NULL;
*** 29756,29766 **** /* * Check if we should flush all bufs when entering failfast state, or * just those with B_FAILFAST set. */ ! if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { /* * Move *all* bp's on the wait queue to the failfast flush * queue, including those that do NOT have B_FAILFAST set. */ if (un->un_failfast_headp == NULL) { --- 29789,29800 ---- /* * Check if we should flush all bufs when entering failfast state, or * just those with B_FAILFAST set. */ ! if ((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) || ! flush_all) { /* * Move *all* bp's on the wait queue to the failfast flush * queue, including those that do NOT have B_FAILFAST set. */ if (un->un_failfast_headp == NULL) {
*** 30016,30026 **** * causing faults in multiple layers of the driver. * */ #ifdef SD_FAULT_INJECTION - static uint_t sd_fault_injection_on = 0; /* * Function: sd_faultinjection_ioctl() * * Description: This routine is the driver entry point for handling --- 30050,30059 ----
*** 30029,30043 **** * * Arguments: cmd - the ioctl cmd received * arg - the arguments from user and returns */ ! static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) { uint_t i = 0; uint_t rval; SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n"); mutex_enter(SD_MUTEX(un)); --- 30062,30077 ---- * * Arguments: cmd - the ioctl cmd received * arg - the arguments from user and returns */ ! static int sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) { uint_t i = 0; uint_t rval; + int ret = 0; SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n"); mutex_enter(SD_MUTEX(un));
*** 30063,30072 **** --- 30097,30107 ---- for (i = 0; i < SD_FI_MAX_ERROR; i++) { un->sd_fi_fifo_pkt[i] = NULL; un->sd_fi_fifo_xb[i] = NULL; un->sd_fi_fifo_un[i] = NULL; un->sd_fi_fifo_arq[i] = NULL; + un->sd_fi_fifo_tran[i] = NULL; } un->sd_fi_fifo_start = 0; un->sd_fi_fifo_end = 0; mutex_enter(&(un->un_fi_mutex));
*** 30101,30114 **** --- 30136,30154 ---- } if (un->sd_fi_fifo_arq[i] != NULL) { kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq)); } + if (un->sd_fi_fifo_tran[i] != NULL) { + kmem_free(un->sd_fi_fifo_tran[i], + sizeof (struct sd_fi_tran)); + } un->sd_fi_fifo_pkt[i] = NULL; un->sd_fi_fifo_un[i] = NULL; un->sd_fi_fifo_xb[i] = NULL; un->sd_fi_fifo_arq[i] = NULL; + un->sd_fi_fifo_tran[i] = NULL; } un->sd_fi_fifo_start = 0; un->sd_fi_fifo_end = 0; SD_INFO(SD_LOG_IOERR, un,
*** 30120,30129 ****
--- 30160,30174 ----
  		SD_INFO(SD_LOG_SDTEST, un,
  		    "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");
  		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
+ 		if (un->sd_fi_fifo_tran[i] != NULL) {
+ 			ret = EBUSY;
+ 			break;
+ 		}
+ 
  		sd_fault_injection_on = 0;
  
  		/* No more than SD_FI_MAX_ERROR allowed in the queue */
  		if (un->sd_fi_fifo_pkt[i] != NULL) {
  			kmem_free(un->sd_fi_fifo_pkt[i],
*** 30132,30163 **** --- 30177,30277 ---- if (arg != NULL) { un->sd_fi_fifo_pkt[i] = kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP); if (un->sd_fi_fifo_pkt[i] == NULL) { /* Alloc failed don't store anything */ + ret = ENOMEM; break; } rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt), 0); if (rval == -1) { kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt)); un->sd_fi_fifo_pkt[i] = NULL; + ret = EFAULT; + break; } } else { SD_INFO(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: pkt null\n"); } break; + case SDIOCINSERTTRAN: + /* Store a tran packet struct to be pushed onto fifo. */ + SD_INFO(SD_LOG_SDTEST, un, + "sd_faultinjection_ioctl: Injecting Fault Insert TRAN\n"); + i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; + + /* + * HBA-related fault injections can't be mixed with target-level + * fault injections. + */ + if (un->sd_fi_fifo_pkt[i] != NULL || + un->sd_fi_fifo_xb[i] != NULL || + un->sd_fi_fifo_un[i] != NULL || + un->sd_fi_fifo_arq[i] != NULL) { + ret = EBUSY; + break; + } + + sd_fault_injection_on = 0; + + if (un->sd_fi_fifo_tran[i] != NULL) { + kmem_free(un->sd_fi_fifo_tran[i], + sizeof (struct sd_fi_tran)); + un->sd_fi_fifo_tran[i] = NULL; + } + if (arg != NULL) { + un->sd_fi_fifo_tran[i] = + kmem_alloc(sizeof (struct sd_fi_tran), KM_NOSLEEP); + if (un->sd_fi_fifo_tran[i] == NULL) { + /* Alloc failed don't store anything */ + ret = ENOMEM; + break; + } + rval = ddi_copyin((void *)arg, un->sd_fi_fifo_tran[i], + sizeof (struct sd_fi_tran), 0); + + if (rval == 0) { + switch (un->sd_fi_fifo_tran[i]->tran_cmd) { + case SD_FLTINJ_CMD_BUSY: + case SD_FLTINJ_CMD_TIMEOUT: + break; + default: + ret = EINVAL; + break; + } + } else { + ret = EFAULT; + } + + if (ret != 0) { + kmem_free(un->sd_fi_fifo_tran[i], + sizeof (struct sd_fi_tran)); + un->sd_fi_fifo_tran[i] = NULL; + break; + } + } else { + SD_INFO(SD_LOG_IOERR, un, + "sd_faultinjection_ioctl: tran null\n"); + } + break; + case SDIOCINSERTXB: /* Store a xb struct to be pushed onto fifo */ SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection_ioctl: Injecting Fault Insert XB\n"); i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; + if (un->sd_fi_fifo_tran[i] != NULL) { + ret = EBUSY; + break; + } + sd_fault_injection_on = 0; if (un->sd_fi_fifo_xb[i] != NULL) { kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
*** 30166,30184 **** --- 30280,30301 ---- if (arg != NULL) { un->sd_fi_fifo_xb[i] = kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP); if (un->sd_fi_fifo_xb[i] == NULL) { /* Alloc failed don't store anything */ + ret = ENOMEM; break; } rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb), 0); if (rval == -1) { kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb)); un->sd_fi_fifo_xb[i] = NULL; + ret = EFAULT; + break; } } else { SD_INFO(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: xb null\n"); }
*** 30188,30197 **** --- 30305,30318 ---- /* Store a un struct to be pushed onto fifo */ SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection_ioctl: Injecting Fault Insert UN\n"); i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; + if (un->sd_fi_fifo_tran[i] != NULL) { + ret = EBUSY; + break; + } sd_fault_injection_on = 0; if (un->sd_fi_fifo_un[i] != NULL) { kmem_free(un->sd_fi_fifo_un[i],
*** 30201,30218 **** --- 30322,30342 ---- if (arg != NULL) { un->sd_fi_fifo_un[i] = kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP); if (un->sd_fi_fifo_un[i] == NULL) { /* Alloc failed don't store anything */ + ret = ENOMEM; break; } rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un), 0); if (rval == -1) { kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un)); un->sd_fi_fifo_un[i] = NULL; + ret = EFAULT; + break; } } else { SD_INFO(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: un null\n");
*** 30223,30232 **** --- 30347,30360 ---- case SDIOCINSERTARQ: /* Store a arq struct to be pushed onto fifo */ SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n"); i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; + if (un->sd_fi_fifo_tran[i] != NULL) { + ret = EBUSY; + break; + } sd_fault_injection_on = 0; if (un->sd_fi_fifo_arq[i] != NULL) { kmem_free(un->sd_fi_fifo_arq[i],
*** 30236,30253 **** --- 30364,30384 ---- if (arg != NULL) { un->sd_fi_fifo_arq[i] = kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP); if (un->sd_fi_fifo_arq[i] == NULL) { /* Alloc failed don't store anything */ + ret = ENOMEM; break; } rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq), 0); if (rval == -1) { kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq)); un->sd_fi_fifo_arq[i] = NULL; + ret = EFAULT; + break; } } else { SD_INFO(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: arq null\n");
*** 30254,30264 ****
  		}
  		break;
  
  	case SDIOCPUSH:
! 		/* Push stored xb, pkt, un, and arq onto fifo */
  		sd_fault_injection_on = 0;
  
  		if (arg != NULL) {
  			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
  			if (rval != -1 &&
--- 30385,30395 ----
  		}
  		break;
  
  	case SDIOCPUSH:
! 		/* Push stored xb, pkt, un, arq, and tran onto fifo */
  		sd_fault_injection_on = 0;
  
  		if (arg != NULL) {
  			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
  			if (rval != -1 &&
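Driving the new SDIOCINSERTTRAN path from userland follows the same insert/push/run sequence as the existing fault-injection ioctls. A heavily hedged sketch: it assumes struct sd_fi_tran carries only the tran_cmd field used above, that the SDIOC* and SD_FLTINJ_* definitions are reachable from the private sddef.h (DEBUG tooling only), and that SDIOCRUN ignores its argument:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/scsi/targets/sddef.h>	/* private driver header */

int
main(void)
{
	struct sd_fi_tran fi = { .tran_cmd = SD_FLTINJ_CMD_TIMEOUT };
	uint_t count = 1;			/* entries to push */
	int fd = open("/dev/rdsk/c0t0d0s0", O_RDONLY);	/* illustrative */

	if (fd < 0) {
		perror("open");
		return (1);
	}
	/* Stage the HBA-level fault, push it onto the FIFO, then arm it. */
	if (ioctl(fd, SDIOCINSERTTRAN, &fi) != 0 ||
	    ioctl(fd, SDIOCPUSH, &count) != 0 ||
	    ioctl(fd, SDIOCRUN, NULL) != 0) {
		perror("fault injection ioctl");
		(void) close(fd);
		return (1);
	}
	(void) close(fd);
	return (0);
}

The EBUSY checks added throughout sd_faultinjection_ioctl() enforce that a staged tran fault is never mixed with target-level pkt/xb/un/arq faults in the same FIFO slot.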
*** 30299,30308 **** --- 30430,30440 ---- break; } mutex_exit(SD_MUTEX(un)); SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: exit\n"); + return (ret); } /* * Function: sd_injection_log()
*** 30338,30348 ****
--- 30470,30529 ----
  	}
  	mutex_exit(&(un->un_fi_mutex));
  }
+ 
+ /*
+  * This function is called just before sending the packet to the HBA.
+  * The caller must hold the per-LUN mutex; the mutex is still held on return.
+  */
+ static void
+ sd_prefaultinjection(struct scsi_pkt *pktp)
+ {
+ 	uint_t i;
+ 	struct buf *bp;
+ 	struct sd_lun *un;
+ 	struct sd_fi_tran *fi_tran;
+ 
+ 	ASSERT(pktp != NULL);
+ 
+ 	/* pull bp and un from pktp */
+ 	bp = (struct buf *)pktp->pkt_private;
+ 	un = SD_GET_UN(bp);
+ 
+ 	/* if injection is off, return */
+ 	if (sd_fault_injection_on == 0 ||
+ 	    un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
+ 		return;
+ 	}
+ 
+ 	ASSERT(un != NULL);
+ 	ASSERT(mutex_owned(SD_MUTEX(un)));
+ 
+ 	/* take the next set off the fifo */
+ 	i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;
+ 
+ 	fi_tran = un->sd_fi_fifo_tran[i];
+ 	if (fi_tran != NULL) {
+ 		switch (fi_tran->tran_cmd) {
+ 		case SD_FLTINJ_CMD_BUSY:
+ 			pktp->pkt_flags |= FLAG_PKT_BUSY;
+ 			break;
+ 		case SD_FLTINJ_CMD_TIMEOUT:
+ 			pktp->pkt_flags |= FLAG_PKT_TIMEOUT;
+ 			break;
+ 		default:
+ 			return;
+ 		}
+ 	}
+ 
+ 	/*
+ 	 * We don't deallocate any data here - it will be deallocated after
+ 	 * the packet has been processed by the HBA.
+ 	 */
+ }
+ 
  /*
   * Function: sd_faultinjection()
   *
   * Description: This routine takes the pkt and changes its
   *		content based on the error injection scenario.
*** 30563,30581 **** * 4. Building default VTOC label * * As section 3 says, sd checks if some kinds of devices have VTOC label. * If those devices have no valid VTOC label, sd(7d) will attempt to * create default VTOC for them. Currently sd creates default VTOC label ! * for all devices on x86 platform (VTOC_16), but only for removable ! * media devices on SPARC (VTOC_8). * * ----------------------------------------------------------- * removable media hotpluggable platform | Default Label * ----------------------------------------------------------- - * false false sparc | No * false true x86 | Yes - * false true sparc | Yes * true x x | Yes * ---------------------------------------------------------- * * * 5. Supported blocksizes of target devices --- 30744,30759 ---- * 4. Building default VTOC label * * As section 3 says, sd checks if some kinds of devices have VTOC label. * If those devices have no valid VTOC label, sd(7d) will attempt to * create default VTOC for them. Currently sd creates default VTOC label ! * for all devices on x86 platform (VTOC_16). * * ----------------------------------------------------------- * removable media hotpluggable platform | Default Label * ----------------------------------------------------------- * false true x86 | Yes * true x x | Yes * ---------------------------------------------------------- * * * 5. Supported blocksizes of target devices
*** 30604,30642 ****
   *     of devices and apply automounting policies to each.
   *
   *
   * 7. fdisk partition management
   *
!  *     Fdisk is traditional partition method on x86 platform. Sd(7d) driver
!  *     just supports fdisk partitions on x86 platform. On sparc platform, sd
!  *     doesn't support fdisk partitions at all. Note: pcfs(7fs) can recognize
!  *     fdisk partitions on both x86 and SPARC platform.
   *
   *     -----------------------------------------------------------
   *       platform   removable media  USB/1394  |  fdisk supported
   *     -----------------------------------------------------------
   *        x86            X               X     |       true
!  *     ------------------------------------------------------------
!  *        sparc          X               X     |       false
!  *     ------------------------------------------------------------
   *
   *
   * 8. MBOOT/MBR
   *
-  *     Although sd(7d) doesn't support fdisk on SPARC platform, it does support
-  *     read/write mboot for removable media devices on sparc platform.
-  *
   *     -----------------------------------------------------------
   *       platform   removable media  USB/1394  |  mboot supported
   *     -----------------------------------------------------------
   *        x86            X               X     |       true
!  *     ------------------------------------------------------------
!  *        sparc          false           false |       false
!  *        sparc          false           true  |       true
!  *        sparc          true            false |       true
!  *        sparc          true            true  |       true
!  *     ------------------------------------------------------------
   *
   *
   * 9. error handling during opening device
   *
   *     If a disk device fails to open, an errno is returned. For some kinds
--- 30782,30808 ----
   *     of devices and apply automounting policies to each.
   *
   *
   * 7. fdisk partition management
   *
!  *     Fdisk is the traditional partition method on the x86 platform, and
!  *     the sd(7D) driver supports fdisk partitions only on x86.
   *
   *     -----------------------------------------------------------
   *       platform   removable media  USB/1394  |  fdisk supported
   *     -----------------------------------------------------------
   *        x86            X               X     |       true
!  *     -----------------------------------------------------------
   *
   *
   * 8. MBOOT/MBR
   *
   *     -----------------------------------------------------------
   *       platform   removable media  USB/1394  |  mboot supported
   *     -----------------------------------------------------------
   *        x86            X               X     |       true
!  *     -----------------------------------------------------------
   *
   *
   * 9. error handling during opening device
   *
   *     If a disk device fails to open, an errno is returned. For some kinds
*** 31170,31186 ****
   *
   * Description: Will be called when the SD driver needs to post an ereport.
   *
   * Context: Kernel thread or interrupt context.
   */
- 
- #define	DEVID_IF_KNOWN(d) "devid", DATA_TYPE_STRING, (d) ? (d) : "unknown"
- 
  static void
  sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess)
  {
- 	int uscsi_path_instance = 0;
  	uchar_t uscsi_pkt_reason;
  	uint32_t uscsi_pkt_state;
  	uint32_t uscsi_pkt_statistics;
  	uint64_t uscsi_ena;
  	uchar_t op_code;
--- 31336,31348 ----
*** 31219,31229 **** if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || (un->un_state == SD_STATE_DUMPING)) return; uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason; - uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance; uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state; uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics; uscsi_ena = ssc->ssc_uscsi_info->ui_ena; sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; --- 31381,31390 ----
*** 31268,31278 **** * If drv_assess == SD_FM_DRV_RECOVERY, this should be a recovered * command, we will post ereport.io.scsi.cmd.disk.recovered. * driver-assessment will always be "recovered" here. */ if (drv_assess == SD_FM_DRV_RECOVERY) { ! scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL, "cmd.disk.recovered", uscsi_ena, devid, NULL, DDI_NOSLEEP, NULL, FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, DEVID_IF_KNOWN(devid), "driver-assessment", DATA_TYPE_STRING, assessment, --- 31429,31439 ---- * If drv_assess == SD_FM_DRV_RECOVERY, this should be a recovered * command, we will post ereport.io.scsi.cmd.disk.recovered. * driver-assessment will always be "recovered" here. */ if (drv_assess == SD_FM_DRV_RECOVERY) { ! scsi_fm_ereport_post(un->un_sd, 0, NULL, "cmd.disk.recovered", uscsi_ena, devid, NULL, DDI_NOSLEEP, NULL, FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, DEVID_IF_KNOWN(devid), "driver-assessment", DATA_TYPE_STRING, assessment,
*** 31295,31305 **** * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered. * SSC_FLAGS_INVALID_DATA - invalid data sent back. */ if (ssc->ssc_flags & ssc_invalid_flags) { if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) { ! scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL, "cmd.disk.dev.uderr", uscsi_ena, devid, NULL, DDI_NOSLEEP, NULL, FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, DEVID_IF_KNOWN(devid), "driver-assessment", DATA_TYPE_STRING, --- 31456,31466 ---- * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered. * SSC_FLAGS_INVALID_DATA - invalid data sent back. */ if (ssc->ssc_flags & ssc_invalid_flags) { if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) { ! scsi_fm_ereport_post(un->un_sd, 0, NULL, "cmd.disk.dev.uderr", uscsi_ena, devid, NULL, DDI_NOSLEEP, NULL, FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, DEVID_IF_KNOWN(devid), "driver-assessment", DATA_TYPE_STRING,
*** 31324,31334 **** * For other type of invalid data, the * un-decode-value field would be empty because the * un-decodable content could be seen from upper * level payload or inside un-decode-info. */ ! scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL, "cmd.disk.dev.uderr", uscsi_ena, devid, NULL, DDI_NOSLEEP, NULL, FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, DEVID_IF_KNOWN(devid), --- 31485,31495 ---- * For other type of invalid data, the * un-decode-value field would be empty because the * un-decodable content could be seen from upper * level payload or inside un-decode-info. */ ! scsi_fm_ereport_post(un->un_sd, 0, NULL, "cmd.disk.dev.uderr", uscsi_ena, devid, NULL, DDI_NOSLEEP, NULL, FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, DEVID_IF_KNOWN(devid),
*** 31366,31376 **** * error. */ if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT) ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT; ! scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL, "cmd.disk.tran", uscsi_ena, NULL, NULL, DDI_NOSLEEP, NULL, FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, DEVID_IF_KNOWN(devid), "driver-assessment", DATA_TYPE_STRING, drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment, --- 31527,31537 ---- * error. */ if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT) ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT; ! scsi_fm_ereport_post(un->un_sd, 0, NULL, "cmd.disk.tran", uscsi_ena, NULL, NULL, DDI_NOSLEEP, NULL, FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, DEVID_IF_KNOWN(devid), "driver-assessment", DATA_TYPE_STRING, drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
*** 31410,31420 **** /* * driver-assessment should be "fatal" if * drv_assess is SD_FM_DRV_FATAL. */ scsi_fm_ereport_post(un->un_sd, ! uscsi_path_instance, NULL, "cmd.disk.dev.rqs.merr", uscsi_ena, devid, NULL, DDI_NOSLEEP, NULL, FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, DEVID_IF_KNOWN(devid), --- 31571,31581 ---- /* * driver-assessment should be "fatal" if * drv_assess is SD_FM_DRV_FATAL. */ scsi_fm_ereport_post(un->un_sd, ! 0, NULL, "cmd.disk.dev.rqs.merr", uscsi_ena, devid, NULL, DDI_NOSLEEP, NULL, FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, DEVID_IF_KNOWN(devid),
*** 31459,31469 **** * error), driver-assessment should * be "fatal" if drv_assess is * SD_FM_DRV_FATAL. */ scsi_fm_ereport_post(un->un_sd, ! uscsi_path_instance, NULL, "cmd.disk.dev.rqs.derr", uscsi_ena, devid, NULL, DDI_NOSLEEP, NULL, FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, --- 31620,31630 ---- * error), driver-assessment should * be "fatal" if drv_assess is * SD_FM_DRV_FATAL. */ scsi_fm_ereport_post(un->un_sd, ! 0, NULL, "cmd.disk.dev.rqs.derr", uscsi_ena, devid, NULL, DDI_NOSLEEP, NULL, FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
*** 31514,31524 **** * Post ereport.io.scsi.cmd.disk.dev.serr if we got the * stat-code but with sense data unavailable. * driver-assessment will be set based on parameter * drv_assess. */ ! scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL, "cmd.disk.dev.serr", uscsi_ena, devid, NULL, DDI_NOSLEEP, NULL, FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, DEVID_IF_KNOWN(devid), --- 31675,31685 ---- * Post ereport.io.scsi.cmd.disk.dev.serr if we got the * stat-code but with sense data unavailable. * driver-assessment will be set based on parameter * drv_assess. */ ! scsi_fm_ereport_post(un->un_sd, 0, NULL, "cmd.disk.dev.serr", uscsi_ena, devid, NULL, DDI_NOSLEEP, NULL, FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, DEVID_IF_KNOWN(devid),
*** 31768,31781 **** if (rval != 0) { un->un_phy_blocksize = DEV_BSIZE; } else { if (!ISP2(pbsize % DEV_BSIZE) || pbsize == 0) { un->un_phy_blocksize = DEV_BSIZE; ! } else if (pbsize > un->un_phy_blocksize) { /* ! * Don't reset the physical blocksize ! * unless we've detected a larger value. */ un->un_phy_blocksize = pbsize; } } } --- 31929,31945 ---- if (rval != 0) { un->un_phy_blocksize = DEV_BSIZE; } else { if (!ISP2(pbsize % DEV_BSIZE) || pbsize == 0) { un->un_phy_blocksize = DEV_BSIZE; ! } else if (pbsize > un->un_phy_blocksize && ! !un->un_f_sdconf_phy_blocksize) { /* ! * Reset the physical block size ! * if we've detected a larger value and ! * we didn't already set the physical ! * block size in sd.conf */ un->un_phy_blocksize = pbsize; } } }