usr/src/uts/common/io/scsi/targets/sd.c

          --- old/usr/src/uts/common/io/scsi/targets/sd.c
          +++ new/usr/src/uts/common/io/scsi/targets/sd.c
... 14 lines elided ...
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  
  22   22  /*
  23   23   * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
  24   24   */
       25 +
  25   26  /*
       27 + * Copyright 2011 cyril.galibern@opensvc.com
  26   28   * Copyright (c) 2011 Bayard G. Bell.  All rights reserved.
  27   29   * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
  28   30   * Copyright 2012 DEY Storage Systems, Inc.  All rights reserved.
  29      - * Copyright 2017 Nexenta Systems, Inc.
       31 + * Copyright 2019 Nexenta Systems, Inc.
  30   32   */
  31      -/*
  32      - * Copyright 2011 cyril.galibern@opensvc.com
  33      - */
  34   33  
  35   34  /*
  36   35   * SCSI disk target driver.
  37   36   */
  38      -#include <sys/scsi/scsi.h>
       37 +#include <sys/aio_req.h>
       38 +#include <sys/byteorder.h>
       39 +#include <sys/cdio.h>
       40 +#include <sys/cmlb.h>
       41 +#include <sys/debug.h>
  39   42  #include <sys/dkbad.h>
  40      -#include <sys/dklabel.h>
  41   43  #include <sys/dkio.h>
  42      -#include <sys/fdio.h>
  43      -#include <sys/cdio.h>
  44      -#include <sys/mhd.h>
  45      -#include <sys/vtoc.h>
       44 +#include <sys/dkioc_free_util.h>
       45 +#include <sys/dklabel.h>
  46   46  #include <sys/dktp/fdisk.h>
       47 +#include <sys/efi_partition.h>
       48 +#include <sys/fdio.h>
       49 +#include <sys/fm/protocol.h>
       50 +#include <sys/fs/dv_node.h>
  47   51  #include <sys/kstat.h>
  48      -#include <sys/vtrace.h>
  49      -#include <sys/note.h>
  50      -#include <sys/thread.h>
       52 +#include <sys/mhd.h>
  51   53  #include <sys/proc.h>
  52      -#include <sys/efi_partition.h>
  53      -#include <sys/var.h>
  54      -#include <sys/aio_req.h>
  55      -
  56      -#ifdef __lock_lint
  57      -#define _LP64
  58      -#define __amd64
  59      -#endif
  60      -
  61      -#if (defined(__fibre))
  62      -/* Note: is there a leadville version of the following? */
  63      -#include <sys/fc4/fcal_linkapp.h>
  64      -#endif
       54 +#include <sys/scsi/scsi.h>
       55 +#include <sys/scsi/targets/sddef.h>
       56 +#include <sys/sdt.h>
       57 +#include <sys/sysevent/dev.h>
       58 +#include <sys/sysevent/eventdefs.h>
  65   59  #include <sys/taskq.h>
       60 +#include <sys/thread.h>
  66   61  #include <sys/uuid.h>
  67      -#include <sys/byteorder.h>
  68      -#include <sys/sdt.h>
       62 +#include <sys/var.h>
       63 +#include <sys/vtoc.h>
       64 +#include <sys/vtrace.h>
  69   65  
  70   66  #include "sd_xbuf.h"
  71   67  
  72      -#include <sys/scsi/targets/sddef.h>
  73      -#include <sys/cmlb.h>
  74      -#include <sys/sysevent/eventdefs.h>
  75      -#include <sys/sysevent/dev.h>
  76      -
  77      -#include <sys/fm/protocol.h>
  78      -
  79      -/*
  80      - * Loadable module info.
  81      - */
  82      -#if (defined(__fibre))
  83      -#define SD_MODULE_NAME  "SCSI SSA/FCAL Disk Driver"
  84      -#else /* !__fibre */
  85   68  #define SD_MODULE_NAME  "SCSI Disk Driver"
  86      -#endif /* !__fibre */
       69 +static  char *sd_label = "sd";
  87   70  
  88   71  /*
  89      - * Define the interconnect type, to allow the driver to distinguish
  90      - * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
  91      - *
  92      - * This is really for backward compatibility. In the future, the driver
  93      - * should actually check the "interconnect-type" property as reported by
  94      - * the HBA; however at present this property is not defined by all HBAs,
  95      - * so we will use this #define (1) to permit the driver to run in
  96      - * backward-compatibility mode; and (2) to print a notification message
  97      - * if an FC HBA does not support the "interconnect-type" property.  The
  98      - * behavior of the driver will be to assume parallel SCSI behaviors unless
  99      - * the "interconnect-type" property is defined by the HBA **AND** has a
 100      - * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 101      - * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 102      - * Channel behaviors (as per the old ssd).  (Note that the
 103      - * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 104      - * will result in the driver assuming parallel SCSI behaviors.)
 105      - *
 106      - * (see common/sys/scsi/impl/services.h)
 107      - *
 108      - * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 109      - * since some FC HBAs may already support that, and there is some code in
 110      - * the driver that already looks for it.  Using INTERCONNECT_FABRIC as the
 111      - * default would confuse that code, and besides things should work fine
 112      - * anyways if the FC HBA already reports INTERCONNECT_FABRIC for the
 113      - * "interconnect_type" property.
 114      - *
 115      - */
 116      -#if (defined(__fibre))
 117      -#define SD_DEFAULT_INTERCONNECT_TYPE    SD_INTERCONNECT_FIBRE
 118      -#else
 119      -#define SD_DEFAULT_INTERCONNECT_TYPE    SD_INTERCONNECT_PARALLEL
 120      -#endif
 121      -
 122      -/*
 123      - * The name of the driver, established from the module name in _init.
 124      - */
 125      -static  char *sd_label                  = NULL;
 126      -
 127      -/*
 128      - * Driver name is unfortunately prefixed on some driver.conf properties.
 129      - */
 130      -#if (defined(__fibre))
 131      -#define sd_max_xfer_size                ssd_max_xfer_size
 132      -#define sd_config_list                  ssd_config_list
 133      -static  char *sd_max_xfer_size          = "ssd_max_xfer_size";
 134      -static  char *sd_config_list            = "ssd-config-list";
 135      -#else
 136      -static  char *sd_max_xfer_size          = "sd_max_xfer_size";
 137      -static  char *sd_config_list            = "sd-config-list";
 138      -#endif
 139      -
 140      -/*
 141   72   * Driver global variables
 142   73   */
 143   74  
 144      -#if (defined(__fibre))
 145      -/*
 146      - * These #defines are to avoid namespace collisions that occur because this
 147      - * code is currently used to compile two separate driver modules: sd and ssd.
 148      - * All global variables need to be treated this way (even if declared static)
 149      - * in order to allow the debugger to resolve the names properly.
 150      - * It is anticipated that in the near future the ssd module will be obsoleted,
 151      - * at which time this namespace issue should go away.
 152      - */
 153      -#define sd_state                        ssd_state
 154      -#define sd_io_time                      ssd_io_time
 155      -#define sd_failfast_enable              ssd_failfast_enable
 156      -#define sd_ua_retry_count               ssd_ua_retry_count
 157      -#define sd_report_pfa                   ssd_report_pfa
 158      -#define sd_max_throttle                 ssd_max_throttle
 159      -#define sd_min_throttle                 ssd_min_throttle
 160      -#define sd_rot_delay                    ssd_rot_delay
 161      -
 162      -#define sd_retry_on_reservation_conflict        \
 163      -                                        ssd_retry_on_reservation_conflict
 164      -#define sd_reinstate_resv_delay         ssd_reinstate_resv_delay
 165      -#define sd_resv_conflict_name           ssd_resv_conflict_name
 166      -
 167      -#define sd_component_mask               ssd_component_mask
 168      -#define sd_level_mask                   ssd_level_mask
 169      -#define sd_debug_un                     ssd_debug_un
 170      -#define sd_error_level                  ssd_error_level
 171      -
 172      -#define sd_xbuf_active_limit            ssd_xbuf_active_limit
 173      -#define sd_xbuf_reserve_limit           ssd_xbuf_reserve_limit
 174      -
 175      -#define sd_tr                           ssd_tr
 176      -#define sd_reset_throttle_timeout       ssd_reset_throttle_timeout
 177      -#define sd_qfull_throttle_timeout       ssd_qfull_throttle_timeout
 178      -#define sd_qfull_throttle_enable        ssd_qfull_throttle_enable
 179      -#define sd_check_media_time             ssd_check_media_time
 180      -#define sd_wait_cmds_complete           ssd_wait_cmds_complete
 181      -#define sd_label_mutex                  ssd_label_mutex
 182      -#define sd_detach_mutex                 ssd_detach_mutex
 183      -#define sd_log_buf                      ssd_log_buf
 184      -#define sd_log_mutex                    ssd_log_mutex
 185      -
 186      -#define sd_disk_table                   ssd_disk_table
 187      -#define sd_disk_table_size              ssd_disk_table_size
 188      -#define sd_sense_mutex                  ssd_sense_mutex
 189      -#define sd_cdbtab                       ssd_cdbtab
 190      -
 191      -#define sd_cb_ops                       ssd_cb_ops
 192      -#define sd_ops                          ssd_ops
 193      -#define sd_additional_codes             ssd_additional_codes
 194      -#define sd_tgops                        ssd_tgops
 195      -
 196      -#define sd_minor_data                   ssd_minor_data
 197      -#define sd_minor_data_efi               ssd_minor_data_efi
 198      -
 199      -#define sd_tq                           ssd_tq
 200      -#define sd_wmr_tq                       ssd_wmr_tq
 201      -#define sd_taskq_name                   ssd_taskq_name
 202      -#define sd_wmr_taskq_name               ssd_wmr_taskq_name
 203      -#define sd_taskq_minalloc               ssd_taskq_minalloc
 204      -#define sd_taskq_maxalloc               ssd_taskq_maxalloc
 205      -
 206      -#define sd_dump_format_string           ssd_dump_format_string
 207      -
 208      -#define sd_iostart_chain                ssd_iostart_chain
 209      -#define sd_iodone_chain                 ssd_iodone_chain
 210      -
 211      -#define sd_pm_idletime                  ssd_pm_idletime
 212      -
 213      -#define sd_force_pm_supported           ssd_force_pm_supported
 214      -
 215      -#define sd_dtype_optical_bind           ssd_dtype_optical_bind
 216      -
 217      -#define sd_ssc_init                     ssd_ssc_init
 218      -#define sd_ssc_send                     ssd_ssc_send
 219      -#define sd_ssc_fini                     ssd_ssc_fini
 220      -#define sd_ssc_assessment               ssd_ssc_assessment
 221      -#define sd_ssc_post                     ssd_ssc_post
 222      -#define sd_ssc_print                    ssd_ssc_print
 223      -#define sd_ssc_ereport_post             ssd_ssc_ereport_post
 224      -#define sd_ssc_set_info                 ssd_ssc_set_info
 225      -#define sd_ssc_extract_info             ssd_ssc_extract_info
 226      -
 227      -#endif
 228      -
 229   75  #ifdef  SDDEBUG
 230   76  int     sd_force_pm_supported           = 0;
 231   77  #endif  /* SDDEBUG */
 232   78  
 233   79  void *sd_state                          = NULL;
 234   80  int sd_io_time                          = SD_IO_TIME;
 235      -int sd_failfast_enable                  = 1;
 236   81  int sd_ua_retry_count                   = SD_UA_RETRY_COUNT;
 237   82  int sd_report_pfa                       = 1;
 238   83  int sd_max_throttle                     = SD_MAX_THROTTLE;
 239   84  int sd_min_throttle                     = SD_MIN_THROTTLE;
 240   85  int sd_rot_delay                        = 4; /* Default 4ms Rotation delay */
 241   86  int sd_qfull_throttle_enable            = TRUE;
 242   87  
 243   88  int sd_retry_on_reservation_conflict    = 1;
 244   89  int sd_reinstate_resv_delay             = SD_REINSTATE_RESV_DELAY;
 245      -_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))
       90 +int sd_enable_lun_reset                 = FALSE;
 246   91  
 247      -static int sd_dtype_optical_bind        = -1;
       92 +/*
       93 + * Default safe I/O delay threshold of 30s for all devices.
        94 + * Can be overridden for vendor/device id in sd.conf
       95 + */
       96 +hrtime_t sd_slow_io_threshold           = 30LL * NANOSEC;
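
For scale: 30LL * NANOSEC works out to 30 * 10^9 ns, so by default a command outstanding for more than 30 seconds is counted as a slow I/O. The per vendor/device override the comment refers to would presumably travel through the usual sd-config-list name/value mechanism in sd.conf; the exact nvpair name is not shown in this hunk.
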
 248   97  
 249      -/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
 250      -static  char *sd_resv_conflict_name     = "sd_retry_on_reservation_conflict";
 251      -
 252   98  /*
 253   99   * Global data for debug logging. To enable debug printing, sd_component_mask
 254  100   * and sd_level_mask should be set to the desired bit patterns as outlined in
 255  101   * sddef.h.
 256  102   */
 257  103  uint_t  sd_component_mask               = 0x0;
 258  104  uint_t  sd_level_mask                   = 0x0;
 259  105  struct  sd_lun *sd_debug_un             = NULL;
 260  106  uint_t  sd_error_level                  = SCSI_ERR_RETRYABLE;
 261  107  
... 25 lines elided ...
 287  133   * component of the driver
 288  134   */
 289  135  static kmutex_t sd_label_mutex;
 290  136  
 291  137  /*
 292  138   * sd_detach_mutex protects un_layer_count, un_detach_count, and
 293  139   * un_opens_in_progress in the sd_lun structure.
 294  140   */
 295  141  static kmutex_t sd_detach_mutex;
 296  142  
 297      -_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
 298      -        sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))
 299      -
 300  143  /*
 301  144   * Global buffer and mutex for debug logging
 302  145   */
 303  146  static char     sd_log_buf[1024];
 304  147  static kmutex_t sd_log_mutex;
 305  148  
 306  149  /*
 307  150   * Structs and globals for recording attached lun information.
 308  151   * This maintains a chain. Each node in the chain represents a SCSI controller.
 309  152   * The structure records the number of luns attached to each target connected
... 8 lines elided ...
 318  161  
 319  162  /*
 320  163   * Flag to indicate the lun is attached or detached
 321  164   */
 322  165  #define SD_SCSI_LUN_ATTACH      0
 323  166  #define SD_SCSI_LUN_DETACH      1
 324  167  
 325  168  static kmutex_t sd_scsi_target_lun_mutex;
 326  169  static struct sd_scsi_hba_tgt_lun       *sd_scsi_target_lun_head = NULL;
 327  170  
 328      -_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
 329      -    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))
 330      -
 331      -_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
 332      -    sd_scsi_target_lun_head))
 333      -
 334  171  /*
 335  172   * "Smart" Probe Caching structs, globals, #defines, etc.
 336  173   * For parallel scsi and non-self-identify device only.
 337  174   */
 338  175  
 339  176  /*
 340  177   * The following resources and routines are implemented to support
 341  178   * "smart" probing, which caches the scsi_probe() results in an array,
 342  179   * in order to help avoid long probe times.
 343  180   */
 344  181  struct sd_scsi_probe_cache {
 345  182          struct  sd_scsi_probe_cache     *next;
 346  183          dev_info_t      *pdip;
 347  184          int             cache[NTARGETS_WIDE];
 348  185  };
 349  186  
 350  187  static kmutex_t sd_scsi_probe_cache_mutex;
 351  188  static struct   sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;
 352  189  
 353  190  /*
 354      - * Really we only need protection on the head of the linked list, but
 355      - * better safe than sorry.
      191 + * Create taskq for all targets in the system. This is created at
      192 + * _init(9E) and destroyed at _fini(9E).
      193 + *
      194 + * Note: here we set the minalloc to a reasonably high number to ensure that
      195 + * we will have an adequate supply of task entries available at interrupt time.
      196 + * This is used in conjunction with the TASKQ_PREPOPULATE flag in
       197 + * sd_taskq_create().  Since we do not want to sleep for allocations at
      198 + * interrupt time, set maxalloc equal to minalloc. That way we will just fail
      199 + * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
       200 + * requests at any one instant in time.
 356  201   */
 357      -_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
 358      -    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))
      202 +#define SD_TASKQ_NUMTHREADS     8
      203 +#define SD_TASKQ_MINALLOC       256
      204 +#define SD_TASKQ_MAXALLOC       256
 359  205  
 360      -_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
 361      -    sd_scsi_probe_cache_head))
      206 +static taskq_t  *sd_tq = NULL;
 362  207  
      208 +static int      sd_taskq_minalloc = SD_TASKQ_MINALLOC;
      209 +static int      sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;
      210 +
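
As a rough sketch (not part of this change) of how these tunables are consumed, the driver's taskq setup reduces to a taskq_create(9F) call along the following lines; the queue name string here is illustrative:

        /*
         * Sketch only: prepopulating minalloc entries means dispatches from
         * interrupt context never need to sleep for an entry, and with
         * maxalloc == minalloc a dispatch beyond that limit simply fails
         * rather than blocking.
         */
        sd_tq = taskq_create("sd_drv_taskq", SD_TASKQ_NUMTHREADS,
            minclsyspri, sd_taskq_minalloc, sd_taskq_maxalloc,
            TASKQ_PREPOPULATE);
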
      211 +#define SD_BAIL_CHECK(a) if ((a)->un_detach_count != 0) { \
      212 +        mutex_exit(SD_MUTEX((a))); \
      213 +        return (ENXIO); \
      214 +        }
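
A hedged usage sketch for the new SD_BAIL_CHECK() macro (the surrounding code is hypothetical): it expects SD_MUTEX(un) to be held, and when a detach is pending it drops that mutex and returns ENXIO from the calling function:

        mutex_enter(SD_MUTEX(un));
        SD_BAIL_CHECK(un);      /* returns ENXIO from the caller if detaching */
        /* ... safe to proceed: no detach is in progress ... */
        mutex_exit(SD_MUTEX(un));
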
 363  215  /*
      216 + * The following task queue is being created for the write part of
      217 + * read-modify-write of non-512 block size devices.
       218 + * Limit the number of threads to 1 for now; the queue currently serves
       219 + * only DVD-RAM and MO drives, for which performance is not the main
       220 + * criterion at this stage.
       221 + * Note: whether a single taskq could be shared remains to be explored.
      222 + */
      223 +#define SD_WMR_TASKQ_NUMTHREADS 1
      224 +static taskq_t  *sd_wmr_tq = NULL;
      225 +
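
For context on the read-modify-write path this queue serves: when the physical block size exceeds 512 bytes (a 2048-byte-sector DVD-RAM, for example), a 512-byte logical write cannot be issued directly, so the driver reads the containing physical block, merges in the new data, and writes the whole block back; it is that deferred write-back step that gets dispatched onto sd_wmr_tq.
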
      226 +/*
 364  227   * Power attribute table
 365  228   */
 366  229  static sd_power_attr_ss sd_pwr_ss = {
 367  230          { "NAME=spindle-motor", "0=off", "1=on", NULL },
 368  231          {0, 100},
 369  232          {30, 0},
 370  233          {20000, 0}
 371  234  };
 372  235  
 373  236  static sd_power_attr_pc sd_pwr_pc = {
... 11 lines elided ...
 385  248          SD_TARGET_START_VALID,
 386  249          SD_TARGET_STANDBY,
 387  250          SD_TARGET_IDLE,
 388  251          SD_TARGET_ACTIVE
 389  252  };
 390  253  
 391  254  /*
 392  255   * Vendor specific data name property declarations
 393  256   */
 394  257  
 395      -#if defined(__fibre) || defined(__i386) ||defined(__amd64)
 396      -
 397  258  static sd_tunables seagate_properties = {
 398  259          SEAGATE_THROTTLE_VALUE,
 399  260          0,
 400  261          0,
 401  262          0,
 402  263          0,
 403  264          0,
 404  265          0,
 405  266          0,
 406  267          0
 407  268  };
 408  269  
 409      -
 410  270  static sd_tunables fujitsu_properties = {
 411  271          FUJITSU_THROTTLE_VALUE,
 412  272          0,
 413  273          0,
 414  274          0,
 415  275          0,
 416  276          0,
 417  277          0,
 418  278          0,
 419  279          0
... 52 lines elided ...
 472  332          0,
 473  333          PIRUS_NRR_COUNT,
 474  334          PIRUS_BUSY_RETRIES,
 475  335          PIRUS_RESET_RETRY_COUNT,
 476  336          0,
 477  337          PIRUS_MIN_THROTTLE_VALUE,
 478  338          PIRUS_DISKSORT_DISABLED_FLAG,
 479  339          PIRUS_LUN_RESET_ENABLED_FLAG
 480  340  };
 481  341  
 482      -#endif
 483      -
 484      -#if (defined(__sparc) && !defined(__fibre)) || \
 485      -        (defined(__i386) || defined(__amd64))
 486      -
 487      -
 488  342  static sd_tunables elite_properties = {
 489  343          ELITE_THROTTLE_VALUE,
 490  344          0,
 491  345          0,
 492  346          0,
 493  347          0,
 494  348          0,
 495  349          0,
 496  350          0,
 497  351          0
... 4 lines elided ...
 502  356          0,
 503  357          0,
 504  358          0,
 505  359          0,
 506  360          0,
 507  361          0,
 508  362          0,
 509  363          0
 510  364  };
 511  365  
 512      -#endif /* Fibre or not */
 513      -
 514  366  static sd_tunables lsi_properties_scsi = {
 515  367          LSI_THROTTLE_VALUE,
 516  368          0,
 517  369          LSI_NOTREADY_RETRIES,
 518  370          0,
 519  371          0,
 520  372          0,
 521  373          0,
 522  374          0,
 523  375          0
... 29 lines elided ...
 553  405          LSI_OEM_NOTREADY_RETRIES,
 554  406          0,
 555  407          0,
 556  408          0,
 557  409          0,
 558  410          0,
 559  411          0,
 560  412          1
 561  413  };
 562  414  
 563      -
 564      -
 565  415  #if (defined(SD_PROP_TST))
 566      -
 567  416  #define SD_TST_CTYPE_VAL        CTYPE_CDROM
 568  417  #define SD_TST_THROTTLE_VAL     16
 569  418  #define SD_TST_NOTREADY_VAL     12
 570  419  #define SD_TST_BUSY_VAL         60
 571  420  #define SD_TST_RST_RETRY_VAL    36
 572  421  #define SD_TST_RSV_REL_TIME     60
 573      -
 574  422  static sd_tunables tst_properties = {
 575  423          SD_TST_THROTTLE_VAL,
 576  424          SD_TST_CTYPE_VAL,
 577  425          SD_TST_NOTREADY_VAL,
 578  426          SD_TST_BUSY_VAL,
 579  427          SD_TST_RST_RETRY_VAL,
 580  428          SD_TST_RSV_REL_TIME,
 581  429          0,
 582  430          0,
 583  431          0
... 24 lines elided ...
 608  456   * as equivalent to a single blank. For example, this causes a
 609  457   * sd_disk_table entry of " NEC CDROM " to match a device's id string
 610  458   * of  "NEC       CDROM".
 611  459   *
 612  460   * Note: The MD21 controller type has been obsoleted.
 613  461   *       ST318202F is a Legacy device
 614  462   *       MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 615  463   *       made with an FC connection. The entries here are a legacy.
 616  464   */
 617  465  static sd_disk_config_t sd_disk_table[] = {
 618      -#if defined(__fibre) || defined(__i386) || defined(__amd64)
 619  466          { "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
 620  467          { "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
 621  468          { "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
 622  469          { "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
 623  470          { "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
 624  471          { "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
 625  472          { "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
 626  473          { "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
 627  474          { "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
 628  475          { "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
... 29 lines elided ...
 658  505          { "IBM     FAStT",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
 659  506          { "IBM     1814",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
 660  507          { "IBM     1814-200",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
 661  508          { "IBM     1818",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
 662  509          { "DELL    MD3000",     SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
 663  510          { "DELL    MD3000i",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
 664  511          { "LSI     INF",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
 665  512          { "ENGENIO INF",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
 666  513          { "SGI     TP",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
 667  514          { "SGI     IS",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
 668      -        { "*CSM100_*",          SD_CONF_BSET_NRR_COUNT |
 669      -                        SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
 670      -        { "*CSM200_*",          SD_CONF_BSET_NRR_COUNT |
 671      -                        SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
 672      -        { "Fujitsu SX300",      SD_CONF_BSET_THROTTLE,  &lsi_oem_properties },
      515 +        { "*CSM100_*",          SD_CONF_BSET_NRR_COUNT|
      516 +                                SD_CONF_BSET_CACHE_IS_NV,
      517 +                                &lsi_oem_properties },
      518 +        { "*CSM200_*",          SD_CONF_BSET_NRR_COUNT|
      519 +                                SD_CONF_BSET_CACHE_IS_NV,
      520 +                                &lsi_oem_properties },
      521 +        { "Fujitsu SX300",      SD_CONF_BSET_THROTTLE, &lsi_oem_properties },
 673  522          { "LSI",                SD_CONF_BSET_NRR_COUNT, &lsi_properties },
 674      -        { "SUN     T3", SD_CONF_BSET_THROTTLE |
 675      -                        SD_CONF_BSET_BSY_RETRY_COUNT|
 676      -                        SD_CONF_BSET_RST_RETRIES|
 677      -                        SD_CONF_BSET_RSV_REL_TIME,
 678      -                &purple_properties },
 679      -        { "SUN     SESS01", SD_CONF_BSET_THROTTLE |
 680      -                SD_CONF_BSET_BSY_RETRY_COUNT|
 681      -                SD_CONF_BSET_RST_RETRIES|
 682      -                SD_CONF_BSET_RSV_REL_TIME|
 683      -                SD_CONF_BSET_MIN_THROTTLE|
 684      -                SD_CONF_BSET_DISKSORT_DISABLED,
 685      -                &sve_properties },
 686      -        { "SUN     T4", SD_CONF_BSET_THROTTLE |
 687      -                        SD_CONF_BSET_BSY_RETRY_COUNT|
 688      -                        SD_CONF_BSET_RST_RETRIES|
 689      -                        SD_CONF_BSET_RSV_REL_TIME,
 690      -                &purple_properties },
 691      -        { "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
 692      -                SD_CONF_BSET_LUN_RESET_ENABLED,
 693      -                &maserati_properties },
 694      -        { "SUN     SE6920", SD_CONF_BSET_THROTTLE |
 695      -                SD_CONF_BSET_NRR_COUNT|
 696      -                SD_CONF_BSET_BSY_RETRY_COUNT|
 697      -                SD_CONF_BSET_RST_RETRIES|
 698      -                SD_CONF_BSET_MIN_THROTTLE|
 699      -                SD_CONF_BSET_DISKSORT_DISABLED|
 700      -                SD_CONF_BSET_LUN_RESET_ENABLED,
 701      -                &pirus_properties },
 702      -        { "SUN     SE6940", SD_CONF_BSET_THROTTLE |
 703      -                SD_CONF_BSET_NRR_COUNT|
 704      -                SD_CONF_BSET_BSY_RETRY_COUNT|
 705      -                SD_CONF_BSET_RST_RETRIES|
 706      -                SD_CONF_BSET_MIN_THROTTLE|
 707      -                SD_CONF_BSET_DISKSORT_DISABLED|
 708      -                SD_CONF_BSET_LUN_RESET_ENABLED,
 709      -                &pirus_properties },
 710      -        { "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
 711      -                SD_CONF_BSET_NRR_COUNT|
 712      -                SD_CONF_BSET_BSY_RETRY_COUNT|
 713      -                SD_CONF_BSET_RST_RETRIES|
 714      -                SD_CONF_BSET_MIN_THROTTLE|
 715      -                SD_CONF_BSET_DISKSORT_DISABLED|
 716      -                SD_CONF_BSET_LUN_RESET_ENABLED,
 717      -                &pirus_properties },
 718      -        { "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
 719      -                SD_CONF_BSET_NRR_COUNT|
 720      -                SD_CONF_BSET_BSY_RETRY_COUNT|
 721      -                SD_CONF_BSET_RST_RETRIES|
 722      -                SD_CONF_BSET_MIN_THROTTLE|
 723      -                SD_CONF_BSET_DISKSORT_DISABLED|
 724      -                SD_CONF_BSET_LUN_RESET_ENABLED,
 725      -                &pirus_properties },
 726      -        { "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
 727      -                SD_CONF_BSET_NRR_COUNT|
 728      -                SD_CONF_BSET_BSY_RETRY_COUNT|
 729      -                SD_CONF_BSET_RST_RETRIES|
 730      -                SD_CONF_BSET_MIN_THROTTLE|
 731      -                SD_CONF_BSET_DISKSORT_DISABLED|
 732      -                SD_CONF_BSET_LUN_RESET_ENABLED,
 733      -                &pirus_properties },
 734      -        { "SUN     SE6330", SD_CONF_BSET_THROTTLE |
 735      -                SD_CONF_BSET_NRR_COUNT|
 736      -                SD_CONF_BSET_BSY_RETRY_COUNT|
 737      -                SD_CONF_BSET_RST_RETRIES|
 738      -                SD_CONF_BSET_MIN_THROTTLE|
 739      -                SD_CONF_BSET_DISKSORT_DISABLED|
 740      -                SD_CONF_BSET_LUN_RESET_ENABLED,
 741      -                &pirus_properties },
      523 +        { "SUN     T3",         SD_CONF_BSET_THROTTLE|
      524 +                                SD_CONF_BSET_BSY_RETRY_COUNT|
      525 +                                SD_CONF_BSET_RST_RETRIES|
      526 +                                SD_CONF_BSET_RSV_REL_TIME,
      527 +                                &purple_properties },
      528 +        { "SUN     SESS01",     SD_CONF_BSET_THROTTLE|
      529 +                                SD_CONF_BSET_BSY_RETRY_COUNT|
      530 +                                SD_CONF_BSET_RST_RETRIES|
      531 +                                SD_CONF_BSET_RSV_REL_TIME|
      532 +                                SD_CONF_BSET_MIN_THROTTLE|
      533 +                                SD_CONF_BSET_DISKSORT_DISABLED,
      534 +                                &sve_properties },
      535 +        { "SUN     T4",         SD_CONF_BSET_THROTTLE|
      536 +                                SD_CONF_BSET_BSY_RETRY_COUNT|
      537 +                                SD_CONF_BSET_RST_RETRIES|
      538 +                                SD_CONF_BSET_RSV_REL_TIME,
      539 +                                &purple_properties },
      540 +        { "SUN     SVE01",      SD_CONF_BSET_DISKSORT_DISABLED|
      541 +                                SD_CONF_BSET_LUN_RESET_ENABLED,
      542 +                                &maserati_properties },
      543 +        { "SUN     SE6920",     SD_CONF_BSET_THROTTLE|
      544 +                                SD_CONF_BSET_NRR_COUNT|
      545 +                                SD_CONF_BSET_BSY_RETRY_COUNT|
      546 +                                SD_CONF_BSET_RST_RETRIES|
      547 +                                SD_CONF_BSET_MIN_THROTTLE|
      548 +                                SD_CONF_BSET_DISKSORT_DISABLED|
      549 +                                SD_CONF_BSET_LUN_RESET_ENABLED,
      550 +                                &pirus_properties },
      551 +        { "SUN     SE6940",     SD_CONF_BSET_THROTTLE|
      552 +                                SD_CONF_BSET_NRR_COUNT|
      553 +                                SD_CONF_BSET_BSY_RETRY_COUNT|
      554 +                                SD_CONF_BSET_RST_RETRIES|
      555 +                                SD_CONF_BSET_MIN_THROTTLE|
      556 +                                SD_CONF_BSET_DISKSORT_DISABLED|
      557 +                                SD_CONF_BSET_LUN_RESET_ENABLED,
      558 +                                &pirus_properties },
      559 +        { "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE|
      560 +                                SD_CONF_BSET_NRR_COUNT|
      561 +                                SD_CONF_BSET_BSY_RETRY_COUNT|
      562 +                                SD_CONF_BSET_RST_RETRIES|
      563 +                                SD_CONF_BSET_MIN_THROTTLE|
      564 +                                SD_CONF_BSET_DISKSORT_DISABLED|
      565 +                                SD_CONF_BSET_LUN_RESET_ENABLED,
      566 +                                &pirus_properties },
      567 +        { "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE|
      568 +                                SD_CONF_BSET_NRR_COUNT|
      569 +                                SD_CONF_BSET_BSY_RETRY_COUNT|
      570 +                                SD_CONF_BSET_RST_RETRIES|
      571 +                                SD_CONF_BSET_MIN_THROTTLE|
      572 +                                SD_CONF_BSET_DISKSORT_DISABLED|
      573 +                                SD_CONF_BSET_LUN_RESET_ENABLED,
      574 +                                &pirus_properties },
      575 +        { "SUN     PSX1000",    SD_CONF_BSET_THROTTLE|
      576 +                                SD_CONF_BSET_NRR_COUNT|
      577 +                                SD_CONF_BSET_BSY_RETRY_COUNT|
      578 +                                SD_CONF_BSET_RST_RETRIES|
      579 +                                SD_CONF_BSET_MIN_THROTTLE|
      580 +                                SD_CONF_BSET_DISKSORT_DISABLED|
      581 +                                SD_CONF_BSET_LUN_RESET_ENABLED,
      582 +                                &pirus_properties },
      583 +        { "SUN     SE6330",     SD_CONF_BSET_THROTTLE|
      584 +                                SD_CONF_BSET_NRR_COUNT|
      585 +                                SD_CONF_BSET_BSY_RETRY_COUNT|
      586 +                                SD_CONF_BSET_RST_RETRIES|
      587 +                                SD_CONF_BSET_MIN_THROTTLE|
      588 +                                SD_CONF_BSET_DISKSORT_DISABLED|
      589 +                                SD_CONF_BSET_LUN_RESET_ENABLED,
      590 +                                &pirus_properties },
 742  591          { "SUN     STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
 743      -        { "SUN     SUN_6180", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
      592 +        { "SUN     SUN_6180",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
 744  593          { "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
 745  594          { "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
 746  595          { "STK     BladeCtlr",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
 747  596          { "STK     FLEXLINE",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
 748      -        { "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
 749      -#endif /* fibre or NON-sparc platforms */
 750      -#if ((defined(__sparc) && !defined(__fibre)) ||\
 751      -        (defined(__i386) || defined(__amd64)))
 752      -        { "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
 753      -        { "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
 754      -        { "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
 755      -        { "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
 756      -        { "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
 757      -        { "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
 758      -        { "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
 759      -        { "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
 760      -        { "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
 761      -        { "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
 762      -        { "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
 763      -        { "SYMBIOS INF-01-00       ", SD_CONF_BSET_FAB_DEVID, NULL },
 764      -        { "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
 765      -            &symbios_properties },
 766      -        { "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
 767      -            &lsi_properties_scsi },
 768      -#if defined(__i386) || defined(__amd64)
 769      -        { " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
 770      -                                    | SD_CONF_BSET_READSUB_BCD
 771      -                                    | SD_CONF_BSET_READ_TOC_ADDR_BCD
 772      -                                    | SD_CONF_BSET_NO_READ_HEADER
 773      -                                    | SD_CONF_BSET_READ_CD_XD4), NULL },
 774      -
 775      -        { " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
 776      -                                    | SD_CONF_BSET_READSUB_BCD
 777      -                                    | SD_CONF_BSET_READ_TOC_ADDR_BCD
 778      -                                    | SD_CONF_BSET_NO_READ_HEADER
 779      -                                    | SD_CONF_BSET_READ_CD_XD4), NULL },
 780      -#endif /* __i386 || __amd64 */
 781      -#endif /* sparc NON-fibre or NON-sparc platforms */
 782      -
      597 +        { "SYMBIOS",            SD_CONF_BSET_NRR_COUNT, &symbios_properties },
      598 +        { "SEAGATE ST42400N",   SD_CONF_BSET_THROTTLE, &elite_properties },
      599 +        { "SEAGATE ST31200N",   SD_CONF_BSET_THROTTLE, &st31200n_properties },
      600 +        { "SEAGATE ST41600N",   SD_CONF_BSET_TUR_CHECK, NULL },
      601 +        { "CONNER  CP30540",    SD_CONF_BSET_NOCACHE,  NULL },
      602 +        { "*SUN0104*",          SD_CONF_BSET_FAB_DEVID, NULL },
      603 +        { "*SUN0207*",          SD_CONF_BSET_FAB_DEVID, NULL },
      604 +        { "*SUN0327*",          SD_CONF_BSET_FAB_DEVID, NULL },
      605 +        { "*SUN0340*",          SD_CONF_BSET_FAB_DEVID, NULL },
      606 +        { "*SUN0424*",          SD_CONF_BSET_FAB_DEVID, NULL },
      607 +        { "*SUN0669*",          SD_CONF_BSET_FAB_DEVID, NULL },
      608 +        { "*SUN1.0G*",          SD_CONF_BSET_FAB_DEVID, NULL },
      609 +        { "SYMBIOS INF-01-00",  SD_CONF_BSET_FAB_DEVID, NULL },
      610 +        { "SYMBIOS",            SD_CONF_BSET_THROTTLE|
      611 +                                SD_CONF_BSET_NRR_COUNT,
      612 +                                &symbios_properties },
      613 +        { "LSI",                SD_CONF_BSET_THROTTLE|
      614 +                                SD_CONF_BSET_NRR_COUNT,
      615 +                                &lsi_properties_scsi },
      616 +        { " NEC CD-ROM DRIVE:260 ", SD_CONF_BSET_PLAYMSF_BCD|
      617 +                                SD_CONF_BSET_READSUB_BCD|
      618 +                                SD_CONF_BSET_READ_TOC_ADDR_BCD|
      619 +                                SD_CONF_BSET_NO_READ_HEADER|
      620 +                                SD_CONF_BSET_READ_CD_XD4,
      621 +                                NULL },
      622 +        { " NEC CD-ROM DRIVE:270 ", SD_CONF_BSET_PLAYMSF_BCD|
      623 +                                SD_CONF_BSET_READSUB_BCD|
      624 +                                SD_CONF_BSET_READ_TOC_ADDR_BCD|
      625 +                                SD_CONF_BSET_NO_READ_HEADER|
      626 +                                SD_CONF_BSET_READ_CD_XD4,
      627 +                                NULL },
 783  628  #if (defined(SD_PROP_TST))
 784      -        { "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
 785      -                                | SD_CONF_BSET_CTYPE
 786      -                                | SD_CONF_BSET_NRR_COUNT
 787      -                                | SD_CONF_BSET_FAB_DEVID
 788      -                                | SD_CONF_BSET_NOCACHE
 789      -                                | SD_CONF_BSET_BSY_RETRY_COUNT
 790      -                                | SD_CONF_BSET_PLAYMSF_BCD
 791      -                                | SD_CONF_BSET_READSUB_BCD
 792      -                                | SD_CONF_BSET_READ_TOC_TRK_BCD
 793      -                                | SD_CONF_BSET_READ_TOC_ADDR_BCD
 794      -                                | SD_CONF_BSET_NO_READ_HEADER
 795      -                                | SD_CONF_BSET_READ_CD_XD4
 796      -                                | SD_CONF_BSET_RST_RETRIES
 797      -                                | SD_CONF_BSET_RSV_REL_TIME
 798      -                                | SD_CONF_BSET_TUR_CHECK), &tst_properties},
      629 +        { "VENDOR  PRODUCT ",   SD_CONF_BSET_THROTTLE|
      630 +                                SD_CONF_BSET_CTYPE|
      631 +                                SD_CONF_BSET_NRR_COUNT|
      632 +                                SD_CONF_BSET_FAB_DEVID|
      633 +                                SD_CONF_BSET_NOCACHE|
      634 +                                SD_CONF_BSET_BSY_RETRY_COUNT|
      635 +                                SD_CONF_BSET_PLAYMSF_BCD|
      636 +                                SD_CONF_BSET_READSUB_BCD|
      637 +                                SD_CONF_BSET_READ_TOC_TRK_BCD|
      638 +                                SD_CONF_BSET_READ_TOC_ADDR_BCD|
      639 +                                SD_CONF_BSET_NO_READ_HEADER|
      640 +                                SD_CONF_BSET_READ_CD_XD4|
      641 +                                SD_CONF_BSET_RST_RETRIES|
      642 +                                SD_CONF_BSET_RSV_REL_TIME|
      643 +                                SD_CONF_BSET_TUR_CHECK,
      644 +                                &tst_properties},
 799  645  #endif
 800  646  };
 801  647  
 802  648  static const int sd_disk_table_size =
 803  649          sizeof (sd_disk_table)/ sizeof (sd_disk_config_t);
 804  650  
 805  651  /*
 806  652   * Emulation mode disk drive VID/PID table
 807  653   */
 808  654  static char sd_flash_dev_table[][25] = {
... 44 lines elided ...
 853  699  
 854  700  /*
 855  701   * Specifies the number of seconds that must have elapsed since the last
 856  702   * cmd. has completed for a device to be declared idle to the PM framework.
 857  703   */
 858  704  static int sd_pm_idletime = 1;
 859  705  
 860  706  /*
 861  707   * Internal function prototypes
 862  708   */
      709 +typedef struct unmap_param_hdr_s {
      710 +        uint16_t        uph_data_len;
      711 +        uint16_t        uph_descr_data_len;
      712 +        uint32_t        uph_reserved;
      713 +} unmap_param_hdr_t;
 863  714  
 864      -#if (defined(__fibre))
 865      -/*
 866      - * These #defines are to avoid namespace collisions that occur because this
 867      - * code is currently used to compile two separate driver modules: sd and ssd.
 868      - * All function names need to be treated this way (even if declared static)
 869      - * in order to allow the debugger to resolve the names properly.
 870      - * It is anticipated that in the near future the ssd module will be obsoleted,
 871      - * at which time this ugliness should go away.
 872      - */
 873      -#define sd_log_trace                    ssd_log_trace
 874      -#define sd_log_info                     ssd_log_info
 875      -#define sd_log_err                      ssd_log_err
 876      -#define sdprobe                         ssdprobe
 877      -#define sdinfo                          ssdinfo
 878      -#define sd_prop_op                      ssd_prop_op
 879      -#define sd_scsi_probe_cache_init        ssd_scsi_probe_cache_init
 880      -#define sd_scsi_probe_cache_fini        ssd_scsi_probe_cache_fini
 881      -#define sd_scsi_clear_probe_cache       ssd_scsi_clear_probe_cache
 882      -#define sd_scsi_probe_with_cache        ssd_scsi_probe_with_cache
 883      -#define sd_scsi_target_lun_init         ssd_scsi_target_lun_init
 884      -#define sd_scsi_target_lun_fini         ssd_scsi_target_lun_fini
 885      -#define sd_scsi_get_target_lun_count    ssd_scsi_get_target_lun_count
 886      -#define sd_scsi_update_lun_on_target    ssd_scsi_update_lun_on_target
 887      -#define sd_spin_up_unit                 ssd_spin_up_unit
 888      -#define sd_enable_descr_sense           ssd_enable_descr_sense
 889      -#define sd_reenable_dsense_task         ssd_reenable_dsense_task
 890      -#define sd_set_mmc_caps                 ssd_set_mmc_caps
 891      -#define sd_read_unit_properties         ssd_read_unit_properties
 892      -#define sd_process_sdconf_file          ssd_process_sdconf_file
 893      -#define sd_process_sdconf_table         ssd_process_sdconf_table
 894      -#define sd_sdconf_id_match              ssd_sdconf_id_match
 895      -#define sd_blank_cmp                    ssd_blank_cmp
 896      -#define sd_chk_vers1_data               ssd_chk_vers1_data
 897      -#define sd_set_vers1_properties         ssd_set_vers1_properties
 898      -#define sd_check_bdc_vpd                ssd_check_bdc_vpd
 899      -#define sd_check_emulation_mode         ssd_check_emulation_mode
      715 +typedef struct unmap_blk_descr_s {
      716 +        uint64_t        ubd_lba;
      717 +        uint32_t        ubd_lba_cnt;
      718 +        uint32_t        ubd_reserved;
      719 +} unmap_blk_descr_t;
 900  720  
 901      -#define sd_get_physical_geometry        ssd_get_physical_geometry
 902      -#define sd_get_virtual_geometry         ssd_get_virtual_geometry
 903      -#define sd_update_block_info            ssd_update_block_info
 904      -#define sd_register_devid               ssd_register_devid
 905      -#define sd_get_devid                    ssd_get_devid
 906      -#define sd_create_devid                 ssd_create_devid
 907      -#define sd_write_deviceid               ssd_write_deviceid
 908      -#define sd_check_vpd_page_support       ssd_check_vpd_page_support
 909      -#define sd_setup_pm                     ssd_setup_pm
 910      -#define sd_create_pm_components         ssd_create_pm_components
 911      -#define sd_ddi_suspend                  ssd_ddi_suspend
 912      -#define sd_ddi_resume                   ssd_ddi_resume
 913      -#define sd_pm_state_change              ssd_pm_state_change
 914      -#define sdpower                         ssdpower
 915      -#define sdattach                        ssdattach
 916      -#define sddetach                        ssddetach
 917      -#define sd_unit_attach                  ssd_unit_attach
 918      -#define sd_unit_detach                  ssd_unit_detach
 919      -#define sd_set_unit_attributes          ssd_set_unit_attributes
 920      -#define sd_create_errstats              ssd_create_errstats
 921      -#define sd_set_errstats                 ssd_set_errstats
 922      -#define sd_set_pstats                   ssd_set_pstats
 923      -#define sddump                          ssddump
 924      -#define sd_scsi_poll                    ssd_scsi_poll
 925      -#define sd_send_polled_RQS              ssd_send_polled_RQS
 926      -#define sd_ddi_scsi_poll                ssd_ddi_scsi_poll
 927      -#define sd_init_event_callbacks         ssd_init_event_callbacks
 928      -#define sd_event_callback               ssd_event_callback
 929      -#define sd_cache_control                ssd_cache_control
 930      -#define sd_get_write_cache_enabled      ssd_get_write_cache_enabled
 931      -#define sd_get_write_cache_changeable   ssd_get_write_cache_changeable
 932      -#define sd_get_nv_sup                   ssd_get_nv_sup
 933      -#define sd_make_device                  ssd_make_device
 934      -#define sdopen                          ssdopen
 935      -#define sdclose                         ssdclose
 936      -#define sd_ready_and_valid              ssd_ready_and_valid
 937      -#define sdmin                           ssdmin
 938      -#define sdread                          ssdread
 939      -#define sdwrite                         ssdwrite
 940      -#define sdaread                         ssdaread
 941      -#define sdawrite                        ssdawrite
 942      -#define sdstrategy                      ssdstrategy
 943      -#define sdioctl                         ssdioctl
 944      -#define sd_mapblockaddr_iostart         ssd_mapblockaddr_iostart
 945      -#define sd_mapblocksize_iostart         ssd_mapblocksize_iostart
 946      -#define sd_checksum_iostart             ssd_checksum_iostart
 947      -#define sd_checksum_uscsi_iostart       ssd_checksum_uscsi_iostart
 948      -#define sd_pm_iostart                   ssd_pm_iostart
 949      -#define sd_core_iostart                 ssd_core_iostart
 950      -#define sd_mapblockaddr_iodone          ssd_mapblockaddr_iodone
 951      -#define sd_mapblocksize_iodone          ssd_mapblocksize_iodone
 952      -#define sd_checksum_iodone              ssd_checksum_iodone
 953      -#define sd_checksum_uscsi_iodone        ssd_checksum_uscsi_iodone
 954      -#define sd_pm_iodone                    ssd_pm_iodone
 955      -#define sd_initpkt_for_buf              ssd_initpkt_for_buf
 956      -#define sd_destroypkt_for_buf           ssd_destroypkt_for_buf
 957      -#define sd_setup_rw_pkt                 ssd_setup_rw_pkt
 958      -#define sd_setup_next_rw_pkt            ssd_setup_next_rw_pkt
 959      -#define sd_buf_iodone                   ssd_buf_iodone
 960      -#define sd_uscsi_strategy               ssd_uscsi_strategy
 961      -#define sd_initpkt_for_uscsi            ssd_initpkt_for_uscsi
 962      -#define sd_destroypkt_for_uscsi         ssd_destroypkt_for_uscsi
 963      -#define sd_uscsi_iodone                 ssd_uscsi_iodone
 964      -#define sd_xbuf_strategy                ssd_xbuf_strategy
 965      -#define sd_xbuf_init                    ssd_xbuf_init
 966      -#define sd_pm_entry                     ssd_pm_entry
 967      -#define sd_pm_exit                      ssd_pm_exit
      721 +/* Max number of block descriptors in UNMAP command */
      722 +#define SD_UNMAP_MAX_DESCR \
      723 +        ((UINT16_MAX - sizeof (unmap_param_hdr_t)) / sizeof (unmap_blk_descr_t))
      724 +/* Max size of the UNMAP parameter list in bytes */
      725 +#define SD_UNMAP_PARAM_LIST_MAXSZ       (sizeof (unmap_param_hdr_t) + \
      726 +        SD_UNMAP_MAX_DESCR * sizeof (unmap_blk_descr_t))
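
Working through the arithmetic with the structures as declared above: the parameter list header is 8 bytes and each block descriptor is 16 bytes, so SD_UNMAP_MAX_DESCR = (65535 - 8) / 16 = 4095 descriptors, and SD_UNMAP_PARAM_LIST_MAXSZ = 8 + 4095 * 16 = 65528 bytes, which still fits in the 16-bit parameter list length field of the UNMAP CDB.
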
 968  727  
 969      -#define sd_pm_idletimeout_handler       ssd_pm_idletimeout_handler
 970      -#define sd_pm_timeout_handler           ssd_pm_timeout_handler
 971      -
 972      -#define sd_add_buf_to_waitq             ssd_add_buf_to_waitq
 973      -#define sdintr                          ssdintr
 974      -#define sd_start_cmds                   ssd_start_cmds
 975      -#define sd_send_scsi_cmd                ssd_send_scsi_cmd
 976      -#define sd_bioclone_alloc               ssd_bioclone_alloc
 977      -#define sd_bioclone_free                ssd_bioclone_free
 978      -#define sd_shadow_buf_alloc             ssd_shadow_buf_alloc
 979      -#define sd_shadow_buf_free              ssd_shadow_buf_free
 980      -#define sd_print_transport_rejected_message     \
 981      -                                        ssd_print_transport_rejected_message
 982      -#define sd_retry_command                ssd_retry_command
 983      -#define sd_set_retry_bp                 ssd_set_retry_bp
 984      -#define sd_send_request_sense_command   ssd_send_request_sense_command
 985      -#define sd_start_retry_command          ssd_start_retry_command
 986      -#define sd_start_direct_priority_command        \
 987      -                                        ssd_start_direct_priority_command
 988      -#define sd_return_failed_command        ssd_return_failed_command
 989      -#define sd_return_failed_command_no_restart     \
 990      -                                        ssd_return_failed_command_no_restart
 991      -#define sd_return_command               ssd_return_command
 992      -#define sd_sync_with_callback           ssd_sync_with_callback
 993      -#define sdrunout                        ssdrunout
 994      -#define sd_mark_rqs_busy                ssd_mark_rqs_busy
 995      -#define sd_mark_rqs_idle                ssd_mark_rqs_idle
 996      -#define sd_reduce_throttle              ssd_reduce_throttle
 997      -#define sd_restore_throttle             ssd_restore_throttle
 998      -#define sd_print_incomplete_msg         ssd_print_incomplete_msg
 999      -#define sd_init_cdb_limits              ssd_init_cdb_limits
1000      -#define sd_pkt_status_good              ssd_pkt_status_good
1001      -#define sd_pkt_status_check_condition   ssd_pkt_status_check_condition
1002      -#define sd_pkt_status_busy              ssd_pkt_status_busy
1003      -#define sd_pkt_status_reservation_conflict      \
1004      -                                        ssd_pkt_status_reservation_conflict
1005      -#define sd_pkt_status_qfull             ssd_pkt_status_qfull
1006      -#define sd_handle_request_sense         ssd_handle_request_sense
1007      -#define sd_handle_auto_request_sense    ssd_handle_auto_request_sense
1008      -#define sd_print_sense_failed_msg       ssd_print_sense_failed_msg
1009      -#define sd_validate_sense_data          ssd_validate_sense_data
1010      -#define sd_decode_sense                 ssd_decode_sense
1011      -#define sd_print_sense_msg              ssd_print_sense_msg
1012      -#define sd_sense_key_no_sense           ssd_sense_key_no_sense
1013      -#define sd_sense_key_recoverable_error  ssd_sense_key_recoverable_error
1014      -#define sd_sense_key_not_ready          ssd_sense_key_not_ready
1015      -#define sd_sense_key_medium_or_hardware_error   \
1016      -                                        ssd_sense_key_medium_or_hardware_error
1017      -#define sd_sense_key_illegal_request    ssd_sense_key_illegal_request
1018      -#define sd_sense_key_unit_attention     ssd_sense_key_unit_attention
1019      -#define sd_sense_key_fail_command       ssd_sense_key_fail_command
1020      -#define sd_sense_key_blank_check        ssd_sense_key_blank_check
1021      -#define sd_sense_key_aborted_command    ssd_sense_key_aborted_command
1022      -#define sd_sense_key_default            ssd_sense_key_default
1023      -#define sd_print_retry_msg              ssd_print_retry_msg
1024      -#define sd_print_cmd_incomplete_msg     ssd_print_cmd_incomplete_msg
1025      -#define sd_pkt_reason_cmd_incomplete    ssd_pkt_reason_cmd_incomplete
1026      -#define sd_pkt_reason_cmd_tran_err      ssd_pkt_reason_cmd_tran_err
1027      -#define sd_pkt_reason_cmd_reset         ssd_pkt_reason_cmd_reset
1028      -#define sd_pkt_reason_cmd_aborted       ssd_pkt_reason_cmd_aborted
1029      -#define sd_pkt_reason_cmd_timeout       ssd_pkt_reason_cmd_timeout
1030      -#define sd_pkt_reason_cmd_unx_bus_free  ssd_pkt_reason_cmd_unx_bus_free
1031      -#define sd_pkt_reason_cmd_tag_reject    ssd_pkt_reason_cmd_tag_reject
1032      -#define sd_pkt_reason_default           ssd_pkt_reason_default
1033      -#define sd_reset_target                 ssd_reset_target
1034      -#define sd_start_stop_unit_callback     ssd_start_stop_unit_callback
1035      -#define sd_start_stop_unit_task         ssd_start_stop_unit_task
1036      -#define sd_taskq_create                 ssd_taskq_create
1037      -#define sd_taskq_delete                 ssd_taskq_delete
1038      -#define sd_target_change_task           ssd_target_change_task
1039      -#define sd_log_dev_status_event         ssd_log_dev_status_event
1040      -#define sd_log_lun_expansion_event      ssd_log_lun_expansion_event
1041      -#define sd_log_eject_request_event      ssd_log_eject_request_event
1042      -#define sd_media_change_task            ssd_media_change_task
1043      -#define sd_handle_mchange               ssd_handle_mchange
1044      -#define sd_send_scsi_DOORLOCK           ssd_send_scsi_DOORLOCK
1045      -#define sd_send_scsi_READ_CAPACITY      ssd_send_scsi_READ_CAPACITY
1046      -#define sd_send_scsi_READ_CAPACITY_16   ssd_send_scsi_READ_CAPACITY_16
1047      -#define sd_send_scsi_GET_CONFIGURATION  ssd_send_scsi_GET_CONFIGURATION
1048      -#define sd_send_scsi_feature_GET_CONFIGURATION  \
1049      -                                        sd_send_scsi_feature_GET_CONFIGURATION
1050      -#define sd_send_scsi_START_STOP_UNIT    ssd_send_scsi_START_STOP_UNIT
1051      -#define sd_send_scsi_INQUIRY            ssd_send_scsi_INQUIRY
1052      -#define sd_send_scsi_TEST_UNIT_READY    ssd_send_scsi_TEST_UNIT_READY
1053      -#define sd_send_scsi_PERSISTENT_RESERVE_IN      \
1054      -                                        ssd_send_scsi_PERSISTENT_RESERVE_IN
1055      -#define sd_send_scsi_PERSISTENT_RESERVE_OUT     \
1056      -                                        ssd_send_scsi_PERSISTENT_RESERVE_OUT
1057      -#define sd_send_scsi_SYNCHRONIZE_CACHE  ssd_send_scsi_SYNCHRONIZE_CACHE
1058      -#define sd_send_scsi_SYNCHRONIZE_CACHE_biodone  \
1059      -                                        ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
1060      -#define sd_send_scsi_MODE_SENSE         ssd_send_scsi_MODE_SENSE
1061      -#define sd_send_scsi_MODE_SELECT        ssd_send_scsi_MODE_SELECT
1062      -#define sd_send_scsi_RDWR               ssd_send_scsi_RDWR
1063      -#define sd_send_scsi_LOG_SENSE          ssd_send_scsi_LOG_SENSE
1064      -#define sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION      \
1065      -                                ssd_send_scsi_GET_EVENT_STATUS_NOTIFICATION
1066      -#define sd_gesn_media_data_valid        ssd_gesn_media_data_valid
1067      -#define sd_alloc_rqs                    ssd_alloc_rqs
1068      -#define sd_free_rqs                     ssd_free_rqs
1069      -#define sd_dump_memory                  ssd_dump_memory
1070      -#define sd_get_media_info_com           ssd_get_media_info_com
1071      -#define sd_get_media_info               ssd_get_media_info
1072      -#define sd_get_media_info_ext           ssd_get_media_info_ext
1073      -#define sd_dkio_ctrl_info               ssd_dkio_ctrl_info
1074      -#define sd_nvpair_str_decode            ssd_nvpair_str_decode
1075      -#define sd_strtok_r                     ssd_strtok_r
1076      -#define sd_set_properties               ssd_set_properties
1077      -#define sd_get_tunables_from_conf       ssd_get_tunables_from_conf
1078      -#define sd_setup_next_xfer              ssd_setup_next_xfer
1079      -#define sd_dkio_get_temp                ssd_dkio_get_temp
1080      -#define sd_check_mhd                    ssd_check_mhd
1081      -#define sd_mhd_watch_cb                 ssd_mhd_watch_cb
1082      -#define sd_mhd_watch_incomplete         ssd_mhd_watch_incomplete
1083      -#define sd_sname                        ssd_sname
1084      -#define sd_mhd_resvd_recover            ssd_mhd_resvd_recover
1085      -#define sd_resv_reclaim_thread          ssd_resv_reclaim_thread
1086      -#define sd_take_ownership               ssd_take_ownership
1087      -#define sd_reserve_release              ssd_reserve_release
1088      -#define sd_rmv_resv_reclaim_req         ssd_rmv_resv_reclaim_req
1089      -#define sd_mhd_reset_notify_cb          ssd_mhd_reset_notify_cb
1090      -#define sd_persistent_reservation_in_read_keys  \
1091      -                                        ssd_persistent_reservation_in_read_keys
1092      -#define sd_persistent_reservation_in_read_resv  \
1093      -                                        ssd_persistent_reservation_in_read_resv
1094      -#define sd_mhdioc_takeown               ssd_mhdioc_takeown
1095      -#define sd_mhdioc_failfast              ssd_mhdioc_failfast
1096      -#define sd_mhdioc_release               ssd_mhdioc_release
1097      -#define sd_mhdioc_register_devid        ssd_mhdioc_register_devid
1098      -#define sd_mhdioc_inkeys                ssd_mhdioc_inkeys
1099      -#define sd_mhdioc_inresv                ssd_mhdioc_inresv
1100      -#define sr_change_blkmode               ssr_change_blkmode
1101      -#define sr_change_speed                 ssr_change_speed
1102      -#define sr_atapi_change_speed           ssr_atapi_change_speed
1103      -#define sr_pause_resume                 ssr_pause_resume
1104      -#define sr_play_msf                     ssr_play_msf
1105      -#define sr_play_trkind                  ssr_play_trkind
1106      -#define sr_read_all_subcodes            ssr_read_all_subcodes
1107      -#define sr_read_subchannel              ssr_read_subchannel
1108      -#define sr_read_tocentry                ssr_read_tocentry
1109      -#define sr_read_tochdr                  ssr_read_tochdr
1110      -#define sr_read_cdda                    ssr_read_cdda
1111      -#define sr_read_cdxa                    ssr_read_cdxa
1112      -#define sr_read_mode1                   ssr_read_mode1
1113      -#define sr_read_mode2                   ssr_read_mode2
1114      -#define sr_read_cd_mode2                ssr_read_cd_mode2
1115      -#define sr_sector_mode                  ssr_sector_mode
1116      -#define sr_eject                        ssr_eject
1117      -#define sr_ejected                      ssr_ejected
1118      -#define sr_check_wp                     ssr_check_wp
1119      -#define sd_watch_request_submit         ssd_watch_request_submit
1120      -#define sd_check_media                  ssd_check_media
1121      -#define sd_media_watch_cb               ssd_media_watch_cb
1122      -#define sd_delayed_cv_broadcast         ssd_delayed_cv_broadcast
1123      -#define sr_volume_ctrl                  ssr_volume_ctrl
1124      -#define sr_read_sony_session_offset     ssr_read_sony_session_offset
1125      -#define sd_log_page_supported           ssd_log_page_supported
1126      -#define sd_check_for_writable_cd        ssd_check_for_writable_cd
1127      -#define sd_wm_cache_constructor         ssd_wm_cache_constructor
1128      -#define sd_wm_cache_destructor          ssd_wm_cache_destructor
1129      -#define sd_range_lock                   ssd_range_lock
1130      -#define sd_get_range                    ssd_get_range
1131      -#define sd_free_inlist_wmap             ssd_free_inlist_wmap
1132      -#define sd_range_unlock                 ssd_range_unlock
1133      -#define sd_read_modify_write_task       ssd_read_modify_write_task
1134      -#define sddump_do_read_of_rmw           ssddump_do_read_of_rmw
1135      -
1136      -#define sd_iostart_chain                ssd_iostart_chain
1137      -#define sd_iodone_chain                 ssd_iodone_chain
1138      -#define sd_initpkt_map                  ssd_initpkt_map
1139      -#define sd_destroypkt_map               ssd_destroypkt_map
1140      -#define sd_chain_type_map               ssd_chain_type_map
1141      -#define sd_chain_index_map              ssd_chain_index_map
1142      -
1143      -#define sd_failfast_flushctl            ssd_failfast_flushctl
1144      -#define sd_failfast_flushq              ssd_failfast_flushq
1145      -#define sd_failfast_flushq_callback     ssd_failfast_flushq_callback
1146      -
1147      -#define sd_is_lsi                       ssd_is_lsi
1148      -#define sd_tg_rdwr                      ssd_tg_rdwr
1149      -#define sd_tg_getinfo                   ssd_tg_getinfo
1150      -#define sd_rmw_msg_print_handler        ssd_rmw_msg_print_handler
1151      -
1152      -#endif  /* #if (defined(__fibre)) */
1153      -
1154      -
1155  728  int _init(void);
1156  729  int _fini(void);
1157  730  int _info(struct modinfo *modinfop);
1158  731  
1159  732  /*PRINTFLIKE3*/
1160  733  static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
1161  734  /*PRINTFLIKE3*/
1162  735  static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
1163  736  /*PRINTFLIKE3*/
1164  737  static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);
↓ open down ↓ 29 lines elided ↑ open up ↑
1194  767   */
1195  768  static sd_ssc_t *sd_ssc_init(struct sd_lun *un);
1196  769  static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd,
1197  770      int flag, enum uio_seg dataspace, int path_flag);
1198  771  static void sd_ssc_fini(sd_ssc_t *ssc);
1199  772  
1200  773  /*
1201  774   * Using sd_ssc_assessment to set correct type-of-assessment
1202  775   * Using sd_ssc_post to post ereport & system log
1203  776   *       sd_ssc_post will call sd_ssc_print to print system log
1204      - *       sd_ssc_post will call sd_ssd_ereport_post to post ereport
      777 + *       sd_ssc_post will call sd_ssc_ereport_post to post ereport
1205  778   */
1206  779  static void sd_ssc_assessment(sd_ssc_t *ssc,
1207  780      enum sd_type_assessment tp_assess);
1208  781  
1209  782  static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
1210  783  static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity);
1211  784  static void sd_ssc_ereport_post(sd_ssc_t *ssc,
1212  785      enum sd_driver_assessment drv_assess);
1213  786  
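The sd_ssc_* interfaces above are meant to bracket a single SCSI command. A minimal sketch of the calling pattern, not taken from the driver (the helper name is made up and the uscsi command is assumed to be built by the caller):

static int
sd_example_ssc_usage(struct sd_lun *un, struct uscsi_cmd *ucmd)
{
        sd_ssc_t        *ssc;
        int             rval;

        ssc = sd_ssc_init(un);
        rval = sd_ssc_send(ssc, ucmd, FKIOCTL, UIO_SYSSPACE, SD_PATH_DIRECT);
        if (rval != 0) {
                /* Mark the failure as one to ignore for ereport purposes. */
                sd_ssc_assessment(ssc, SD_FMT_IGNORE);
        }
        sd_ssc_fini(ssc);
        return (rval);
}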
1214  787  /*
↓ open down ↓ 31 lines elided ↑ open up ↑
1246  819  static void sd_set_vers1_properties(struct sd_lun *un, int flags,
1247  820      sd_tunables *prop_list);
1248  821  
1249  822  static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi,
1250  823      int reservation_flag);
1251  824  static int  sd_get_devid(sd_ssc_t *ssc);
1252  825  static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
1253  826  static int  sd_write_deviceid(sd_ssc_t *ssc);
1254  827  static int  sd_check_vpd_page_support(sd_ssc_t *ssc);
1255  828  
      829 +#ifdef notyet
1256  830  static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
1257  831  static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);
      832 +#endif
1258  833  
1259  834  static int  sd_ddi_suspend(dev_info_t *devi);
1260  835  static int  sd_ddi_resume(dev_info_t *devi);
1261  836  static int  sd_pm_state_change(struct sd_lun *un, int level, int flag);
1262  837  static int  sdpower(dev_info_t *devi, int component, int level);
1263  838  
1264  839  static int  sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
1265  840  static int  sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
1266      -static int  sd_unit_attach(dev_info_t *devi);
      841 +static void sd_unit_attach(void *arg);
1267  842  static int  sd_unit_detach(dev_info_t *devi);
1268  843  
1269  844  static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
1270  845  static void sd_create_errstats(struct sd_lun *un, int instance);
1271  846  static void sd_set_errstats(struct sd_lun *un);
1272  847  static void sd_set_pstats(struct sd_lun *un);
1273  848  
1274  849  static int  sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
1275  850  static int  sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
1276  851  static int  sd_send_polled_RQS(struct sd_lun *un);
1277  852  static int  sd_ddi_scsi_poll(struct scsi_pkt *pkt);
1278  853  
1279      -#if (defined(__fibre))
1280  854  /*
1281      - * Event callbacks (photon)
1282      - */
1283      -static void sd_init_event_callbacks(struct sd_lun *un);
1284      -static void  sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
1285      -#endif
1286      -
1287      -/*
1288  855   * Defines for sd_cache_control
1289  856   */
1290      -
1291  857  #define SD_CACHE_ENABLE         1
1292  858  #define SD_CACHE_DISABLE        0
1293  859  #define SD_CACHE_NOCHANGE       -1
1294  860  
1295  861  static int   sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag);
1296  862  static int   sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled);
1297  863  static void  sd_get_write_cache_changeable(sd_ssc_t *ssc, int *is_changeable);
1298  864  static void  sd_get_nv_sup(sd_ssc_t *ssc);
1299  865  static dev_t sd_make_device(dev_info_t *devi);
1300  866  static void  sd_check_bdc_vpd(sd_ssc_t *ssc);
↓ open down ↓ 102 lines elided ↑ open up ↑
1403  969          int retry_check_flag,
1404  970          void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
1405  971                  int c),
1406  972          void *user_arg, int failure_code,  clock_t retry_delay,
1407  973          void (*statp)(kstat_io_t *));
1408  974  
1409  975  static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
1410  976          clock_t retry_delay, void (*statp)(kstat_io_t *));
1411  977  
1412  978  static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
1413      -        struct scsi_pkt *pktp);
      979 +        int retry_check_flag, struct scsi_pkt *pktp);
1414  980  static void sd_start_retry_command(void *arg);
1415  981  static void sd_start_direct_priority_command(void *arg);
1416  982  static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
1417  983          int errcode);
1418  984  static void sd_return_failed_command_no_restart(struct sd_lun *un,
1419  985          struct buf *bp, int errcode);
1420  986  static void sd_return_command(struct sd_lun *un, struct buf *bp);
1421  987  static void sd_sync_with_callback(struct sd_lun *un);
1422  988  static int sdrunout(caddr_t arg);
1423  989  
↓ open down ↓ 102 lines elided ↑ open up ↑
1526 1092  static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr,
1527 1093          size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
1528 1094  static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag);
1529 1095  static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc,
1530 1096          uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
1531 1097  static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc,
1532 1098          uchar_t usr_cmd, uchar_t *usr_bufp);
1533 1099  static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
1534 1100          struct dk_callback *dkc);
1535 1101  static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
     1102 +static int sd_send_scsi_UNMAP(dev_t dev, sd_ssc_t *ssc, dkioc_free_list_t *dfl,
     1103 +        int flag);
1536 1104  static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc,
1537 1105          struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
1538 1106          uchar_t *bufaddr, uint_t buflen, int path_flag);
1539 1107  static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc,
1540 1108          struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
1541 1109          uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
1542 1110  static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize,
1543 1111          uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
1544 1112  static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize,
1545 1113          uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
↓ open down ↓ 74 lines elided ↑ open up ↑
1620 1188  static int sr_eject(dev_t dev);
1621 1189  static void sr_ejected(register struct sd_lun *un);
1622 1190  static int sr_check_wp(dev_t dev);
1623 1191  static opaque_t sd_watch_request_submit(struct sd_lun *un);
1624 1192  static int sd_check_media(dev_t dev, enum dkio_state state);
1625 1193  static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
1626 1194  static void sd_delayed_cv_broadcast(void *arg);
1627 1195  static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag);
1628 1196  static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag);
1629 1197  
     1198 +#ifdef notyet
1630 1199  static int sd_log_page_supported(sd_ssc_t *ssc, int log_page);
     1200 +#endif
1631 1201  
1632 1202  /*
1633 1203   * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions.
1634 1204   */
1635 1205  static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag);
1636 1206  static int sd_wm_cache_constructor(void *wm, void *un, int flags);
1637 1207  static void sd_wm_cache_destructor(void *wm, void *un);
1638 1208  static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb,
1639 1209      daddr_t endb, ushort_t typ);
1640 1210  static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb,
↓ open down ↓ 2 lines elided ↑ open up ↑
1643 1213  static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm);
1644 1214  static void sd_read_modify_write_task(void * arg);
1645 1215  static int
1646 1216  sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
1647 1217      struct buf **bpp);
1648 1218  
1649 1219  
1650 1220  /*
1651 1221   * Function prototypes for failfast support.
1652 1222   */
1653      -static void sd_failfast_flushq(struct sd_lun *un);
     1223 +static void sd_failfast_flushq(struct sd_lun *un, boolean_t flush_all);
1654 1224  static int sd_failfast_flushq_callback(struct buf *bp);
1655 1225  
1656 1226  /*
1657 1227   * Function prototypes to check for lsi devices
1658 1228   */
1659 1229  static void sd_is_lsi(struct sd_lun *un);
1660 1230  
1661 1231  /*
1662 1232   * Function prototypes for partial DMA support
1663 1233   */
↓ open down ↓ 19 lines elided ↑ open up ↑
1683 1253   * failfast processing being performed.
1684 1254   *
1685 1255   * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing
1686 1256   * failfast processing on all bufs with B_FAILFAST set.
1687 1257   */
1688 1258  
1689 1259  #define SD_FAILFAST_INACTIVE            0
1690 1260  #define SD_FAILFAST_ACTIVE              1
1691 1261  
1692 1262  /*
      1263 + * Bitmask to control behavior in failfast active state:
     1264 + *
     1265 + * SD_FAILFAST_ENABLE_FORCE_INACTIVE: When set, allow retries without
     1266 + * SD_RETRIES_FAILFAST to cause transition to failfast inactive state.
     1267 + *
     1268 + * SD_FAILFAST_ENABLE_FAIL_RETRIES: When set, cause retries with the flag
     1269 + * SD_RETRIES_FAILFAST set (following a timeout) to fail when in failfast
     1270 + * active state.
     1271 + *
     1272 + * SD_FAILFAST_ENABLE_FAIL_ALL_RETRIES: When set, cause ALL retries,
     1273 + * regardless of reason, to fail when in failfast active state. This takes
      1274 + * precedence over SD_FAILFAST_ENABLE_FAIL_RETRIES.
     1275 + *
     1276 + * SD_FAILFAST_ENABLE_FAIL_USCSI: When set, discard all commands in the USCSI
     1277 + * chain (sdioctl or driver generated) when in failfast active state.
     1278 + * To prevent problems with sdopen, this is limited to when there are
     1279 + * multiple pending commands.
     1280 + */
     1281 +
     1282 +#define SD_FAILFAST_ENABLE_FORCE_INACTIVE       0x01
     1283 +#define SD_FAILFAST_ENABLE_FAIL_RETRIES         0x02
     1284 +#define SD_FAILFAST_ENABLE_FAIL_ALL_RETRIES     0x04
     1285 +#define SD_FAILFAST_ENABLE_FAIL_USCSI           0x08
     1286 +
     1287 +/*
      1288 + * The default behavior is to fail timeout retries and discard USCSI commands
      1289 + * when in failfast active state; other retries cannot force a return to inactive.
     1290 + */
     1291 +static int sd_failfast_enable = SD_FAILFAST_ENABLE_FAIL_RETRIES |
     1292 +    SD_FAILFAST_ENABLE_FAIL_USCSI;
     1293 +
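As an illustration only (not code from the driver), the mask above could be consulted along these lines when a retry is requested while the instance is in the failfast active state; the helper name and its boolean arguments are made up:

static int
sd_ff_retry_should_fail(int enable_mask, int is_timeout_retry,
    int has_failfast_flag)
{
        /* Fail every retry, regardless of reason. */
        if (enable_mask & SD_FAILFAST_ENABLE_FAIL_ALL_RETRIES)
                return (1);

        /* Fail retries flagged SD_RETRIES_FAILFAST that follow a timeout. */
        if ((enable_mask & SD_FAILFAST_ENABLE_FAIL_RETRIES) &&
            is_timeout_retry && has_failfast_flag)
                return (1);

        return (0);
}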
     1294 +/*
1693 1295   * Bitmask to control behavior of buf(9S) flushes when a transition to
1694 1296   * the failfast state occurs. Optional bits include:
1695 1297   *
1696 1298   * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that
1697 1299   * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will
1698 1300   * be flushed.
1699 1301   *
1700 1302   * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the
1701 1303   * driver, in addition to the regular wait queue. This includes the xbuf
1702 1304   * queues. When clear, only the driver's wait queue will be flushed.
1703 1305   */
1704 1306  #define SD_FAILFAST_FLUSH_ALL_BUFS      0x01
1705 1307  #define SD_FAILFAST_FLUSH_ALL_QUEUES    0x02
1706 1308  
1707 1309  /*
1708      - * The default behavior is to only flush bufs that have B_FAILFAST set, but
1709      - * to flush all queues within the driver.
     1310 + * The default behavior is to flush all bufs in all queues within the driver.
1710 1311   */
1711      -static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES;
     1312 +static int sd_failfast_flushctl =
     1313 +    SD_FAILFAST_FLUSH_ALL_BUFS | SD_FAILFAST_FLUSH_ALL_QUEUES;
1712 1314  
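A minimal sketch of how the flush mask is intended to be interpreted when the failfast transition occurs (hypothetical helper, not part of sd.c):

static int
sd_ff_should_flush_buf(struct buf *bp)
{
        /* With SD_FAILFAST_FLUSH_ALL_BUFS set, every queued buf is flushed. */
        if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS)
                return (1);

        /* Otherwise only bufs marked B_FAILFAST are flushed. */
        return ((bp->b_flags & B_FAILFAST) != 0);
}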
     1315 +#ifdef SD_FAULT_INJECTION
     1316 +static uint_t   sd_fault_injection_on = 0;
     1317 +#endif
1713 1318  
1714 1319  /*
1715 1320   * SD Testing Fault Injection
1716 1321   */
1717 1322  #ifdef SD_FAULT_INJECTION
1718      -static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un);
     1323 +static int sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un);
1719 1324  static void sd_faultinjection(struct scsi_pkt *pktp);
     1325 +static void sd_prefaultinjection(struct scsi_pkt *pktp);
1720 1326  static void sd_injection_log(char *buf, struct sd_lun *un);
1721 1327  #endif
1722 1328  
1723 1329  /*
1724 1330   * Device driver ops vector
1725 1331   */
1726 1332  static struct cb_ops sd_cb_ops = {
1727 1333          sdopen,                 /* open */
1728 1334          sdclose,                /* close */
1729 1335          sdstrategy,             /* strategy */
↓ open down ↓ 635 lines elided ↑ open up ↑
2365 1971  
2366 1972  /* un->un_priority_chain_type must be set to one of these */
2367 1973  #define SD_CHAIN_INFO_PRIORITY_CMD      9
2368 1974  
2369 1975  /* size for devid inquiries */
2370 1976  #define MAX_INQUIRY_SIZE                0xF0
2371 1977  
2372 1978  /*
2373 1979   * Macros used by functions to pass a given buf(9S) struct along to the
2374 1980   * next function in the layering chain for further processing.
2375      - *
2376      - * In the following macros, passing more than three arguments to the called
2377      - * routines causes the optimizer for the SPARC compiler to stop doing tail
2378      - * call elimination which results in significant performance degradation.
2379 1981   */
2380 1982  #define SD_BEGIN_IOSTART(index, un, bp) \
2381 1983          ((*(sd_iostart_chain[index]))(index, un, bp))
2382 1984  
2383 1985  #define SD_BEGIN_IODONE(index, un, bp)  \
2384 1986          ((*(sd_iodone_chain[index]))(index, un, bp))
2385 1987  
2386 1988  #define SD_NEXT_IOSTART(index, un, bp)                          \
2387 1989          ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp))
2388 1990  
↓ open down ↓ 172 lines elided ↑ open up ↑
2561 2163          if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
2562 2164              ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2563 2165                  mutex_enter(&sd_log_mutex);
2564 2166                  va_start(ap, fmt);
2565 2167                  (void) vsprintf(sd_log_buf, fmt, ap);
2566 2168                  va_end(ap);
2567 2169                  scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2568 2170                  mutex_exit(&sd_log_mutex);
2569 2171          }
2570 2172  #ifdef SD_FAULT_INJECTION
2571      -        _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2572 2173          if (un->sd_injection_mask & comp) {
2573 2174                  mutex_enter(&sd_log_mutex);
2574 2175                  va_start(ap, fmt);
2575 2176                  (void) vsprintf(sd_log_buf, fmt, ap);
2576 2177                  va_end(ap);
2577 2178                  sd_injection_log(sd_log_buf, un);
2578 2179                  mutex_exit(&sd_log_mutex);
2579 2180          }
2580 2181  #endif
2581 2182  }
↓ open down ↓ 29 lines elided ↑ open up ↑
2611 2212              (sd_level_mask & SD_LOGMASK_INFO) &&
2612 2213              ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2613 2214                  mutex_enter(&sd_log_mutex);
2614 2215                  va_start(ap, fmt);
2615 2216                  (void) vsprintf(sd_log_buf, fmt, ap);
2616 2217                  va_end(ap);
2617 2218                  scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2618 2219                  mutex_exit(&sd_log_mutex);
2619 2220          }
2620 2221  #ifdef SD_FAULT_INJECTION
2621      -        _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2622 2222          if (un->sd_injection_mask & component) {
2623 2223                  mutex_enter(&sd_log_mutex);
2624 2224                  va_start(ap, fmt);
2625 2225                  (void) vsprintf(sd_log_buf, fmt, ap);
2626 2226                  va_end(ap);
2627 2227                  sd_injection_log(sd_log_buf, un);
2628 2228                  mutex_exit(&sd_log_mutex);
2629 2229          }
2630 2230  #endif
2631 2231  }
↓ open down ↓ 29 lines elided ↑ open up ↑
2661 2261              (sd_level_mask & SD_LOGMASK_TRACE) &&
2662 2262              ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2663 2263                  mutex_enter(&sd_log_mutex);
2664 2264                  va_start(ap, fmt);
2665 2265                  (void) vsprintf(sd_log_buf, fmt, ap);
2666 2266                  va_end(ap);
2667 2267                  scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2668 2268                  mutex_exit(&sd_log_mutex);
2669 2269          }
2670 2270  #ifdef SD_FAULT_INJECTION
2671      -        _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2672 2271          if (un->sd_injection_mask & component) {
2673 2272                  mutex_enter(&sd_log_mutex);
2674 2273                  va_start(ap, fmt);
2675 2274                  (void) vsprintf(sd_log_buf, fmt, ap);
2676 2275                  va_end(ap);
2677 2276                  sd_injection_log(sd_log_buf, un);
2678 2277                  mutex_exit(&sd_log_mutex);
2679 2278          }
2680 2279  #endif
2681 2280  }
↓ open down ↓ 12 lines elided ↑ open up ↑
2694 2293   *                                 but may be present in the future.
2695 2294   */
2696 2295  
2697 2296  static int
2698 2297  sdprobe(dev_info_t *devi)
2699 2298  {
2700 2299          struct scsi_device      *devp;
2701 2300          int                     rval;
2702 2301          int                     instance = ddi_get_instance(devi);
2703 2302  
2704      -        /*
2705      -         * if it wasn't for pln, sdprobe could actually be nulldev
2706      -         * in the "__fibre" case.
2707      -         */
2708 2303          if (ddi_dev_is_sid(devi) == DDI_SUCCESS) {
2709 2304                  return (DDI_PROBE_DONTCARE);
2710 2305          }
2711 2306  
2712 2307          devp = ddi_get_driver_private(devi);
2713 2308  
2714 2309          if (devp == NULL) {
2715 2310                  /* Ooops... nexus driver is mis-configured... */
2716 2311                  return (DDI_PROBE_FAILURE);
2717 2312          }
↓ open down ↓ 6 lines elided ↑ open up ↑
2724 2319           * Call the SCSA utility probe routine to see if we actually
2725 2320           * have a target at this SCSI nexus.
2726 2321           */
2727 2322          switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) {
2728 2323          case SCSIPROBE_EXISTS:
2729 2324                  switch (devp->sd_inq->inq_dtype) {
2730 2325                  case DTYPE_DIRECT:
2731 2326                          rval = DDI_PROBE_SUCCESS;
2732 2327                          break;
2733 2328                  case DTYPE_RODIRECT:
2734      -                        /* CDs etc. Can be removable media */
     2329 +                        /* CDs etc. Can be removable media. */
2735 2330                          rval = DDI_PROBE_SUCCESS;
2736 2331                          break;
2737 2332                  case DTYPE_OPTICAL:
2738 2333                          /*
2739      -                         * Rewritable optical driver HP115AA
2740      -                         * Can also be removable media
      2334 +                         * Rewritable optical drive HP115AA.
     2335 +                         * Can also be removable media.
2741 2336                           */
2742      -
2743      -                        /*
2744      -                         * Do not attempt to bind to  DTYPE_OPTICAL if
2745      -                         * pre solaris 9 sparc sd behavior is required
2746      -                         *
2747      -                         * If first time through and sd_dtype_optical_bind
2748      -                         * has not been set in /etc/system check properties
2749      -                         */
2750      -
2751      -                        if (sd_dtype_optical_bind  < 0) {
2752      -                                sd_dtype_optical_bind = ddi_prop_get_int
2753      -                                    (DDI_DEV_T_ANY, devi, 0,
2754      -                                    "optical-device-bind", 1);
2755      -                        }
2756      -
2757      -                        if (sd_dtype_optical_bind == 0) {
2758      -                                rval = DDI_PROBE_FAILURE;
2759      -                        } else {
2760      -                                rval = DDI_PROBE_SUCCESS;
2761      -                        }
     2337 +                        rval = DDI_PROBE_SUCCESS;
2762 2338                          break;
2763      -
2764 2339                  case DTYPE_NOTPRESENT:
2765 2340                  default:
2766 2341                          rval = DDI_PROBE_FAILURE;
2767 2342                          break;
2768 2343                  }
2769 2344                  break;
2770 2345          default:
2771 2346                  rval = DDI_PROBE_PARTIAL;
2772 2347                  break;
2773 2348          }
↓ open down ↓ 79 lines elided ↑ open up ↑
2853 2428   *              DDI_PROP_BUF_TOO_SMALL
2854 2429   */
2855 2430  
2856 2431  static int
2857 2432  sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
2858 2433      char *name, caddr_t valuep, int *lengthp)
2859 2434  {
2860 2435          struct sd_lun   *un;
2861 2436  
2862 2437          if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL)
2863      -                return (ddi_prop_op(dev, dip, prop_op, mod_flags,
2864      -                    name, valuep, lengthp));
     2438 +                goto fallback;
2865 2439  
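           +        /*
           +         * Attach now completes asynchronously (sd_unit_attach is
           +         * dispatched as a task), so wait for any in-progress attach
           +         * to finish before consulting cmlb; if the attach failed,
           +         * fall back to ddi_prop_op() below.
           +         */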
     2440 +        mutex_enter(SD_MUTEX(un));
      2441 +        while (un->un_state == SD_STATE_ATTACHING)
     2442 +                cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
     2443 +
     2444 +        if (un->un_state == SD_STATE_ATTACH_FAILED) {
     2445 +                mutex_exit(SD_MUTEX(un));
     2446 +                goto fallback;
     2447 +        }
     2448 +        mutex_exit(SD_MUTEX(un));
     2449 +
2866 2450          return (cmlb_prop_op(un->un_cmlbhandle,
2867 2451              dev, dip, prop_op, mod_flags, name, valuep, lengthp,
2868 2452              SDPART(dev), (void *)SD_PATH_DIRECT));
     2453 +
     2454 +fallback:
     2455 +        return (ddi_prop_op(dev, dip, prop_op, mod_flags, name, valuep,
     2456 +            lengthp));
2869 2457  }
2870 2458  
2871 2459  /*
2872 2460   * The following functions are for smart probing:
2873 2461   * sd_scsi_probe_cache_init()
2874 2462   * sd_scsi_probe_cache_fini()
2875 2463   * sd_scsi_clear_probe_cache()
2876 2464   * sd_scsi_probe_with_cache()
2877 2465   */
2878 2466  
↓ open down ↓ 327 lines elided ↑ open up ↑
3206 2794           * is wrong.  EMC's arrays will also fail this with a check
3207 2795           * condition (0x2/0x4/0x3) if the device is "inactive," but
3208 2796           * we don't want to fail the attach because it may become
3209 2797           * "active" later.
3210 2798           * We don't know if power condition is supported or not at
3211 2799           * this stage, use START STOP bit.
3212 2800           */
3213 2801          status = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
3214 2802              SD_TARGET_START, SD_PATH_DIRECT);
3215 2803  
3216      -        if (status != 0) {
3217      -                if (status == EACCES)
3218      -                        has_conflict = TRUE;
     2804 +        switch (status) {
     2805 +        case EIO:
     2806 +                sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
     2807 +                return (status);
     2808 +        case EACCES:
      2809 +                has_conflict = TRUE;
           +                /* FALLTHROUGH */
      2810 +        default:
3219 2811                  sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3220 2812          }
3221 2813  
3222 2814          /*
3223 2815           * Send another INQUIRY command to the target. This is necessary for
3224 2816           * non-removable media direct access devices because their INQUIRY data
3225 2817           * may not be fully qualified until they are spun up (perhaps via the
3226 2818           * START command above).  Note: This seems to be needed for some
3227 2819           * legacy devices only.) The INQUIRY command should succeed even if a
3228 2820           * Reservation Conflict is present.
↓ open down ↓ 598 lines elided ↑ open up ↑
3827 3419          char    *dataname_lasts;
3828 3420          int     *data_list = NULL;
3829 3421          uint_t  data_list_len;
3830 3422          int     rval = SD_FAILURE;
3831 3423          int     i;
3832 3424  
3833 3425          ASSERT(un != NULL);
3834 3426  
3835 3427          /* Obtain the configuration list associated with the .conf file */
3836 3428          if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un),
3837      -            DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list,
     3429 +            DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sd-config-list",
3838 3430              &config_list, &nelements) != DDI_PROP_SUCCESS) {
3839 3431                  return (SD_FAILURE);
3840 3432          }
3841 3433  
3842 3434          /*
3843 3435           * Compare vids in each duplet to the inquiry vid - if a match is
3844 3436           * made, get the data value and update the soft state structure
3845 3437           * accordingly.
3846 3438           *
3847 3439           * Each duplet should show as a pair of strings, return SD_FAILURE
↓ open down ↓ 343 lines elided ↑ open up ↑
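For reference, a duplet as described in the comment above pairs an INQUIRY vendor/product string with a property string; in sd.conf it could look like the following (made-up VID/PID, illustrative values, property names taken from sd_set_properties() below):

sd-config-list =
    "ACMECORP Model X", "physical-block-size:4096, retries-victim:2, slow-io-threshold:30";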
4191 3783                  }
4192 3784                  SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4193 3785                      "RMW type set to %d\n", un->un_f_rmw_type);
4194 3786          }
4195 3787  
4196 3788          if (strcasecmp(name, "physical-block-size") == 0) {
4197 3789                  if (ddi_strtol(value, &endptr, 0, &val) == 0 &&
4198 3790                      ISP2(val) && val >= un->un_tgt_blocksize &&
4199 3791                      val >= un->un_sys_blocksize) {
4200 3792                          un->un_phy_blocksize = val;
     3793 +                        un->un_f_sdconf_phy_blocksize = TRUE;
4201 3794                  } else {
4202 3795                          goto value_invalid;
4203 3796                  }
4204 3797                  SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4205 3798                      "physical block size set to %d\n", un->un_phy_blocksize);
4206 3799          }
4207 3800  
     3801 +        if (strcasecmp(name, "slow-io-threshold") == 0) {
     3802 +                if (ddi_strtol(value, &endptr, 0, &val) == 0) {
     3803 +                        un->un_slow_io_threshold = (hrtime_t)val * NANOSEC;
     3804 +                } else {
     3805 +                        un->un_slow_io_threshold =
     3806 +                            (hrtime_t)sd_slow_io_threshold;
     3807 +                        goto value_invalid;
     3808 +                }
     3809 +                SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
     3810 +                    "slow IO threshold set to %llu\n",
     3811 +                    un->un_slow_io_threshold);
     3812 +        }
     3813 +
     3814 +        if (strcasecmp(name, "io-time") == 0) {
     3815 +                if (ddi_strtol(value, &endptr, 0, &val) == 0) {
     3816 +                        un->un_io_time = val;
     3817 +                } else {
     3818 +                        un->un_io_time = sd_io_time;
     3819 +                        goto value_invalid;
     3820 +                }
     3821 +                SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
     3822 +                    "IO time set to %llu\n", un->un_io_time);
     3823 +        }
     3824 +
4208 3825          if (strcasecmp(name, "retries-victim") == 0) {
4209 3826                  if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4210 3827                          un->un_victim_retry_count = val;
4211 3828                  } else {
4212 3829                          goto value_invalid;
4213 3830                  }
4214 3831                  SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4215 3832                      "victim retry count set to %d\n",
4216 3833                      un->un_victim_retry_count);
4217 3834                  return;
↓ open down ↓ 741 lines elided ↑ open up ↑
4959 4576           * using the default LBA size.
4960 4577           *
4961 4578           * Since SATA MODE SENSE function (sata_txlt_mode_sense()) does not
4962 4579           * implement support for mode pages 3 and 4 return here to prevent
4963 4580           * illegal requests on SATA drives.
4964 4581           *
4965 4582           * These pages are also reserved in SBC-2 and later.  We assume SBC-2
4966 4583           * or later for a direct-attached block device if the SCSI version is
4967 4584           * at least SPC-3.
4968 4585           */
4969      -
4970 4586          if (ISCD(un) ||
4971 4587              un->un_interconnect_type == SD_INTERCONNECT_SATA ||
4972 4588              (un->un_ctype == CTYPE_CCS && SD_INQUIRY(un)->inq_ansi >= 5))
4973 4589                  return (ret);
4974 4590  
4975 4591          cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0;
4976 4592  
4977 4593          /*
4978 4594           * Retrieve MODE SENSE page 3 - Format Device Page
4979 4595           */
↓ open down ↓ 321 lines elided ↑ open up ↑
5301 4917                          struct sd_errstats *stp;
5302 4918  
5303 4919                          capacity *= un->un_sys_blocksize;
5304 4920                          stp = (struct sd_errstats *)un->un_errstats->ks_data;
5305 4921                          if (stp->sd_capacity.value.ui64 < capacity)
5306 4922                                  stp->sd_capacity.value.ui64 = capacity;
5307 4923                  }
5308 4924          }
5309 4925  }
5310 4926  
     4927 +/*
     4928 + * Parses the SCSI Block Limits VPD page (0xB0). It's legal to pass NULL for
     4929 + * vpd_pg, in which case all the block limits will be reset to the defaults.
     4930 + */
     4931 +static void
     4932 +sd_parse_blk_limits_vpd(struct sd_lun *un, uchar_t *vpd_pg)
     4933 +{
     4934 +        sd_blk_limits_t *lim = &un->un_blk_lim;
     4935 +        unsigned pg_len;
5311 4936  
     4937 +        if (vpd_pg != NULL)
     4938 +                pg_len = BE_IN16(&vpd_pg[2]);
     4939 +        else
     4940 +                pg_len = 0;
     4941 +
     4942 +        /* Block Limits VPD can be 16 bytes or 64 bytes long - support both */
     4943 +        if (pg_len >= 0x10) {
     4944 +                lim->lim_opt_xfer_len_gran = BE_IN16(&vpd_pg[6]);
     4945 +                lim->lim_max_xfer_len = BE_IN32(&vpd_pg[8]);
     4946 +                lim->lim_opt_xfer_len = BE_IN32(&vpd_pg[12]);
     4947 +        } else {
     4948 +                lim->lim_opt_xfer_len_gran = 0;
     4949 +                lim->lim_max_xfer_len = UINT32_MAX;
     4950 +                lim->lim_opt_xfer_len = UINT32_MAX;
     4951 +        }
     4952 +        if (pg_len >= 0x3c) {
     4953 +                lim->lim_max_pfetch_len = BE_IN32(&vpd_pg[16]);
     4954 +                /*
     4955 +                 * A zero in either of the following two fields indicates lack
     4956 +                 * of UNMAP support.
     4957 +                 */
     4958 +                lim->lim_max_unmap_lba_cnt = BE_IN32(&vpd_pg[20]);
     4959 +                lim->lim_max_unmap_descr_cnt = BE_IN32(&vpd_pg[24]);
     4960 +                lim->lim_opt_unmap_gran = BE_IN32(&vpd_pg[28]);
     4961 +                if ((vpd_pg[32] >> 7) == 1) {
      4962 +                        /* bit 7 of byte 32 is the UGAVALID flag */
     4963 +                        lim->lim_unmap_gran_align =
     4964 +                            ((vpd_pg[32] & 0x7f) << 24) | (vpd_pg[33] << 16) |
     4965 +                            (vpd_pg[34] << 8) | vpd_pg[35];
     4966 +                } else {
     4967 +                        lim->lim_unmap_gran_align = 0;
     4968 +                }
     4969 +                lim->lim_max_write_same_len = BE_IN64(&vpd_pg[36]);
     4970 +        } else {
     4971 +                lim->lim_max_pfetch_len = UINT32_MAX;
     4972 +                lim->lim_max_unmap_lba_cnt = UINT32_MAX;
     4973 +                lim->lim_max_unmap_descr_cnt = SD_UNMAP_MAX_DESCR;
     4974 +                lim->lim_opt_unmap_gran = 0;
     4975 +                lim->lim_unmap_gran_align = 0;
     4976 +                lim->lim_max_write_same_len = UINT64_MAX;
     4977 +        }
     4978 +}
     4979 +
5312 4980  /*
     4981 + * Collects VPD page B0 data if available (block limits). If the data is
     4982 + * not available or querying the device failed, we revert to the defaults.
     4983 + */
     4984 +static void
     4985 +sd_setup_blk_limits(sd_ssc_t *ssc)
     4986 +{
     4987 +        struct sd_lun   *un             = ssc->ssc_un;
     4988 +        uchar_t         *inqB0          = NULL;
     4989 +        size_t          inqB0_resid     = 0;
     4990 +        int             rval;
     4991 +
     4992 +        if (un->un_vpd_page_mask & SD_VPD_BLK_LIMITS_PG) {
     4993 +                inqB0 = kmem_zalloc(MAX_INQUIRY_SIZE, KM_SLEEP);
     4994 +                rval = sd_send_scsi_INQUIRY(ssc, inqB0, MAX_INQUIRY_SIZE, 0x01,
     4995 +                    0xB0, &inqB0_resid);
     4996 +                if (rval != 0) {
     4997 +                        sd_ssc_assessment(ssc, SD_FMT_IGNORE);
     4998 +                        kmem_free(inqB0, MAX_INQUIRY_SIZE);
     4999 +                        inqB0 = NULL;
     5000 +                }
     5001 +        }
     5002 +        /* passing NULL inqB0 will reset to defaults */
     5003 +        sd_parse_blk_limits_vpd(ssc->ssc_un, inqB0);
     5004 +        if (inqB0)
     5005 +                kmem_free(inqB0, MAX_INQUIRY_SIZE);
     5006 +}
     5007 +
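To make the defaults above concrete, a hypothetical caller (not in sd.c) could decide whether UNMAP is usable from the parsed limits exactly as the comment in sd_parse_blk_limits_vpd() describes, i.e. both counts must be non-zero:

static boolean_t
sd_example_unmap_supported(struct sd_lun *un)
{
        sd_blk_limits_t *lim = &un->un_blk_lim;

        /* Zero in either field means the device does not support UNMAP. */
        if (lim->lim_max_unmap_lba_cnt == 0 ||
            lim->lim_max_unmap_descr_cnt == 0)
                return (B_FALSE);

        return (B_TRUE);
}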
     5008 +#define DEVID_IF_KNOWN(d) "devid", DATA_TYPE_STRING, (d) ? (d) : "unknown"
     5009 +
     5010 +/*
5313 5011   *    Function: sd_register_devid
5314 5012   *
5315 5013   * Description: This routine will obtain the device id information from the
5316 5014   *              target, obtain the serial number, and register the device
5317 5015   *              id with the ddi framework.
5318 5016   *
5319 5017   *   Arguments: devi - the system's dev_info_t for the device.
5320 5018   *              un - driver soft state (unit) structure
5321 5019   *              reservation_flag - indicates if a reservation conflict
5322 5020   *              occurred during attach
↓ open down ↓ 110 lines elided ↑ open up ↑
5433 5131                  goto cleanup; /* use devid registered by the transport */
5434 5132          }
5435 5133  
5436 5134          /*
5437 5135           * This is the case of antiquated Sun disk drives that have the
5438 5136           * FAB_DEVID property set in the disk_table.  These drives
5439 5137           * manage the devid's by storing them in last 2 available sectors
5440 5138           * on the drive and have them fabricated by the ddi layer by calling
5441 5139           * ddi_devid_init and passing the DEVID_FAB flag.
5442 5140           */
5443      -        if (un->un_f_opt_fab_devid == TRUE) {
5444      -                /*
5445      -                 * Depending on EINVAL isn't reliable, since a reserved disk
5446      -                 * may result in invalid geometry, so check to make sure a
5447      -                 * reservation conflict did not occur during attach.
5448      -                 */
5449      -                if ((sd_get_devid(ssc) == EINVAL) &&
5450      -                    (reservation_flag != SD_TARGET_IS_RESERVED)) {
     5141 +        if (un->un_f_opt_fab_devid == TRUE &&
     5142 +            reservation_flag != SD_TARGET_IS_RESERVED) {
     5143 +                if (sd_get_devid(ssc) == EINVAL)
5451 5144                          /*
5452 5145                           * The devid is invalid AND there is no reservation
5453 5146                           * conflict.  Fabricate a new devid.
5454 5147                           */
5455 5148                          (void) sd_create_devid(ssc);
5456      -                }
5457 5149  
5458 5150                  /* Register the devid if it exists */
5459 5151                  if (un->un_devid != NULL) {
5460 5152                          (void) ddi_devid_register(SD_DEVINFO(un),
5461 5153                              un->un_devid);
5462 5154                          SD_INFO(SD_LOG_ATTACH_DETACH, un,
5463 5155                              "sd_register_devid: Devid Fabricated\n");
5464 5156                  }
5465 5157                  goto cleanup;
5466 5158          }
↓ open down ↓ 1 lines elided ↑ open up ↑
5468 5160          /* encode best devid possible based on data available */
5469 5161          if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
5470 5162              (char *)ddi_driver_name(SD_DEVINFO(un)),
5471 5163              (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
5472 5164              inq80, inq80_len - inq80_resid, inq83, inq83_len -
5473 5165              inq83_resid, &un->un_devid) == DDI_SUCCESS) {
5474 5166  
5475 5167                  /* devid successfully encoded, register devid */
5476 5168                  (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);
5477 5169  
5478      -        } else {
     5170 +        } else if (reservation_flag != SD_TARGET_IS_RESERVED) {
5479 5171                  /*
5480 5172                   * Unable to encode a devid based on data available.
5481 5173                   * This is not a Sun qualified disk.  Older Sun disk
5482 5174                   * drives that have the SD_FAB_DEVID property
5483 5175                   * set in the disk_table and non Sun qualified
5484 5176                   * disks are treated in the same manner.  These
5485 5177                   * drives manage the devid's by storing them in
5486 5178                   * last 2 available sectors on the drive and
5487 5179                   * have them fabricated by the ddi layer by
5488 5180                   * calling ddi_devid_init and passing the
↓ open down ↓ 341 lines elided ↑ open up ↑
5830 5522                                  break;
5831 5523                          case 0x82:
5832 5524                                  un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG;
5833 5525                                  break;
5834 5526                          case 0x83:
5835 5527                                  un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG;
5836 5528                                  break;
5837 5529                          case 0x86:
5838 5530                                  un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG;
5839 5531                                  break;
     5532 +                        case 0xB0:
     5533 +                                un->un_vpd_page_mask |= SD_VPD_BLK_LIMITS_PG;
     5534 +                                break;
5840 5535                          case 0xB1:
5841 5536                                  un->un_vpd_page_mask |= SD_VPD_DEV_CHARACTER_PG;
5842 5537                                  break;
5843 5538                          }
5844 5539                          counter++;
5845 5540                  }
5846 5541  
5847 5542          } else {
5848 5543                  rval = -1;
5849 5544  
↓ open down ↓ 8 lines elided ↑ open up ↑
5858 5553  }
5859 5554  
5860 5555  
5861 5556  /*
5862 5557   *    Function: sd_setup_pm
5863 5558   *
5864 5559   * Description: Initialize Power Management on the device
5865 5560   *
5866 5561   *     Context: Kernel Thread
5867 5562   */
5868      -
     5563 +#ifdef notyet
5869 5564  static void
5870 5565  sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi)
5871 5566  {
5872 5567          uint_t          log_page_size;
5873 5568          uchar_t         *log_page_data;
5874 5569          int             rval = 0;
5875 5570          struct sd_lun   *un;
5876 5571  
5877 5572          ASSERT(ssc != NULL);
5878 5573          un = ssc->ssc_un;
↓ open down ↓ 34 lines elided ↑ open up ↑
5913 5608                  un->un_f_start_stop_supported = TRUE;
5914 5609  
5915 5610                  if (un->un_f_power_condition_supported) {
5916 5611                          rval = sd_send_scsi_START_STOP_UNIT(ssc,
5917 5612                              SD_POWER_CONDITION, SD_TARGET_ACTIVE,
5918 5613                              SD_PATH_DIRECT);
5919 5614                          if (rval != 0) {
5920 5615                                  un->un_f_power_condition_supported = FALSE;
5921 5616                          }
5922 5617                  }
      5618 +                /* Note: this can fail for optical drives with no media present */
5923 5619                  if (!un->un_f_power_condition_supported) {
5924 5620                          rval = sd_send_scsi_START_STOP_UNIT(ssc,
5925 5621                              SD_START_STOP, SD_TARGET_START, SD_PATH_DIRECT);
5926 5622                  }
5927 5623                  if (rval != 0) {
5928 5624                          sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5929 5625                          un->un_f_start_stop_supported = FALSE;
5930 5626                  }
5931 5627  
5932 5628                  /*
↓ open down ↓ 181 lines elided ↑ open up ↑
6114 5810          } else {
6115 5811                  mutex_enter(SD_MUTEX(un));
6116 5812                  un->un_power_level = SD_PM_STATE_STOPPED(un);
6117 5813                  mutex_enter(&un->un_pm_mutex);
6118 5814                  /* Set to off. */
6119 5815                  un->un_pm_count = -1;
6120 5816          }
6121 5817          mutex_exit(&un->un_pm_mutex);
6122 5818          mutex_exit(SD_MUTEX(un));
6123 5819  }
     5820 +#endif
6124 5821  
6125      -
6126 5822  /*
6127 5823   *    Function: sd_ddi_suspend
6128 5824   *
6129 5825   * Description: Performs system power-down operations. This includes
6130 5826   *              setting the drive state to indicate its suspended so
6131 5827   *              that no new commands will be accepted. Also, wait for
6132 5828   *              all commands that are in transport or queued to a timer
6133 5829   *              for retry to complete. All timeout threads are cancelled.
6134 5830   *
6135 5831   * Return Code: DDI_FAILURE or DDI_SUCCESS
↓ open down ↓ 152 lines elided ↑ open up ↑
6288 5984          }
6289 5985  
6290 5986          if (un->un_direct_priority_timeid != NULL) {
6291 5987                  timeout_id_t temp_id = un->un_direct_priority_timeid;
6292 5988                  un->un_direct_priority_timeid = NULL;
6293 5989                  mutex_exit(SD_MUTEX(un));
6294 5990                  (void) untimeout(temp_id);
6295 5991                  mutex_enter(SD_MUTEX(un));
6296 5992          }
6297 5993  
6298      -        if (un->un_f_is_fibre == TRUE) {
6299      -                /*
6300      -                 * Remove callbacks for insert and remove events
6301      -                 */
6302      -                if (un->un_insert_event != NULL) {
6303      -                        mutex_exit(SD_MUTEX(un));
6304      -                        (void) ddi_remove_event_handler(un->un_insert_cb_id);
6305      -                        mutex_enter(SD_MUTEX(un));
6306      -                        un->un_insert_event = NULL;
6307      -                }
6308      -
6309      -                if (un->un_remove_event != NULL) {
6310      -                        mutex_exit(SD_MUTEX(un));
6311      -                        (void) ddi_remove_event_handler(un->un_remove_cb_id);
6312      -                        mutex_enter(SD_MUTEX(un));
6313      -                        un->un_remove_event = NULL;
6314      -                }
6315      -        }
6316      -
6317 5994          mutex_exit(SD_MUTEX(un));
6318 5995  
6319 5996          SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n");
6320 5997  
6321 5998          return (DDI_SUCCESS);
6322 5999  }
6323 6000  
6324 6001  
6325 6002  /*
6326 6003   *    Function: sd_ddi_resume
↓ open down ↓ 61 lines elided ↑ open up ↑
6388 6065           * start I/O until after power has been restored.
6389 6066           */
6390 6067          cv_broadcast(&un->un_suspend_cv);
6391 6068          cv_broadcast(&un->un_state_cv);
6392 6069  
6393 6070          /* restart thread */
6394 6071          if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) {
6395 6072                  scsi_watch_resume(un->un_swr_token);
6396 6073          }
6397 6074  
6398      -#if (defined(__fibre))
6399      -        if (un->un_f_is_fibre == TRUE) {
6400      -                /*
6401      -                 * Add callbacks for insert and remove events
6402      -                 */
6403      -                if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
6404      -                        sd_init_event_callbacks(un);
6405      -                }
6406      -        }
6407      -#endif
6408      -
6409 6075          /*
6410 6076           * Transport any pending commands to the target.
6411 6077           *
6412 6078           * If this is a low-activity device commands in queue will have to wait
6413 6079           * until new commands come in, which may take awhile. Also, we
6414 6080           * specifically don't check un_ncmds_in_transport because we know that
6415 6081           * there really are no commands in progress after the unit was
6416 6082           * suspended and we could have reached the throttle level, been
6417 6083           * suspended, and have no new commands coming in for awhile. Highly
6418 6084           * unlikely, but so is the low-activity disk scenario.
↓ open down ↓ 666 lines elided ↑ open up ↑
7085 6751   *
7086 6752   * Return Code: DDI_SUCCESS
7087 6753   *              DDI_FAILURE
7088 6754   *
7089 6755   *     Context: Kernel thread context
7090 6756   */
7091 6757  
7092 6758  static int
7093 6759  sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
7094 6760  {
7095      -        switch (cmd) {
7096      -        case DDI_ATTACH:
7097      -                return (sd_unit_attach(devi));
7098      -        case DDI_RESUME:
7099      -                return (sd_ddi_resume(devi));
7100      -        default:
7101      -                break;
7102      -        }
7103      -        return (DDI_FAILURE);
7104      -}
7105      -
7106      -
7107      -/*
7108      - *    Function: sddetach
7109      - *
7110      - * Description: Driver's detach(9E) entry point function.
7111      - *
7112      - *   Arguments: devi - opaque device info handle
7113      - *              cmd  - detach  type
7114      - *
7115      - * Return Code: DDI_SUCCESS
7116      - *              DDI_FAILURE
7117      - *
7118      - *     Context: Kernel thread context
7119      - */
7120      -
7121      -static int
7122      -sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
7123      -{
7124      -        switch (cmd) {
7125      -        case DDI_DETACH:
7126      -                return (sd_unit_detach(devi));
7127      -        case DDI_SUSPEND:
7128      -                return (sd_ddi_suspend(devi));
7129      -        default:
7130      -                break;
7131      -        }
7132      -        return (DDI_FAILURE);
7133      -}
7134      -
7135      -
7136      -/*
7137      - *     Function: sd_sync_with_callback
7138      - *
7139      - *  Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft
7140      - *               state while the callback routine is active.
7141      - *
7142      - *    Arguments: un: softstate structure for the instance
7143      - *
7144      - *      Context: Kernel thread context
7145      - */
7146      -
7147      -static void
7148      -sd_sync_with_callback(struct sd_lun *un)
7149      -{
7150      -        ASSERT(un != NULL);
7151      -
7152      -        mutex_enter(SD_MUTEX(un));
7153      -
7154      -        ASSERT(un->un_in_callback >= 0);
7155      -
7156      -        while (un->un_in_callback > 0) {
7157      -                mutex_exit(SD_MUTEX(un));
7158      -                delay(2);
7159      -                mutex_enter(SD_MUTEX(un));
7160      -        }
7161      -
7162      -        mutex_exit(SD_MUTEX(un));
7163      -}
7164      -
7165      -/*
7166      - *    Function: sd_unit_attach
7167      - *
7168      - * Description: Performs DDI_ATTACH processing for sdattach(). Allocates
7169      - *              the soft state structure for the device and performs
7170      - *              all necessary structure and device initializations.
7171      - *
7172      - *   Arguments: devi: the system's dev_info_t for the device.
7173      - *
7174      - * Return Code: DDI_SUCCESS if attach is successful.
7175      - *              DDI_FAILURE if any part of the attach fails.
7176      - *
7177      - *     Context: Called at attach(9e) time for the DDI_ATTACH flag.
7178      - *              Kernel thread context only.  Can sleep.
7179      - */
7180      -
7181      -static int
7182      -sd_unit_attach(dev_info_t *devi)
7183      -{
7184 6761          struct  scsi_device     *devp;
7185 6762          struct  sd_lun          *un;
7186 6763          char                    *variantp;
7187      -        char                    name_str[48];
7188      -        int     reservation_flag = SD_TARGET_IS_UNRESERVED;
7189 6764          int     instance;
7190      -        int     rval;
7191      -        int     wc_enabled;
7192      -        int     wc_changeable;
7193 6765          int     tgt;
7194      -        uint64_t        capacity;
7195      -        uint_t          lbasize = 0;
7196 6766          dev_info_t      *pdip = ddi_get_parent(devi);
7197      -        int             offbyone = 0;
7198      -        int             geom_label_valid = 0;
     6767 +        int             max_xfer_size;
7199 6768          sd_ssc_t        *ssc;
7200      -        int             status;
7201 6769          struct sd_fm_internal   *sfip = NULL;
7202      -        int             max_xfer_size;
7203 6770  
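          +        /*
          +         * Only DDI_ATTACH is handled here; DDI_RESUME is forwarded to
          +         * sd_ddi_resume() and any other command fails the attach.
          +         */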
     6771 +        switch (cmd) {
     6772 +        case DDI_ATTACH:
     6773 +                break;
     6774 +        case DDI_RESUME:
     6775 +                return (sd_ddi_resume(devi));
     6776 +        default:
     6777 +                return (DDI_FAILURE);
     6778 +        }
     6779 +
7204 6780          /*
7205 6781           * Retrieve the target driver's private data area. This was set
7206 6782           * up by the HBA.
7207 6783           */
7208 6784          devp = ddi_get_driver_private(devi);
7209 6785  
7210 6786          /*
7211 6787           * Retrieve the target ID of the device.
7212 6788           */
7213 6789          tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
↓ open down ↓ 74 lines elided ↑ open up ↑
7288 6864  
7289 6865          /*
7290 6866           * Retrieve a pointer to the newly-allocated soft state.
7291 6867           *
7292 6868           * This should NEVER fail if the ddi_soft_state_zalloc() call above
7293 6869           * was successful, unless something has gone horribly wrong and the
7294 6870           * ddi's soft state internals are corrupt (in which case it is
7295 6871           * probably better to halt here than just fail the attach....)
7296 6872           */
7297 6873          if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
7298      -                panic("sd_unit_attach: NULL soft state on instance:0x%x",
     6874 +                panic("sdattach: NULL soft state on instance:0x%x",
7299 6875                      instance);
7300 6876                  /*NOTREACHED*/
7301 6877          }
7302 6878  
7303 6879          /*
7304 6880           * Link the back ptr of the driver soft state to the scsi_device
7305 6881           * struct for this lun.
7306 6882           * Save a pointer to the softstate in the driver-private area of
7307 6883           * the scsi_device struct.
7308 6884           * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until
↓ open down ↓ 25 lines elided ↑ open up ↑
7334 6910          case DTYPE_OPTICAL:
7335 6911                  un->un_node_type = DDI_NT_BLOCK_CHAN;
7336 6912                  un->un_ctype     = CTYPE_ROD;
7337 6913                  break;
7338 6914          default:
7339 6915                  un->un_node_type = DDI_NT_BLOCK_CHAN;
7340 6916                  un->un_ctype     = CTYPE_CCS;
7341 6917                  break;
7342 6918          }
7343 6919  
7344      -        /*
7345      -         * Try to read the interconnect type from the HBA.
7346      -         *
7347      -         * Note: This driver is currently compiled as two binaries, a parallel
7348      -         * scsi version (sd) and a fibre channel version (ssd). All functional
7349      -         * differences are determined at compile time. In the future a single
7350      -         * binary will be provided and the interconnect type will be used to
7351      -         * differentiate between fibre and parallel scsi behaviors. At that time
7352      -         * it will be necessary for all fibre channel HBAs to support this
7353      -         * property.
7354      -         *
7355      -         * set un_f_is_fiber to TRUE ( default fiber )
7356      -         */
7357      -        un->un_f_is_fibre = TRUE;
     6920 +        /* Try to read the interconnect type from the HBA */
     6921 +        un->un_f_is_fibre = FALSE;
7358 6922          switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
7359 6923          case INTERCONNECT_SSA:
     6924 +                un->un_f_is_fibre = TRUE;
7360 6925                  un->un_interconnect_type = SD_INTERCONNECT_SSA;
7361 6926                  SD_INFO(SD_LOG_ATTACH_DETACH, un,
7362      -                    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
     6927 +                    "sdattach: un:0x%p SD_INTERCONNECT_SSA\n", un);
7363 6928                  break;
7364 6929          case INTERCONNECT_PARALLEL:
7365      -                un->un_f_is_fibre = FALSE;
7366 6930                  un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
7367 6931                  SD_INFO(SD_LOG_ATTACH_DETACH, un,
7368      -                    "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
     6932 +                    "sdattach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
7369 6933                  break;
7370 6934          case INTERCONNECT_SAS:
7371      -                un->un_f_is_fibre = FALSE;
7372 6935                  un->un_interconnect_type = SD_INTERCONNECT_SAS;
7373 6936                  un->un_node_type = DDI_NT_BLOCK_SAS;
7374 6937                  SD_INFO(SD_LOG_ATTACH_DETACH, un,
7375      -                    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SAS\n", un);
     6938 +                    "sdattach: un:0x%p SD_INTERCONNECT_SAS\n", un);
7376 6939                  break;
7377 6940          case INTERCONNECT_SATA:
7378      -                un->un_f_is_fibre = FALSE;
7379 6941                  un->un_interconnect_type = SD_INTERCONNECT_SATA;
7380 6942                  SD_INFO(SD_LOG_ATTACH_DETACH, un,
7381      -                    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un);
     6943 +                    "sdattach: un:0x%p SD_INTERCONNECT_SATA\n", un);
7382 6944                  break;
7383 6945          case INTERCONNECT_FIBRE:
     6946 +                un->un_f_is_fibre = TRUE;
7384 6947                  un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
7385 6948                  SD_INFO(SD_LOG_ATTACH_DETACH, un,
7386      -                    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
     6949 +                    "sdattach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
7387 6950                  break;
7388 6951          case INTERCONNECT_FABRIC:
     6952 +                un->un_f_is_fibre = TRUE;
7389 6953                  un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
7390 6954                  un->un_node_type = DDI_NT_BLOCK_FABRIC;
7391 6955                  SD_INFO(SD_LOG_ATTACH_DETACH, un,
7392      -                    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
     6956 +                    "sdattach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
7393 6957                  break;
7394 6958          default:
7395      -#ifdef SD_DEFAULT_INTERCONNECT_TYPE
7396 6959                  /*
7397      -                 * The HBA does not support the "interconnect-type" property
7398      -                 * (or did not provide a recognized type).
7399      -                 *
7400      -                 * Note: This will be obsoleted when a single fibre channel
7401      -                 * and parallel scsi driver is delivered. In the meantime the
7402      -                 * interconnect type will be set to the platform default.If that
7403      -                 * type is not parallel SCSI, it means that we should be
7404      -                 * assuming "ssd" semantics. However, here this also means that
7405      -                 * the FC HBA is not supporting the "interconnect-type" property
7406      -                 * like we expect it to, so log this occurrence.
     6960 +                 * By default, assume that a device which does not support
     6961 +                 * the "interconnect-type" property sits behind a parallel
     6962 +                 * SCSI HBA, and set the interconnect type accordingly.
7407 6963                   */
7408      -                un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
7409      -                if (!SD_IS_PARALLEL_SCSI(un)) {
7410      -                        SD_INFO(SD_LOG_ATTACH_DETACH, un,
7411      -                            "sd_unit_attach: un:0x%p Assuming "
7412      -                            "INTERCONNECT_FIBRE\n", un);
7413      -                } else {
7414      -                        SD_INFO(SD_LOG_ATTACH_DETACH, un,
7415      -                            "sd_unit_attach: un:0x%p Assuming "
7416      -                            "INTERCONNECT_PARALLEL\n", un);
7417      -                        un->un_f_is_fibre = FALSE;
7418      -                }
7419      -#else
7420      -                /*
7421      -                 * Note: This source will be implemented when a single fibre
7422      -                 * channel and parallel scsi driver is delivered. The default
7423      -                 * will be to assume that if a device does not support the
7424      -                 * "interconnect-type" property it is a parallel SCSI HBA and
7425      -                 * we will set the interconnect type for parallel scsi.
7426      -                 */
7427 6964                  un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
7428      -                un->un_f_is_fibre = FALSE;
7429      -#endif
7430 6965                  break;
7431 6966          }
7432 6967  
7433 6968          if (un->un_f_is_fibre == TRUE) {
7434 6969                  if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
7435 6970                      SCSI_VERSION_3) {
7436 6971                          switch (un->un_interconnect_type) {
7437 6972                          case SD_INTERCONNECT_FIBRE:
7438 6973                          case SD_INTERCONNECT_SSA:
7439 6974                                  un->un_node_type = DDI_NT_BLOCK_WWN;
7440 6975                                  break;
7441 6976                          default:
7442 6977                                  break;
7443 6978                          }
7444 6979                  }
7445 6980          }
7446 6981  
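          +        /* Let the retire framework retire this device without constraints */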
     6982 +        (void) ddi_prop_update_int(DDI_DEV_T_NONE, devi,
     6983 +            "allow-unconstrained-retire", 1);
     6984 +
7447 6985          /*
7448 6986           * Initialize the Request Sense command for the target
7449 6987           */
7450 6988          if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
7451 6989                  goto alloc_rqs_failed;
7452 6990          }
7453 6991  
7454      -        /*
7455      -         * Set un_retry_count with SD_RETRY_COUNT, this is ok for Sparc
7456      -         * with separate binary for sd and ssd.
7457      -         *
7458      -         * x86 has 1 binary, un_retry_count is set base on connection type.
7459      -         * The hardcoded values will go away when Sparc uses 1 binary
7460      -         * for sd and ssd.  This hardcoded values need to match
7461      -         * SD_RETRY_COUNT in sddef.h
7462      -         * The value used is base on interconnect type.
7463      -         * fibre = 3, parallel = 5
7464      -         */
7465      -#if defined(__i386) || defined(__amd64)
     6992 +        /* The default retry count is based on the interconnect type */
7466 6993          un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
7467      -#else
7468      -        un->un_retry_count = SD_RETRY_COUNT;
7469      -#endif
7470 6994  
7471 6995          /*
7472 6996           * Set the per disk retry count to the default number of retries
7473 6997           * for disks and CDROMs. This value can be overridden by the
7474 6998           * disk property list or an entry in sd.conf.
7475 6999           */
7476 7000          un->un_notready_retry_count =
7477 7001              ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
7478 7002              : DISK_NOT_READY_RETRY_COUNT(un);
7479 7003  
↓ open down ↓ 6 lines elided ↑ open up ↑
7486 7010  
7487 7011          /*
7488 7012           * Init the reset threshold for retries.  This number determines
7489 7013           * how many retries must be performed before a reset can be issued
7490 7014           * (for certain error conditions). This can be overridden by entries
7491 7015           * in sd.conf or the device config table.
7492 7016           */
7493 7017          un->un_reset_retry_count = (un->un_retry_count / 2);
7494 7018  
7495 7019          /*
7496      -         * Set the victim_retry_count to the default un_retry_count
     7020 +         * Set the victim_retry_count to the default un_retry_count.
     7021 +         * This value is used in addition to the standard retry count.
     7022 +         * This can be overridden by entries in sd.conf or the device
     7023 +         * config table.
7497 7024           */
7498      -        un->un_victim_retry_count = (2 * un->un_retry_count);
     7025 +        un->un_victim_retry_count = un->un_retry_count;
7499 7026  
7500 7027          /*
7501 7028           * Set the reservation release timeout to the default value of
7502      -         * 5 seconds. This can be overridden by entries in ssd.conf or the
     7029 +         * 5 seconds. This can be overridden by entries in sd.conf or the
7503 7030           * device config table.
7504 7031           */
7505 7032          un->un_reserve_release_time = 5;
7506 7033  
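          +        /*
          +         * Seed the per-unit I/O time, slow-I/O threshold and LUN reset
          +         * defaults from the corresponding global sd tunables.
          +         */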
     7034 +        un->un_io_time = sd_io_time;
     7035 +
     7036 +        un->un_slow_io_threshold = sd_slow_io_threshold;
     7037 +
     7038 +        un->un_f_lun_reset_enabled = sd_enable_lun_reset;
     7039 +
7507 7040          /*
7508 7041           * Set up the default maximum transfer size. Note that this may
7509 7042           * get updated later in the attach, when setting up default wide
7510 7043           * operations for disks.
7511 7044           */
7512      -#if defined(__i386) || defined(__amd64)
7513 7045          un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE;
7514 7046          un->un_partial_dma_supported = 1;
7515      -#else
7516      -        un->un_max_xfer_size = (uint_t)maxphys;
7517      -#endif
7518 7047  
7519 7048          /*
7520 7049           * Get "allow bus device reset" property (defaults to "enabled" if
7521 7050           * the property was not defined). This is to disable bus resets for
7522 7051           * certain kinds of error recovery. Note: In the future when a run-time
7523 7052           * fibre check is available the soft state flag should default to
7524 7053           * enabled.
7525 7054           */
7526 7055          if (un->un_f_is_fibre == TRUE) {
7527 7056                  un->un_f_allow_bus_device_reset = TRUE;
7528 7057          } else {
7529 7058                  if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
7530 7059                      "allow-bus-device-reset", 1) != 0) {
7531 7060                          un->un_f_allow_bus_device_reset = TRUE;
7532 7061                          SD_INFO(SD_LOG_ATTACH_DETACH, un,
7533      -                            "sd_unit_attach: un:0x%p Bus device reset "
     7062 +                            "sdattach: un:0x%p Bus device reset "
7534 7063                              "enabled\n", un);
7535 7064                  } else {
7536 7065                          un->un_f_allow_bus_device_reset = FALSE;
7537 7066                          SD_INFO(SD_LOG_ATTACH_DETACH, un,
7538      -                            "sd_unit_attach: un:0x%p Bus device reset "
     7067 +                            "sdattach: un:0x%p Bus device reset "
7539 7068                              "disabled\n", un);
7540 7069                  }
7541 7070          }
7542 7071  
7543 7072          /*
7544 7073           * Check if this is an ATAPI device. ATAPI devices use Group 1
7545 7074           * Read/Write commands and Group 2 Mode Sense/Select commands.
7546 7075           *
7547 7076           * Note: The "obsolete" way of doing this is to check for the "atapi"
7548 7077           * property. The new "variant" property with a value of "atapi" has been
7549 7078           * introduced so that future 'variants' of standard SCSI behavior (like
7550 7079           * atapi) could be specified by the underlying HBA drivers by supplying
7551 7080           * a new value for the "variant" property, instead of having to define a
7552 7081           * new property.
7553 7082           */
7554 7083          if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) {
7555 7084                  un->un_f_cfg_is_atapi = TRUE;
7556 7085                  SD_INFO(SD_LOG_ATTACH_DETACH, un,
7557      -                    "sd_unit_attach: un:0x%p Atapi device\n", un);
     7086 +                    "sdattach: un:0x%p Atapi device\n", un);
7558 7087          }
7559 7088          if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant",
7560 7089              &variantp) == DDI_PROP_SUCCESS) {
7561 7090                  if (strcmp(variantp, "atapi") == 0) {
7562 7091                          un->un_f_cfg_is_atapi = TRUE;
7563 7092                          SD_INFO(SD_LOG_ATTACH_DETACH, un,
7564      -                            "sd_unit_attach: un:0x%p Atapi device\n", un);
     7093 +                            "sdattach: un:0x%p Atapi device\n", un);
7565 7094                  }
7566 7095                  ddi_prop_free(variantp);
7567 7096          }
7568 7097  
7569      -        un->un_cmd_timeout      = SD_IO_TIME;
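          +        /* CD-ROMs get twice the base I/O time for command timeouts */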
     7098 +        un->un_cmd_timeout = ((ISCD(un)) ? 2 : 1) * (ushort_t)un->un_io_time;
     7099 +        un->un_uscsi_timeout = un->un_cmd_timeout;
     7100 +        un->un_busy_timeout = SD_BSY_TIMEOUT;
7570 7101  
7571      -        un->un_busy_timeout  = SD_BSY_TIMEOUT;
7572      -
7573      -        /* Info on current states, statuses, etc. (Updated frequently) */
7574      -        un->un_state            = SD_STATE_NORMAL;
     7102 +        /*
     7103 +         * Info on current states, statuses, etc. (Updated frequently)
     7104 +         *
     7105 +         * Current state is ATTACHING until sd_unit_attach() finishes.
     7106 +         * Last state is NORMAL so that sd_unit_attach() can Restore_state()
     7107 +         * when it finishes successfully.
     7108 +         */
     7109 +        un->un_state            = SD_STATE_ATTACHING;
7575 7110          un->un_last_state       = SD_STATE_NORMAL;
7576 7111  
7577 7112          /* Control & status info for command throttling */
7578 7113          un->un_throttle         = sd_max_throttle;
7579 7114          un->un_saved_throttle   = sd_max_throttle;
7580 7115          un->un_min_throttle     = sd_min_throttle;
7581 7116  
7582 7117          if (un->un_f_is_fibre == TRUE) {
7583 7118                  un->un_f_use_adaptive_throttle = TRUE;
7584 7119          } else {
7585 7120                  un->un_f_use_adaptive_throttle = FALSE;
7586 7121          }
7587 7122  
     7123 +        /* Unit detach has to pause until outstanding commands abort */
     7124 +        un->un_f_detach_waiting = 0;
     7125 +        cv_init(&un->un_detach_cv, NULL, CV_DRIVER, NULL);
     7126 +
7588 7127          /* Removable media support. */
7589 7128          cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL);
7590 7129          un->un_mediastate               = DKIO_NONE;
7591 7130          un->un_specified_mediastate     = DKIO_NONE;
7592 7131  
7593 7132          /* CVs for suspend/resume (PM or DR) */
7594 7133          cv_init(&un->un_suspend_cv,   NULL, CV_DRIVER, NULL);
7595 7134          cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL);
7596 7135  
7597 7136          /* Power management support. */
↓ open down ↓ 22 lines elided ↑ open up ↑
7620 7159           * can be overridden via [s]sd-config-list "mmc-gesn-polling" property.
7621 7160           */
7622 7161          un->un_f_mmc_gesn_polling = TRUE;
7623 7162  
7624 7163          /*
7625 7164           * physical sector size defaults to DEV_BSIZE currently. We can
7626 7165           * override this value via the driver configuration file so we must
7627 7166           * set it before calling sd_read_unit_properties().
7628 7167           */
7629 7168          un->un_phy_blocksize = DEV_BSIZE;
     7169 +        un->un_f_sdconf_phy_blocksize = FALSE;
7630 7170  
7631 7171          /*
7632 7172           * Retrieve the properties from the static driver table or the driver
7633 7173           * configuration file (.conf) for this unit and update the soft state
7634 7174           * for the device as needed for the indicated properties.
7635 7175           * Note: the property configuration needs to occur here as some of the
7636 7176           * following routines may have dependencies on soft state flags set
7637 7177           * as part of the driver property configuration.
7638 7178           */
7639 7179          sd_read_unit_properties(un);
7640 7180          SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7641      -            "sd_unit_attach: un:0x%p property configuration complete.\n", un);
     7181 +            "sdattach: un:0x%p property configuration complete.\n", un);
7642 7182  
7643 7183          /*
7644 7184           * Only if a device has "hotpluggable" property, it is
7645 7185           * treated as hotpluggable device. Otherwise, it is
7646 7186           * regarded as non-hotpluggable one.
7647 7187           */
7648 7188          if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable",
7649 7189              -1) != -1) {
7650 7190                  un->un_f_is_hotpluggable = TRUE;
7651 7191          }
↓ open down ↓ 65 lines elided ↑ open up ↑
7717 7257           *         and sd_cache_control().
7718 7258           */
7719 7259  
7720 7260          un->un_stats = kstat_create(sd_label, instance,
7721 7261              NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
7722 7262          if (un->un_stats != NULL) {
7723 7263                  un->un_stats->ks_lock = SD_MUTEX(un);
7724 7264                  kstat_install(un->un_stats);
7725 7265          }
7726 7266          SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7727      -            "sd_unit_attach: un:0x%p un_stats created\n", un);
     7267 +            "sdattach: un:0x%p un_stats created\n", un);
7728 7268  
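          +        /* Named kstat counting UNMAP commands, errors, extents and bytes */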
     7269 +        un->un_unmapstats_ks = kstat_create(sd_label, instance, "unmapstats",
     7270 +            "misc", KSTAT_TYPE_NAMED, sizeof (*un->un_unmapstats) /
     7271 +            sizeof (kstat_named_t), 0);
     7272 +        if (un->un_unmapstats_ks) {
     7273 +                un->un_unmapstats = un->un_unmapstats_ks->ks_data;
     7274 +
     7275 +                kstat_named_init(&un->un_unmapstats->us_cmds,
     7276 +                    "commands", KSTAT_DATA_UINT64);
     7277 +                kstat_named_init(&un->un_unmapstats->us_errs,
     7278 +                    "errors", KSTAT_DATA_UINT64);
     7279 +                kstat_named_init(&un->un_unmapstats->us_extents,
     7280 +                    "extents", KSTAT_DATA_UINT64);
     7281 +                kstat_named_init(&un->un_unmapstats->us_bytes,
     7282 +                    "bytes", KSTAT_DATA_UINT64);
     7283 +
     7284 +                kstat_install(un->un_unmapstats_ks);
     7285 +        } else {
     7286 +                cmn_err(CE_NOTE, "!Cannot create unmap kstats for disk %d",
     7287 +                    instance);
     7288 +        }
     7289 +
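          +        /* Raw kstat exporting per-unit I/O latency statistics */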
     7290 +        un->un_lat_ksp = kstat_create(sd_label, instance, "io_latency",
     7291 +            "io_latency", KSTAT_TYPE_RAW, sizeof (un_lat_stat_t),
     7292 +            KSTAT_FLAG_PERSISTENT);
     7293 +
     7294 +        if (un->un_lat_ksp != NULL) {
     7295 +                un->un_lat_ksp->ks_lock = SD_MUTEX(un);
     7296 +                un->un_lat_stats = (un_lat_stat_t *)un->un_lat_ksp->ks_data;
     7297 +                kstat_install(un->un_lat_ksp);
     7298 +        } else {
     7299 +                un->un_lat_stats = NULL;
     7300 +        }
     7301 +
7729 7302          sd_create_errstats(un, instance);
7730 7303          if (un->un_errstats == NULL) {
7731 7304                  goto create_errstats_failed;
7732 7305          }
7733 7306          SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7734      -            "sd_unit_attach: un:0x%p errstats created\n", un);
     7307 +            "sdattach: un:0x%p errstats created\n", un);
7735 7308  
7736 7309          /*
7737 7310           * The following if/else code was relocated here from below as part
7738 7311           * of the fix for bug (4430280). However with the default setup added
7739 7312           * on entry to this routine, it's no longer absolutely necessary for
7740 7313           * this to be before the call to sd_spin_up_unit.
7741 7314           */
7742 7315          if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) {
7743 7316                  int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) ||
7744 7317                      (devp->sd_inq->inq_ansi == 5)) &&
↓ open down ↓ 3 lines elided ↑ open up ↑
7748 7321                   * If tagged queueing is supported by the target
7749 7322                   * and by the host adapter then we will enable it
7750 7323                   */
7751 7324                  un->un_tagflags = 0;
7752 7325                  if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag &&
7753 7326                      (un->un_f_arq_enabled == TRUE)) {
7754 7327                          if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing",
7755 7328                              1, 1) == 1) {
7756 7329                                  un->un_tagflags = FLAG_STAG;
7757 7330                                  SD_INFO(SD_LOG_ATTACH_DETACH, un,
7758      -                                    "sd_unit_attach: un:0x%p tag queueing "
     7331 +                                    "sdattach: un:0x%p tag queueing "
7759 7332                                      "enabled\n", un);
7760 7333                          } else if (scsi_ifgetcap(SD_ADDRESS(un),
7761 7334                              "untagged-qing", 0) == 1) {
7762 7335                                  un->un_f_opt_queueing = TRUE;
7763 7336                                  un->un_saved_throttle = un->un_throttle =
7764 7337                                      min(un->un_throttle, 3);
7765 7338                          } else {
7766 7339                                  un->un_f_opt_queueing = FALSE;
7767 7340                                  un->un_saved_throttle = un->un_throttle = 1;
7768 7341                          }
7769 7342                  } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0)
7770 7343                      == 1) && (un->un_f_arq_enabled == TRUE)) {
7771 7344                          /* The Host Adapter supports internal queueing. */
7772 7345                          un->un_f_opt_queueing = TRUE;
7773 7346                          un->un_saved_throttle = un->un_throttle =
7774 7347                              min(un->un_throttle, 3);
7775 7348                  } else {
7776 7349                          un->un_f_opt_queueing = FALSE;
7777 7350                          un->un_saved_throttle = un->un_throttle = 1;
7778 7351                          SD_INFO(SD_LOG_ATTACH_DETACH, un,
7779      -                            "sd_unit_attach: un:0x%p no tag queueing\n", un);
     7352 +                            "sdattach: un:0x%p no tag queueing\n", un);
7780 7353                  }
7781 7354  
7782 7355                  /*
7783 7356                   * Enable large transfers for SATA/SAS drives
7784 7357                   */
7785 7358                  if (SD_IS_SERIAL(un)) {
7786 7359                          un->un_max_xfer_size =
7787 7360                              ddi_getprop(DDI_DEV_T_ANY, devi, 0,
7788      -                            sd_max_xfer_size, SD_MAX_XFER_SIZE);
     7361 +                            "sd_max_xfer_size", SD_MAX_XFER_SIZE);
7789 7362                          SD_INFO(SD_LOG_ATTACH_DETACH, un,
7790      -                            "sd_unit_attach: un:0x%p max transfer "
     7363 +                            "sdattach: un:0x%p max transfer "
7791 7364                              "size=0x%x\n", un, un->un_max_xfer_size);
7792 7365  
7793 7366                  }
7794 7367  
7795 7368                  /* Setup or tear down default wide operations for disks */
7796      -
7797      -                /*
7798      -                 * Note: Legacy: it may be possible for both "sd_max_xfer_size"
7799      -                 * and "ssd_max_xfer_size" to exist simultaneously on the same
7800      -                 * system and be set to different values. In the future this
7801      -                 * code may need to be updated when the ssd module is
7802      -                 * obsoleted and removed from the system. (4299588)
7803      -                 */
7804 7369                  if (SD_IS_PARALLEL_SCSI(un) &&
7805 7370                      (devp->sd_inq->inq_rdf == RDF_SCSI2) &&
7806 7371                      (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) {
7807 7372                          if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
7808 7373                              1, 1) == 1) {
7809 7374                                  SD_INFO(SD_LOG_ATTACH_DETACH, un,
7810      -                                    "sd_unit_attach: un:0x%p Wide Transfer "
     7375 +                                    "sdattach: un:0x%p Wide Transfer "
7811 7376                                      "enabled\n", un);
7812 7377                          }
7813 7378  
7814 7379                          /*
7815 7380                           * If tagged queuing has also been enabled, then
7816 7381                           * enable large xfers
7817 7382                           */
7818 7383                          if (un->un_saved_throttle == sd_max_throttle) {
7819 7384                                  un->un_max_xfer_size =
7820 7385                                      ddi_getprop(DDI_DEV_T_ANY, devi, 0,
7821      -                                    sd_max_xfer_size, SD_MAX_XFER_SIZE);
     7386 +                                    "sd_max_xfer_size", SD_MAX_XFER_SIZE);
7822 7387                                  SD_INFO(SD_LOG_ATTACH_DETACH, un,
7823      -                                    "sd_unit_attach: un:0x%p max transfer "
     7388 +                                    "sdattach: un:0x%p max transfer "
7824 7389                                      "size=0x%x\n", un, un->un_max_xfer_size);
7825 7390                          }
7826 7391                  } else {
7827 7392                          if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
7828 7393                              0, 1) == 1) {
7829 7394                                  SD_INFO(SD_LOG_ATTACH_DETACH, un,
7830      -                                    "sd_unit_attach: un:0x%p "
     7395 +                                    "sdattach: un:0x%p "
7831 7396                                      "Wide Transfer disabled\n", un);
7832 7397                          }
7833 7398                  }
7834 7399          } else {
7835 7400                  un->un_tagflags = FLAG_STAG;
7836 7401                  un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY,
7837      -                    devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE);
     7402 +                    devi, 0, "sd_max_xfer_size", SD_MAX_XFER_SIZE);
7838 7403          }
7839 7404  
7840 7405          /*
7841 7406           * If this target supports LUN reset, try to enable it.
7842 7407           */
7843 7408          if (un->un_f_lun_reset_enabled) {
7844 7409                  if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) {
7845      -                        SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
     7410 +                        SD_INFO(SD_LOG_ATTACH_DETACH, un, "sdattach: "
7846 7411                              "un:0x%p lun_reset capability set\n", un);
7847 7412                  } else {
7848      -                        SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
     7413 +                        SD_INFO(SD_LOG_ATTACH_DETACH, un, "sdattach: "
7849 7414                              "un:0x%p lun-reset capability not set\n", un);
7850 7415                  }
7851 7416          }
7852 7417  
7853 7418          /*
7854      -         * Adjust the maximum transfer size. This is to fix
     7419 +         * XXX Adjust the maximum transfer size. This was to fix
7855 7420           * the problem of partial DMA support on SPARC. Some
7856 7421           * HBA drivers, like aac, have a very small dma_attr_maxxfer
7857 7422           * size, which requires partial DMA support on SPARC.
7858      -         * In the future the SPARC pci nexus driver may solve
7859      -         * the problem instead of this fix.
7860 7423           */
7861 7424          max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1);
7862 7425          if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) {
7863      -                /* We need DMA partial even on sparc to ensure sddump() works */
7864 7426                  un->un_max_xfer_size = max_xfer_size;
7865 7427                  if (un->un_partial_dma_supported == 0)
7866 7428                          un->un_partial_dma_supported = 1;
7867 7429          }
7868 7430          if (ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
7869 7431              DDI_PROP_DONTPASS, "buf_break", 0) == 1) {
7870 7432                  if (ddi_xbuf_attr_setup_brk(un->un_xbuf_attr,
7871 7433                      un->un_max_xfer_size) == 1) {
7872 7434                          un->un_buf_breakup_supported = 1;
7873      -                        SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
     7435 +                        SD_INFO(SD_LOG_ATTACH_DETACH, un, "sdattach: "
7874 7436                              "un:0x%p Buf breakup enabled\n", un);
7875 7437                  }
7876 7438          }
7877 7439  
7878 7440          /*
7879 7441           * Set PKT_DMA_PARTIAL flag.
7880 7442           */
7881 7443          if (un->un_partial_dma_supported == 1) {
7882 7444                  un->un_pkt_flags = PKT_DMA_PARTIAL;
7883 7445          } else {
7884 7446                  un->un_pkt_flags = 0;
7885 7447          }
7886 7448  
7887      -        /* Initialize sd_ssc_t for internal uscsi commands */
7888      -        ssc = sd_ssc_init(un);
7889 7449          scsi_fm_init(devp);
7890 7450  
7891 7451          /*
7892      -         * Allocate memory for SCSI FMA stuffs.
     7452 +         * Allocate memory for SCSI FMA stuff.
7893 7453           */
7894 7454          un->un_fm_private =
7895 7455              kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP);
7896 7456          sfip = (struct sd_fm_internal *)un->un_fm_private;
7897 7457          sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd;
7898 7458          sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo;
7899 7459          sfip->fm_ssc.ssc_un = un;
7900 7460  
7901 7461          if (ISCD(un) ||
7902 7462              un->un_f_has_removable_media ||
↓ open down ↓ 14 lines elided ↑ open up ↑
7917 7477                  int fm_scsi_log;
7918 7478                  fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
7919 7479                      DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0);
7920 7480  
7921 7481                  if (fm_scsi_log)
7922 7482                          sfip->fm_log_level = SD_FM_LOG_EREPORT;
7923 7483                  else
7924 7484                          sfip->fm_log_level = SD_FM_LOG_SILENT;
7925 7485          }
7926 7486  
     7487 +        /* Initialize sd_ssc_t for internal uscsi commands */
     7488 +        ssc = sd_ssc_init(un);
     7489 +
     7490 +        mutex_enter(SD_MUTEX(un));
7927 7491          /*
     7492 +         * Initialize the devid for the unit. Indicate target reservation so
     7493 +         * that no real I/O is done for devices that need devid fabrication.
     7494 +         * We will try again in sd_unit_attach() if necessary.
     7495 +         */
     7496 +        if (un->un_f_devid_supported) {
     7497 +                sd_register_devid(ssc, devi, SD_TARGET_IS_RESERVED);
     7498 +        }
     7499 +        mutex_exit(SD_MUTEX(un));
     7500 +
     7501 +        /* Uninitialize sd_ssc_t pointer */
     7502 +        sd_ssc_fini(ssc);
     7503 +
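          +        /* Allocate the common disk label (cmlb) handle and attach to cmlb */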
     7504 +        cmlb_alloc_handle(&un->un_cmlbhandle);
     7505 +
     7506 +        if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype,
     7507 +            VOID2BOOLEAN(un->un_f_has_removable_media != 0),
     7508 +            VOID2BOOLEAN(un->un_f_is_hotpluggable != 0),
     7509 +            un->un_node_type, 0, un->un_cmlbhandle,
     7510 +            (void *)SD_PATH_DIRECT) != 0) {
     7511 +                goto cmlb_attach_failed;
     7512 +        }
     7513 +
     7514 +        /*
7928 7515           * At this point in the attach, we have enough info in the
7929 7516           * soft state to be able to issue commands to the target.
7930 7517           *
     7518 +         * Dispatch a taskq job to finish the attach so that we do not
     7519 +         * hold the device tree lock for too long. If the dispatch fails,
     7520 +         * roll back and fail the attach.
     7521 +         */
     7522 +
     7523 +        if (taskq_dispatch(sd_tq, sd_unit_attach, devi, KM_PUSHPAGE) != NULL)
     7524 +                return (DDI_SUCCESS);
     7525 +
     7526 +        cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
     7527 +        cmlb_free_handle(&un->un_cmlbhandle);
     7528 +
     7529 +cmlb_attach_failed:
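          +        /* Attach failed: undo the setup done so far */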
     7530 +        mutex_enter(SD_MUTEX(un));
     7531 +
     7532 +        /* Deallocate SCSI FMA memory spaces */
     7533 +        kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal));
     7534 +
     7535 +        /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. restart */
     7536 +        if (un->un_direct_priority_timeid != NULL) {
     7537 +                timeout_id_t temp_id = un->un_direct_priority_timeid;
     7538 +                un->un_direct_priority_timeid = NULL;
     7539 +                mutex_exit(SD_MUTEX(un));
     7540 +                (void) untimeout(temp_id);
     7541 +                mutex_enter(SD_MUTEX(un));
     7542 +        }
     7543 +
     7544 +        /* Cancel any pending start/stop timeouts */
     7545 +        if (un->un_startstop_timeid != NULL) {
     7546 +                timeout_id_t temp_id = un->un_startstop_timeid;
     7547 +                un->un_startstop_timeid = NULL;
     7548 +                mutex_exit(SD_MUTEX(un));
     7549 +                (void) untimeout(temp_id);
     7550 +                mutex_enter(SD_MUTEX(un));
     7551 +        }
     7552 +
     7553 +        /* Cancel any pending reset-throttle timeouts */
     7554 +        if (un->un_reset_throttle_timeid != NULL) {
     7555 +                timeout_id_t temp_id = un->un_reset_throttle_timeid;
     7556 +                un->un_reset_throttle_timeid = NULL;
     7557 +                mutex_exit(SD_MUTEX(un));
     7558 +                (void) untimeout(temp_id);
     7559 +                mutex_enter(SD_MUTEX(un));
     7560 +        }
     7561 +
     7562 +        /* Cancel rmw warning message timeouts */
     7563 +        if (un->un_rmw_msg_timeid != NULL) {
     7564 +                timeout_id_t temp_id = un->un_rmw_msg_timeid;
     7565 +                un->un_rmw_msg_timeid = NULL;
     7566 +                mutex_exit(SD_MUTEX(un));
     7567 +                (void) untimeout(temp_id);
     7568 +                mutex_enter(SD_MUTEX(un));
     7569 +        }
     7570 +
     7571 +        /* Cancel any pending retry timeouts */
     7572 +        if (un->un_retry_timeid != NULL) {
     7573 +                timeout_id_t temp_id = un->un_retry_timeid;
     7574 +                un->un_retry_timeid = NULL;
     7575 +                mutex_exit(SD_MUTEX(un));
     7576 +                (void) untimeout(temp_id);
     7577 +                mutex_enter(SD_MUTEX(un));
     7578 +        }
     7579 +
     7580 +        /* Cancel any pending delayed cv broadcast timeouts */
     7581 +        if (un->un_dcvb_timeid != NULL) {
     7582 +                timeout_id_t temp_id = un->un_dcvb_timeid;
     7583 +                un->un_dcvb_timeid = NULL;
     7584 +                mutex_exit(SD_MUTEX(un));
     7585 +                (void) untimeout(temp_id);
     7586 +                mutex_enter(SD_MUTEX(un));
     7587 +        }
     7588 +
     7589 +        mutex_exit(SD_MUTEX(un));
     7590 +
     7591 +        /* There should be no in-progress I/O, so ASSERT the counters are zero */
     7592 +        ASSERT(un->un_ncmds_in_transport == 0);
     7593 +        ASSERT(un->un_ncmds_in_driver == 0);
     7594 +
     7595 +        /* Do not free the softstate if the callback routine is active */
     7596 +        sd_sync_with_callback(un);
     7597 +
     7598 +        /*
     7599 +         * Partition stats apparently are not used with removables. These would
     7600 +         * not have been created during attach, so no need to clean them up...
     7601 +         */
     7602 +        if (un->un_errstats != NULL) {
     7603 +                kstat_delete(un->un_errstats);
     7604 +                un->un_errstats = NULL;
     7605 +        }
     7606 +
     7607 +create_errstats_failed:
     7608 +
     7609 +        if (un->un_stats != NULL) {
     7610 +                kstat_delete(un->un_stats);
     7611 +                un->un_stats = NULL;
     7612 +        }
     7613 +
     7614 +        if (un->un_unmapstats != NULL) {
     7615 +                kstat_delete(un->un_unmapstats_ks);
     7616 +                un->un_unmapstats_ks = NULL;
     7617 +                un->un_unmapstats = NULL;
     7618 +        }
     7619 +
     7620 +        if (un->un_lat_ksp != NULL) {
     7621 +                kstat_delete(un->un_lat_ksp);
     7622 +                un->un_lat_ksp = NULL;
     7623 +                un->un_lat_stats = NULL;
     7624 +        }
     7625 +
     7626 +        ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi);
     7627 +        ddi_xbuf_attr_destroy(un->un_xbuf_attr);
     7628 +
     7629 +        ddi_prop_remove_all(devi);
     7630 +        sema_destroy(&un->un_semoclose);
     7631 +        cv_destroy(&un->un_state_cv);
     7632 +        cv_destroy(&un->un_detach_cv);
     7633 +        sd_free_rqs(un);
     7634 +
     7635 +alloc_rqs_failed:
     7636 +
     7637 +        devp->sd_private = NULL;
     7638 +        bzero(un, sizeof (struct sd_lun));      /* Clear any stale data! */
     7639 +
     7640 +        /*
     7641 +         * Note: the man pages are unclear as to whether or not doing a
     7642 +         * ddi_soft_state_free(sd_state, instance) is the right way to
     7643 +         * clean up after the ddi_soft_state_zalloc() if the subsequent
     7644 +         * ddi_get_soft_state() fails.  The implication seems to be
     7645 +         * that the get_soft_state cannot fail if the zalloc succeeds.
     7646 +         */
     7647 +        ddi_soft_state_free(sd_state, instance);
     7648 +
     7649 +probe_failed:
     7650 +        scsi_unprobe(devp);
     7651 +
     7652 +        return (DDI_FAILURE);
     7653 +}
     7654 +
     7655 +
     7656 +/*
     7657 + *    Function: sddetach
     7658 + *
     7659 + * Description: Driver's detach(9E) entry point function.
     7660 + *
     7661 + *   Arguments: devi - opaque device info handle
     7662 + *              cmd  - detach  type
     7663 + *
     7664 + * Return Code: DDI_SUCCESS
     7665 + *              DDI_FAILURE
     7666 + *
     7667 + *     Context: Kernel thread context
     7668 + */
     7669 +
     7670 +static int
     7671 +sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
     7672 +{
     7673 +        switch (cmd) {
     7674 +        case DDI_DETACH:
     7675 +                return (sd_unit_detach(devi));
     7676 +        case DDI_SUSPEND:
     7677 +                return (sd_ddi_suspend(devi));
     7678 +        default:
     7679 +                break;
     7680 +        }
     7681 +        return (DDI_FAILURE);
     7682 +}
     7683 +
     7684 +
     7685 +/*
     7686 + *     Function: sd_sync_with_callback
     7687 + *
     7688 + *  Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft
     7689 + *               state while the callback routine is active.
     7690 + *
     7691 + *    Arguments: un: softstate structure for the instance
     7692 + *
     7693 + *      Context: Kernel thread context
     7694 + */
     7695 +
     7696 +static void
     7697 +sd_sync_with_callback(struct sd_lun *un)
     7698 +{
     7699 +        ASSERT(un != NULL);
     7700 +
     7701 +        mutex_enter(SD_MUTEX(un));
     7702 +
     7703 +        ASSERT(un->un_in_callback >= 0);
     7704 +
     7705 +        while (un->un_in_callback > 0) {
     7706 +                mutex_exit(SD_MUTEX(un));
     7707 +                delay(2);
     7708 +                mutex_enter(SD_MUTEX(un));
     7709 +        }
     7710 +
     7711 +        mutex_exit(SD_MUTEX(un));
     7712 +}
     7713 +
     7714 +/*
     7715 + *    Function: sd_unit_attach
     7716 + *
     7717 + * Description: Completes the DDI_ATTACH processing started by
     7718 + *              sdattach(). Performs the remaining structure and
     7719 + *              device initializations for the unit, including the
     7720 + *              commands issued to the target itself.
     7721 + *
     7722 + *   Arguments: arg: the system's dev_info_t for the device.
     7723 + *
     7724 + * Return Code: none.
     7725 + *
     7726 + *     Context: Dispatched from sdattach() via taskq.
     7727 + *              Kernel thread context only.  Can sleep.
     7728 + */
     7729 +void
     7730 +sd_unit_attach(void *arg)
     7731 +{
     7732 +        dev_info_t              *devi = arg;
     7733 +        struct  scsi_device     *devp = ddi_get_driver_private(devi);
     7734 +        struct  sd_lun          *un = (struct sd_lun *)devp->sd_private;
     7735 +        char                    name_str[48];
     7736 +        int     reservation_flag = SD_TARGET_IS_UNRESERVED;
     7737 +        int     rval;
     7738 +        int     wc_enabled;
     7739 +        int     wc_changeable;
     7740 +        int     tgt;
     7741 +        uint64_t        capacity;
     7742 +        uint_t          lbasize = 0;
     7743 +        dev_info_t      *pdip = ddi_get_parent(devi);
     7744 +        int             geom_label_valid = 0;
     7745 +        sd_ssc_t        *ssc;
     7746 +        int             status;
     7747 +        char            *devid;
     7748 +
     7749 +        /*
     7750 +         * Retrieve the target ID of the device.
     7751 +         */
     7752 +        tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
     7753 +            SCSI_ADDR_PROP_TARGET, -1);
     7754 +
     7755 +        /*
7931 7756           * All command paths used below MUST issue their commands as
7932 7757           * SD_PATH_DIRECT. This is important as intermediate layers
7933 7758           * are not all initialized yet (such as PM).
7934 7759           */
7935 7760  
     7761 +        /* Initialize sd_ssc_t for internal uscsi commands */
     7762 +        ssc = sd_ssc_init(un);
     7763 +
7936 7764          /*
7937 7765           * Send a TEST UNIT READY command to the device. This should clear
7938 7766           * any outstanding UNIT ATTENTION that may be present.
7939 7767           *
7940 7768           * Note: Don't check for success, just track if there is a reservation,
7941 7769           * this is a throw away command to clear any unit attentions.
7942 7770           *
7943 7771           * Note: This MUST be the first command issued to the target during
7944 7772           * attach to ensure power on UNIT ATTENTIONS are cleared.
7945 7773           * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated
↓ open down ↓ 43 lines elided ↑ open up ↑
7989 7817                                                  sd_enable_descr_sense(ssc);
7990 7818                                          }
7991 7819  #else
7992 7820                                          /* 32-bit kernels can't handle this */
7993 7821                                          scsi_log(SD_DEVINFO(un),
7994 7822                                              sd_label, CE_WARN,
7995 7823                                              "disk has %llu blocks, which "
7996 7824                                              "is too large for a 32-bit "
7997 7825                                              "kernel", capacity);
7998 7826  
7999      -#if defined(__i386) || defined(__amd64)
8000 7827                                          /*
8001 7828                                           * 1TB disk was treated as (1T - 512)B
8002 7829                                           * in the past, so that it might have
8003 7830                                           * valid VTOC and solaris partitions,
8004 7831                                           * we have to allow it to continue to
8005 7832                                           * work.
8006 7833                                           */
8007      -                                        if (capacity -1 > DK_MAX_BLOCKS)
     7834 +                                        if (capacity - 1 > DK_MAX_BLOCKS)
     7835 +                                                goto spinup_failed;
8008 7836  #endif
8009      -                                        goto spinup_failed;
8010      -#endif
8011 7837                                  }
8012 7838  
8013 7839                                  /*
8014 7840                                   * Here it's not necessary to check the case:
8015 7841                                   * the capacity of the device is bigger than
8016 7842                                   * what the max hba cdb can support. Because
8017 7843                                   * sd_send_scsi_READ_CAPACITY will retrieve
8018 7844                                   * the capacity by sending USCSI command, which
8019 7845                                   * is constrained by the max hba cdb. Actually,
8020 7846                                   * sd_send_scsi_READ_CAPACITY will return
↓ open down ↓ 42 lines elided ↑ open up ↑
8063 7889                                      "returned reservation conflict\n", un);
8064 7890                                  reservation_flag = SD_TARGET_IS_RESERVED;
8065 7891                                  sd_ssc_assessment(ssc, SD_FMT_IGNORE);
8066 7892                                  break;
8067 7893                          default:
8068 7894                                  /*
8069 7895                                   * Likewise, should never get here if the
8070 7896                                   * spin-up succeeded. Just continue with
8071 7897                                   * the attach...
8072 7898                                   */
8073      -                                if (status == EIO)
     7899 +                                if (status == EIO) {
8074 7900                                          sd_ssc_assessment(ssc,
8075 7901                                              SD_FMT_STATUS_CHECK);
8076      -                                else
     7902 +                                        goto spinup_failed;
     7903 +                                } else {
8077 7904                                          sd_ssc_assessment(ssc,
8078 7905                                              SD_FMT_IGNORE);
     7906 +                                }
8079 7907                                  break;
8080 7908                          }
8081 7909                          break;
8082 7910                  case EACCES:
8083 7911                          /*
8084 7912                           * Device is reserved by another host.  In this case
8085 7913                           * we could not spin it up or read the capacity, but
8086 7914                           * we continue with the attach anyway.
8087 7915                           */
8088 7916                          SD_INFO(SD_LOG_ATTACH_DETACH, un,
↓ open down ↓ 29 lines elided ↑ open up ↑
8118 7946           * the B_FAILFAST flag (for layered drivers)
8119 7947           */
8120 7948          (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
8121 7949              "ddi-failfast-supported", NULL, 0);
8122 7950  
8123 7951          /*
8124 7952           * Initialize power management
8125 7953           */
8126 7954          mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL);
8127 7955          cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL);
     7956 +#ifdef notyet
8128 7957          sd_setup_pm(ssc, devi);
     7958 +#endif
8129 7959          if (un->un_f_pm_is_enabled == FALSE) {
8130 7960                  /*
8131 7961                   * For performance, point to a jump table that does
8132 7962                   * not include pm.
8133 7963                   * The direct and priority chains don't change with PM.
8134 7964                   *
8135 7965                   * Note: this is currently done based on individual device
8136 7966                   * capabilities. When an interface for determining system
8137 7967                   * power enabled state becomes available, or when additional
8138 7968                   * layers are added to the command chain, these values will
↓ open down ↓ 14 lines elided ↑ open up ↑
8153 7983           *
8154 7984           * Note: The use of a global here can have unintended consequences. A
8155 7985           * per instance variable is preferable to match the capabilities of
8156 7986           * different underlying hba's (4402600)
8157 7987           */
8158 7988          sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi,
8159 7989              DDI_PROP_DONTPASS, "retry-on-reservation-conflict",
8160 7990              sd_retry_on_reservation_conflict);
8161 7991          if (sd_retry_on_reservation_conflict != 0) {
8162 7992                  sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY,
8163      -                    devi, DDI_PROP_DONTPASS, sd_resv_conflict_name,
     7993 +                    devi, DDI_PROP_DONTPASS, "sd_retry_on_reservation_conflict",
8164 7994                      sd_retry_on_reservation_conflict);
8165 7995          }
8166 7996  
8167 7997          /* Set up options for QFULL handling. */
8168 7998          if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
8169 7999              "qfull-retries", -1)) != -1) {
8170 8000                  (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries",
8171 8001                      rval, 1);
8172 8002          }
8173 8003          if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
↓ open down ↓ 15 lines elided ↑ open up ↑
8189 8019          /*
8190 8020           * Check Block Device Characteristics VPD.
8191 8021           */
8192 8022          sd_check_bdc_vpd(ssc);
8193 8023  
8194 8024          /*
8195 8025           * Check whether the drive is in emulation mode.
8196 8026           */
8197 8027          sd_check_emulation_mode(ssc);
8198 8028  
8199      -        cmlb_alloc_handle(&un->un_cmlbhandle);
8200      -
8201      -#if defined(__i386) || defined(__amd64)
8202      -        /*
8203      -         * On x86, compensate for off-by-1 legacy error
8204      -         */
     8029 +        /* Compensate for off-by-1 legacy error */
8205 8030          if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable &&
8206 8031              (lbasize == un->un_sys_blocksize))
8207      -                offbyone = CMLB_OFF_BY_ONE;
8208      -#endif
     8032 +                cmlb_workaround_off_by_one(un->un_cmlbhandle);
8209 8033  
8210      -        if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype,
8211      -            VOID2BOOLEAN(un->un_f_has_removable_media != 0),
8212      -            VOID2BOOLEAN(un->un_f_is_hotpluggable != 0),
8213      -            un->un_node_type, offbyone, un->un_cmlbhandle,
8214      -            (void *)SD_PATH_DIRECT) != 0) {
8215      -                goto cmlb_attach_failed;
8216      -        }
8217      -
8218      -
8219 8034          /*
8220 8035           * Read and validate the device's geometry (ie, disk label)
8221 8036           * A new unformatted drive will not have a valid geometry, but
8222 8037           * the driver needs to successfully attach to this device so
8223 8038           * the drive can be formatted via ioctls.
8224 8039           */
8225 8040          geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0,
8226 8041              (void *)SD_PATH_DIRECT) == 0) ? 1: 0;
8227 8042  
8228 8043          mutex_enter(SD_MUTEX(un));
8229 8044  
8230 8045          /*
8231      -         * Read and initialize the devid for the unit.
     8046 +         * Read and initialize the devid for the unit if not done already.
8232 8047           */
8233      -        if (un->un_f_devid_supported) {
     8048 +        if (un->un_f_devid_supported && un->un_devid == NULL) {
8234 8049                  sd_register_devid(ssc, devi, reservation_flag);
8235 8050          }
8236 8051          mutex_exit(SD_MUTEX(un));
8237 8052  
8238      -#if (defined(__fibre))
8239      -        /*
8240      -         * Register callbacks for fibre only.  You can't do this solely
8241      -         * on the basis of the devid_type because this is hba specific.
8242      -         * We need to query our hba capabilities to find out whether to
8243      -         * register or not.
8244      -         */
8245      -        if (un->un_f_is_fibre) {
8246      -                if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
8247      -                        sd_init_event_callbacks(un);
8248      -                        SD_TRACE(SD_LOG_ATTACH_DETACH, un,
8249      -                            "sd_unit_attach: un:0x%p event callbacks inserted",
8250      -                            un);
8251      -                }
8252      -        }
8253      -#endif
8254      -
8255 8053          if (un->un_f_opt_disable_cache == TRUE) {
8256 8054                  /*
8257 8055                   * Disable both read cache and write cache.  This is
8258 8056                   * the historic behavior of the keywords in the config file.
8259 8057                   */
8260 8058                  if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) !=
8261 8059                      0) {
8262 8060                          SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8263 8061                              "sd_unit_attach: un:0x%p Could not disable "
8264 8062                              "caching", un);
↓ open down ↓ 91 lines elided ↑ open up ↑
8356 8154          if (un->un_f_pkstats_enabled && geom_label_valid) {
8357 8155                  sd_set_pstats(un);
8358 8156                  SD_TRACE(SD_LOG_IO_PARTITION, un,
8359 8157                      "sd_unit_attach: un:0x%p pstats created and set\n", un);
8360 8158          }
8361 8159  
8362 8160          sd_set_errstats(un);
8363 8161          SD_TRACE(SD_LOG_ATTACH_DETACH, un,
8364 8162              "sd_unit_attach: un:0x%p errstats set\n", un);
8365 8163  
     8164 +        sd_setup_blk_limits(ssc);
8366 8165  
8367 8166          /*
8368 8167           * After successfully attaching an instance, we record the information
8369 8168           * of how many luns have been attached on the relative target and
8370 8169           * controller for parallel SCSI. This information is used when sd tries
8371 8170           * to set the tagged queuing capability in HBA.
8372 8171           */
8373 8172          if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) {
8374 8173                  sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH);
8375 8174          }
8376 8175  
8377 8176          SD_TRACE(SD_LOG_ATTACH_DETACH, un,
8378 8177              "sd_unit_attach: un:0x%p exit success\n", un);
8379 8178  
8380 8179          /* Uninitialize sd_ssc_t pointer */
8381 8180          sd_ssc_fini(ssc);
8382 8181  
8383      -        return (DDI_SUCCESS);
     8182 +        /* attach finished, switch to SD_STATE_NORMAL */
     8183 +        mutex_enter(SD_MUTEX(un));
     8184 +        New_state(un, SD_STATE_NORMAL);
     8185 +        cv_broadcast(&un->un_suspend_cv);
     8186 +        mutex_exit(SD_MUTEX(un));
8384 8187  
     8188 +        return;
     8189 +
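Attach now finishes by publishing a terminal state and waking anyone blocked on un_suspend_cv; sdopen(), sdclose(), and sdread() below wait while the state is still SD_STATE_ATTACHING and fail if it ends up as SD_STATE_ATTACH_FAILED. A minimal sketch of that handshake, with simplified state and field names (not the driver's actual structures):

    #include <sys/types.h>
    #include <sys/errno.h>
    #include <sys/ksynch.h>

    typedef enum { U_ATTACHING, U_NORMAL, U_ATTACH_FAILED } unit_state_t;

    typedef struct unit {
            kmutex_t        u_lock;         /* initialized at attach time */
            kcondvar_t      u_suspend_cv;   /* initialized at attach time */
            unit_state_t    u_state;        /* starts as U_ATTACHING */
    } unit_t;

    /* Attach completion: publish the final state and wake all waiters. */
    static void
    unit_attach_done(unit_t *un, boolean_t ok)
    {
            mutex_enter(&un->u_lock);
            un->u_state = ok ? U_NORMAL : U_ATTACH_FAILED;
            cv_broadcast(&un->u_suspend_cv);
            mutex_exit(&un->u_lock);
    }

    /* Open/read path: wait out the attach, then fail if it did not succeed. */
    static int
    unit_wait_attached(unit_t *un)
    {
            int err = 0;

            mutex_enter(&un->u_lock);
            while (un->u_state == U_ATTACHING)
                    cv_wait(&un->u_suspend_cv, &un->u_lock);
            if (un->u_state == U_ATTACH_FAILED)
                    err = EIO;
            mutex_exit(&un->u_lock);
            return (err);
    }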
8385 8190          /*
8386 8191           * An error occurred during the attach; clean up & return failure.
8387 8192           */
     8193 +
8388 8194  wm_cache_failed:
8389 8195  devid_failed:
8390      -        ddi_remove_minor_node(devi, NULL);
8391      -
8392      -cmlb_attach_failed:
8393 8196          /*
8394 8197           * Cleanup from the scsi_ifsetcap() calls (437868)
8395 8198           */
8396 8199          (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
8397 8200          (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
8398 8201  
8399 8202          /*
8400 8203           * Refer to the comments of setting tagged-qing in the beginning of
8401 8204           * sd_unit_attach. We can only disable tagged queuing when there is
8402 8205           * no lun attached on the target.
8403 8206           */
8404 8207          if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
8405 8208                  (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
8406 8209          }
8407 8210  
8408 8211          if (un->un_f_is_fibre == FALSE) {
8409 8212                  (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
8410 8213          }
8411 8214  
8412 8215  spinup_failed:
8413      -
8414      -        /* Uninitialize sd_ssc_t pointer */
8415      -        sd_ssc_fini(ssc);
8416      -
     8216 +        /* attach failed, switch to SD_STATE_ATTACH_FAILED */
8417 8217          mutex_enter(SD_MUTEX(un));
8418      -
8419      -        /* Deallocate SCSI FMA memory spaces */
8420      -        kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal));
8421      -
8422      -        /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. restart */
8423      -        if (un->un_direct_priority_timeid != NULL) {
8424      -                timeout_id_t temp_id = un->un_direct_priority_timeid;
8425      -                un->un_direct_priority_timeid = NULL;
8426      -                mutex_exit(SD_MUTEX(un));
8427      -                (void) untimeout(temp_id);
8428      -                mutex_enter(SD_MUTEX(un));
8429      -        }
8430      -
8431      -        /* Cancel any pending start/stop timeouts */
8432      -        if (un->un_startstop_timeid != NULL) {
8433      -                timeout_id_t temp_id = un->un_startstop_timeid;
8434      -                un->un_startstop_timeid = NULL;
8435      -                mutex_exit(SD_MUTEX(un));
8436      -                (void) untimeout(temp_id);
8437      -                mutex_enter(SD_MUTEX(un));
8438      -        }
8439      -
8440      -        /* Cancel any pending reset-throttle timeouts */
8441      -        if (un->un_reset_throttle_timeid != NULL) {
8442      -                timeout_id_t temp_id = un->un_reset_throttle_timeid;
8443      -                un->un_reset_throttle_timeid = NULL;
8444      -                mutex_exit(SD_MUTEX(un));
8445      -                (void) untimeout(temp_id);
8446      -                mutex_enter(SD_MUTEX(un));
8447      -        }
8448      -
8449      -        /* Cancel rmw warning message timeouts */
8450      -        if (un->un_rmw_msg_timeid != NULL) {
8451      -                timeout_id_t temp_id = un->un_rmw_msg_timeid;
8452      -                un->un_rmw_msg_timeid = NULL;
8453      -                mutex_exit(SD_MUTEX(un));
8454      -                (void) untimeout(temp_id);
8455      -                mutex_enter(SD_MUTEX(un));
8456      -        }
8457      -
8458      -        /* Cancel any pending retry timeouts */
8459      -        if (un->un_retry_timeid != NULL) {
8460      -                timeout_id_t temp_id = un->un_retry_timeid;
8461      -                un->un_retry_timeid = NULL;
8462      -                mutex_exit(SD_MUTEX(un));
8463      -                (void) untimeout(temp_id);
8464      -                mutex_enter(SD_MUTEX(un));
8465      -        }
8466      -
8467      -        /* Cancel any pending delayed cv broadcast timeouts */
8468      -        if (un->un_dcvb_timeid != NULL) {
8469      -                timeout_id_t temp_id = un->un_dcvb_timeid;
8470      -                un->un_dcvb_timeid = NULL;
8471      -                mutex_exit(SD_MUTEX(un));
8472      -                (void) untimeout(temp_id);
8473      -                mutex_enter(SD_MUTEX(un));
8474      -        }
8475      -
     8218 +        New_state(un, SD_STATE_ATTACH_FAILED);
     8219 +        cv_broadcast(&un->un_suspend_cv);
8476 8220          mutex_exit(SD_MUTEX(un));
8477 8221  
8478      -        /* There should not be any in-progress I/O so ASSERT this check */
8479      -        ASSERT(un->un_ncmds_in_transport == 0);
8480      -        ASSERT(un->un_ncmds_in_driver == 0);
     8222 +        devid = DEVI(devi)->devi_devid_str;
     8223 +        scsi_fm_ereport_post(un->un_sd, 0,
     8224 +            NULL, "disk.attach-failure", ssc->ssc_uscsi_info->ui_ena,
     8225 +            devid, NULL, DDI_NOSLEEP, NULL,
     8226 +            FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
     8227 +            DEVID_IF_KNOWN(devid));
8481 8228  
8482      -        /* Do not free the softstate if the callback routine is active */
8483      -        sd_sync_with_callback(un);
8484      -
8485      -        /*
8486      -         * Partition stats apparently are not used with removables. These would
8487      -         * not have been created during attach, so no need to clean them up...
8488      -         */
8489      -        if (un->un_errstats != NULL) {
8490      -                kstat_delete(un->un_errstats);
8491      -                un->un_errstats = NULL;
8492      -        }
8493      -
8494      -create_errstats_failed:
8495      -
8496      -        if (un->un_stats != NULL) {
8497      -                kstat_delete(un->un_stats);
8498      -                un->un_stats = NULL;
8499      -        }
8500      -
8501      -        ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi);
8502      -        ddi_xbuf_attr_destroy(un->un_xbuf_attr);
8503      -
8504      -        ddi_prop_remove_all(devi);
8505      -        sema_destroy(&un->un_semoclose);
8506      -        cv_destroy(&un->un_state_cv);
8507      -
8508      -        sd_free_rqs(un);
8509      -
8510      -alloc_rqs_failed:
8511      -
8512      -        devp->sd_private = NULL;
8513      -        bzero(un, sizeof (struct sd_lun));      /* Clear any stale data! */
8514      -
8515      -        /*
8516      -         * Note: the man pages are unclear as to whether or not doing a
8517      -         * ddi_soft_state_free(sd_state, instance) is the right way to
8518      -         * clean up after the ddi_soft_state_zalloc() if the subsequent
8519      -         * ddi_get_soft_state() fails.  The implication seems to be
8520      -         * that the get_soft_state cannot fail if the zalloc succeeds.
8521      -         */
8522      -#ifndef XPV_HVM_DRIVER
8523      -        ddi_soft_state_free(sd_state, instance);
8524      -#endif /* !XPV_HVM_DRIVER */
8525      -
8526      -probe_failed:
8527      -        scsi_unprobe(devp);
8528      -
8529      -        return (DDI_FAILURE);
     8229 +        /* Uninitialize sd_ssc_t pointer */
     8230 +        sd_ssc_fini(ssc);
     8231 +        SD_ERROR(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach failed: un: %p",
     8232 +            (void *)un);
8530 8233  }
8531 8234  
8532 8235  
8533 8236  /*
8534 8237   *    Function: sd_unit_detach
8535 8238   *
8536 8239   * Description: Performs DDI_DETACH processing for sddetach().
8537 8240   *
8538 8241   * Return Code: DDI_SUCCESS
8539 8242   *              DDI_FAILURE
↓ open down ↓ 9 lines elided ↑ open up ↑
8549 8252          int                     i;
8550 8253          int                     tgt;
8551 8254          dev_t                   dev;
8552 8255          dev_info_t              *pdip = ddi_get_parent(devi);
8553 8256          int                     instance = ddi_get_instance(devi);
8554 8257  
8555 8258          mutex_enter(&sd_detach_mutex);
8556 8259  
8557 8260          /*
8558 8261           * Fail the detach for any of the following:
8559      -         *  - Unable to get the sd_lun struct for the instance
8560      -         *  - A layered driver has an outstanding open on the instance
8561      -         *  - Another thread is already detaching this instance
8562      -         *  - Another thread is currently performing an open
     8262 +         * - Unable to get the sd_lun struct for the instance
     8263 +         * - The instance is still attaching
     8264 +         * - Another thread is already detaching this instance
     8265 +         * - Another thread is currently performing an open
     8266 +         *
     8267 +         * Additionally, if "device gone" flag is not set:
     8268 +         * - There are outstanding commands in driver
     8269 +         * - There are outstanding commands in transport
8563 8270           */
8564 8271          devp = ddi_get_driver_private(devi);
8565      -        if ((devp == NULL) ||
8566      -            ((un = (struct sd_lun *)devp->sd_private) == NULL) ||
8567      -            (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) ||
8568      -            (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) {
     8272 +        if (devp == NULL || (un = (struct sd_lun *)devp->sd_private) == NULL ||
     8273 +            un->un_detach_count != 0 || un->un_opens_in_progress != 0 ||
     8274 +            (!DEVI_IS_GONE(devi) &&
     8275 +            (un->un_state == SD_STATE_RWAIT ||
     8276 +            un->un_state == SD_STATE_ATTACHING ||
     8277 +            un->un_ncmds_in_driver != 0 ||
     8278 +            un->un_ncmds_in_transport != 0))) {
8569 8279                  mutex_exit(&sd_detach_mutex);
8570 8280                  return (DDI_FAILURE);
8571 8281          }
8572 8282  
8573      -        SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un);
     8283 +        SD_TRACE(SD_LOG_ATTACH_DETACH, un, "%s: entry 0x%p\n", __func__, un);
8574 8284  
8575 8285          /*
8576 8286           * Mark this instance as currently in a detach, to inhibit any
8577 8287           * opens from a layered driver.
8578 8288           */
8579 8289          un->un_detach_count++;
8580 8290          mutex_exit(&sd_detach_mutex);
8581 8291  
8582 8292          tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
8583 8293              SCSI_ADDR_PROP_TARGET, -1);
8584 8294  
8585 8295          dev = sd_make_device(SD_DEVINFO(un));
8586 8296  
8587      -#ifndef lint
8588      -        _NOTE(COMPETING_THREADS_NOW);
8589      -#endif
8590      -
8591 8297          mutex_enter(SD_MUTEX(un));
8592 8298  
8593 8299          /*
8594 8300           * Fail the detach if there are any outstanding layered
8595 8301           * opens on this device.
8596 8302           */
8597 8303          for (i = 0; i < NDKMAP; i++) {
8598 8304                  if (un->un_ocmap.lyropen[i] != 0) {
8599 8305                          goto err_notclosed;
8600 8306                  }
8601 8307          }
8602 8308  
8603 8309          /*
8604      -         * Verify there are NO outstanding commands issued to this device.
8605      -         * ie, un_ncmds_in_transport == 0.
8606      -         * It's possible to have outstanding commands through the physio
8607      -         * code path, even though everything's closed.
     8310 +         * If the attach wasn't successful, skip the cleanup steps that only
     8311 +         * apply to a fully attached unit.
8608 8312           */
8609      -        if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) ||
8610      -            (un->un_direct_priority_timeid != NULL) ||
8611      -            (un->un_state == SD_STATE_RWAIT)) {
     8313 +        if (un->un_state == SD_STATE_ATTACH_FAILED) {
8612 8314                  mutex_exit(SD_MUTEX(un));
8613      -                SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8614      -                    "sd_dr_detach: Detach failure due to outstanding cmds\n");
8615      -                goto err_stillbusy;
     8315 +                goto no_attach_cleanup;
8616 8316          }
8617 8317  
8618 8318          /*
8619 8319           * If we have the device reserved, release the reservation.
8620 8320           */
8621      -        if ((un->un_resvd_status & SD_RESERVE) &&
     8321 +        if (!DEVI_IS_GONE(devi) &&
     8322 +            (un->un_resvd_status & SD_RESERVE) &&
8622 8323              !(un->un_resvd_status & SD_LOST_RESERVE)) {
8623 8324                  mutex_exit(SD_MUTEX(un));
8624 8325                  /*
8625 8326                   * Note: sd_reserve_release sends a command to the device
8626 8327                   * via the sd_ioctlcmd() path, and can sleep.
8627 8328                   */
8628 8329                  if (sd_reserve_release(dev, SD_RELEASE) != 0) {
8629 8330                          SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8630      -                            "sd_dr_detach: Cannot release reservation \n");
     8331 +                            "%s: cannot release reservation\n", __func__);
8631 8332                  }
8632 8333          } else {
8633 8334                  mutex_exit(SD_MUTEX(un));
8634 8335          }
8635 8336  
8636 8337          /*
8637 8338           * Untimeout any reserve recover, throttle reset, restart unit
8638 8339           * and delayed broadcast timeout threads. Protect the timeout pointer
8639 8340           * from getting nulled by their callback functions.
8640 8341           */
↓ open down ↓ 36 lines elided ↑ open up ↑
8677 8378                  mutex_exit(SD_MUTEX(un));
8678 8379                  (void) untimeout(temp_id);
8679 8380          } else {
8680 8381                  mutex_exit(SD_MUTEX(un));
8681 8382          }
8682 8383  
8683 8384          /* Remove any pending reservation reclaim requests for this device */
8684 8385          sd_rmv_resv_reclaim_req(dev);
8685 8386  
8686 8387          mutex_enter(SD_MUTEX(un));
     8388 +        if (un->un_retry_timeid != NULL) {
     8389 +                timeout_id_t temp_id = un->un_retry_timeid;
     8390 +                un->un_retry_timeid = NULL;
     8391 +                mutex_exit(SD_MUTEX(un));
     8392 +                (void) untimeout(temp_id);
     8393 +                mutex_enter(SD_MUTEX(un));
8687 8394  
     8395 +                if (un->un_retry_bp != NULL) {
     8396 +                        un->un_retry_bp->av_forw = un->un_waitq_headp;
     8397 +                        un->un_waitq_headp = un->un_retry_bp;
     8398 +                        if (un->un_waitq_tailp == NULL)
     8399 +                                un->un_waitq_tailp = un->un_retry_bp;
     8400 +                        un->un_retry_bp = NULL;
     8401 +                        un->un_retry_statp = NULL;
     8402 +                }
     8403 +        }
     8404 +
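The retry-timer block above uses the same cancellation idiom applied to every timer in this function: capture the id and clear the field under SD_MUTEX, then drop the mutex before calling untimeout(9F), because untimeout() may have to wait for a callback that is already running and that will itself want SD_MUTEX. A stripped-down sketch of the idiom, with hypothetical field names (headers shown are indicative):

    #include <sys/types.h>
    #include <sys/conf.h>
    #include <sys/ksynch.h>

    typedef struct unit {
            kmutex_t        u_lock;
            timeout_id_t    u_retry_tid;    /* armed via timeout(9F) */
    } unit_t;

    static void
    unit_cancel_retry_timer(unit_t *un)
    {
            timeout_id_t tid;

            mutex_enter(&un->u_lock);
            tid = un->u_retry_tid;
            un->u_retry_tid = NULL;         /* callback re-checks this under u_lock */
            mutex_exit(&un->u_lock);        /* drop the lock: untimeout() may block */

            if (tid != NULL)
                    (void) untimeout(tid);  /* waits out an already-running callback */
    }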
     8405 +        if (DEVI_IS_GONE(SD_DEVINFO(un))) {
     8406 +                /* abort in-flight IO */
     8407 +                (void) scsi_abort(SD_ADDRESS(un), NULL);
     8408 +                /* abort pending IO */
     8409 +                un->un_failfast_state = SD_FAILFAST_ACTIVE;
     8410 +                un->un_failfast_bp = NULL;
     8411 +                sd_failfast_flushq(un, B_TRUE);
     8412 +        }
     8413 +
8688 8414          /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */
8689 8415          if (un->un_direct_priority_timeid != NULL) {
8690 8416                  timeout_id_t temp_id = un->un_direct_priority_timeid;
8691 8417                  un->un_direct_priority_timeid = NULL;
8692 8418                  mutex_exit(SD_MUTEX(un));
8693 8419                  (void) untimeout(temp_id);
8694 8420                  mutex_enter(SD_MUTEX(un));
8695 8421          }
8696 8422  
8697 8423          /* Cancel any active multi-host disk watch thread requests */
8698 8424          if (un->un_mhd_token != NULL) {
8699 8425                  mutex_exit(SD_MUTEX(un));
8700      -                 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token));
8701 8426                  if (scsi_watch_request_terminate(un->un_mhd_token,
8702 8427                      SCSI_WATCH_TERMINATE_NOWAIT)) {
8703 8428                          SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8704      -                            "sd_dr_detach: Cannot cancel mhd watch request\n");
     8429 +                            "%s: cannot cancel mhd watch request\n", __func__);
8705 8430                          /*
8706 8431                           * Note: We are returning here after having removed
8707 8432                           * some driver timeouts above. This is consistent with
8708 8433                           * the legacy implementation but perhaps the watch
8709 8434                           * terminate call should be made with the wait flag set.
8710 8435                           */
8711      -                        goto err_stillbusy;
     8436 +                        goto err_remove_event;
8712 8437                  }
8713 8438                  mutex_enter(SD_MUTEX(un));
8714 8439                  un->un_mhd_token = NULL;
8715 8440          }
8716 8441  
8717 8442          if (un->un_swr_token != NULL) {
8718 8443                  mutex_exit(SD_MUTEX(un));
8719      -                _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token));
8720 8444                  if (scsi_watch_request_terminate(un->un_swr_token,
8721 8445                      SCSI_WATCH_TERMINATE_NOWAIT)) {
8722 8446                          SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8723      -                            "sd_dr_detach: Cannot cancel swr watch request\n");
     8447 +                            "%s: cannot cancel swr watch request\n", __func__);
8724 8448                          /*
8725 8449                           * Note: We are returning here after having removed
8726 8450                           * some driver timeouts above. This is consistent with
8727 8451                           * the legacy implementation but perhaps the watch
8728 8452                           * terminate call should be made with the wait flag set.
8729 8453                           */
8730      -                        goto err_stillbusy;
     8454 +                        goto err_remove_event;
8731 8455                  }
8732 8456                  mutex_enter(SD_MUTEX(un));
8733 8457                  un->un_swr_token = NULL;
8734 8458          }
8735 8459  
8736      -        mutex_exit(SD_MUTEX(un));
8737      -
8738 8460          /*
8739 8461           * Clear any scsi_reset_notifies. We clear the reset notifies
8740 8462           * if we have not registered one.
8741 8463           * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX!
8742 8464           */
     8465 +        mutex_exit(SD_MUTEX(un));
8743 8466          (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
8744 8467              sd_mhd_reset_notify_cb, (caddr_t)un);
8745 8468  
8746      -        /*
8747      -         * protect the timeout pointers from getting nulled by
8748      -         * their callback functions during the cancellation process.
8749      -         * In such a scenario untimeout can be invoked with a null value.
8750      -         */
8751      -        _NOTE(NO_COMPETING_THREADS_NOW);
8752      -
8753 8469          mutex_enter(&un->un_pm_mutex);
8754 8470          if (un->un_pm_idle_timeid != NULL) {
8755 8471                  timeout_id_t temp_id = un->un_pm_idle_timeid;
8756 8472                  un->un_pm_idle_timeid = NULL;
8757 8473                  mutex_exit(&un->un_pm_mutex);
8758 8474  
8759 8475                  /*
8760 8476                   * Timeout is active; cancel it.
8761 8477                   * Note that it'll never be active on a device
8762 8478                   * that does not support PM therefore we don't
↓ open down ↓ 21 lines elided ↑ open up ↑
8784 8500                   */
8785 8501                  (void) untimeout(temp_id);
8786 8502                  (void) pm_idle_component(SD_DEVINFO(un), 0);
8787 8503  
8788 8504          } else {
8789 8505                  mutex_exit(&un->un_pm_mutex);
8790 8506                  if ((un->un_f_pm_is_enabled == TRUE) &&
8791 8507                      (pm_lower_power(SD_DEVINFO(un), 0, SD_PM_STATE_STOPPED(un))
8792 8508                      != DDI_SUCCESS)) {
8793 8509                          SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8794      -                    "sd_dr_detach: Lower power request failed, ignoring.\n");
     8510 +                            "%s: lower power request failed, ignoring\n",
     8511 +                            __func__);
8795 8512                          /*
8796      -                         * Fix for bug: 4297749, item # 13
8797 8513                           * The above test now includes a check to see if PM is
8798 8514                           * supported by this device before call
8799 8515                           * pm_lower_power().
8800 8516                           * Note, the following is not dead code. The call to
8801 8517                           * pm_lower_power above will generate a call back into
8802 8518                           * our sdpower routine which might result in a timeout
8803 8519                           * handler getting activated. Therefore the following
8804 8520                           * code is valid and necessary.
8805 8521                           */
8806 8522                          mutex_enter(&un->un_pm_mutex);
↓ open down ↓ 42 lines elided ↑ open up ↑
8849 8565                  if ((un->un_insert_event != NULL) &&
8850 8566                      (ddi_remove_event_handler(un->un_insert_cb_id) !=
8851 8567                      DDI_SUCCESS)) {
8852 8568                          /*
8853 8569                           * Note: We are returning here after having done
8854 8570                           * substantial cleanup above. This is consistent
8855 8571                           * with the legacy implementation but this may not
8856 8572                           * be the right thing to do.
8857 8573                           */
8858 8574                          SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8859      -                            "sd_dr_detach: Cannot cancel insert event\n");
     8575 +                            "%s: cannot cancel insert event\n", __func__);
8860 8576                          goto err_remove_event;
8861 8577                  }
8862 8578                  un->un_insert_event = NULL;
8863 8579  
8864 8580                  if ((un->un_remove_event != NULL) &&
8865 8581                      (ddi_remove_event_handler(un->un_remove_cb_id) !=
8866 8582                      DDI_SUCCESS)) {
8867 8583                          /*
8868 8584                           * Note: We are returning here after having done
8869 8585                           * substantial cleanup above. This is consistent
8870 8586                           * with the legacy implementation but this may not
8871 8587                           * be the right thing to do.
8872 8588                           */
8873 8589                          SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8874      -                            "sd_dr_detach: Cannot cancel remove event\n");
     8590 +                            "%s: cannot cancel remove event\n", __func__);
8875 8591                          goto err_remove_event;
8876 8592                  }
8877 8593                  un->un_remove_event = NULL;
8878 8594          }
8879 8595  
8880 8596          /* Do not free the softstate if the callback routine is active */
8881 8597          sd_sync_with_callback(un);
8882 8598  
     8599 +no_attach_cleanup:
     8600 +        /*
     8601 +         * The driver must wait, or at least attempt to wait, for any
     8602 +         * commands still outstanding in the driver.
     8603 +         */
     8604 +        mutex_enter(SD_MUTEX(un));
     8605 +
     8606 +        while (un->un_ncmds_in_driver != 0) {
     8607 +                clock_t max_delay = ddi_get_lbolt() + SEC_TO_TICK(30);
     8608 +                un->un_f_detach_waiting = 1;
     8609 +                if (cv_timedwait(&un->un_detach_cv, SD_MUTEX(un),
     8610 +                    max_delay) == -1) {
     8611 +                        break;
     8612 +                }
     8613 +        }
     8614 +
     8615 +        un->un_f_detach_waiting = 0;
     8616 +        mutex_exit(SD_MUTEX(un));
     8617 +
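cv_timedwait(9F) returns -1 when the absolute deadline passes without a wakeup, which is what lets the loop above stop waiting after roughly 30 seconds of silence rather than hanging detach forever. A minimal sketch of the bounded-drain idiom, assuming a simplified softstate and a hypothetical completion-side broadcast on the condition variable:

    #include <sys/types.h>
    #include <sys/ksynch.h>
    #include <sys/sunddi.h>
    #include <sys/time.h>

    typedef struct unit {
            kmutex_t        u_lock;
            kcondvar_t      u_drain_cv;     /* broadcast when a command retires */
            int             u_cmds_in_driver;
    } unit_t;

    /* Wait for outstanding commands to drain; give up after a quiet interval. */
    static boolean_t
    unit_drain(unit_t *un, int quiet_secs)
    {
            boolean_t idle;

            mutex_enter(&un->u_lock);
            while (un->u_cmds_in_driver != 0) {
                    clock_t deadline = ddi_get_lbolt() + SEC_TO_TICK(quiet_secs);

                    if (cv_timedwait(&un->u_drain_cv, &un->u_lock, deadline) == -1)
                            break;          /* no completions for quiet_secs; stop */
            }
            idle = (un->u_cmds_in_driver == 0);
            mutex_exit(&un->u_lock);
            return (idle);
    }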
8883 8618          cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
8884 8619          cmlb_free_handle(&un->un_cmlbhandle);
8885 8620  
8886 8621          /*
8887 8622           * Hold the detach mutex here, to make sure that no other threads ever
8888 8623           * can access a (partially) freed soft state structure.
8889 8624           */
8890 8625          mutex_enter(&sd_detach_mutex);
8891 8626  
8892 8627          /*
↓ open down ↓ 36 lines elided ↑ open up ↑
8929 8664          /*
8930 8665           * kstat cleanup is done in detach for all device types (4363169).
8931 8666           * We do not want to fail detach if the device kstats are not deleted
8932 8667           * since there is a confusion about the devo_refcnt for the device.
8933 8668           * We just delete the kstats and let detach complete successfully.
8934 8669           */
8935 8670          if (un->un_stats != NULL) {
8936 8671                  kstat_delete(un->un_stats);
8937 8672                  un->un_stats = NULL;
8938 8673          }
     8674 +        if (un->un_unmapstats != NULL) {
     8675 +                kstat_delete(un->un_unmapstats_ks);
     8676 +                un->un_unmapstats_ks = NULL;
     8677 +                un->un_unmapstats = NULL;
     8678 +        }
     8679 +        if (un->un_lat_ksp != NULL) {
     8680 +                kstat_delete(un->un_lat_ksp);
     8681 +                un->un_lat_stats = NULL;
     8682 +                un->un_lat_ksp = NULL;
     8683 +        }
8939 8684          if (un->un_errstats != NULL) {
8940 8685                  kstat_delete(un->un_errstats);
8941 8686                  un->un_errstats = NULL;
8942 8687          }
8943 8688  
8944 8689          /* Remove partition stats */
8945 8690          if (un->un_f_pkstats_enabled) {
8946 8691                  for (i = 0; i < NSDMAP; i++) {
8947 8692                          if (un->un_pstats[i] != NULL) {
8948 8693                                  kstat_delete(un->un_pstats[i]);
↓ open down ↓ 10 lines elided ↑ open up ↑
8959 8704          ddi_prop_remove_all(devi);
8960 8705  
8961 8706          mutex_destroy(&un->un_pm_mutex);
8962 8707          cv_destroy(&un->un_pm_busy_cv);
8963 8708  
8964 8709          cv_destroy(&un->un_wcc_cv);
8965 8710  
8966 8711          /* Open/close semaphore */
8967 8712          sema_destroy(&un->un_semoclose);
8968 8713  
     8714 +        /* Used to wait for outstanding commands */
     8715 +        cv_destroy(&un->un_detach_cv);
     8716 +
8969 8717          /* Removable media condvar. */
8970 8718          cv_destroy(&un->un_state_cv);
8971 8719  
8972 8720          /* Suspend/resume condvar. */
8973 8721          cv_destroy(&un->un_suspend_cv);
8974 8722          cv_destroy(&un->un_disk_busy_cv);
8975 8723  
8976 8724          sd_free_rqs(un);
8977 8725  
8978 8726          /* Free up soft state */
↓ open down ↓ 15 lines elided ↑ open up ↑
8994 8742           * to set the tagged queuing capability in HBA.
8995 8743           * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to
8996 8744           * check if the device is parallel SCSI. However, we don't need to
8997 8745           * check here because we've already checked during attach. No device
8998 8746           * that is not parallel SCSI is in the chain.
8999 8747           */
9000 8748          if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) {
9001 8749                  sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH);
9002 8750          }
9003 8751  
     8752 +        ddi_remove_minor_node(devi, NULL);
     8753 +        (void) devfs_clean(devi, NULL, DV_CLEAN_FORCE);
     8754 +
9004 8755          return (DDI_SUCCESS);
9005 8756  
9006 8757  err_notclosed:
9007 8758          mutex_exit(SD_MUTEX(un));
9008 8759  
9009      -err_stillbusy:
9010      -        _NOTE(NO_COMPETING_THREADS_NOW);
9011      -
9012 8760  err_remove_event:
9013 8761          mutex_enter(&sd_detach_mutex);
9014 8762          un->un_detach_count--;
9015 8763          mutex_exit(&sd_detach_mutex);
9016 8764  
9017      -        SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n");
     8765 +        SD_TRACE(SD_LOG_ATTACH_DETACH, un, "%s: exit failure\n", __func__);
9018 8766          return (DDI_FAILURE);
9019 8767  }
9020 8768  
9021 8769  
9022 8770  /*
9023 8771   *    Function: sd_create_errstats
9024 8772   *
9025 8773   * Description: This routine instantiates the device error stats.
9026 8774   *
9027 8775   *              Note: During attach the stats are instantiated first so they are
↓ open down ↓ 186 lines elided ↑ open up ↑
9214 8962          char    kstatname[KSTAT_STRLEN];
9215 8963          int     instance;
9216 8964          int     i;
9217 8965          diskaddr_t      nblks = 0;
9218 8966          char    *partname = NULL;
9219 8967  
9220 8968          ASSERT(un != NULL);
9221 8969  
9222 8970          instance = ddi_get_instance(SD_DEVINFO(un));
9223 8971  
9224      -        /* Note:x86: is this a VTOC8/VTOC16 difference? */
     8972 +        /* XXX is this a VTOC8/VTOC16 difference? */
9225 8973          for (i = 0; i < NSDMAP; i++) {
9226      -
9227 8974                  if (cmlb_partinfo(un->un_cmlbhandle, i,
9228 8975                      &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
9229 8976                          continue;
9230 8977                  mutex_enter(SD_MUTEX(un));
9231 8978  
9232 8979                  if ((un->un_pstats[i] == NULL) &&
9233 8980                      (nblks != 0)) {
9234 8981  
9235 8982                          (void) snprintf(kstatname, sizeof (kstatname),
9236 8983                              "%s%d,%s", sd_label, instance,
↓ open down ↓ 4 lines elided ↑ open up ↑
9241 8988                              1, KSTAT_FLAG_PERSISTENT);
9242 8989                          if (un->un_pstats[i] != NULL) {
9243 8990                                  un->un_pstats[i]->ks_lock = SD_MUTEX(un);
9244 8991                                  kstat_install(un->un_pstats[i]);
9245 8992                          }
9246 8993                  }
9247 8994                  mutex_exit(SD_MUTEX(un));
9248 8995          }
9249 8996  }
9250 8997  
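The partition statistics created above are ordinary KSTAT_TYPE_IO kstats, one per non-empty partition, with ks_lock pointed at the unit mutex so the I/O counters are updated under the same lock as the rest of the per-unit state. A minimal sketch of creating and installing one such kstat (module, class, and helper names are illustrative):

    #include <sys/types.h>
    #include <sys/kstat.h>
    #include <sys/ksynch.h>
    #include <sys/systm.h>          /* snprintf */

    static kstat_t *
    make_partition_kstat(int instance, char *partname, kmutex_t *unit_lock)
    {
            char name[KSTAT_STRLEN];
            kstat_t *ksp;

            (void) snprintf(name, sizeof (name), "sd%d,%s", instance, partname);

            ksp = kstat_create("sd", instance, name, "partition",
                KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
            if (ksp != NULL) {
                    ksp->ks_lock = unit_lock;  /* counters updated under unit lock */
                    kstat_install(ksp);
            }
            return (ksp);
    }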
9251      -
9252      -#if (defined(__fibre))
9253 8998  /*
9254      - *    Function: sd_init_event_callbacks
9255      - *
9256      - * Description: This routine initializes the insertion and removal event
9257      - *              callbacks. (fibre only)
9258      - *
9259      - *   Arguments: un - driver soft state (unit) structure
9260      - *
9261      - *     Context: Kernel thread context
9262      - */
9263      -
9264      -static void
9265      -sd_init_event_callbacks(struct sd_lun *un)
9266      -{
9267      -        ASSERT(un != NULL);
9268      -
9269      -        if ((un->un_insert_event == NULL) &&
9270      -            (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
9271      -            &un->un_insert_event) == DDI_SUCCESS)) {
9272      -                /*
9273      -                 * Add the callback for an insertion event
9274      -                 */
9275      -                (void) ddi_add_event_handler(SD_DEVINFO(un),
9276      -                    un->un_insert_event, sd_event_callback, (void *)un,
9277      -                    &(un->un_insert_cb_id));
9278      -        }
9279      -
9280      -        if ((un->un_remove_event == NULL) &&
9281      -            (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
9282      -            &un->un_remove_event) == DDI_SUCCESS)) {
9283      -                /*
9284      -                 * Add the callback for a removal event
9285      -                 */
9286      -                (void) ddi_add_event_handler(SD_DEVINFO(un),
9287      -                    un->un_remove_event, sd_event_callback, (void *)un,
9288      -                    &(un->un_remove_cb_id));
9289      -        }
9290      -}
9291      -
9292      -
9293      -/*
9294      - *    Function: sd_event_callback
9295      - *
9296      - * Description: This routine handles insert/remove events (photon). The
9297      - *              state is changed to OFFLINE which can be used to supress
9298      - *              error msgs. (fibre only)
9299      - *
9300      - *   Arguments: un - driver soft state (unit) structure
9301      - *
9302      - *     Context: Callout thread context
9303      - */
9304      -/* ARGSUSED */
9305      -static void
9306      -sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
9307      -    void *bus_impldata)
9308      -{
9309      -        struct sd_lun *un = (struct sd_lun *)arg;
9310      -
9311      -        _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
9312      -        if (event == un->un_insert_event) {
9313      -                SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
9314      -                mutex_enter(SD_MUTEX(un));
9315      -                if (un->un_state == SD_STATE_OFFLINE) {
9316      -                        if (un->un_last_state != SD_STATE_SUSPENDED) {
9317      -                                un->un_state = un->un_last_state;
9318      -                        } else {
9319      -                                /*
9320      -                                 * We have gone through SUSPEND/RESUME while
9321      -                                 * we were offline. Restore the last state
9322      -                                 */
9323      -                                un->un_state = un->un_save_state;
9324      -                        }
9325      -                }
9326      -                mutex_exit(SD_MUTEX(un));
9327      -
9328      -        _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
9329      -        } else if (event == un->un_remove_event) {
9330      -                SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
9331      -                mutex_enter(SD_MUTEX(un));
9332      -                /*
9333      -                 * We need to handle an event callback that occurs during
9334      -                 * the suspend operation, since we don't prevent it.
9335      -                 */
9336      -                if (un->un_state != SD_STATE_OFFLINE) {
9337      -                        if (un->un_state != SD_STATE_SUSPENDED) {
9338      -                                New_state(un, SD_STATE_OFFLINE);
9339      -                        } else {
9340      -                                un->un_last_state = SD_STATE_OFFLINE;
9341      -                        }
9342      -                }
9343      -                mutex_exit(SD_MUTEX(un));
9344      -        } else {
9345      -                scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
9346      -                    "!Unknown event\n");
9347      -        }
9348      -
9349      -}
9350      -#endif
9351      -
9352      -/*
9353 8999   * Values related to caching mode page depending on whether the unit is ATAPI.
9354 9000   */
9355 9001  #define SDC_CDB_GROUP(un) ((un->un_f_cfg_is_atapi == TRUE) ? \
9356 9002          CDB_GROUP1 : CDB_GROUP0)
9357 9003  #define SDC_HDRLEN(un) ((un->un_f_cfg_is_atapi == TRUE) ? \
9358 9004          MODE_HEADER_LENGTH_GRP2 : MODE_HEADER_LENGTH)
9359 9005  /*
9360 9006   * Use mode_cache_scsi3 to ensure we get all of the mode sense data, otherwise
9361 9007   * the mode select will fail (mode_cache_scsi3 is a superset of mode_caching).
9362 9008   */
↓ open down ↓ 382 lines elided ↑ open up ↑
9745 9391              un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) {
9746 9392                  mutex_exit(SD_MUTEX(un));
9747 9393                  /* collect page 86 data if available */
9748 9394                  inq86 = kmem_zalloc(inq86_len, KM_SLEEP);
9749 9395  
9750 9396                  rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len,
9751 9397                      0x01, 0x86, &inq86_resid);
9752 9398  
9753 9399                  if (rval == 0 && (inq86_len - inq86_resid > 6)) {
9754 9400                          SD_TRACE(SD_LOG_COMMON, un,
9755      -                            "sd_get_nv_sup: \
9756      -                            successfully get VPD page: %x \
9757      -                            PAGE LENGTH: %x BYTE 6: %x\n",
     9401 +                            "sd_get_nv_sup: successfully get VPD page: %x "
     9402 +                            "PAGE LENGTH: %x BYTE 6: %x\n",
9758 9403                              inq86[1], inq86[3], inq86[6]);
9759 9404  
9760 9405                          mutex_enter(SD_MUTEX(un));
9761 9406                          /*
9762 9407                           * check the value of NV_SUP bit: only if the device
9763 9408                           * reports NV_SUP bit to be 1, the
9764 9409                           * un_f_sync_nv_supported bit will be set to true.
9765 9410                           */
9766 9411                          if (inq86[6] & SD_VPD_NV_SUP) {
9767 9412                                  un->un_f_sync_nv_supported = TRUE;
↓ open down ↓ 321 lines elided ↑ open up ↑
10089 9734  
10090 9735          dev = *dev_p;
10091 9736          instance = SDUNIT(dev);
10092 9737          mutex_enter(&sd_detach_mutex);
10093 9738  
10094 9739          /*
10095 9740           * Fail the open if there is no softstate for the instance, or
10096 9741           * if another thread somewhere is trying to detach the instance.
10097 9742           */
10098 9743          if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
10099      -            (un->un_detach_count != 0)) {
     9744 +            un->un_detach_count != 0 || DEVI_IS_GONE(SD_DEVINFO(un))) {
10100 9745                  mutex_exit(&sd_detach_mutex);
10101 9746                  /*
10102      -                 * The probe cache only needs to be cleared when open (9e) fails
10103      -                 * with ENXIO (4238046).
     9747 +                 * The probe cache only needs to be cleared when open (9E) fails
     9748 +                 * with ENXIO.
10104 9749                   */
10105      -                /*
10106      -                 * un-conditionally clearing probe cache is ok with
10107      -                 * separate sd/ssd binaries
10108      -                 * x86 platform can be an issue with both parallel
10109      -                 * and fibre in 1 binary
10110      -                 */
10111 9750                  sd_scsi_clear_probe_cache();
10112 9751                  return (ENXIO);
10113 9752          }
10114 9753  
10115 9754          /*
10116 9755           * The un_layer_count is to prevent another thread in specfs from
10117 9756           * trying to detach the instance, which can happen when we are
10118 9757           * called from a higher-layer driver instead of thru specfs.
10119 9758           * This will not be needed when DDI provides a layered driver
10120 9759           * interface that allows specfs to know that an instance is in
↓ open down ↓ 30 lines elided ↑ open up ↑
10151 9790  
10152 9791          /*
10153 9792           * All device accesses go thru sdstrategy() where we check
10154 9793           * on suspend status but there could be a scsi_poll command,
10155 9794           * which bypasses sdstrategy(), so we need to check pm
10156 9795           * status.
10157 9796           */
10158 9797  
10159 9798          if (!nodelay) {
10160 9799                  while ((un->un_state == SD_STATE_SUSPENDED) ||
10161      -                    (un->un_state == SD_STATE_PM_CHANGING)) {
     9800 +                    (un->un_state == SD_STATE_PM_CHANGING) ||
     9801 +                    (un->un_state == SD_STATE_ATTACHING)) {
10162 9802                          cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10163 9803                  }
10164      -
10165 9804                  mutex_exit(SD_MUTEX(un));
10166 9805                  if (sd_pm_entry(un) != DDI_SUCCESS) {
10167 9806                          rval = EIO;
10168 9807                          SD_ERROR(SD_LOG_OPEN_CLOSE, un,
10169 9808                              "sdopen: sd_pm_entry failed\n");
10170 9809                          goto open_failed_with_pm;
10171 9810                  }
10172 9811                  mutex_enter(SD_MUTEX(un));
     9812 +        } else if (un->un_state == SD_STATE_ATTACH_FAILED) {
     9813 +                mutex_exit(SD_MUTEX(un));
     9814 +                rval = EIO;
     9815 +                SD_ERROR(SD_LOG_OPEN_CLOSE, un,
     9816 +                    "sdopen: attach failed, can't open\n");
     9817 +                goto open_failed_not_attached;
10173 9818          }
10174 9819  
10175 9820          /* check for previous exclusive open */
10176 9821          SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
10177 9822          SD_TRACE(SD_LOG_OPEN_CLOSE, un,
10178 9823              "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
10179 9824              un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
10180 9825  
10181 9826          if (un->un_exclopen & (partmask)) {
10182 9827                  goto excl_open_fail;
↓ open down ↓ 70 lines elided ↑ open up ↑
10253 9898                          mutex_enter(SD_MUTEX(un));
10254 9899                  }
10255 9900  
10256 9901                  if ((rval != SD_READY_VALID) ||
10257 9902                      (!ISCD(un) && nblks <= 0)) {
10258 9903                          rval = un->un_f_has_removable_media ? ENXIO : EIO;
10259 9904                          SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10260 9905                              "device not ready or invalid disk block value\n");
10261 9906                          goto open_fail;
10262 9907                  }
10263      -#if defined(__i386) || defined(__amd64)
10264 9908          } else {
10265 9909                  uchar_t *cp;
10266 9910                  /*
10267 9911                   * x86 requires special nodelay handling, so that p0 is
10268 9912                   * always defined and accessible.
10269 9913                   * Invalidate geometry only if device is not already open.
10270 9914                   */
10271 9915                  cp = &un->un_ocmap.chkd[0];
10272 9916                  while (cp < &un->un_ocmap.chkd[OCSIZE]) {
10273 9917                          if (*cp != (uchar_t)0) {
10274 9918                                  break;
10275 9919                          }
10276 9920                          cp++;
10277 9921                  }
10278 9922                  if (cp == &un->un_ocmap.chkd[OCSIZE]) {
10279 9923                          mutex_exit(SD_MUTEX(un));
10280 9924                          cmlb_invalidate(un->un_cmlbhandle,
10281 9925                              (void *)SD_PATH_DIRECT);
10282 9926                          mutex_enter(SD_MUTEX(un));
10283 9927                  }
10284      -
10285      -#endif
10286 9928          }
10287 9929  
10288 9930          if (otyp == OTYP_LYR) {
10289 9931                  un->un_ocmap.lyropen[part]++;
10290 9932          } else {
10291 9933                  un->un_ocmap.regopen[otyp] |= partmask;
10292 9934          }
10293 9935  
10294 9936          /* Set up open and exclusive open flags */
10295 9937          if (flag & FEXCL) {
↓ open down ↓ 23 lines elided ↑ open up ↑
10319 9961                                  mutex_enter(SD_MUTEX(un));
10320 9962                          }
10321 9963                  } else {
10322 9964                          mutex_enter(SD_MUTEX(un));
10323 9965                  }
10324 9966          }
10325 9967  
10326 9968          SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10327 9969              "open of part %d type %d\n", part, otyp);
10328 9970  
     9971 +        /*
     9972 +         * If we made it here, the disk is alive.
     9973 +         * Make sure it is set to normal state.
     9974 +         */
     9975 +        New_state(un, SD_STATE_NORMAL);
     9976 +
10329 9977          mutex_exit(SD_MUTEX(un));
10330 9978          if (!nodelay) {
10331 9979                  sd_pm_exit(un);
10332 9980          }
10333 9981  
10334 9982          sema_v(&un->un_semoclose);
10335 9983  
10336 9984          mutex_enter(&sd_detach_mutex);
10337 9985          un->un_opens_in_progress--;
10338 9986          mutex_exit(&sd_detach_mutex);
↓ open down ↓ 8 lines elided ↑ open up ↑
10347 9995  open_fail:
10348 9996          mutex_exit(SD_MUTEX(un));
10349 9997  
10350 9998          /*
10351 9999           * On a failed open we must exit the pm management.
10352 10000           */
10353 10001          if (!nodelay) {
10354 10002                  sd_pm_exit(un);
10355 10003          }
10356 10004  open_failed_with_pm:
     10005 +open_failed_not_attached:
10357 10006          sema_v(&un->un_semoclose);
10358 10007  
10359 10008          mutex_enter(&sd_detach_mutex);
10360 10009          un->un_opens_in_progress--;
10361 10010          if (otyp == OTYP_LYR) {
10362 10011                  un->un_layer_count--;
10363 10012          }
10364 10013          mutex_exit(&sd_detach_mutex);
10365 10014  
10366 10015          return (rval);
↓ open down ↓ 22 lines elided ↑ open up ↑
10389 10038          uchar_t         *cp;
10390 10039          int             part;
10391 10040          int             nodelay;
10392 10041          int             rval = 0;
10393 10042  
10394 10043          /* Validate the open type */
10395 10044          if (otyp >= OTYPCNT) {
10396 10045                  return (ENXIO);
10397 10046          }
10398 10047  
     10048 +        /* Hold the detach mutex so a detach cannot start mid-close */
     10049 +        mutex_enter(&sd_detach_mutex);
     10050 +
10399 10051          if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
     10052 +                mutex_exit(&sd_detach_mutex);
10400 10053                  return (ENXIO);
10401 10054          }
10402 10055  
10403 10056          part = SDPART(dev);
10404 10057          nodelay = flag & (FNDELAY | FNONBLOCK);
10405 10058  
10406 10059          SD_TRACE(SD_LOG_OPEN_CLOSE, un,
10407 10060              "sdclose: close of part %d type %d\n", part, otyp);
10408 10061  
10409 10062          /*
10410 10063           * We use a semaphore here in order to serialize
10411 10064           * open and close requests on the device.
10412 10065           */
10413 10066          sema_p(&un->un_semoclose);
10414 10067  
10415 10068          mutex_enter(SD_MUTEX(un));
10416 10069  
10417      -        /* Don't proceed if power is being changed. */
10418      -        while (un->un_state == SD_STATE_PM_CHANGING) {
     10070 +        /* Don't proceed if power is being changed or we're still attaching. */
     10071 +        while ((un->un_state == SD_STATE_PM_CHANGING) ||
     10072 +            (un->un_state == SD_STATE_ATTACHING)) {
10419 10073                  cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10420 10074          }
10421 10075  
10422 10076          if (un->un_exclopen & (1 << part)) {
10423 10077                  un->un_exclopen &= ~(1 << part);
10424 10078          }
10425 10079  
10426 10080          /* Update the open partition map */
10427 10081          if (otyp == OTYP_LYR) {
10428 10082                  un->un_ocmap.lyropen[part] -= 1;
↓ open down ↓ 20 lines elided ↑ open up ↑
10449 10103  
10450 10104                  if (un->un_state == SD_STATE_OFFLINE) {
10451 10105                          if (un->un_f_is_fibre == FALSE) {
10452 10106                                  scsi_log(SD_DEVINFO(un), sd_label,
10453 10107                                      CE_WARN, "offline\n");
10454 10108                          }
10455 10109                          mutex_exit(SD_MUTEX(un));
10456 10110                          cmlb_invalidate(un->un_cmlbhandle,
10457 10111                              (void *)SD_PATH_DIRECT);
10458 10112                          mutex_enter(SD_MUTEX(un));
10459      -
10460      -                } else {
     10113 +                } else if (un->un_state != SD_STATE_ATTACH_FAILED) {
10461 10114                          /*
10462 10115                           * Flush any outstanding writes in NVRAM cache.
10463 10116                           * Note: SYNCHRONIZE CACHE is an optional SCSI-2
10464 10117                           * cmd, it may not work for non-Pluto devices.
10465 10118                           * SYNCHRONIZE CACHE is not required for removables,
10466 10119                           * except DVD-RAM drives.
10467 10120                           *
10468 10121                           * Also note: because SYNCHRONIZE CACHE is currently
10469 10122                           * the only command issued here that requires the
10470 10123                           * drive be powered up, only do the power up before
10471 10124                           * sending the Sync Cache command. If additional
10472 10125                           * commands are added which require a powered up
10473 10126                           * drive, the following sequence may have to change.
10474      -                         *
10475      -                         * And finally, note that parallel SCSI on SPARC
10476      -                         * only issues a Sync Cache to DVD-RAM, a newly
10477      -                         * supported device.
10478 10127                           */
10479      -#if defined(__i386) || defined(__amd64)
10480      -                        if ((un->un_f_sync_cache_supported &&
     10128 +                        if (!DEVI_IS_GONE(SD_DEVINFO(un)) &&
     10129 +                            ((un->un_f_sync_cache_supported &&
10481 10130                              un->un_f_sync_cache_required) ||
10482      -                            un->un_f_dvdram_writable_device == TRUE) {
10483      -#else
10484      -                        if (un->un_f_dvdram_writable_device == TRUE) {
10485      -#endif
     10131 +                            un->un_f_dvdram_writable_device == TRUE)) {
10486 10132                                  mutex_exit(SD_MUTEX(un));
10487 10133                                  if (sd_pm_entry(un) == DDI_SUCCESS) {
10488 10134                                          rval =
10489 10135                                              sd_send_scsi_SYNCHRONIZE_CACHE(un,
10490 10136                                              NULL);
10491 10137                                          /* ignore error if not supported */
10492 10138                                          if (rval == ENOTSUP) {
10493 10139                                                  rval = 0;
10494 10140                                          } else if (rval != 0) {
10495 10141                                                  rval = EIO;
↓ open down ↓ 29 lines elided ↑ open up ↑
10525 10171                                              (nodelay != 0)) {
10526 10172                                                  rval = ENXIO;
10527 10173                                          }
10528 10174                                  } else {
10529 10175                                          rval = EIO;
10530 10176                                  }
10531 10177                                  mutex_enter(SD_MUTEX(un));
10532 10178                          }
10533 10179  
10534 10180                          /*
     10181 +                         * Clear any active failfast state on the device
     10182 +                         * so it does not carry over into a future open.
     10183 +                         */
     10184 +                        un->un_failfast_state = SD_FAILFAST_INACTIVE;
     10185 +
     10186 +                        /*
10535 10187                           * If a device has removable media, invalidate all
10536 10188                           * parameters related to media, such as geometry,
10537 10189                           * blocksize, and blockcount.
10538 10190                           */
10539 10191                          if (un->un_f_has_removable_media) {
10540 10192                                  sr_ejected(un);
10541 10193                          }
10542 10194  
10543 10195                          /*
10544 10196                           * Destroy the cache (if it exists) which was
↓ open down ↓ 16 lines elided ↑ open up ↑
10561 10213                                              un->un_wm_cache);
10562 10214                                          un->un_wm_cache = NULL;
10563 10215                                  }
10564 10216                          }
10565 10217                  }
10566 10218          }
10567 10219  
10568 10220          mutex_exit(SD_MUTEX(un));
10569 10221          sema_v(&un->un_semoclose);
10570 10222  
10571      -        if (otyp == OTYP_LYR) {
10572      -                mutex_enter(&sd_detach_mutex);
10573      -                /*
10574      -                 * The detach routine may run when the layer count
10575      -                 * drops to zero.
10576      -                 */
     10223 +        if (otyp == OTYP_LYR)
10577 10224                  un->un_layer_count--;
10578      -                mutex_exit(&sd_detach_mutex);
10579      -        }
10580 10225  
     10226 +        mutex_exit(&sd_detach_mutex);
     10227 +
10581 10228          return (rval);
10582 10229  }
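
The reworked last-close path above issues SYNCHRONIZE CACHE only while the devinfo node is still present, and it clears any lingering failfast state so a past failure does not bias the next open. Below is a minimal user-space sketch of that flow; struct disk, device_is-style flags and flush_write_cache() are illustrative stand-ins, not the driver's DEVI_IS_GONE()/sd_send_scsi_SYNCHRONIZE_CACHE() interfaces.

#include <stdbool.h>
#include <errno.h>

enum failfast_state { FAILFAST_INACTIVE, FAILFAST_ACTIVE };

struct disk {
        bool    gone;                   /* models DEVI_IS_GONE() */
        bool    sync_cache_supported;
        bool    sync_cache_required;
        bool    dvdram_writable;
        enum failfast_state failfast;
};

/* Stub: pretend to issue SYNCHRONIZE CACHE; returns 0 or ENOTSUP. */
static int
flush_write_cache(struct disk *d)
{
        return (d->sync_cache_supported ? 0 : ENOTSUP);
}

/* Last-close handling modeled on the hunk above. */
static int
disk_last_close(struct disk *d)
{
        int rval = 0;

        if (!d->gone &&
            ((d->sync_cache_supported && d->sync_cache_required) ||
            d->dvdram_writable)) {
                rval = flush_write_cache(d);
                if (rval == ENOTSUP)    /* ignore "not supported" */
                        rval = 0;
                else if (rval != 0)
                        rval = EIO;
        }

        /* Do not let a stale failfast condition bias the next open. */
        d->failfast = FAILFAST_INACTIVE;
        return (rval);
}
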
10583 10230  
10584 10231  
10585 10232  /*
10586 10233   *    Function: sd_ready_and_valid
10587 10234   *
10588 10235   * Description: Test if device is ready and has a valid geometry.
10589 10236   *
10590 10237   *   Arguments: ssc - sd_ssc_t will contain un
↓ open down ↓ 75 lines elided ↑ open up ↑
10666 10313                          sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
10667 10314                  }
10668 10315  
10669 10316          } else {
10670 10317                  /*
10671 10318                   * Do a test unit ready to clear any unit attention from non-cd
10672 10319                   * devices.
10673 10320                   */
10674 10321                  mutex_exit(SD_MUTEX(un));
10675 10322  
10676      -                status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
     10323 +                status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
10677 10324                  if (status != 0) {
10678 10325                          sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10679 10326                  }
10680 10327  
10681 10328                  mutex_enter(SD_MUTEX(un));
10682 10329          }
10683 10330  
10684 10331  
10685 10332          /*
10686 10333           * If this is a non 512 block device, allocate space for
↓ open down ↓ 185 lines elided ↑ open up ↑
10872 10519   */
10873 10520  /* ARGSUSED */
10874 10521  static int
10875 10522  sdread(dev_t dev, struct uio *uio, cred_t *cred_p)
10876 10523  {
10877 10524          struct sd_lun   *un = NULL;
10878 10525          int             secmask;
10879 10526          int             err = 0;
10880 10527          sd_ssc_t        *ssc;
10881 10528  
10882      -        if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
     10529 +        if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
     10530 +            DEVI_IS_GONE(SD_DEVINFO(un)))
10883 10531                  return (ENXIO);
10884      -        }
10885 10532  
10886 10533          ASSERT(!mutex_owned(SD_MUTEX(un)));
10887 10534  
     10535 +        mutex_enter(SD_MUTEX(un));
     10536 +        while (un->un_state == SD_STATE_ATTACHING)
     10537 +                cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10888 10538  
     10539 +        if (un->un_state == SD_STATE_ATTACH_FAILED) {
     10540 +                mutex_exit(SD_MUTEX(un));
     10541 +                SD_ERROR(SD_LOG_READ_WRITE, un, "sdread: attach failed\n");
     10542 +                return (EIO);
     10543 +        }
     10544 +        mutex_exit(SD_MUTEX(un));
     10545 +
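
sdread() (and the sdwrite/sdaread/sdawrite variants that follow) now parks callers while the unit is still in SD_STATE_ATTACHING and fails with EIO once the state is SD_STATE_ATTACH_FAILED. A minimal pthreads model of that gate is sketched below, assuming a simple three-state unit; the names are illustrative and do not match the driver's structures.

#include <pthread.h>
#include <errno.h>

enum unit_state { ST_ATTACHING, ST_ATTACH_FAILED, ST_NORMAL };

struct unit_model {
        pthread_mutex_t lock;
        pthread_cond_t  cv;             /* models un_suspend_cv */
        enum unit_state state;
};

/*
 * Gate an I/O entry point: wait out "attaching", fail with EIO if the
 * attach ultimately failed, otherwise let the caller proceed.
 */
static int
io_entry_gate(struct unit_model *u)
{
        int err = 0;

        pthread_mutex_lock(&u->lock);
        while (u->state == ST_ATTACHING)
                pthread_cond_wait(&u->cv, &u->lock);
        if (u->state == ST_ATTACH_FAILED)
                err = EIO;
        pthread_mutex_unlock(&u->lock);
        return (err);
}

/* Attach side: publish the final state and wake any waiting I/O. */
static void
attach_done(struct unit_model *u, int failed)
{
        pthread_mutex_lock(&u->lock);
        u->state = failed ? ST_ATTACH_FAILED : ST_NORMAL;
        pthread_cond_broadcast(&u->cv);
        pthread_mutex_unlock(&u->lock);
}
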
10889 10546          if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
10890 10547                  mutex_enter(SD_MUTEX(un));
10891 10548                  /*
10892 10549                   * Because the call to sd_ready_and_valid will issue I/O we
10893 10550                   * must wait here if either the device is suspended or
10894 10551                   * if its power level is changing.
10895 10552                   */
10896 10553                  while ((un->un_state == SD_STATE_SUSPENDED) ||
10897 10554                      (un->un_state == SD_STATE_PM_CHANGING)) {
10898 10555                          cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10899 10556                  }
     10557 +
     10558 +                SD_BAIL_CHECK(un);
10900 10559                  un->un_ncmds_in_driver++;
10901 10560                  mutex_exit(SD_MUTEX(un));
10902 10561  
10903 10562                  /* Initialize sd_ssc_t for internal uscsi commands */
10904 10563                  ssc = sd_ssc_init(un);
10905 10564                  if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
10906 10565                          err = EIO;
10907 10566                  } else {
10908 10567                          err = 0;
10909 10568                  }
10910 10569                  sd_ssc_fini(ssc);
10911 10570  
10912 10571                  mutex_enter(SD_MUTEX(un));
10913 10572                  un->un_ncmds_in_driver--;
     10573 +                if (un->un_f_detach_waiting)
     10574 +                        cv_signal(&un->un_detach_cv);
10914 10575                  ASSERT(un->un_ncmds_in_driver >= 0);
10915 10576                  mutex_exit(SD_MUTEX(un));
10916 10577                  if (err != 0)
10917 10578                          return (err);
10918 10579          }
10919 10580  
10920 10581          /*
10921 10582           * Read requests are restricted to multiples of the system block size.
10922 10583           */
10923 10584          if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
↓ open down ↓ 39 lines elided ↑ open up ↑
10963 10624   */
10964 10625  /* ARGSUSED */
10965 10626  static int
10966 10627  sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
10967 10628  {
10968 10629          struct sd_lun   *un = NULL;
10969 10630          int             secmask;
10970 10631          int             err = 0;
10971 10632          sd_ssc_t        *ssc;
10972 10633  
10973      -        if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
     10634 +        if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
     10635 +            DEVI_IS_GONE(SD_DEVINFO(un)))
10974 10636                  return (ENXIO);
10975      -        }
10976 10637  
10977 10638          ASSERT(!mutex_owned(SD_MUTEX(un)));
10978 10639  
     10640 +        mutex_enter(SD_MUTEX(un));
     10641 +        while (un->un_state == SD_STATE_ATTACHING)
     10642 +                cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
     10643 +
     10644 +        if (un->un_state == SD_STATE_ATTACH_FAILED) {
     10645 +                mutex_exit(SD_MUTEX(un));
     10646 +                SD_ERROR(SD_LOG_READ_WRITE, un, "sdwrite: attach failed\n");
     10647 +                return (EIO);
     10648 +        }
     10649 +        mutex_exit(SD_MUTEX(un));
     10650 +
10979 10651          if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
10980 10652                  mutex_enter(SD_MUTEX(un));
10981 10653                  /*
10982 10654                   * Because the call to sd_ready_and_valid will issue I/O we
10983 10655                   * must wait here if either the device is suspended or
10984 10656                   * if its power level is changing.
10985 10657                   */
10986 10658                  while ((un->un_state == SD_STATE_SUSPENDED) ||
10987 10659                      (un->un_state == SD_STATE_PM_CHANGING)) {
10988 10660                          cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10989 10661                  }
     10662 +
     10663 +                SD_BAIL_CHECK(un);
10990 10664                  un->un_ncmds_in_driver++;
10991 10665                  mutex_exit(SD_MUTEX(un));
10992 10666  
10993 10667                  /* Initialize sd_ssc_t for internal uscsi commands */
10994 10668                  ssc = sd_ssc_init(un);
10995 10669                  if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
10996 10670                          err = EIO;
10997 10671                  } else {
10998 10672                          err = 0;
10999 10673                  }
11000 10674                  sd_ssc_fini(ssc);
11001 10675  
11002 10676                  mutex_enter(SD_MUTEX(un));
11003 10677                  un->un_ncmds_in_driver--;
11004 10678                  ASSERT(un->un_ncmds_in_driver >= 0);
     10679 +                if (un->un_f_detach_waiting)
     10680 +                        cv_signal(&un->un_detach_cv);
11005 10681                  mutex_exit(SD_MUTEX(un));
11006 10682                  if (err != 0)
11007 10683                          return (err);
11008 10684          }
11009 10685  
11010 10686          /*
11011 10687           * Write requests are restricted to multiples of the system block size.
11012 10688           */
11013 10689          if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
11014 10690              !un->un_f_enable_rmw)
↓ open down ↓ 38 lines elided ↑ open up ↑
11053 10729  /* ARGSUSED */
11054 10730  static int
11055 10731  sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
11056 10732  {
11057 10733          struct sd_lun   *un = NULL;
11058 10734          struct uio      *uio = aio->aio_uio;
11059 10735          int             secmask;
11060 10736          int             err = 0;
11061 10737          sd_ssc_t        *ssc;
11062 10738  
11063      -        if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
     10739 +        if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
     10740 +            DEVI_IS_GONE(SD_DEVINFO(un)))
11064 10741                  return (ENXIO);
11065      -        }
11066 10742  
11067 10743          ASSERT(!mutex_owned(SD_MUTEX(un)));
11068 10744  
     10745 +        mutex_enter(SD_MUTEX(un));
     10746 +        while (un->un_state == SD_STATE_ATTACHING)
     10747 +                cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
     10748 +
     10749 +        if (un->un_state == SD_STATE_ATTACH_FAILED) {
     10750 +                mutex_exit(SD_MUTEX(un));
     10751 +                SD_ERROR(SD_LOG_READ_WRITE, un, "sdaread: attach failed\n");
     10752 +                return (EIO);
     10753 +        }
     10754 +        mutex_exit(SD_MUTEX(un));
     10755 +
11069 10756          if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
11070 10757                  mutex_enter(SD_MUTEX(un));
11071 10758                  /*
11072 10759                   * Because the call to sd_ready_and_valid will issue I/O we
11073 10760                   * must wait here if either the device is suspended or
11074 10761                   * if its power level is changing.
11075 10762                   */
11076 10763                  while ((un->un_state == SD_STATE_SUSPENDED) ||
11077 10764                      (un->un_state == SD_STATE_PM_CHANGING)) {
11078 10765                          cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11079 10766                  }
     10767 +
     10768 +                SD_BAIL_CHECK(un);
11080 10769                  un->un_ncmds_in_driver++;
11081 10770                  mutex_exit(SD_MUTEX(un));
11082 10771  
11083 10772                  /* Initialize sd_ssc_t for internal uscsi commands */
11084 10773                  ssc = sd_ssc_init(un);
11085 10774                  if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
11086 10775                          err = EIO;
11087 10776                  } else {
11088 10777                          err = 0;
11089 10778                  }
11090 10779                  sd_ssc_fini(ssc);
11091 10780  
11092 10781                  mutex_enter(SD_MUTEX(un));
11093 10782                  un->un_ncmds_in_driver--;
11094 10783                  ASSERT(un->un_ncmds_in_driver >= 0);
     10784 +                if (un->un_f_detach_waiting)
     10785 +                        cv_signal(&un->un_detach_cv);
11095 10786                  mutex_exit(SD_MUTEX(un));
11096 10787                  if (err != 0)
11097 10788                          return (err);
11098 10789          }
11099 10790  
11100 10791          /*
11101 10792           * Read requests are restricted to multiples of the system block size.
11102 10793           */
11103 10794          if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
11104 10795              !un->un_f_enable_rmw)
↓ open down ↓ 38 lines elided ↑ open up ↑
11143 10834  /* ARGSUSED */
11144 10835  static int
11145 10836  sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
11146 10837  {
11147 10838          struct sd_lun   *un = NULL;
11148 10839          struct uio      *uio = aio->aio_uio;
11149 10840          int             secmask;
11150 10841          int             err = 0;
11151 10842          sd_ssc_t        *ssc;
11152 10843  
11153      -        if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
     10844 +        if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
     10845 +            DEVI_IS_GONE(SD_DEVINFO(un)))
11154 10846                  return (ENXIO);
11155      -        }
11156 10847  
11157 10848          ASSERT(!mutex_owned(SD_MUTEX(un)));
11158 10849  
     10850 +        mutex_enter(SD_MUTEX(un));
     10851 +        while (un->un_state == SD_STATE_ATTACHING)
     10852 +                cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
     10853 +
     10854 +        if (un->un_state == SD_STATE_ATTACH_FAILED) {
     10855 +                mutex_exit(SD_MUTEX(un));
     10856 +                SD_ERROR(SD_LOG_READ_WRITE, un,
     10857 +                    "sdawrite: attach failed\n");
     10858 +                return (EIO);
     10859 +        }
     10860 +        mutex_exit(SD_MUTEX(un));
     10861 +
11159 10862          if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
11160 10863                  mutex_enter(SD_MUTEX(un));
11161 10864                  /*
11162 10865                   * Because the call to sd_ready_and_valid will issue I/O we
11163 10866                   * must wait here if either the device is suspended or
11164 10867                   * if its power level is changing.
11165 10868                   */
11166 10869                  while ((un->un_state == SD_STATE_SUSPENDED) ||
11167 10870                      (un->un_state == SD_STATE_PM_CHANGING)) {
11168 10871                          cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11169 10872                  }
     10873 +
     10874 +                SD_BAIL_CHECK(un);
11170 10875                  un->un_ncmds_in_driver++;
11171 10876                  mutex_exit(SD_MUTEX(un));
11172 10877  
11173 10878                  /* Initialize sd_ssc_t for internal uscsi commands */
11174 10879                  ssc = sd_ssc_init(un);
11175 10880                  if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
11176 10881                          err = EIO;
11177 10882                  } else {
11178 10883                          err = 0;
11179 10884                  }
11180 10885                  sd_ssc_fini(ssc);
11181 10886  
11182 10887                  mutex_enter(SD_MUTEX(un));
11183 10888                  un->un_ncmds_in_driver--;
11184 10889                  ASSERT(un->un_ncmds_in_driver >= 0);
     10890 +                if (un->un_f_detach_waiting)
     10891 +                        cv_signal(&un->un_detach_cv);
11185 10892                  mutex_exit(SD_MUTEX(un));
11186 10893                  if (err != 0)
11187 10894                          return (err);
11188 10895          }
11189 10896  
11190 10897          /*
11191 10898           * Write requests are restricted to multiples of the system block size.
11192 10899           */
11193 10900          if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
11194 10901              !un->un_f_enable_rmw)
↓ open down ↓ 109 lines elided ↑ open up ↑
11304 11011   *   - The b_private field of the buf(9S) struct holds a pointer to
11305 11012   *     an sd_xbuf struct, which contains information needed to
11306 11013   *     construct the scsi_pkt for the command.
11307 11014   *
11308 11015   *   - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
11309 11016   *     layer must acquire & release the SD_MUTEX(un) as needed.
11310 11017   */
11311 11018  
11312 11019  
11313 11020  /*
11314      - * Create taskq for all targets in the system. This is created at
11315      - * _init(9E) and destroyed at _fini(9E).
11316      - *
11317      - * Note: here we set the minalloc to a reasonably high number to ensure that
11318      - * we will have an adequate supply of task entries available at interrupt time.
11319      - * This is used in conjunction with the TASKQ_PREPOPULATE flag in
11320      - * sd_create_taskq().  Since we do not want to sleep for allocations at
11321      - * interrupt time, set maxalloc equal to minalloc. That way we will just fail
11322      - * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
11323      - * requests any one instant in time.
11324      - */
11325      -#define SD_TASKQ_NUMTHREADS     8
11326      -#define SD_TASKQ_MINALLOC       256
11327      -#define SD_TASKQ_MAXALLOC       256
11328      -
11329      -static taskq_t  *sd_tq = NULL;
11330      -_NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq))
11331      -
11332      -static int      sd_taskq_minalloc = SD_TASKQ_MINALLOC;
11333      -static int      sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;
11334      -
11335      -/*
11336      - * The following task queue is being created for the write part of
11337      - * read-modify-write of non-512 block size devices.
11338      - * Limit the number of threads to 1 for now. This number has been chosen
11339      - * considering the fact that it applies only to dvd ram drives/MO drives
11340      - * currently. Performance for which is not main criteria at this stage.
11341      - * Note: It needs to be explored if we can use a single taskq in future
11342      - */
11343      -#define SD_WMR_TASKQ_NUMTHREADS 1
11344      -static taskq_t  *sd_wmr_tq = NULL;
11345      -_NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq))
11346      -
11347      -/*
11348 11021   *    Function: sd_taskq_create
11349 11022   *
11350 11023   * Description: Create taskq thread(s) and preallocate task entries
11351 11024   *
11352 11025   * Return Code: Returns a pointer to the allocated taskq_t.
11353 11026   *
11354 11027   *     Context: Can sleep. Requires blockable context.
11355 11028   *
11356 11029   *       Notes: - The taskq() facility currently is NOT part of the DDI.
11357 11030   *                (definitely NOT recommended for 3rd-party drivers!) :-)
↓ open down ↓ 57 lines elided ↑ open up ↑
11415 11088   *
11416 11089   * Return Code: Always returns zero
11417 11090   *
11418 11091   *     Context: Kernel thread context.
11419 11092   */
11420 11093  
11421 11094  static int
11422 11095  sdstrategy(struct buf *bp)
11423 11096  {
11424 11097          struct sd_lun *un;
     11098 +        int error = EIO;
11425 11099  
11426      -        un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
11427      -        if (un == NULL) {
11428      -                bioerror(bp, EIO);
11429      -                bp->b_resid = bp->b_bcount;
11430      -                biodone(bp);
11431      -                return (0);
11432      -        }
     11100 +        if ((un = ddi_get_soft_state(sd_state,
     11101 +            SD_GET_INSTANCE_FROM_BUF(bp))) == NULL)
     11102 +                goto fail;
11433 11103  
11434      -        /* As was done in the past, fail new cmds. if state is dumping. */
11435      -        if (un->un_state == SD_STATE_DUMPING) {
11436      -                bioerror(bp, ENXIO);
11437      -                bp->b_resid = bp->b_bcount;
11438      -                biodone(bp);
11439      -                return (0);
     11104 +        /* Fail new cmds if state is dumping or device is gone */
     11105 +        if (un->un_state == SD_STATE_DUMPING ||
     11106 +            DEVI_IS_GONE(SD_DEVINFO(un))) {
     11107 +                error = ENXIO;
     11108 +                goto fail;
11440 11109          }
11441 11110  
11442 11111          ASSERT(!mutex_owned(SD_MUTEX(un)));
11443 11112  
11444 11113          /*
11445 11114           * Commands may sneak in while we released the mutex in
11446 11115           * DDI_SUSPEND, we should block new commands. However, old
11447 11116           * commands that are still in the driver at this point should
11448 11117           * still be allowed to drain.
11449 11118           */
11450 11119          mutex_enter(SD_MUTEX(un));
11451 11120          /*
11452 11121           * Must wait here if either the device is suspended or
11453 11122           * if it's power level is changing.
11454 11123           * if its power level is changing.
11455 11124          while ((un->un_state == SD_STATE_SUSPENDED) ||
11456      -            (un->un_state == SD_STATE_PM_CHANGING)) {
     11125 +            (un->un_state == SD_STATE_PM_CHANGING) ||
     11126 +            (un->un_state == SD_STATE_ATTACHING)) {
11457 11127                  cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11458 11128          }
11459 11129  
     11130 +        if (un->un_state == SD_STATE_ATTACH_FAILED) {
     11131 +                mutex_exit(SD_MUTEX(un));
     11132 +                SD_ERROR(SD_LOG_READ_WRITE, un,
     11133 +                    "sdstrategy: attach failed\n");
     11134 +                goto fail;
     11135 +        }
     11136 +        if (un->un_detach_count != 0) {
     11137 +                mutex_exit(SD_MUTEX(un));
     11138 +                goto fail;
     11139 +        }
     11140 +
11460 11141          un->un_ncmds_in_driver++;
11461 11142  
11462 11143          /*
11463 11144           * atapi: Since we are running the CD for now in PIO mode we need to
11464 11145           * call bp_mapin here to avoid bp_mapin being called in interrupt context under
11465 11146           * the HBA's init_pkt routine.
11466 11147           */
11467 11148          if (un->un_f_cfg_is_atapi == TRUE) {
11468 11149                  mutex_exit(SD_MUTEX(un));
11469 11150                  bp_mapin(bp);
↓ open down ↓ 7 lines elided ↑ open up ↑
11477 11158  
11478 11159          mutex_exit(SD_MUTEX(un));
11479 11160  
11480 11161          /*
11481 11162           * This will (eventually) allocate the sd_xbuf area and
11482 11163           * call sd_xbuf_strategy().  We just want to return the
11483 11164           * result of ddi_xbuf_qstrategy so that we have an opt-
11484 11165           * imized tail call which saves us a stack frame.
11485 11166           */
11486 11167          return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr));
     11168 +
     11169 +fail:
     11170 +        bioerror(bp, error);
     11171 +        bp->b_resid = bp->b_bcount;
     11172 +        biodone(bp);
     11173 +        return (0);
11487 11174  }
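
sdstrategy() above now funnels every early rejection (missing soft state, dumping, device gone, attach failure, pending detach) through a single fail: label that errors the buf and completes it. The sketch below models that single-exit shape; struct io_buf and the flag arguments are illustrative stand-ins for buf(9S) and the driver's state tests, not the actual kernel interfaces.

#include <errno.h>
#include <stddef.h>

/* Illustrative stand-in for buf(9S); not the kernel structure. */
struct io_buf {
        size_t  bcount;         /* bytes requested */
        size_t  resid;          /* bytes not transferred */
        int     error;
        void    (*done)(struct io_buf *);
};

/*
 * Single-exit admission check in the spirit of the reworked
 * sdstrategy(): every rejection sets the error once and completes
 * the request through the same path.
 */
static int
strategy(struct io_buf *bp, int unit_found, int dumping, int gone,
    int attach_failed, int detaching)
{
        int error = EIO;

        if (!unit_found)
                goto fail;
        if (dumping || gone) {
                error = ENXIO;
                goto fail;
        }
        if (attach_failed || detaching)
                goto fail;

        return (0);             /* caller queues the I/O */

fail:
        bp->error = error;
        bp->resid = bp->bcount;
        bp->done(bp);
        return (0);             /* strategy(9E) always returns 0 */
}
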
11488 11175  
11489      -
11490 11176  /*
11491 11177   *    Function: sd_xbuf_strategy
11492 11178   *
11493 11179   * Description: Function for initiating IO operations via the
11494 11180   *              ddi_xbuf_qstrategy() mechanism.
11495 11181   *
11496 11182   *     Context: Kernel thread context.
11497 11183   */
11498 11184  
11499 11185  static void
↓ open down ↓ 162 lines elided ↑ open up ↑
11662 11348  {
11663 11349          struct sd_lun           *un;
11664 11350          struct sd_uscsi_info    *uip;
11665 11351          struct sd_xbuf          *xp;
11666 11352          uchar_t                 chain_type;
11667 11353          uchar_t                 cmd;
11668 11354  
11669 11355          ASSERT(bp != NULL);
11670 11356  
11671 11357          un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
11672      -        if (un == NULL) {
     11358 +        if (un == NULL || DEVI_IS_GONE(SD_DEVINFO(un))) {
11673 11359                  bioerror(bp, EIO);
11674 11360                  bp->b_resid = bp->b_bcount;
11675 11361                  biodone(bp);
11676 11362                  return (0);
11677 11363          }
11678 11364  
11679 11365          ASSERT(!mutex_owned(SD_MUTEX(un)));
11680 11366  
11681 11367          SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp);
11682 11368  
↓ open down ↓ 16 lines elided ↑ open up ↑
11699 11385                  mutex_enter(SD_MUTEX(un));
11700 11386          }
11701 11387          un->un_ncmds_in_driver++;
11702 11388          SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n",
11703 11389              un->un_ncmds_in_driver);
11704 11390  
11705 11391          if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) &&
11706 11392              (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1))
11707 11393                  un->un_f_sync_cache_required = TRUE;
11708 11394  
     11395 +        if (sd_failfast_enable & SD_FAILFAST_ENABLE_FAIL_USCSI) {
     11396 +                /*
     11397 +                 * If there are outstanding commands, treat all
     11398 +                 * USCSI commands as if they have B_FAILFAST set.
     11399 +                 */
     11400 +                if (un->un_ncmds_in_driver != 1)
     11401 +                        bp->b_flags |= B_FAILFAST;
     11402 +        }
     11403 +
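
The new SD_FAILFAST_ENABLE_FAIL_USCSI handling marks a USCSI command failfast whenever it is not the only command in the driver, so it cannot hang behind a device that is already failing other I/O. A tiny sketch of that decision follows; the macros and signature are stand-ins chosen for illustration, only the policy mirrors the hunk above.

#include <stdint.h>

#define B_FAILFAST_MODEL        0x01    /* stands in for B_FAILFAST */
#define FAIL_USCSI_MODEL        0x02    /* stands in for the enable bit */

/*
 * When the feature bit is set and other commands are already
 * outstanding, the new USCSI command inherits failfast semantics.
 */
static uint32_t
uscsi_flags(uint32_t bflags, uint32_t failfast_enable, long ncmds_in_driver)
{
        if ((failfast_enable & FAIL_USCSI_MODEL) && ncmds_in_driver != 1)
                bflags |= B_FAILFAST_MODEL;
        return (bflags);
}
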
11709 11404          mutex_exit(SD_MUTEX(un));
11710 11405  
11711 11406          switch (uip->ui_flags) {
11712 11407          case SD_PATH_DIRECT:
11713 11408                  chain_type = SD_CHAIN_DIRECT;
11714 11409                  break;
11715 11410          case SD_PATH_DIRECT_PRIORITY:
11716 11411                  chain_type = SD_CHAIN_DIRECT_PRIORITY;
11717 11412                  break;
11718 11413          default:
↓ open down ↓ 54 lines elided ↑ open up ↑
11773 11468  
11774 11469  static int
11775 11470  sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
11776 11471      enum uio_seg dataspace, int path_flag)
11777 11472  {
11778 11473          struct sd_lun   *un;
11779 11474          sd_ssc_t        *ssc;
11780 11475          int             rval;
11781 11476  
11782 11477          un = ddi_get_soft_state(sd_state, SDUNIT(dev));
11783      -        if (un == NULL) {
     11478 +        if (un == NULL || DEVI_IS_GONE(SD_DEVINFO(un)))
11784 11479                  return (ENXIO);
11785      -        }
11786 11480  
11787 11481          /*
11788 11482           * Using sd_ssc_send to handle uscsi cmd
11789 11483           */
11790 11484          ssc = sd_ssc_init(un);
11791 11485          rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag);
11792 11486          sd_ssc_fini(ssc);
11793 11487  
11794 11488          return (rval);
11795 11489  }
↓ open down ↓ 146 lines elided ↑ open up ↑
11942 11636   *              Waits for command to complete. Can sleep.
11943 11637   */
11944 11638  static int
11945 11639  sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag,
11946 11640      enum uio_seg dataspace, int path_flag)
11947 11641  {
11948 11642          struct sd_uscsi_info    *uip;
11949 11643          struct uscsi_cmd        *uscmd;
11950 11644          struct sd_lun           *un;
11951 11645          dev_t                   dev;
     11646 +        dev_info_t              *dip = SD_DEVINFO(ssc->ssc_un);
11952 11647  
11953 11648          int     format = 0;
11954 11649          int     rval;
11955 11650  
11956 11651          ASSERT(ssc != NULL);
11957 11652          un = ssc->ssc_un;
11958 11653          ASSERT(un != NULL);
11959 11654          uscmd = ssc->ssc_uscsi_cmd;
11960 11655          ASSERT(uscmd != NULL);
11961 11656          ASSERT(!mutex_owned(SD_MUTEX(un)));
↓ open down ↓ 23 lines elided ↑ open up ↑
11985 11680           * followed to avoid missing FMA telemetries.
11986 11681           */
11987 11682          ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT;
11988 11683  
11989 11684          /*
11990 11685           * if USCSI_PMFAILFAST is set and un is in low power, fail the
11991 11686           * command immediately.
11992 11687           */
11993 11688          mutex_enter(SD_MUTEX(un));
11994 11689          mutex_enter(&un->un_pm_mutex);
     11690 +
11995 11691          if ((uscmd->uscsi_flags & USCSI_PMFAILFAST) &&
11996 11692              SD_DEVICE_IS_IN_LOW_POWER(un)) {
11997 11693                  SD_TRACE(SD_LOG_IO, un, "sd_ssc_send:"
11998 11694                      "un:0x%p is in low power\n", un);
11999 11695                  mutex_exit(&un->un_pm_mutex);
12000 11696                  mutex_exit(SD_MUTEX(un));
12001 11697                  return (ECANCELED);
12002 11698          }
12003 11699          mutex_exit(&un->un_pm_mutex);
12004 11700          mutex_exit(SD_MUTEX(un));
↓ open down ↓ 48 lines elided ↑ open up ↑
12053 11749           * situations, and do not have retries performed.
12054 11750           */
12055 11751          if (path_flag == SD_PATH_DIRECT_PRIORITY) {
12056 11752                  uscmd->uscsi_flags |= USCSI_DIAGNOSE;
12057 11753          }
12058 11754          uscmd->uscsi_flags &= ~USCSI_NOINTR;
12059 11755  
12060 11756          dev = SD_GET_DEV(un);
12061 11757          rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd,
12062 11758              sd_uscsi_strategy, NULL, uip);
     11759 +        if (DEVI_IS_GONE(dip)) {
     11760 +                cmn_err(CE_WARN, "%s-%d: device is gone!", __func__, __LINE__);
     11761 +                return (ENXIO);
     11762 +        }
12063 11763  
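
After scsi_uscsi_handle_cmd() returns, the patch re-checks the devinfo node and reports ENXIO if the device disappeared while the command was in flight. A small user-space analogue is sketched below, assuming a plain boolean in place of DEVI_IS_GONE().

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * A device that vanished mid-flight is reported as ENXIO regardless
 * of what the transport returned for the command itself.
 */
static int
post_cmd_check(int rval, bool device_is_gone)
{
        if (device_is_gone) {
                fprintf(stderr, "%s: device is gone!\n", __func__);
                return (ENXIO);
        }
        return (rval);
}
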
12064 11764          /*
12065 11765           * mark ssc_flags right after handle_cmd to make sure
12066 11766           * the uscsi has been sent
12067 11767           */
12068 11768          ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED;
12069 11769  
12070 11770  #ifdef SDDEBUG
12071 11771          SD_INFO(SD_LOG_IO, un, "sd_ssc_send: "
12072 11772              "uscsi_status: 0x%02x  uscsi_resid:0x%x\n",
↓ open down ↓ 348 lines elided ↑ open up ↑
12421 12121                   * Grab time when the cmd completed.
12422 12122                   * This is used for determining if the system has been
12423 12123                   * idle long enough to make it idle to the PM framework.
12424 12124                   * This is for lowering the overhead, and therefore improving
12425 12125                   * performance per I/O operation.
12426 12126                   */
12427 12127                  un->un_pm_idle_time = gethrtime();
12428 12128  
12429 12129                  un->un_ncmds_in_driver--;
12430 12130                  ASSERT(un->un_ncmds_in_driver >= 0);
     12131 +                if (un->un_f_detach_waiting)
     12132 +                        cv_signal(&un->un_detach_cv);
12431 12133                  SD_INFO(SD_LOG_IO, un,
12432 12134                      "sd_buf_iodone: un_ncmds_in_driver = %ld\n",
12433 12135                      un->un_ncmds_in_driver);
12434 12136  
12435 12137                  mutex_exit(SD_MUTEX(un));
12436 12138          }
12437 12139  
12438 12140          biodone(bp);                            /* bp is gone after this */
12439 12141  
12440 12142          SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n");
↓ open down ↓ 30 lines elided ↑ open up ↑
12471 12173           * Grab time when the cmd completed.
12472 12174           * This is used for determining if the system has been
12473 12175           * idle long enough to make it idle to the PM framework.
12474 12176           * This is for lowering the overhead, and therefore improving
12475 12177           * performance per I/O operation.
12476 12178           */
12477 12179          un->un_pm_idle_time = gethrtime();
12478 12180  
12479 12181          un->un_ncmds_in_driver--;
12480 12182          ASSERT(un->un_ncmds_in_driver >= 0);
     12183 +        if (un->un_f_detach_waiting)
     12184 +                cv_signal(&un->un_detach_cv);
12481 12185          SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n",
12482 12186              un->un_ncmds_in_driver);
12483 12187  
12484 12188          mutex_exit(SD_MUTEX(un));
12485 12189  
12486 12190          if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen >
12487 12191              SENSE_LENGTH) {
12488 12192                  kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH +
12489 12193                      MAX_SENSE_LENGTH);
12490 12194          } else {
↓ open down ↓ 377 lines elided ↑ open up ↑
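
Both iodone paths above now signal un_detach_cv whenever un_f_detach_waiting is set, which lets a detaching thread sleep until un_ncmds_in_driver drains to zero. The pthreads sketch below models that handshake under assumed names; it is not the driver's locking code.

#include <pthread.h>

struct lun_model {
        pthread_mutex_t lock;
        pthread_cond_t  detach_cv;      /* models un_detach_cv */
        long            ncmds;          /* models un_ncmds_in_driver */
        int             detach_waiting; /* models un_f_detach_waiting */
};

/* I/O completion side: drop the count, wake a waiting detach thread. */
static void
cmd_done(struct lun_model *l)
{
        pthread_mutex_lock(&l->lock);
        l->ncmds--;
        if (l->detach_waiting)
                pthread_cond_signal(&l->detach_cv);
        pthread_mutex_unlock(&l->lock);
}

/* Detach side: wait until every command in the driver has completed. */
static void
detach_drain(struct lun_model *l)
{
        pthread_mutex_lock(&l->lock);
        l->detach_waiting = 1;
        while (l->ncmds > 0)
                pthread_cond_wait(&l->detach_cv, &l->lock);
        l->detach_waiting = 0;
        pthread_mutex_unlock(&l->lock);
}
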
12868 12572          /*
12869 12573           * We do not need a shadow buf if the device is using
12870 12574           * un->un_sys_blocksize as its block size or if bcount == 0.
12871 12575           * In this case there is no layer-private data block allocated.
12872 12576           */
12873 12577          if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) ||
12874 12578              (bp->b_bcount == 0)) {
12875 12579                  goto done;
12876 12580          }
12877 12581  
12878      -#if defined(__i386) || defined(__amd64)
12879 12582          /* We do not support non-block-aligned transfers for ROD devices */
12880 12583          ASSERT(!ISROD(un));
12881      -#endif
12882 12584  
12883 12585          xp = SD_GET_XBUF(bp);
12884 12586          ASSERT(xp != NULL);
12885 12587  
12886 12588          SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12887 12589              "tgt_blocksize:0x%x sys_blocksize: 0x%x\n",
12888 12590              un->un_tgt_blocksize, DEV_BSIZE);
12889 12591          SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12890 12592              "request start block:0x%x\n", xp->xb_blkno);
12891 12593          SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
↓ open down ↓ 678 lines elided ↑ open up ↑
13570 13272  sd_init_cdb_limits(struct sd_lun *un)
13571 13273  {
13572 13274          int hba_cdb_limit;
13573 13275  
13574 13276          /*
13575 13277           * Use CDB_GROUP1 commands for most devices except for
13576 13278           * parallel SCSI fixed drives in which case we get better
13577 13279           * performance using CDB_GROUP0 commands (where applicable).
13578 13280           */
13579 13281          un->un_mincdb = SD_CDB_GROUP1;
13580      -#if !defined(__fibre)
13581 13282          if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) &&
13582 13283              !un->un_f_has_removable_media) {
13583 13284                  un->un_mincdb = SD_CDB_GROUP0;
13584 13285          }
13585      -#endif
13586 13286  
13587 13287          /*
13588 13288           * Try to read the max-cdb-length supported by HBA.
13589 13289           */
13590 13290          un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1);
13591 13291          if (0 >= un->un_max_hba_cdb) {
13592 13292                  un->un_max_hba_cdb = CDB_GROUP4;
13593 13293                  hba_cdb_limit = SD_CDB_GROUP4;
13594 13294          } else if (0 < un->un_max_hba_cdb &&
13595 13295              un->un_max_hba_cdb < CDB_GROUP1) {
↓ open down ↓ 17 lines elided ↑ open up ↑
13613 13313          un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 :
13614 13314              min(hba_cdb_limit, SD_CDB_GROUP4);
13615 13315  #else
13616 13316          un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 :
13617 13317              min(hba_cdb_limit, SD_CDB_GROUP1);
13618 13318  #endif
13619 13319  
13620 13320          un->un_status_len = (int)((un->un_f_arq_enabled == TRUE)
13621 13321              ? sizeof (struct scsi_arq_status) : 1);
13622 13322          if (!ISCD(un))
13623      -                un->un_cmd_timeout = (ushort_t)sd_io_time;
     13323 +                un->un_cmd_timeout = (ushort_t)un->un_io_time;
13624 13324          un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout;
13625 13325  }
13626 13326  
13627 13327  
13628 13328  /*
13629 13329   *    Function: sd_initpkt_for_buf
13630 13330   *
13631 13331   * Description: Allocate and initialize for transport a scsi_pkt struct,
13632 13332   *              based upon the info specified in the given buf struct.
13633 13333   *
↓ open down ↓ 31 lines elided ↑ open up ↑
13665 13365          un = SD_GET_UN(bp);
13666 13366          ASSERT(un != NULL);
13667 13367          ASSERT(mutex_owned(SD_MUTEX(un)));
13668 13368          ASSERT(bp->b_resid == 0);
13669 13369  
13670 13370          SD_TRACE(SD_LOG_IO_CORE, un,
13671 13371              "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);
13672 13372  
13673 13373          mutex_exit(SD_MUTEX(un));
13674 13374  
13675      -#if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
13676 13375          if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
13677 13376                  /*
13678 13377                   * Already have a scsi_pkt -- just need DMA resources.
13679 13378                   * We must recompute the CDB in case the mapping returns
13680 13379                   * a nonzero pkt_resid.
13681 13380                   * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
13682 13381                   * that is being retried, the unmap/remap of the DMA resources
13683 13382                   * will result in the entire transfer starting over again
13684 13383                   * from the very first block.
13685 13384                   */
13686 13385                  ASSERT(xp->xb_pktp != NULL);
13687 13386                  pktp = xp->xb_pktp;
13688 13387          } else {
13689 13388                  pktp = NULL;
13690 13389          }
13691      -#endif /* __i386 || __amd64 */
13692 13390  
13693 13391          startblock = xp->xb_blkno;      /* Absolute block num. */
13694 13392          blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
13695 13393  
13696 13394          cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);
13697 13395  
13698 13396          /*
13699 13397           * sd_setup_rw_pkt will determine the appropriate CDB group to use,
13700 13398           * call scsi_init_pkt, and build the CDB.
13701 13399           */
↓ open down ↓ 27 lines elided ↑ open up ↑
13729 13427                  pktp->pkt_flags = un->un_tagflags;
13730 13428                  pktp->pkt_time  = un->un_cmd_timeout;
13731 13429                  pktp->pkt_comp  = sdintr;
13732 13430  
13733 13431                  pktp->pkt_private = bp;
13734 13432                  *pktpp = pktp;
13735 13433  
13736 13434                  SD_TRACE(SD_LOG_IO_CORE, un,
13737 13435                      "sd_initpkt_for_buf: exit: buf:0x%p\n", bp);
13738 13436  
13739      -#if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
13740 13437                  xp->xb_pkt_flags &= ~SD_XB_DMA_FREED;
13741      -#endif
13742 13438  
13743 13439                  mutex_enter(SD_MUTEX(un));
13744 13440                  return (SD_PKT_ALLOC_SUCCESS);
13745 13441  
13746 13442          }
13747 13443  
13748 13444          /*
13749 13445           * SD_PKT_ALLOC_FAILURE is the only expected failure code
13750 13446           * from sd_setup_rw_pkt.
13751 13447           */
↓ open down ↓ 643 lines elided ↑ open up ↑
14395 14091          ASSERT(xp != NULL);
14396 14092          un = SD_GET_UN(bp);
14397 14093          ASSERT(un != NULL);
14398 14094          ASSERT(!mutex_owned(SD_MUTEX(un)));
14399 14095  
14400 14096          if (bp->b_flags & (B_PAGEIO | B_PHYS)) {
14401 14097                  bp_mapin(bp);
14402 14098          }
14403 14099  
14404 14100          bflags &= (B_READ | B_WRITE);
14405      -#if defined(__i386) || defined(__amd64)
14406 14101          new_bp = getrbuf(KM_SLEEP);
14407 14102          new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP);
14408 14103          new_bp->b_bcount = datalen;
14409 14104          new_bp->b_flags = bflags |
14410 14105              (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW));
14411      -#else
14412      -        new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL,
14413      -            datalen, bflags, SLEEP_FUNC, NULL);
14414      -#endif
14415 14106          new_bp->av_forw = NULL;
14416 14107          new_bp->av_back = NULL;
14417 14108          new_bp->b_dev   = bp->b_dev;
14418 14109          new_bp->b_blkno = blkno;
14419 14110          new_bp->b_iodone = func;
14420 14111          new_bp->b_edev  = bp->b_edev;
14421 14112          new_bp->b_resid = 0;
14422 14113  
14423 14114          /* We need to preserve the B_FAILFAST flag */
14424 14115          if (bp->b_flags & B_FAILFAST) {
↓ open down ↓ 68 lines elided ↑ open up ↑
14493 14184  
14494 14185  static void
14495 14186  sd_shadow_buf_free(struct buf *bp)
14496 14187  {
14497 14188          struct sd_xbuf  *xp;
14498 14189  
14499 14190          ASSERT(bp != NULL);
14500 14191          xp = SD_GET_XBUF(bp);
14501 14192          ASSERT(xp != NULL);
14502 14193  
14503      -#if defined(__sparc)
14504 14194          /*
14505      -         * Call bp_mapout() before freeing the buf,  in case a lower
14506      -         * layer or HBA  had done a bp_mapin().  we must do this here
14507      -         * as we are the "originator" of the shadow buf.
14508      -         */
14509      -        bp_mapout(bp);
14510      -#endif
14511      -
14512      -        /*
14513 14195           * Null out b_iodone before freeing the bp, to ensure that the driver
14514 14196           * never gets confused by a stale value in this field. (Just a little
14515 14197           * extra defensiveness here.)
14516 14198           */
14517 14199          bp->b_iodone = NULL;
14518 14200  
14519      -#if defined(__i386) || defined(__amd64)
14520 14201          kmem_free(bp->b_un.b_addr, bp->b_bcount);
14521 14202          freerbuf(bp);
14522      -#else
14523      -        scsi_free_consistent_buf(bp);
14524      -#endif
14525 14203  
14526 14204          kmem_free(xp, sizeof (struct sd_xbuf));
14527 14205  }
14528 14206  
14529 14207  
14530 14208  /*
14531 14209   *    Function: sd_print_transport_rejected_message
14532 14210   *
14533 14211   * Description: This implements the ludicrously complex rules for printing
14534 14212   *              a "transport rejected" message.  This is to address the
↓ open down ↓ 205 lines elided ↑ open up ↑
14740 14418   *              or runout callback context. This function may not block or
14741 14419   *              call routines that block.
14742 14420   */
14743 14421  
14744 14422  static void
14745 14423  sd_start_cmds(struct sd_lun *un, struct buf *immed_bp)
14746 14424  {
14747 14425          struct  sd_xbuf *xp;
14748 14426          struct  buf     *bp;
14749 14427          void    (*statp)(kstat_io_t *);
14750      -#if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14751 14428          void    (*saved_statp)(kstat_io_t *);
14752      -#endif
14753 14429          int     rval;
14754 14430          struct sd_fm_internal *sfip = NULL;
14755 14431  
14756 14432          ASSERT(un != NULL);
14757 14433          ASSERT(mutex_owned(SD_MUTEX(un)));
14758 14434          ASSERT(un->un_ncmds_in_transport >= 0);
14759 14435          ASSERT(un->un_throttle >= 0);
14760 14436  
14761 14437          SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n");
14762 14438  
     14439 +        /*
      14440 +         * If the device is currently retired, we should abort all pending I/O.
     14441 +         */
     14442 +        if (DEVI(un->un_sd->sd_dev)->devi_flags & DEVI_RETIRED) {
     14443 +                if (immed_bp) {
     14444 +                        immed_bp->b_resid = immed_bp->b_bcount;
     14445 +                        bioerror(immed_bp, ENXIO);
     14446 +                        biodone(immed_bp);
     14447 +                }
     14448 +                /* abort in-flight IO */
     14449 +                (void) scsi_abort(SD_ADDRESS(un), NULL);
     14450 +                /* abort pending IO */
     14451 +                un->un_failfast_state = SD_FAILFAST_ACTIVE;
     14452 +                un->un_failfast_bp = NULL;
     14453 +                sd_failfast_flushq(un, B_TRUE);
     14454 +                return;
     14455 +        }
     14456 +
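
When the devinfo node is retired, sd_start_cmds() above errors the immediate buf, aborts in-flight commands, forces failfast active and flushes the wait queue. The sketch below models only the queue-flush step, failing every queued request with ENXIO; struct qbuf and flush_waitq() are illustrative stand-ins for the driver's waitq and sd_failfast_flushq(), not their actual implementations.

#include <errno.h>
#include <stddef.h>

struct qbuf {
        struct qbuf     *next;
        size_t          bcount;
        size_t          resid;
        int             error;
};

/* Complete one request with an error (models bioerror() + biodone()). */
static void
qbuf_fail(struct qbuf *bp, int error)
{
        bp->error = error;
        bp->resid = bp->bcount;
}

/*
 * Flush every queued request with ENXIO, in the spirit of failing all
 * pending I/O once the device has been retired.  *headp is the wait
 * queue head; the list is emptied.
 */
static void
flush_waitq(struct qbuf **headp)
{
        struct qbuf *bp;

        while ((bp = *headp) != NULL) {
                *headp = bp->next;
                bp->next = NULL;
                qbuf_fail(bp, ENXIO);
        }
}
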
14763 14457          do {
14764      -#if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14765 14458                  saved_statp = NULL;
14766      -#endif
14767 14459  
14768 14460                  /*
14769 14461                   * If we are syncing or dumping, fail the command to
14770 14462                   * avoid recursively calling back into scsi_transport().
14771 14463                   * The dump I/O itself uses a separate code path so this
14772 14464                   * only prevents non-dump I/O from being sent while dumping.
14773 14465                   * File system sync takes place before dumping begins.
14774 14466                   * During panic, filesystem I/O is allowed provided
14775 14467                   * un_in_callback is <= 1.  This is to prevent recursion
14776 14468                   * such as sd_start_cmds -> scsi_transport -> sdintr ->
↓ open down ↓ 36 lines elided ↑ open up ↑
14813 14505                                   * count will get decremented correctly below.
14814 14506                                   * Also we must clear un->un_retry_statp to
14815 14507                                   * ensure that we do not act on a stale value
14816 14508                                   * in this field.
14817 14509                                   */
14818 14510                                  if ((un->un_retry_statp == kstat_waitq_enter) ||
14819 14511                                      (un->un_retry_statp ==
14820 14512                                      kstat_runq_back_to_waitq)) {
14821 14513                                          statp = kstat_waitq_to_runq;
14822 14514                                  }
14823      -#if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14824 14515                                  saved_statp = un->un_retry_statp;
14825      -#endif
14826 14516                                  un->un_retry_statp = NULL;
14827 14517  
14828 14518                                  SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
14829 14519                                      "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p "
14830 14520                                      "un_throttle:%d un_ncmds_in_transport:%d\n",
14831 14521                                      un, un->un_retry_bp, un->un_throttle,
14832 14522                                      un->un_ncmds_in_transport);
14833 14523                          } else {
14834 14524                                  SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: "
14835 14525                                      "processing priority bp:0x%p\n", bp);
↓ open down ↓ 56 lines elided ↑ open up ↑
14892 14582  
14893 14583                  /*
14894 14584                   * Reset the state to normal. This is the mechanism by which
14895 14585                   * the state transitions from either SD_STATE_RWAIT or
14896 14586                   * SD_STATE_OFFLINE to SD_STATE_NORMAL.
14897 14587                   * If state is SD_STATE_PM_CHANGING then this command is
14898 14588                   * part of the device power control and the state must
14899 14589           * not be put back to normal. Doing so would
14900 14590           * allow new commands to proceed when they shouldn't,
14901 14591           * as the device may be going off.
     14592 +                 *
     14593 +                 * Similarly, if the state is SD_STATE_ATTACHING we should
     14594 +                 * not set it to SD_STATE_NORMAL to avoid corruption.
14902 14595                   */
14903 14596                  if ((un->un_state != SD_STATE_SUSPENDED) &&
14904      -                    (un->un_state != SD_STATE_PM_CHANGING)) {
     14597 +                    (un->un_state != SD_STATE_PM_CHANGING) &&
     14598 +                    (un->un_state != SD_STATE_ATTACHING)) {
14905 14599                          New_state(un, SD_STATE_NORMAL);
14906 14600                  }
14907 14601  
14908 14602                  xp = SD_GET_XBUF(bp);
14909 14603                  ASSERT(xp != NULL);
14910 14604  
14911      -#if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14912 14605                  /*
14913 14606                   * Allocate the scsi_pkt if we need one, or attach DMA
14914 14607                   * resources if we have a scsi_pkt that needs them. The
14915 14608                   * latter should only occur for commands that are being
14916 14609                   * retried.
14917 14610                   */
14918 14611                  if ((xp->xb_pktp == NULL) ||
14919 14612                      ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) {
14920      -#else
14921      -                if (xp->xb_pktp == NULL) {
14922      -#endif
14923 14613                          /*
14924 14614                           * There is no scsi_pkt allocated for this buf. Call
14925 14615                           * the initpkt function to allocate & init one.
14926 14616                           *
14927 14617                           * The scsi_init_pkt runout callback functionality is
14928 14618                           * implemented as follows:
14929 14619                           *
14930 14620                           * 1) The initpkt function always calls
14931 14621                           *    scsi_init_pkt(9F) with sdrunout specified as the
14932 14622                           *    callback routine.
↓ open down ↓ 47 lines elided ↑ open up ↑
14980 14670                                   * Since retries and RQS commands always have a
14981 14671                                   * scsi_pkt allocated, these cases should never
14982 14672                                   * get here. So the only cases this needs to
14983 14673                                   * handle is a bp from the waitq (which we put
14984 14674                                   * back onto the waitq for sdrunout), or a bp
14985 14675                                   * sent as an immed_bp (which we just fail).
14986 14676                                   */
14987 14677                                  SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14988 14678                                      "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n");
14989 14679  
14990      -#if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14991      -
14992 14680                                  if (bp == immed_bp) {
14993 14681                                          /*
14994 14682                                           * If SD_XB_DMA_FREED is clear, then
14995 14683                                           * this is a failure to allocate a
14996 14684                                           * scsi_pkt, and we must fail the
14997 14685                                           * command.
14998 14686                                           */
14999 14687                                          if ((xp->xb_pkt_flags &
15000 14688                                              SD_XB_DMA_FREED) == 0) {
15001 14689                                                  break;
↓ open down ↓ 51 lines elided ↑ open up ↑
15053 14741                                              NULL)) {
15054 14742  
15055 14743                                                  un->un_retry_timeid =
15056 14744                                                      timeout(
15057 14745                                                      sd_start_retry_command,
15058 14746                                                      un, SD_RESTART_TIMEOUT);
15059 14747                                          }
15060 14748                                          goto exit;
15061 14749                                  }
15062 14750  
15063      -#else
15064      -                                if (bp == immed_bp) {
15065      -                                        break;  /* Just fail the command */
15066      -                                }
15067      -#endif
15068      -
15069 14751                                  /* Add the buf back to the head of the waitq */
15070 14752                                  bp->av_forw = un->un_waitq_headp;
15071 14753                                  un->un_waitq_headp = bp;
15072 14754                                  if (un->un_waitq_tailp == NULL) {
15073 14755                                          un->un_waitq_tailp = bp;
15074 14756                                  }
15075 14757                                  goto exit;
15076 14758  
15077 14759                          case SD_PKT_ALLOC_FAILURE_NO_DMA:
15078 14760                                  /*
15079 14761                                   * HBA DMA resource failure. Fail the command
15080 14762                                   * and continue processing of the queues.
15081 14763                                   */
15082 14764                                  SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15083 14765                                      "sd_start_cmds: "
15084 14766                                      "SD_PKT_ALLOC_FAILURE_NO_DMA\n");
15085 14767                                  break;
15086 14768  
15087 14769                          case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL:
15088 14770                                  /*
15089      -                                 * Note:x86: Partial DMA mapping not supported
15090      -                                 * for USCSI commands, and all the needed DMA
15091      -                                 * resources were not allocated.
     14771 +                                 * Partial DMA mapping not supported for USCSI
     14772 +                                 * commands, and all the needed DMA resources
     14773 +                                 * were not allocated.
15092 14774                                   */
15093 14775                                  SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15094 14776                                      "sd_start_cmds: "
15095 14777                                      "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n");
15096 14778                                  break;
15097 14779  
15098 14780                          case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL:
15099 14781                                  /*
15100      -                                 * Note:x86: Request cannot fit into CDB based
15101      -                                 * on lba and len.
     14782 +                                 * Request cannot fit into CDB based on lba
     14783 +                                 * and len.
15102 14784                                   */
15103 14785                                  SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15104 14786                                      "sd_start_cmds: "
15105 14787                                      "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n");
15106 14788                                  break;
15107 14789  
15108 14790                          default:
15109 14791                                  /* Should NEVER get here! */
15110 14792                                  panic("scsi_initpkt error");
15111 14793                                  /*NOTREACHED*/
↓ open down ↓ 18 lines elided ↑ open up ↑
15130 14812                          continue;
15131 14813                  }
15132 14814  got_pkt:
15133 14815                  if (bp == immed_bp) {
15134 14816                          /* goto the head of the class.... */
15135 14817                          xp->xb_pktp->pkt_flags |= FLAG_HEAD;
15136 14818                  }
15137 14819  
15138 14820                  un->un_ncmds_in_transport++;
15139 14821                  SD_UPDATE_KSTATS(un, statp, bp);
      14822 +                /* The start time MAY be overridden by the HBA driver. */
     14823 +                xp->xb_pktp->pkt_start = gethrtime();
     14824 +                xp->xb_pktp->pkt_stop = 0;
15140 14825  
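
The hunk above stamps pkt_start with gethrtime() just before dispatch (noting the HBA may overwrite it) and zeroes pkt_stop, so completion code can compute per-command latency. Below is a user-space analogue using a monotonic clock; the names are chosen for illustration and are not the scsi_pkt fields themselves.

#include <stdint.h>
#include <time.h>

/* Monotonic nanosecond clock; user-space analogue of gethrtime(). */
static uint64_t
hrtime_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ((uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec);
}

struct cmd_times {
        uint64_t        start;  /* stamped before dispatch */
        uint64_t        stop;   /* stamped at completion, 0 until then */
};

static void
cmd_dispatch(struct cmd_times *t)
{
        t->start = hrtime_ns();
        t->stop = 0;
}

static uint64_t
cmd_complete(struct cmd_times *t)
{
        t->stop = hrtime_ns();
        return (t->stop - t->start);    /* per-command latency in ns */
}
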
15141 14826                  /*
15142 14827                   * Call scsi_transport() to send the command to the target.
15143 14828                   * According to SCSA architecture, we must drop the mutex here
15144 14829                   * before calling scsi_transport() in order to avoid deadlock.
15145 14830                   * Note that the scsi_pkt's completion routine can be executed
15146 14831                   * (from interrupt context) even before the call to
15147 14832                   * scsi_transport() returns.
15148 14833                   */
15149 14834                  SD_TRACE(SD_LOG_IO_CORE, un,
15150 14835                      "sd_start_cmds: calling scsi_transport()\n");
15151 14836                  DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp);
15152 14837  
     14838 +#ifdef SD_FAULT_INJECTION
     14839 +                /*
     14840 +                 * Packet is ready for submission to the HBA. Perform HBA-based
     14841 +                 * fault-injection.
     14842 +                 */
     14843 +                sd_prefaultinjection(xp->xb_pktp);
     14844 +#endif /* SD_FAULT_INJECTION */
     14845 +
15153 14846                  mutex_exit(SD_MUTEX(un));
15154 14847                  rval = scsi_transport(xp->xb_pktp);
15155 14848                  mutex_enter(SD_MUTEX(un));
15156 14849  
15157 14850                  SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15158 14851                      "sd_start_cmds: scsi_transport() returned %d\n", rval);
15159 14852  
15160 14853                  switch (rval) {
15161 14854                  case TRAN_ACCEPT:
15162 14855                          /* Clear this with every pkt accepted by the HBA */
15163 14856                          un->un_tran_fatal_count = 0;
15164 14857                          break;  /* Success; try the next cmd (if any) */
15165 14858  
15166 14859                  case TRAN_BUSY:
15167 14860                          un->un_ncmds_in_transport--;
15168 14861                          ASSERT(un->un_ncmds_in_transport >= 0);
15169 14862  
     14863 +#ifdef SD_FAULT_INJECTION
15170 14864                          /*
      14865 +                         * If the packet was rejected during an active
      14866 +                         * fault-injection session, move to the next fault
      14867 +                         * slot and clear the packet's rejection flag.
     14868 +                         */
     14869 +                        if (sd_fault_injection_on) {
     14870 +                                uint_t i = un->sd_fi_fifo_start;
     14871 +
     14872 +                                if (un->sd_fi_fifo_tran[i] != NULL) {
     14873 +                                        kmem_free(un->sd_fi_fifo_tran[i],
     14874 +                                            sizeof (struct sd_fi_tran));
     14875 +                                        un->sd_fi_fifo_tran[i] = NULL;
     14876 +                                }
     14877 +                                un->sd_fi_fifo_start++;
     14878 +                        }
     14879 +
     14880 +                        if (xp->xb_pktp->pkt_flags & FLAG_PKT_BUSY) {
     14881 +                                xp->xb_pktp->pkt_flags &= ~FLAG_PKT_BUSY;
     14882 +                        }
     14883 +#endif /* SD_FAULT_INJECTION */
     14884 +
     14885 +                        /*
15171 14886                           * Don't retry the request sense; the sense data
15172 14887                           * is lost when another request is sent.
15173 14888                           * Free up the rqs buf and retry
15174 14889                           * the original failed cmd.  Update kstat.
15175 14890                           */
15176      -                        if (bp == un->un_rqs_bp) {
     14891 +                        if ((un->un_ncmds_in_transport > 0) &&
     14892 +                            (bp == un->un_rqs_bp)) {
15177 14893                                  SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
15178 14894                                  bp = sd_mark_rqs_idle(un, xp);
15179 14895                                  sd_retry_command(un, bp, SD_RETRIES_STANDARD,
15180 14896                                      NULL, NULL, EIO, un->un_busy_timeout / 500,
15181 14897                                      kstat_waitq_enter);
15182 14898                                  goto exit;
15183 14899                          }
15184 14900  
15185      -#if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
15186 14901                          /*
15187 14902                           * Free the DMA resources for the  scsi_pkt. This will
15188 14903                           * allow mpxio to select another path the next time
15189 14904                           * we call scsi_transport() with this scsi_pkt.
15190 14905                           * See sdintr() for the rationale behind this.
15191 14906                           */
15192 14907                          if ((un->un_f_is_fibre == TRUE) &&
15193 14908                              ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
15194 14909                              ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) {
15195 14910                                  scsi_dmafree(xp->xb_pktp);
15196 14911                                  xp->xb_pkt_flags |= SD_XB_DMA_FREED;
15197 14912                          }
15198      -#endif
15199 14913  
15200 14914                          if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) {
15201 14915                                  /*
15202 14916                                   * Commands that are SD_PATH_DIRECT_PRIORITY
15203 14917                                   * are for error recovery situations. These do
15204 14918                                   * not use the normal command waitq, so if they
15205 14919                                   * get a TRAN_BUSY we cannot put them back onto
15206 14920                                   * the waitq for later retry. One possible
15207 14921                                   * problem is that there could already be some
15208 14922                                   * other command on un_retry_bp that is waiting
↓ open down ↓ 26 lines elided ↑ open up ↑
15235 14949                          /*
15236 14950                           * For TRAN_BUSY, we want to reduce the throttle value,
15237 14951                           * unless we are retrying a command.
15238 14952                           */
15239 14953                          if (bp != un->un_retry_bp) {
15240 14954                                  sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY);
15241 14955                          }
15242 14956  
15243 14957                          /*
15244 14958                           * Set up the bp to be tried again 10 ms later.
15245      -                         * Note:x86: Is there a timeout value in the sd_lun
     14959 +                         * XXX Is there a timeout value in the sd_lun
15246 14960                           * for this condition?
15247 14961                           */
15248 14962                          sd_set_retry_bp(un, bp, un->un_busy_timeout / 500,
15249 14963                              kstat_runq_back_to_waitq);
15250 14964                          goto exit;
15251 14965  
15252 14966                  case TRAN_FATAL_ERROR:
15253 14967                          un->un_tran_fatal_count++;
15254 14968                          /* FALLTHRU */
15255 14969  
↓ open down ↓ 108 lines elided ↑ open up ↑
15364 15078              (xp->xb_pktp->pkt_resid == 0)) {
15365 15079  
15366 15080                  if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) {
15367 15081                          /*
15368 15082                           * Successfully set up next portion of cmd
15369 15083                           * transfer, try sending it
15370 15084                           */
15371 15085                          sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
15372 15086                              NULL, NULL, 0, (clock_t)0, NULL);
15373 15087                          sd_start_cmds(un, NULL);
15374      -                        return; /* Note:x86: need a return here? */
     15088 +                        return; /* XXX need a return here? */
15375 15089                  }
15376 15090          }
15377 15091  
15378 15092          /*
15379 15093           * If this is the failfast bp, clear it from un_failfast_bp. This
15380 15094           * can happen if upon being re-tried the failfast bp either
15381 15095           * succeeded or encountered another error (possibly even a different
15382 15096           * error than the one that precipitated the failfast state, but in
15383 15097           * that case it would have had to exhaust retries as well). Regardless,
15384 15098           * this should not occur whenever the instance is in the active
↓ open down ↓ 195 lines elided ↑ open up ↑
15580 15294   *
15581 15295   *                      SD_RETRIES_NOCHECK
15582 15296   *                      SD_RETRIES_STANDARD
15583 15297   *                      SD_RETRIES_VICTIM
15584 15298   *
15585 15299   *                 Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
15586 15300   *                 if the check should be made to see if FLAG_ISOLATE is set
15587 15301   *                 in the pkt. If FLAG_ISOLATE is set, then the command is
15588 15302   *                 not retried, it is simply failed.
15589 15303   *
      15304 + *                 Optionally may be bitwise-OR'ed with SD_RETRIES_FAILFAST to
      15305 + *                 indicate a retry following a command timeout, and to check
      15306 + *                 whether the target should transition to the failfast pending
      15307 + *                 or failfast active state. If the buf has B_FAILFAST set, the
      15308 + *                 command should be failed when failfast is active.
     15309 + *
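            + *                 As an illustration only (a hypothetical call, mirroring
            + *                 the argument order used elsewhere in this file), a
            + *                 command-timeout handler might request standard retries
            + *                 with failfast accounting:
            + *
            + *                     sd_retry_command(un, bp,
            + *                         (SD_RETRIES_STANDARD | SD_RETRIES_FAILFAST),
            + *                         sd_print_retry_msg, NULL, EIO,
            + *                         un->un_busy_timeout, NULL);
            + *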
15590 15310   *              user_funcp - Ptr to function to call before dispatching the
15591 15311   *                 command. May be NULL if no action needs to be performed.
15592 15312   *                 (Primarily intended for printing messages.)
15593 15313   *
15594 15314   *              user_arg - Optional argument to be passed along to
15595 15315   *                 the user_funcp call.
15596 15316   *
15597 15317   *              failure_code - errno return code to set in the bp if the
15598 15318   *                 command is going to be failed.
15599 15319   *
↓ open down ↓ 84 lines elided ↑ open up ↑
15684 15404           * If the caller wants us to check FLAG_ISOLATE, then see if that
15685 15405           * is set; if it is then we do not want to retry the command.
15686 15406           * Normally, FLAG_ISOLATE is only used with USCSI cmds.
15687 15407           */
15688 15408          if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
15689 15409                  if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
15690 15410                          goto fail_command;
15691 15411                  }
15692 15412          }
15693 15413  
     15414 +        if (sd_failfast_enable & (SD_FAILFAST_ENABLE_FAIL_RETRIES |
     15415 +            SD_FAILFAST_ENABLE_FAIL_ALL_RETRIES)) {
     15416 +                if (sd_failfast_enable & SD_FAILFAST_ENABLE_FAIL_ALL_RETRIES) {
     15417 +                        /*
     15418 +                         * Fail ALL retries when in active failfast state,
     15419 +                         * regardless of reason.
     15420 +                         */
     15421 +                        if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
     15422 +                                goto fail_command;
     15423 +                        }
     15424 +                }
     15425 +                /*
     15426 +                 * Treat bufs being retried as if they have the
     15427 +                 * B_FAILFAST flag set.
     15428 +                 */
     15429 +                bp->b_flags |= B_FAILFAST;
     15430 +        }
15694 15431  
15695 15432          /*
15696 15433           * If SD_RETRIES_FAILFAST is set, it indicates that either a
15697 15434           * command timeout or a selection timeout has occurred. This means
15698 15435   * that we were unable to establish any kind of communication with
15699 15436           * the target, and subsequent retries and/or commands are likely
15700 15437           * to encounter similar results and take a long time to complete.
15701 15438           *
15702 15439           * If this is a failfast error condition, we need to update the
15703 15440           * failfast state, even if this bp does not have B_FAILFAST set.
↓ open down ↓ 31 lines elided ↑ open up ↑
15735 15472  
15736 15473                          } else if (un->un_failfast_bp == bp) {
15737 15474                                  /*
15738 15475                                   * This is the second time *this* bp has
15739 15476                                   * encountered a failfast error condition,
15740 15477                                   * so enter active failfast state & flush
15741 15478                                   * queues as appropriate.
15742 15479                                   */
15743 15480                                  un->un_failfast_state = SD_FAILFAST_ACTIVE;
15744 15481                                  un->un_failfast_bp = NULL;
15745      -                                sd_failfast_flushq(un);
     15482 +                                sd_failfast_flushq(un, B_FALSE);
15746 15483  
15747 15484                                  /*
15748 15485                                   * Fail this bp now if B_FAILFAST set;
15749 15486                                   * otherwise continue with retries. (It would
15750 15487                                   * be pretty ironic if this bp succeeded on a
15751 15488                                   * subsequent retry after we just flushed all
15752 15489                                   * the queues).
15753 15490                                   */
15754 15491                                  if (bp->b_flags & B_FAILFAST) {
15755 15492                                          goto fail_command;
↓ open down ↓ 20 lines elided ↑ open up ↑
15776 15513                  }
15777 15514          } else {
15778 15515                  /*
15779 15516                   * SD_RETRIES_FAILFAST is clear, which indicates that we
15780 15517                   * likely were able to at least establish some level of
15781 15518                   * communication with the target and subsequent commands
15782 15519   * and/or retries are likely to get through to the target.
15783 15520                   * In this case we want to be aggressive about clearing
15784 15521                   * the failfast state. Note that this does not affect
15785 15522                   * the "failfast pending" condition.
     15523 +                 *
     15524 +                 * We limit this to retries that are not a side effect of an
     15525 +                 * unrelated event, as it would be unwise to clear failfast
     15526 +                 * active state when we see retries due to a reset.
15786 15527                   */
15787      -                un->un_failfast_state = SD_FAILFAST_INACTIVE;
     15528 +                if ((sd_failfast_enable & SD_FAILFAST_ENABLE_FORCE_INACTIVE) &&
     15529 +                    (retry_check_flag & SD_RETRIES_MASK) != SD_RETRIES_VICTIM)
     15530 +                        un->un_failfast_state = SD_FAILFAST_INACTIVE;
15788 15531          }
15789 15532  
15790 15533  
15791 15534          /*
15792 15535           * Check the specified retry count to see if we can still do
15793 15536           * any retries with this pkt before we should fail it.
15794 15537           */
15795 15538          switch (retry_check_flag & SD_RETRIES_MASK) {
15796 15539          case SD_RETRIES_VICTIM:
15797 15540                  /*
↓ open down ↓ 420 lines elided ↑ open up ↑
16218 15961  
16219 15962  
16220 15963  /*
16221 15964   *    Function: sd_send_request_sense_command
16222 15965   *
16223 15966   * Description: Sends a REQUEST SENSE command to the target
16224 15967   *
16225 15968   *     Context: May be called from interrupt context.
16226 15969   */
16227 15970  
16228      -static void
16229      -sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
16230      -    struct scsi_pkt *pktp)
      15971 +static void
      15972 +sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
            +    int retry_check_flag, struct scsi_pkt *pktp)
16231 15973  {
16232 15974          ASSERT(bp != NULL);
16233 15975          ASSERT(un != NULL);
16234 15976          ASSERT(mutex_owned(SD_MUTEX(un)));
16235 15977  
16236 15978          SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: "
16237 15979              "entry: buf:0x%p\n", bp);
16238 15980  
16239 15981          /*
16240 15982           * If we are syncing or dumping, then fail the command to avoid a
↓ open down ↓ 6 lines elided ↑ open up ↑
16247 15989                  SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16248 15990                      "sd_send_request_sense_command: syncing/dumping, exit\n");
16249 15991                  return;
16250 15992          }
16251 15993  
16252 15994          /*
16253 15995           * Retry the failed command and don't issue the request sense if:
16254 15996           *    1) the sense buf is busy
16255 15997           *    2) we have 1 or more outstanding commands on the target
16256 15998           *    (the sense data will be cleared or invalidated anyway)
16257      -         *
16258      -         * Note: There could be an issue with not checking a retry limit here,
16259      -         * the problem is determining which retry limit to check.
16260 15999           */
16261 16000          if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) {
16262 16001                  /* Don't retry if the command is flagged as non-retryable */
16263 16002                  if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
16264      -                        sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
     16003 +                        sd_retry_command(un, bp, retry_check_flag,
16265 16004                              NULL, NULL, 0, un->un_busy_timeout,
16266 16005                              kstat_waitq_enter);
16267 16006                          SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16268 16007                              "sd_send_request_sense_command: "
16269 16008                              "at full throttle, retrying exit\n");
16270 16009                  } else {
16271 16010                          sd_return_failed_command(un, bp, EIO);
16272 16011                          SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16273 16012                              "sd_send_request_sense_command: "
16274 16013                              "at full throttle, non-retryable exit\n");
↓ open down ↓ 150 lines elided ↑ open up ↑
16425 16164                  return (DDI_FAILURE);
16426 16165          }
16427 16166  
16428 16167          /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */
16429 16168          (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp,
16430 16169              SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0);
16431 16170  
16432 16171          SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp);
16433 16172  
16434 16173          /* Set up the other needed members in the ARQ scsi_pkt. */
16435      -        un->un_rqs_pktp->pkt_comp   = sdintr;
16436      -        un->un_rqs_pktp->pkt_time   = sd_io_time;
16437      -        un->un_rqs_pktp->pkt_flags |=
16438      -            (FLAG_SENSING | FLAG_HEAD); /* (1222170) */
     16174 +        un->un_rqs_pktp->pkt_comp = sdintr;
     16175 +        un->un_rqs_pktp->pkt_time = ((ISCD(un)) ? 2 : 1) *
     16176 +            (ushort_t)un->un_io_time;
     16177 +        un->un_rqs_pktp->pkt_flags |= (FLAG_SENSING | FLAG_HEAD);
16439 16178  
16440 16179           * Allocate & init the sd_xbuf struct for the RQS command. Do not
16441 16180           * provide any initpkt, destroypkt routines as we take care of
16442 16181           * provide any intpkt, destroypkt routines as we take care of
16443 16182           * scsi_pkt allocation/freeing here and in sd_free_rqs().
16444 16183           */
16445 16184          xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
16446 16185          sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL);
16447 16186          xp->xb_pktp = un->un_rqs_pktp;
16448 16187          SD_INFO(SD_LOG_ATTACH_DETACH, un,
↓ open down ↓ 14 lines elided ↑ open up ↑
16463 16202           *
16464 16203           * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return
16465 16204           * failure, while for other HBAs (pln) scsi_ifsetcap will always
16466 16205           * return success.  However, in both of these cases ARQ is always
16467 16206           * enabled and scsi_ifgetcap will always return true. The best approach
16468 16207           * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap().
16469 16208           *
16470 16209           * The 3rd case is an HBA (adp) that always returns enabled on
16471 16210           * scsi_ifgetcap even when it is not enabled; the best approach
16472 16211           * is to issue a scsi_ifsetcap then a scsi_ifgetcap.
16473      -         * Note: this case is to circumvent the Adaptec bug. (x86 only)
16474 16212           */
16475 16213  
16476 16214          if (un->un_f_is_fibre == TRUE) {
16477 16215                  un->un_f_arq_enabled = TRUE;
16478 16216          } else {
16479      -#if defined(__i386) || defined(__amd64)
16480 16217                  /*
16481      -                 * Circumvent the Adaptec bug, remove this code when
16482      -                 * the bug is fixed
     16218 +                 * XXX Circumvent the Adaptec bug, remove this code when
     16219 +                 * the bug is fixed.
16483 16220                   */
16484 16221                  (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
16485      -#endif
16486 16222                  switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
16487 16223                  case 0:
16488 16224                          SD_INFO(SD_LOG_ATTACH_DETACH, un,
16489 16225                              "sd_alloc_rqs: HBA supports ARQ\n");
16490 16226                          /*
16491 16227                           * ARQ is supported by this HBA but currently is not
16492 16228                           * enabled. Attempt to enable it and if successful then
16493 16229                           * mark this instance as ARQ enabled.
16494 16230                           */
16495 16231                          if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
↓ open down ↓ 274 lines elided ↑ open up ↑
16770 16506          mutex_exit(SD_MUTEX(un));
16771 16507          /*
16772 16508           * This callback routine always returns 1 (i.e. do not reschedule)
16773 16509           * because we always specify sdrunout as the callback handler for
16774 16510           * scsi_init_pkt inside the call to sd_start_cmds.
16775 16511           */
16776 16512          SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n");
16777 16513          return (1);
16778 16514  }
16779 16515  
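            +/*
            + * Post a "cmd.disk.slow-io" FMA ereport for a command whose observed
            + * service time (pkt_stop - pkt_start) exceeded the configured
            + * un_slow_io_threshold for this LUN.
            + */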
     16516 +static void
     16517 +sd_slow_io_ereport(struct scsi_pkt *pktp)
     16518 +{
     16519 +        struct buf *bp;
     16520 +        struct sd_lun *un;
     16521 +        char *devid;
16780 16522  
     16523 +        ASSERT(pktp != NULL);
     16524 +        bp = (struct buf *)pktp->pkt_private;
     16525 +        ASSERT(bp != NULL);
     16526 +        un = SD_GET_UN(bp);
     16527 +        ASSERT(un != NULL);
     16528 +
     16529 +        SD_ERROR(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
     16530 +            "Slow IO detected SD: 0x%p delta in nsec: %llu",
     16531 +            (void *)un, pktp->pkt_stop - pktp->pkt_start);
     16532 +
     16533 +        devid = DEVI(un->un_sd->sd_dev)->devi_devid_str;
     16534 +        scsi_fm_ereport_post(un->un_sd, 0, NULL, "cmd.disk.slow-io",
     16535 +            fm_ena_generate(0, FM_ENA_FMT1), devid, NULL, DDI_NOSLEEP, NULL,
     16536 +            FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
     16537 +            "start", DATA_TYPE_UINT64, pktp->pkt_start,
     16538 +            "stop", DATA_TYPE_UINT64, pktp->pkt_stop,
     16539 +            "delta", DATA_TYPE_UINT64, pktp->pkt_stop - pktp->pkt_start,
     16540 +            "threshold", DATA_TYPE_UINT64, un->un_slow_io_threshold,
     16541 +            "pkt-reason", DATA_TYPE_UINT32, pktp->pkt_reason,
     16542 +            NULL);
     16543 +}
     16544 +
      16545 +/* Clamp bucket to [min, max] and return it as an offset from min. */
     16546 +static int
     16547 +clamp_lat(int bucket, int min, int max)
     16548 +{
     16549 +
     16550 +        if (max < bucket)
     16551 +                bucket = max;
     16552 +        if (min > bucket)
     16553 +                bucket = min;
     16554 +
     16555 +        return (bucket - min);
     16556 +}
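            +
            +/*
            + * A note on the latency histogram below (a sketch of the arithmetic,
            + * assuming the SD_LAT_*_USEC_SHIFT limits defined in the driver headers):
            + * sdintr() applies ddi_fls() to the latency in usec, i.e. the position of
            + * the highest set bit, and clamps it via clamp_lat(). For example, a
            + * 300 usec I/O has ddi_fls(300) == 9, so before clamping it lands in the
            + * bucket covering [256, 512) usec.
            + */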
     16557 +
16781 16558  /*
16782 16559   *    Function: sdintr
16783 16560   *
16784 16561   * Description: Completion callback routine for scsi_pkt(9S) structs
16785 16562   *              sent to the HBA driver via scsi_transport(9F).
16786 16563   *
16787 16564   *     Context: Interrupt context
16788 16565   */
16789 16566  
16790 16567  static void
16791 16568  sdintr(struct scsi_pkt *pktp)
16792 16569  {
16793 16570          struct buf      *bp;
16794 16571          struct sd_xbuf  *xp;
16795 16572          struct sd_lun   *un;
16796 16573          size_t          actual_len;
16797 16574          sd_ssc_t        *sscp;
     16575 +        hrtime_t        io_delta = 0LL;
     16576 +        int             bucket;
16798 16577  
16799 16578          ASSERT(pktp != NULL);
16800 16579          bp = (struct buf *)pktp->pkt_private;
16801 16580          ASSERT(bp != NULL);
16802 16581          xp = SD_GET_XBUF(bp);
16803 16582          ASSERT(xp != NULL);
16804 16583          ASSERT(xp->xb_pktp != NULL);
16805 16584          un = SD_GET_UN(bp);
16806 16585          ASSERT(un != NULL);
16807 16586          ASSERT(!mutex_owned(SD_MUTEX(un)));
↓ open down ↓ 16 lines elided ↑ open up ↑
16824 16603          ASSERT(sscp != NULL);
16825 16604  
16826 16605          /* Reduce the count of the #commands currently in transport */
16827 16606          un->un_ncmds_in_transport--;
16828 16607          ASSERT(un->un_ncmds_in_transport >= 0);
16829 16608  
16830 16609          /* Increment counter to indicate that the callback routine is active */
16831 16610          un->un_in_callback++;
16832 16611  
16833 16612          SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
     16613 +        /* If the HBA driver did not set the stop time, set it now. */
     16614 +        if (pktp->pkt_stop == 0)
     16615 +                pktp->pkt_stop = gethrtime();
     16616 +        /*
      16617 +         * HBA drivers or layered drivers that do not participate in slow-io
      16618 +         * diagnosis may overwrite the start time set above with zero. If
      16619 +         * pkt_start is zero, the delta should also be zero.
     16620 +         */
     16621 +        if (pktp->pkt_start != 0)
     16622 +                io_delta = pktp->pkt_stop - pktp->pkt_start;
     16623 +        if (un->un_slow_io_threshold > 0 && io_delta > un->un_slow_io_threshold)
     16624 +                sd_slow_io_ereport(pktp);
     16625 +        if (un->un_lat_stats) {
     16626 +                un->un_lat_stats->l_nrequest++;
     16627 +                un->un_lat_stats->l_sum += io_delta;
16834 16628  
     16629 +                /* Track the latency in usec and quantize by power of 2 */
     16630 +                bucket = clamp_lat(ddi_fls(io_delta / 1000),
     16631 +                    SD_LAT_MIN_USEC_SHIFT, SD_LAT_MAX_USEC_SHIFT - 1);
     16632 +                ASSERT3S(bucket, >=, 0);
     16633 +                ASSERT3S(bucket, <, ARRAY_SIZE(un->un_lat_stats->l_histogram));
     16634 +                un->un_lat_stats->l_histogram[bucket]++;
     16635 +        }
     16636 +
16835 16637  #ifdef  SDDEBUG
16836 16638          if (bp == un->un_retry_bp) {
16837 16639                  SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: "
16838 16640                      "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n",
16839 16641                      un, un->un_retry_bp, un->un_ncmds_in_transport);
16840 16642          }
16841 16643  #endif
16842 16644  
16843 16645          /*
16844 16646           * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media
↓ open down ↓ 78 lines elided ↑ open up ↑
16923 16725                                      SENSE_LENGTH);
16924 16726                          }
16925 16727  
16926 16728                          /* fail the command */
16927 16729                          SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16928 16730                              "sdintr: arq done and FLAG_DIAGNOSE set\n");
16929 16731                          sd_return_failed_command(un, bp, EIO);
16930 16732                          goto exit;
16931 16733                  }
16932 16734  
16933      -#if (defined(__i386) || defined(__amd64))       /* DMAFREE for x86 only */
16934 16735                  /*
16935 16736                   * We want to either retry or fail this command, so free
16936 16737                   * the DMA resources here.  If we retry the command then
16937 16738                   * the DMA resources will be reallocated in sd_start_cmds().
16938 16739                   * Note that when PKT_DMA_PARTIAL is used, this reallocation
16939 16740                   * causes the *entire* transfer to start over again from the
16940 16741                   * beginning of the request, even for PARTIAL chunks that
16941 16742                   * have already transferred successfully.
16942 16743                   */
16943 16744                  if ((un->un_f_is_fibre == TRUE) &&
16944 16745                      ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
16945 16746                      ((pktp->pkt_flags & FLAG_SENSING) == 0))  {
16946 16747                          scsi_dmafree(pktp);
16947 16748                          xp->xb_pkt_flags |= SD_XB_DMA_FREED;
16948 16749                  }
16949      -#endif
16950 16750  
16951 16751                  SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16952 16752                      "sdintr: arq done, sd_handle_auto_request_sense\n");
16953 16753  
16954 16754                  sd_handle_auto_request_sense(un, bp, xp, pktp);
16955 16755                  goto exit;
16956 16756          }
16957 16757  
16958 16758          /* Next see if this is the REQUEST SENSE pkt for the instance */
16959 16759          if (pktp->pkt_flags & FLAG_SENSING)  {
↓ open down ↓ 55 lines elided ↑ open up ↑
17015 16815                   * is done.
17016 16816                   */
17017 16817                  un->un_in_callback--;
17018 16818                  ASSERT(un->un_in_callback >= 0);
17019 16819                  mutex_exit(SD_MUTEX(un));
17020 16820  
17021 16821                  return;
17022 16822          }
17023 16823  
17024 16824  not_successful:
17025      -
17026      -#if (defined(__i386) || defined(__amd64))       /* DMAFREE for x86 only */
17027 16825          /*
17028 16826           * The following is based upon knowledge of the underlying transport
17029 16827           * and its use of DMA resources.  This code should be removed when
17030 16828           * PKT_DMA_PARTIAL support is taken out of the disk driver in favor
17031 16829           * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf()
17032 16830           * and sd_start_cmds().
17033 16831           *
17034 16832           * Free any DMA resources associated with this command if there
17035 16833           * is a chance it could be retried or enqueued for later retry.
17036 16834           * If we keep the DMA binding then mpxio cannot reissue the
↓ open down ↓ 7 lines elided ↑ open up ↑
17044 16842           * This is only done for non-uscsi commands (and also skipped for the
17045 16843           * driver's internal RQS command). Also just do this for Fibre Channel
17046 16844           * devices as these are the only ones that support mpxio.
17047 16845           */
17048 16846          if ((un->un_f_is_fibre == TRUE) &&
17049 16847              ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
17050 16848              ((pktp->pkt_flags & FLAG_SENSING) == 0))  {
17051 16849                  scsi_dmafree(pktp);
17052 16850                  xp->xb_pkt_flags |= SD_XB_DMA_FREED;
17053 16851          }
17054      -#endif
17055 16852  
17056 16853          /*
17057 16854           * The command did not successfully complete as requested so check
17058 16855           * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal
17059 16856           * driver command that should not be retried so just return. If
17060 16857           * FLAG_DIAGNOSE is not set the error will be processed below.
17061 16858           */
17062 16859          if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
17063 16860                  SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17064 16861                      "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n");
17065 16862                  /*
17066 16863                   * Issue a request sense if a check condition caused the error
17067 16864                   * (we handle the auto request sense case above), otherwise
17068 16865                   * just fail the command.
17069 16866                   */
17070 16867                  if ((pktp->pkt_reason == CMD_CMPLT) &&
17071 16868                      (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) {
17072      -                        sd_send_request_sense_command(un, bp, pktp);
     16869 +                        sd_send_request_sense_command(un, bp,
     16870 +                            SD_RETRIES_STANDARD, pktp);
17073 16871                  } else {
17074 16872                          sd_return_failed_command(un, bp, EIO);
17075 16873                  }
17076 16874                  goto exit;
17077 16875          }
17078 16876  
17079 16877          /*
17080 16878           * The command did not successfully complete as requested so process
17081 16879           * the error, retry, and/or attempt recovery.
17082 16880           */
↓ open down ↓ 623 lines elided ↑ open up ↑
17706 17504                  goto sense_failed;
17707 17505          }
17708 17506  
17709 17507          return (SD_SENSE_DATA_IS_VALID);
17710 17508  
17711 17509  sense_failed:
17712 17510          /*
17713 17511           * If the request sense failed (for whatever reason), attempt
17714 17512           * to retry the original command.
17715 17513           */
17716      -#if defined(__i386) || defined(__amd64)
17717      -        /*
17718      -         * SD_RETRY_DELAY is conditionally compile (#if fibre) in
17719      -         * sddef.h for Sparc platform, and x86 uses 1 binary
17720      -         * for both SCSI/FC.
17721      -         * The SD_RETRY_DELAY value need to be adjusted here
17722      -         * when SD_RETRY_DELAY change in sddef.h
17723      -         */
17724 17514          sd_retry_command(un, bp, SD_RETRIES_STANDARD,
17725 17515              sd_print_sense_failed_msg, msgp, EIO,
17726 17516              un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL);
17727      -#else
17728      -        sd_retry_command(un, bp, SD_RETRIES_STANDARD,
17729      -            sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL);
17730      -#endif
17731 17517  
17732 17518          return (SD_SENSE_DATA_IS_INVALID);
17733 17519  }
17734 17520  
17735 17521  /*
17736 17522   *    Function: sd_decode_sense
17737 17523   *
17738 17524   * Description: Take recovery action(s) when SCSI Sense Data is received.
17739 17525   *
17740 17526   *     Context: Interrupt context.
↓ open down ↓ 1425 lines elided ↑ open up ↑
19166 18952          ASSERT(bp != NULL);
19167 18953          ASSERT(xp != NULL);
19168 18954          ASSERT(pktp != NULL);
19169 18955  
19170 18956          SD_UPDATE_ERRSTATS(un, sd_harderrs);
19171 18957          SD_UPDATE_RESERVATION_STATUS(un, pktp);
19172 18958  
19173 18959          funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ?
19174 18960              sd_print_retry_msg : NULL;
19175 18961  
19176      -        sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
     18962 +        sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
19177 18963              funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19178 18964  }
19179 18965  
19180 18966  
19181 18967  /*
19182 18968   *    Function: sd_pkt_reason_cmd_tag_reject
19183 18969   *
19184 18970   * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason.
19185 18971   *
19186 18972   *     Context: May be called from interrupt context
↓ open down ↓ 82 lines elided ↑ open up ↑
19269 19055  
19270 19056          /*
19271 19057           * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the
19272 19058           * command will be retried after the request sense). Otherwise, retry
19273 19059           * the command. Note: we are issuing the request sense even though the
19274 19060           * retry limit may have been reached for the failed command.
19275 19061           */
19276 19062          if (un->un_f_arq_enabled == FALSE) {
19277 19063                  SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
19278 19064                      "no ARQ, sending request sense command\n");
19279      -                sd_send_request_sense_command(un, bp, pktp);
     19065 +                sd_send_request_sense_command(un, bp, SD_RETRIES_STANDARD,
     19066 +                    pktp);
19280 19067          } else {
19281 19068                  SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
19282 19069                      "ARQ,retrying request sense command\n");
19283      -#if defined(__i386) || defined(__amd64)
19284      -                /*
19285      -                 * The SD_RETRY_DELAY value need to be adjusted here
19286      -                 * when SD_RETRY_DELAY change in sddef.h
19287      -                 */
19288 19070                  sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
19289      -                    un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0,
     19071 +                    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0,
19290 19072                      NULL);
19291      -#else
19292      -                sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
19293      -                    EIO, SD_RETRY_DELAY, NULL);
19294      -#endif
19295 19073          }
19296 19074  
19297 19075          SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
19298 19076  }
19299 19077  
19300 19078  
19301 19079  /*
19302 19080   *    Function: sd_pkt_status_busy
19303 19081   *
19304 19082   * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
↓ open down ↓ 735 lines elided ↑ open up ↑
20040 19818  
20041 19819          cdb.scc_cmd = SCMD_READ_CAPACITY;
20042 19820  
20043 19821          ucmd_buf.uscsi_cdb      = (char *)&cdb;
20044 19822          ucmd_buf.uscsi_cdblen   = CDB_GROUP1;
20045 19823          ucmd_buf.uscsi_bufaddr  = (caddr_t)capacity_buf;
20046 19824          ucmd_buf.uscsi_buflen   = SD_CAPACITY_SIZE;
20047 19825          ucmd_buf.uscsi_rqbuf    = (caddr_t)&sense_buf;
20048 19826          ucmd_buf.uscsi_rqlen    = sizeof (sense_buf);
20049 19827          ucmd_buf.uscsi_flags    = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
20050      -        ucmd_buf.uscsi_timeout  = 60;
     19828 +        ucmd_buf.uscsi_timeout  = un->un_uscsi_timeout;
20051 19829  
20052 19830          status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20053 19831              UIO_SYSSPACE, path_flag);
20054 19832  
20055 19833          switch (status) {
20056 19834          case 0:
20057 19835                  /* Return failure if we did not get valid capacity data. */
20058 19836                  if (ucmd_buf.uscsi_resid != 0) {
20059 19837                          sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20060 19838                              "sd_send_scsi_READ_CAPACITY received invalid "
↓ open down ↓ 195 lines elided ↑ open up ↑
20256 20034  
20257 20035          capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
20258 20036  
20259 20037          ucmd_buf.uscsi_cdb      = (char *)&cdb;
20260 20038          ucmd_buf.uscsi_cdblen   = CDB_GROUP4;
20261 20039          ucmd_buf.uscsi_bufaddr  = (caddr_t)capacity16_buf;
20262 20040          ucmd_buf.uscsi_buflen   = SD_CAPACITY_16_SIZE;
20263 20041          ucmd_buf.uscsi_rqbuf    = (caddr_t)&sense_buf;
20264 20042          ucmd_buf.uscsi_rqlen    = sizeof (sense_buf);
20265 20043          ucmd_buf.uscsi_flags    = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
20266      -        ucmd_buf.uscsi_timeout  = 60;
     20044 +        ucmd_buf.uscsi_timeout  = un->un_uscsi_timeout;
20267 20045  
20268 20046          /*
20269 20047           * Read Capacity (16) is a Service Action In command.  One
20270 20048           * command byte (0x9E) is overloaded for multiple operations,
20271 20049           * with the second CDB byte specifying the desired operation
20272 20050           */
20273 20051          cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
20274 20052          cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
20275 20053  
20276 20054          /*
↓ open down ↓ 23 lines elided ↑ open up ↑
20300 20078                   * According to the SCSI spec, the READ CAPACITY 16
20301 20079                   * command returns the following:
20302 20080                   *
20303 20081                   *  bytes 0-7: Maximum logical block address available.
20304 20082                   *              (MSB in byte:0 & LSB in byte:7)
20305 20083                   *
20306 20084                   *  bytes 8-11: Block length in bytes
20307 20085                   *              (MSB in byte:8 & LSB in byte:11)
20308 20086                   *
20309 20087                   *  byte 13: LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT
     20088 +                 *
     20089 +                 *  byte 14:
     20090 +                 *      bit 7: Thin-Provisioning Enabled
     20091 +                 *      bit 6: Thin-Provisioning Read Zeros
20310 20092                   */
20311 20093                  capacity = BE_64(capacity16_buf[0]);
20312 20094                  lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);
20313 20095                  lbpb_exp = (BE_64(capacity16_buf[1]) >> 16) & 0x0f;
20314 20096  
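            +                /*
            +                 * Byte 14 bits 7 and 6 correspond to the LBPME and LBPRZ
            +                 * bits in SBC terms; record them as this LUN's
            +                 * thin-provisioning flags.
            +                 */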
     20097 +                un->un_thin_flags = 0;
     20098 +                if (((uint8_t *)capacity16_buf)[14] & (1 << 7))
     20099 +                        un->un_thin_flags |= SD_THIN_PROV_ENABLED;
     20100 +                if (((uint8_t *)capacity16_buf)[14] & (1 << 6))
     20101 +                        un->un_thin_flags |= SD_THIN_PROV_READ_ZEROS;
     20102 +
20315 20103                  pbsize = lbasize << lbpb_exp;
20316 20104  
20317 20105                  /*
20318 20106                   * Done with capacity16_buf
20319 20107                   */
20320 20108                  kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
20321 20109  
20322 20110                  /*
20323 20111                   * if the reported capacity is set to all 0xf's, then
20324 20112                   * this disk is too large.  This could only happen with
↓ open down ↓ 153 lines elided ↑ open up ↑
20478 20266          cdb.cdb_opaque[4] = (pc_flag == SD_POWER_CONDITION) ?
20479 20267              (uchar_t)(flag << 4) : (uchar_t)flag;
20480 20268  
20481 20269          ucmd_buf.uscsi_cdb      = (char *)&cdb;
20482 20270          ucmd_buf.uscsi_cdblen   = CDB_GROUP0;
20483 20271          ucmd_buf.uscsi_bufaddr  = NULL;
20484 20272          ucmd_buf.uscsi_buflen   = 0;
20485 20273          ucmd_buf.uscsi_rqbuf    = (caddr_t)&sense_buf;
20486 20274          ucmd_buf.uscsi_rqlen    = sizeof (struct scsi_extended_sense);
20487 20275          ucmd_buf.uscsi_flags    = USCSI_RQENABLE | USCSI_SILENT;
20488      -        ucmd_buf.uscsi_timeout  = 200;
     20276 +        ucmd_buf.uscsi_timeout  = 3 * un->un_uscsi_timeout;
20489 20277  
20490 20278          status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20491 20279              UIO_SYSSPACE, path_flag);
20492 20280  
20493 20281          switch (status) {
20494 20282          case 0:
20495 20283                  sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20496 20284                  break;  /* Success! */
20497 20285          case EIO:
20498 20286                  switch (ucmd_buf.uscsi_status) {
↓ open down ↓ 198 lines elided ↑ open up ↑
20697 20485          cdb.cdb_opaque[2] = page_code;
20698 20486          FORMG0COUNT(&cdb, buflen);
20699 20487  
20700 20488          ucmd_buf.uscsi_cdb      = (char *)&cdb;
20701 20489          ucmd_buf.uscsi_cdblen   = CDB_GROUP0;
20702 20490          ucmd_buf.uscsi_bufaddr  = (caddr_t)bufaddr;
20703 20491          ucmd_buf.uscsi_buflen   = buflen;
20704 20492          ucmd_buf.uscsi_rqbuf    = NULL;
20705 20493          ucmd_buf.uscsi_rqlen    = 0;
20706 20494          ucmd_buf.uscsi_flags    = USCSI_READ | USCSI_SILENT;
20707      -        ucmd_buf.uscsi_timeout  = 200;  /* Excessive legacy value */
     20495 +        ucmd_buf.uscsi_timeout  = 2 * un->un_uscsi_timeout;
20708 20496  
20709 20497          status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20710 20498              UIO_SYSSPACE, SD_PATH_DIRECT);
20711 20499  
20712 20500          /*
20713 20501           * Only handle status == 0, the upper-level caller
20714 20502           * will put different assessment based on the context.
20715 20503           */
20716 20504          if (status == 0)
20717 20505                  sd_ssc_assessment(ssc, SD_FMT_STANDARD);
↓ open down ↓ 82 lines elided ↑ open up ↑
20800 20588          ucmd_buf.uscsi_bufaddr  = NULL;
20801 20589          ucmd_buf.uscsi_buflen   = 0;
20802 20590          ucmd_buf.uscsi_rqbuf    = (caddr_t)&sense_buf;
20803 20591          ucmd_buf.uscsi_rqlen    = sizeof (struct scsi_extended_sense);
20804 20592          ucmd_buf.uscsi_flags    = USCSI_RQENABLE | USCSI_SILENT;
20805 20593  
20806 20594          /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */
20807 20595          if ((flag & SD_DONT_RETRY_TUR) != 0) {
20808 20596                  ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE;
20809 20597          }
20810      -        ucmd_buf.uscsi_timeout  = 60;
     20598 +        ucmd_buf.uscsi_timeout  = un->un_uscsi_timeout;
20811 20599  
20812 20600          status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20813 20601              UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT :
20814 20602              SD_PATH_STANDARD));
20815 20603  
20816 20604          switch (status) {
20817 20605          case 0:
20818 20606                  sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20819 20607                  break;  /* Success! */
20820 20608          case EIO:
↓ open down ↓ 76 lines elided ↑ open up ↑
20897 20685          cdb.cdb_opaque[1] = usr_cmd;
20898 20686          FORMG1COUNT(&cdb, data_len);
20899 20687  
20900 20688          ucmd_buf.uscsi_cdb      = (char *)&cdb;
20901 20689          ucmd_buf.uscsi_cdblen   = CDB_GROUP1;
20902 20690          ucmd_buf.uscsi_bufaddr  = (caddr_t)data_bufp;
20903 20691          ucmd_buf.uscsi_buflen   = data_len;
20904 20692          ucmd_buf.uscsi_rqbuf    = (caddr_t)&sense_buf;
20905 20693          ucmd_buf.uscsi_rqlen    = sizeof (struct scsi_extended_sense);
20906 20694          ucmd_buf.uscsi_flags    = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
20907      -        ucmd_buf.uscsi_timeout  = 60;
     20695 +        ucmd_buf.uscsi_timeout  = un->un_uscsi_timeout;
20908 20696  
20909 20697          status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20910 20698              UIO_SYSSPACE, SD_PATH_STANDARD);
20911 20699  
20912 20700          switch (status) {
20913 20701          case 0:
20914 20702                  sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20915 20703  
20916 20704                  break;  /* Success! */
20917 20705          case EIO:
↓ open down ↓ 85 lines elided ↑ open up ↑
21003 20791          cdb.cdb_opaque[1] = usr_cmd;
21004 20792          FORMG1COUNT(&cdb, data_len);
21005 20793  
21006 20794          ucmd_buf.uscsi_cdb      = (char *)&cdb;
21007 20795          ucmd_buf.uscsi_cdblen   = CDB_GROUP1;
21008 20796          ucmd_buf.uscsi_bufaddr  = (caddr_t)prp;
21009 20797          ucmd_buf.uscsi_buflen   = data_len;
21010 20798          ucmd_buf.uscsi_rqbuf    = (caddr_t)&sense_buf;
21011 20799          ucmd_buf.uscsi_rqlen    = sizeof (struct scsi_extended_sense);
21012 20800          ucmd_buf.uscsi_flags    = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
21013      -        ucmd_buf.uscsi_timeout  = 60;
     20801 +        ucmd_buf.uscsi_timeout  = un->un_uscsi_timeout;
21014 20802  
21015 20803          switch (usr_cmd) {
21016 20804          case SD_SCSI3_REGISTER: {
21017 20805                  mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp;
21018 20806  
21019 20807                  bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
21020 20808                  bcopy(ptr->newkey.key, prp->service_key,
21021 20809                      MHIOC_RESV_KEY_SIZE);
21022 20810                  prp->aptpl = ptr->aptpl;
21023 20811                  break;
↓ open down ↓ 174 lines elided ↑ open up ↑
21198 20986           */
21199 20987          uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP);
21200 20988          uscmd->uscsi_cdblen = CDB_GROUP1;
21201 20989          uscmd->uscsi_cdb = (caddr_t)cdb;
21202 20990          uscmd->uscsi_bufaddr = NULL;
21203 20991          uscmd->uscsi_buflen = 0;
21204 20992          uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
21205 20993          uscmd->uscsi_rqlen = SENSE_LENGTH;
21206 20994          uscmd->uscsi_rqresid = SENSE_LENGTH;
21207 20995          uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
21208      -        uscmd->uscsi_timeout = sd_io_time;
     20996 +        uscmd->uscsi_timeout = un->un_cmd_timeout;
21209 20997  
21210 20998          /*
21211 20999           * Allocate an sd_uscsi_info struct and fill it with the info
21212 21000           * needed by sd_initpkt_for_uscsi().  Then put the pointer into
21213 21001           * b_private in the buf for sd_initpkt_for_uscsi().  Note that
21214 21002           * since we allocate the buf here in this function, we do not
21215 21003           * need to preserve the prior contents of b_private.
21216 21004           * The sd_uscsi_info struct is also used by sd_uscsi_strategy()
21217 21005           */
21218 21006          uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
↓ open down ↓ 126 lines elided ↑ open up ↑
21345 21133                   * has removable media.
21346 21134                   */
21347 21135                  if (!un->un_f_has_removable_media) {
21348 21136                          scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
21349 21137                              "SYNCHRONIZE CACHE command failed (%d)\n", status);
21350 21138                  }
21351 21139                  break;
21352 21140          }
21353 21141  
21354 21142  done:
21355      -        if (uip->ui_dkc.dkc_callback != NULL) {
     21143 +        if (uip->ui_dkc.dkc_callback != NULL)
21356 21144                  (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status);
21357      -        }
21358 21145  
21359 21146          ASSERT((bp->b_flags & B_REMAPPED) == 0);
21360 21147          freerbuf(bp);
21361 21148          kmem_free(uip, sizeof (struct sd_uscsi_info));
21362 21149          kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH);
21363 21150          kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen);
21364 21151          kmem_free(uscmd, sizeof (struct uscsi_cmd));
21365 21152  
21366 21153          return (status);
21367 21154  }
21368 21155  
     21156 +/*
     21157 + * Issues a single SCSI UNMAP command with a prepared UNMAP parameter list.
     21158 + * Returns zero on success, or the non-zero command error code on failure.
     21159 + */
     21160 +static int
     21161 +sd_send_scsi_UNMAP_issue_one(sd_ssc_t *ssc, unmap_param_hdr_t *uph,
     21162 +    uint64_t num_descr, uint64_t bytes)
     21163 +{
     21164 +        struct sd_lun           *un = ssc->ssc_un;
     21165 +        struct scsi_extended_sense      sense_buf;
     21166 +        union scsi_cdb          cdb;
     21167 +        struct uscsi_cmd        ucmd_buf;
     21168 +        int                     status;
     21169 +        const uint64_t          param_size = sizeof (unmap_param_hdr_t) +
     21170 +            num_descr * sizeof (unmap_blk_descr_t);
21369 21171  
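            +        /*
            +         * Per the SCSI UNMAP parameter list layout, the DATA LENGTH field
            +         * counts the bytes that follow it (total size minus 2), while the
            +         * BLOCK DESCRIPTOR DATA LENGTH counts only the block descriptors
            +         * that follow the 8-byte header (total size minus 8).
            +         */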
     21172 +        uph->uph_data_len = BE_16(param_size - 2);
     21173 +        uph->uph_descr_data_len = BE_16(param_size - 8);
     21174 +
     21175 +        bzero(&cdb, sizeof (cdb));
     21176 +        bzero(&ucmd_buf, sizeof (ucmd_buf));
     21177 +        bzero(&sense_buf, sizeof (struct scsi_extended_sense));
     21178 +
     21179 +        cdb.scc_cmd = SCMD_UNMAP;
     21180 +        FORMG1COUNT(&cdb, param_size);
     21181 +
     21182 +        ucmd_buf.uscsi_cdb      = (char *)&cdb;
     21183 +        ucmd_buf.uscsi_cdblen   = (uchar_t)CDB_GROUP1;
     21184 +        ucmd_buf.uscsi_bufaddr  = (caddr_t)uph;
     21185 +        ucmd_buf.uscsi_buflen   = param_size;
     21186 +        ucmd_buf.uscsi_rqbuf    = (caddr_t)&sense_buf;
     21187 +        ucmd_buf.uscsi_rqlen    = sizeof (struct scsi_extended_sense);
     21188 +        ucmd_buf.uscsi_flags    = USCSI_WRITE | USCSI_RQENABLE | USCSI_SILENT;
     21189 +        ucmd_buf.uscsi_timeout  = un->un_cmd_timeout;
     21190 +
     21191 +        status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE,
     21192 +            SD_PATH_STANDARD);
     21193 +
     21194 +        switch (status) {
     21195 +        case 0:
     21196 +                sd_ssc_assessment(ssc, SD_FMT_STANDARD);
     21197 +
     21198 +                if (un->un_unmapstats) {
     21199 +                        atomic_inc_64(&un->un_unmapstats->us_cmds.value.ui64);
     21200 +                        atomic_add_64(&un->un_unmapstats->us_extents.value.ui64,
     21201 +                            num_descr);
     21202 +                        atomic_add_64(&un->un_unmapstats->us_bytes.value.ui64,
     21203 +                            bytes);
     21204 +                }
     21205 +                break;  /* Success! */
     21206 +        case EIO:
     21207 +                if (un->un_unmapstats)
     21208 +                        atomic_inc_64(&un->un_unmapstats->us_errs.value.ui64);
     21209 +                switch (ucmd_buf.uscsi_status) {
     21210 +                case STATUS_RESERVATION_CONFLICT:
     21211 +                        status = EACCES;
     21212 +                        break;
     21213 +                default:
     21214 +                        break;
     21215 +                }
     21216 +                break;
     21217 +        default:
     21218 +                if (un->un_unmapstats)
     21219 +                        atomic_inc_64(&un->un_unmapstats->us_errs.value.ui64);
     21220 +                break;
     21221 +        }
     21222 +
     21223 +        return (status);
     21224 +}
     21225 +
21370 21226  /*
     21227 + * Returns a pointer to the i'th block descriptor inside an UNMAP param list.
     21228 + */
     21229 +static inline unmap_blk_descr_t *
     21230 +UNMAP_blk_descr_i(void *buf, uint64_t i)
     21231 +{
     21232 +        return ((unmap_blk_descr_t *)((uint8_t *)buf +
     21233 +            sizeof (unmap_param_hdr_t) + (i * sizeof (unmap_blk_descr_t))));
     21234 +}
     21235 +
     21236 +/*
     21237 + * Takes the list of extents from sd_send_scsi_UNMAP, chops it up, prepares
     21238 + * UNMAP block descriptors and issues individual SCSI UNMAP commands. While
     21239 + * doing so we consult the block limits to determine at most how many
     21240 + * extents and LBAs we can UNMAP in one command.
      21241 + * If a command fails for whatever reason, extent list processing is aborted
     21242 + * and the failed command's status is returned. Otherwise returns 0 on
     21243 + * success.
     21244 + */
     21245 +static int
     21246 +sd_send_scsi_UNMAP_issue(dev_t dev, sd_ssc_t *ssc, const dkioc_free_list_t *dfl)
     21247 +{
     21248 +        struct sd_lun           *un = ssc->ssc_un;
     21249 +        unmap_param_hdr_t       *uph;
     21250 +        sd_blk_limits_t         *lim = &un->un_blk_lim;
     21251 +        int                     rval = 0;
     21252 +        int                     partition;
     21253 +        /* partition offset & length in system blocks */
     21254 +        diskaddr_t              part_off_sysblks = 0, part_len_sysblks = 0;
     21255 +        uint64_t                part_off, part_len;
     21256 +        uint64_t                descr_cnt_lim, byte_cnt_lim;
     21257 +        uint64_t                descr_issued = 0, bytes_issued = 0;
     21258 +
     21259 +        uph = kmem_zalloc(SD_UNMAP_PARAM_LIST_MAXSZ, KM_SLEEP);
     21260 +
     21261 +        partition = SDPART(dev);
     21262 +        (void) cmlb_partinfo(un->un_cmlbhandle, partition, &part_len_sysblks,
     21263 +            &part_off_sysblks, NULL, NULL, (void *)SD_PATH_DIRECT);
     21264 +        part_off = SD_SYSBLOCKS2BYTES(part_off_sysblks);
     21265 +        part_len = SD_SYSBLOCKS2BYTES(part_len_sysblks);
     21266 +
     21267 +        ASSERT(un->un_blk_lim.lim_max_unmap_lba_cnt != 0);
     21268 +        ASSERT(un->un_blk_lim.lim_max_unmap_descr_cnt != 0);
     21269 +        /* Spec says 0xffffffff are special values, so compute maximums. */
     21270 +        byte_cnt_lim = lim->lim_max_unmap_lba_cnt < UINT32_MAX ?
     21271 +            (uint64_t)lim->lim_max_unmap_lba_cnt * un->un_tgt_blocksize :
     21272 +            UINT64_MAX;
     21273 +        descr_cnt_lim = MIN(lim->lim_max_unmap_descr_cnt, SD_UNMAP_MAX_DESCR);
     21274 +
     21275 +        for (size_t i = 0; i < dfl->dfl_num_exts; i++) {
     21276 +                const dkioc_free_list_ext_t *ext = &dfl->dfl_exts[i];
     21277 +                uint64_t ext_start = ext->dfle_start;
     21278 +                uint64_t ext_length = ext->dfle_length;
     21279 +
     21280 +                while (ext_length > 0) {
     21281 +                        unmap_blk_descr_t *ubd;
     21282 +                        /* Respect device limit on LBA count per command */
     21283 +                        uint64_t len = MIN(MIN(ext_length, byte_cnt_lim -
     21284 +                            bytes_issued), SD_TGTBLOCKS2BYTES(un, UINT32_MAX));
     21285 +
     21286 +                        /* check partition limits */
     21287 +                        if (ext_start + len > part_len) {
     21288 +                                rval = SET_ERROR(EINVAL);
     21289 +                                goto out;
     21290 +                        }
     21291 +#ifdef  DEBUG
     21292 +                        if (dfl->dfl_ck_func)
     21293 +                                dfl->dfl_ck_func(dfl->dfl_offset + ext_start,
     21294 +                                    len, dfl->dfl_ck_arg);
     21295 +#endif
     21296 +                        ASSERT3U(descr_issued, <, descr_cnt_lim);
     21297 +                        ASSERT3U(bytes_issued, <, byte_cnt_lim);
     21298 +                        ubd = UNMAP_blk_descr_i(uph, descr_issued);
     21299 +
     21300 +                        /* adjust in-partition addresses to be device-global */
     21301 +                        ubd->ubd_lba = BE_64(SD_BYTES2TGTBLOCKS(un,
     21302 +                            dfl->dfl_offset + ext_start + part_off));
     21303 +                        ubd->ubd_lba_cnt = BE_32(SD_BYTES2TGTBLOCKS(un, len));
     21304 +
     21305 +                        descr_issued++;
     21306 +                        bytes_issued += len;
     21307 +
     21308 +                        /* Issue command when device limits reached */
     21309 +                        if (descr_issued == descr_cnt_lim ||
     21310 +                            bytes_issued == byte_cnt_lim) {
     21311 +                                rval = sd_send_scsi_UNMAP_issue_one(ssc, uph,
     21312 +                                    descr_issued, bytes_issued);
     21313 +                                if (rval != 0)
     21314 +                                        goto out;
     21315 +                                descr_issued = 0;
     21316 +                                bytes_issued = 0;
     21317 +                        }
     21318 +
     21319 +                        ext_start += len;
     21320 +                        ext_length -= len;
     21321 +                }
     21322 +        }
     21323 +
     21324 +        if (descr_issued > 0) {
     21325 +                /* issue last command */
     21326 +                rval = sd_send_scsi_UNMAP_issue_one(ssc, uph, descr_issued,
     21327 +                    bytes_issued);
     21328 +        }
     21329 +
     21330 +out:
     21331 +        kmem_free(uph, SD_UNMAP_PARAM_LIST_MAXSZ);
     21332 +        return (rval);
     21333 +}
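
The batching rule inside the loop above is easier to see in isolation: build descriptors until either the per-command descriptor limit or the per-command byte limit is reached, issue, and start a fresh command. A hedged userspace sketch of just that arithmetic follows; the fixed EX_* limits are stand-ins for the device's Block Limits VPD values, and printf() stands in for issuing a SCSI command.

/*
 * Illustrative only: the extent-chopping/batching arithmetic used by
 * sd_send_scsi_UNMAP_issue(), with made-up limits.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_DESCR_LIMIT  2               /* stand-in for lim_max_unmap_descr_cnt */
#define EX_BYTE_LIMIT   (1024 * 1024)   /* stand-in for the LBA limit, in bytes */

typedef struct {
        uint64_t start;                 /* byte offset */
        uint64_t length;                /* byte length */
} ex_extent_t;

int
main(void)
{
        ex_extent_t exts[] = {
                { 0,           512 * 1024 },
                { 2048 * 1024, 1536 * 1024 },
        };
        uint64_t descr = 0, bytes = 0;

        for (size_t i = 0; i < sizeof (exts) / sizeof (exts[0]); i++) {
                uint64_t off = exts[i].start, left = exts[i].length;

                while (left > 0) {
                        /* Never let one descriptor overflow the byte limit. */
                        uint64_t len = left;
                        if (len > EX_BYTE_LIMIT - bytes)
                                len = EX_BYTE_LIMIT - bytes;

                        (void) printf("descr: off %llu len %llu\n",
                            (unsigned long long)off, (unsigned long long)len);
                        descr++;
                        bytes += len;

                        /* Flush the command when either limit is reached. */
                        if (descr == EX_DESCR_LIMIT || bytes == EX_BYTE_LIMIT) {
                                (void) printf("-- issue UNMAP: %llu descrs, "
                                    "%llu bytes\n", (unsigned long long)descr,
                                    (unsigned long long)bytes);
                                descr = bytes = 0;
                        }
                        off += len;
                        left -= len;
                }
        }
        if (descr > 0)
                (void) printf("-- issue final UNMAP: %llu descrs\n",
                    (unsigned long long)descr);
        return (0);
}
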
     21334 +
     21335 +/*
     21336 + * Issues one or several UNMAP commands based on a list of extents to be
     21337 + * unmapped. The internal multi-command processing is hidden, as the exact
     21338 + * number of commands and extents per command is limited by both SCSI
     21339 + * command syntax and device limits (as expressed in the SCSI Block Limits
     21340 + * VPD page and un_blk_lim in struct sd_lun).
     21341 + * Returns zero on success, or the error code of the first failed SCSI UNMAP
     21342 + * command.
     21343 + */
     21344 +static int
     21345 +sd_send_scsi_UNMAP(dev_t dev, sd_ssc_t *ssc, dkioc_free_list_t *dfl, int flag)
     21346 +{
     21347 +        struct sd_lun           *un = ssc->ssc_un;
     21348 +        int                     rval = 0;
     21349 +
     21350 +        ASSERT(!mutex_owned(SD_MUTEX(un)));
     21351 +        ASSERT(dfl != NULL);
     21352 +
     21353 +        /* Per spec, any of these conditions signals lack of UNMAP support. */
     21354 +        if (!(un->un_thin_flags & SD_THIN_PROV_ENABLED) ||
     21355 +            un->un_blk_lim.lim_max_unmap_descr_cnt == 0 ||
     21356 +            un->un_blk_lim.lim_max_unmap_lba_cnt == 0) {
     21357 +                return (SET_ERROR(ENOTSUP));
     21358 +        }
     21359 +
     21360 +        /* For userspace calls we must copy in. */
     21361 +        if (!(flag & FKIOCTL) && (dfl = dfl_copyin(dfl, flag, KM_SLEEP)) ==
     21362 +            NULL)
     21363 +                return (SET_ERROR(EFAULT));
     21364 +
     21365 +        rval = sd_send_scsi_UNMAP_issue(dev, ssc, dfl);
     21366 +
     21367 +        if (!(flag & FKIOCTL)) {
     21368 +                dfl_free(dfl);
     21369 +                dfl = NULL;
     21370 +        }
     21371 +
     21372 +        return (rval);
     21373 +}
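
For context, the caller-visible side of this path is the DKIOCFREE ioctl carrying the same dkioc_free_list_t processed above. The hedged userspace sketch below frees one extent on a raw device; the header locations and open mode are assumptions to verify against your dkio headers, and running it discards the data in that extent.

/*
 * Hedged sketch: issue one DKIOCFREE (UNMAP) request from userspace.
 * Field names follow the dkioc_free_list_t usage in the driver above;
 * header paths are assumed. WARNING: this discards data.
 */
#include <sys/types.h>
#include <sys/dkio.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stropts.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
        if (argc != 2) {
                (void) fprintf(stderr, "usage: %s /dev/rdsk/cXtYdZsN\n",
                    argv[0]);
                return (1);
        }

        int fd = open(argv[1], O_RDWR);
        if (fd < 0) {
                perror("open");
                return (1);
        }

        /* One extent: 1 MiB starting at byte offset 0 of the partition. */
        size_t sz = sizeof (dkioc_free_list_t) +
            sizeof (dkioc_free_list_ext_t);
        dkioc_free_list_t *dfl = calloc(1, sz);
        if (dfl == NULL) {
                perror("calloc");
                (void) close(fd);
                return (1);
        }
        dfl->dfl_num_exts = 1;
        dfl->dfl_offset = 0;
        dfl->dfl_exts[0].dfle_start = 0;
        dfl->dfl_exts[0].dfle_length = 1024 * 1024;

        if (ioctl(fd, DKIOCFREE, dfl) != 0)
                perror("DKIOCFREE");

        free(dfl);
        (void) close(fd);
        return (0);
}
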
     21374 +
     21375 +/*
21371 21376   *    Function: sd_send_scsi_GET_CONFIGURATION
21372 21377   *
21373 21378   * Description: Issues the get configuration command to the device.
 21374 21379   *              Called from sd_check_for_writable_cd & sd_get_media_info;
21375 21380   *              caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN
21376 21381   *   Arguments: ssc
21377 21382   *              ucmdbuf
21378 21383   *              rqbuf
21379 21384   *              rqbuflen
21380 21385   *              bufaddr
↓ open down ↓ 35 lines elided ↑ open up ↑
21416 21421          /*
21417 21422           * Set up cdb field for the get configuration command.
21418 21423           */
21419 21424          cdb[0] = SCMD_GET_CONFIGURATION;
21420 21425          cdb[1] = 0x02;  /* Requested Type */
21421 21426          cdb[8] = SD_PROFILE_HEADER_LEN;
21422 21427          ucmdbuf->uscsi_cdb = cdb;
21423 21428          ucmdbuf->uscsi_cdblen = CDB_GROUP1;
21424 21429          ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
21425 21430          ucmdbuf->uscsi_buflen = buflen;
21426      -        ucmdbuf->uscsi_timeout = sd_io_time;
     21431 +        ucmdbuf->uscsi_timeout = un->un_uscsi_timeout;
21427 21432          ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
21428 21433          ucmdbuf->uscsi_rqlen = rqbuflen;
21429 21434          ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;
21430 21435  
21431 21436          status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL,
21432 21437              UIO_SYSSPACE, path_flag);
21433 21438  
21434 21439          switch (status) {
21435 21440          case 0:
21436 21441                  sd_ssc_assessment(ssc, SD_FMT_STANDARD);
↓ open down ↓ 72 lines elided ↑ open up ↑
21509 21514           * Set up cdb field for the get configuration command.
21510 21515           */
21511 21516          cdb[0] = SCMD_GET_CONFIGURATION;
21512 21517          cdb[1] = 0x02;  /* Requested Type */
21513 21518          cdb[3] = feature;
21514 21519          cdb[8] = buflen;
21515 21520          ucmdbuf->uscsi_cdb = cdb;
21516 21521          ucmdbuf->uscsi_cdblen = CDB_GROUP1;
21517 21522          ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
21518 21523          ucmdbuf->uscsi_buflen = buflen;
21519      -        ucmdbuf->uscsi_timeout = sd_io_time;
     21524 +        ucmdbuf->uscsi_timeout = un->un_uscsi_timeout;
21520 21525          ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
21521 21526          ucmdbuf->uscsi_rqlen = rqbuflen;
21522 21527          ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;
21523 21528  
21524 21529          status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL,
21525 21530              UIO_SYSSPACE, path_flag);
21526 21531  
21527 21532          switch (status) {
21528 21533          case 0:
21529 21534  
↓ open down ↓ 92 lines elided ↑ open up ↑
21622 21627          ASSERT(headlen <= buflen);
21623 21628          SD_FILL_SCSI1_LUN_CDB(un, &cdb);
21624 21629  
21625 21630          ucmd_buf.uscsi_cdb      = (char *)&cdb;
21626 21631          ucmd_buf.uscsi_cdblen   = (uchar_t)cdbsize;
21627 21632          ucmd_buf.uscsi_bufaddr  = (caddr_t)bufaddr;
21628 21633          ucmd_buf.uscsi_buflen   = buflen;
21629 21634          ucmd_buf.uscsi_rqbuf    = (caddr_t)&sense_buf;
21630 21635          ucmd_buf.uscsi_rqlen    = sizeof (struct scsi_extended_sense);
21631 21636          ucmd_buf.uscsi_flags    = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
21632      -        ucmd_buf.uscsi_timeout  = 60;
     21637 +        ucmd_buf.uscsi_timeout  = un->un_uscsi_timeout;
21633 21638  
21634 21639          status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21635 21640              UIO_SYSSPACE, path_flag);
21636 21641  
21637 21642          switch (status) {
21638 21643          case 0:
21639 21644                  /*
21640 21645                   * sr_check_wp() uses 0x3f page code and check the header of
21641 21646                   * mode page to determine if target device is write-protected.
21642 21647                   * But some USB devices return 0 bytes for 0x3f page code. For
↓ open down ↓ 98 lines elided ↑ open up ↑
21741 21746  
21742 21747          SD_FILL_SCSI1_LUN_CDB(un, &cdb);
21743 21748  
21744 21749          ucmd_buf.uscsi_cdb      = (char *)&cdb;
21745 21750          ucmd_buf.uscsi_cdblen   = (uchar_t)cdbsize;
21746 21751          ucmd_buf.uscsi_bufaddr  = (caddr_t)bufaddr;
21747 21752          ucmd_buf.uscsi_buflen   = buflen;
21748 21753          ucmd_buf.uscsi_rqbuf    = (caddr_t)&sense_buf;
21749 21754          ucmd_buf.uscsi_rqlen    = sizeof (struct scsi_extended_sense);
21750 21755          ucmd_buf.uscsi_flags    = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
21751      -        ucmd_buf.uscsi_timeout  = 60;
     21756 +        ucmd_buf.uscsi_timeout  = un->un_uscsi_timeout;
21752 21757  
21753 21758          status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21754 21759              UIO_SYSSPACE, path_flag);
21755 21760  
21756 21761          switch (status) {
21757 21762          case 0:
21758 21763                  sd_ssc_assessment(ssc, SD_FMT_STANDARD);
21759 21764                  break;  /* Success! */
21760 21765          case EIO:
21761 21766                  switch (ucmd_buf.uscsi_status) {
↓ open down ↓ 117 lines elided ↑ open up ↑
21879 21884          /* Set LUN bit(s) in CDB if this is a SCSI-1 device */
21880 21885          SD_FILL_SCSI1_LUN_CDB(un, &cdb);
21881 21886  
21882 21887          ucmd_buf.uscsi_cdb      = (char *)&cdb;
21883 21888          ucmd_buf.uscsi_cdblen   = (uchar_t)cdbsize;
21884 21889          ucmd_buf.uscsi_bufaddr  = bufaddr;
21885 21890          ucmd_buf.uscsi_buflen   = buflen;
21886 21891          ucmd_buf.uscsi_rqbuf    = (caddr_t)&sense_buf;
21887 21892          ucmd_buf.uscsi_rqlen    = sizeof (struct scsi_extended_sense);
21888 21893          ucmd_buf.uscsi_flags    = flag | USCSI_RQENABLE | USCSI_SILENT;
21889      -        ucmd_buf.uscsi_timeout  = 60;
     21894 +        ucmd_buf.uscsi_timeout  = un->un_cmd_timeout;
21890 21895          status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21891 21896              UIO_SYSSPACE, path_flag);
21892 21897  
21893 21898          switch (status) {
21894 21899          case 0:
21895 21900                  sd_ssc_assessment(ssc, SD_FMT_STANDARD);
21896 21901                  break;  /* Success! */
21897 21902          case EIO:
21898 21903                  switch (ucmd_buf.uscsi_status) {
21899 21904                  case STATUS_RESERVATION_CONFLICT:
↓ open down ↓ 59 lines elided ↑ open up ↑
21959 21964          cdb.cdb_opaque[6] = (uchar_t)(param_ptr  & 0x00FF);
21960 21965          FORMG1COUNT(&cdb, buflen);
21961 21966  
21962 21967          ucmd_buf.uscsi_cdb      = (char *)&cdb;
21963 21968          ucmd_buf.uscsi_cdblen   = CDB_GROUP1;
21964 21969          ucmd_buf.uscsi_bufaddr  = (caddr_t)bufaddr;
21965 21970          ucmd_buf.uscsi_buflen   = buflen;
21966 21971          ucmd_buf.uscsi_rqbuf    = (caddr_t)&sense_buf;
21967 21972          ucmd_buf.uscsi_rqlen    = sizeof (struct scsi_extended_sense);
21968 21973          ucmd_buf.uscsi_flags    = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
21969      -        ucmd_buf.uscsi_timeout  = 60;
     21974 +        ucmd_buf.uscsi_timeout  = un->un_uscsi_timeout;
21970 21975  
21971 21976          status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21972 21977              UIO_SYSSPACE, path_flag);
21973 21978  
21974 21979          switch (status) {
21975 21980          case 0:
21976 21981                  break;
21977 21982          case EIO:
21978 21983                  switch (ucmd_buf.uscsi_status) {
21979 21984                  case STATUS_RESERVATION_CONFLICT:
↓ open down ↓ 111 lines elided ↑ open up ↑
22091 22096          cdb.cdb_opaque[4] = class_req;
22092 22097          FORMG1COUNT(&cdb, buflen);
22093 22098  
22094 22099          ucmd_buf.uscsi_cdb      = (char *)&cdb;
22095 22100          ucmd_buf.uscsi_cdblen   = CDB_GROUP1;
22096 22101          ucmd_buf.uscsi_bufaddr  = (caddr_t)bufaddr;
22097 22102          ucmd_buf.uscsi_buflen   = buflen;
22098 22103          ucmd_buf.uscsi_rqbuf    = NULL;
22099 22104          ucmd_buf.uscsi_rqlen    = 0;
22100 22105          ucmd_buf.uscsi_flags    = USCSI_READ | USCSI_SILENT;
22101      -        ucmd_buf.uscsi_timeout  = 60;
     22106 +        ucmd_buf.uscsi_timeout  = un->un_uscsi_timeout;
22102 22107  
22103 22108          status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
22104 22109              UIO_SYSSPACE, SD_PATH_DIRECT);
22105 22110  
22106 22111          /*
22107 22112           * Only handle status == 0, the upper-level caller
22108 22113           * will put different assessment based on the context.
22109 22114           */
22110 22115          if (status == 0) {
22111 22116                  sd_ssc_assessment(ssc, SD_FMT_STANDARD);
↓ open down ↓ 61 lines elided ↑ open up ↑
22173 22178          /*
22174 22179           * All device accesses go thru sdstrategy where we check on suspend
22175 22180           * status
22176 22181           */
22177 22182          if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
22178 22183                  return (ENXIO);
22179 22184          }
22180 22185  
22181 22186          ASSERT(!mutex_owned(SD_MUTEX(un)));
22182 22187  
22183      -        /* Initialize sd_ssc_t for internal uscsi commands */
22184      -        ssc = sd_ssc_init(un);
22185      -
22186      -        is_valid = SD_IS_VALID_LABEL(un);
22187      -
22188 22188          /*
22189 22189           * Moved this wait from sd_uscsi_strategy to here for
22190 22190           * reasons of deadlock prevention. Internal driver commands,
22191 22191           * specifically those to change a devices power level, result
22192 22192           * in a call to sd_uscsi_strategy.
22193 22193           */
22194 22194          mutex_enter(SD_MUTEX(un));
22195 22195          while ((un->un_state == SD_STATE_SUSPENDED) ||
22196      -            (un->un_state == SD_STATE_PM_CHANGING)) {
     22196 +            (un->un_state == SD_STATE_PM_CHANGING) ||
     22197 +            (un->un_state == SD_STATE_ATTACHING)) {
22197 22198                  cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
22198 22199          }
     22200 +
     22201 +        if (un->un_state == SD_STATE_ATTACH_FAILED) {
     22202 +                mutex_exit(SD_MUTEX(un));
     22203 +                SD_ERROR(SD_LOG_READ_WRITE, un,
     22204 +                    "sdioctl: attach failed\n");
     22205 +                return (EIO);
     22206 +        }
     22207 +
22199 22208          /*
22200 22209           * Twiddling the counter here protects commands from now
22201 22210           * through to the top of sd_uscsi_strategy. Without the
22202 22211           * counter inc. a power down, for example, could get in
22203 22212           * after the above check for state is made and before
22204 22213           * execution gets to the top of sd_uscsi_strategy.
22205 22214           * That would cause problems.
22206 22215           */
22207 22216          un->un_ncmds_in_driver++;
     22217 +        mutex_exit(SD_MUTEX(un));
22208 22218  
     22219 +        /* Initialize sd_ssc_t for internal uscsi commands */
     22220 +        ssc = sd_ssc_init(un);
     22221 +
     22222 +        is_valid = SD_IS_VALID_LABEL(un);
     22223 +
     22224 +        mutex_enter(SD_MUTEX(un));
     22225 +
22209 22226          if (!is_valid &&
22210 22227              (flag & (FNDELAY | FNONBLOCK))) {
22211 22228                  switch (cmd) {
22212 22229                  case DKIOCGGEOM:        /* SD_PATH_DIRECT */
22213 22230                  case DKIOCGVTOC:
22214 22231                  case DKIOCGEXTVTOC:
22215 22232                  case DKIOCGAPART:
22216 22233                  case DKIOCPARTINFO:
22217 22234                  case DKIOCEXTPARTINFO:
22218 22235                  case DKIOCSGEOM:
22219 22236                  case DKIOCSAPART:
22220 22237                  case DKIOCGETEFI:
22221 22238                  case DKIOCPARTITION:
22222 22239                  case DKIOCSVTOC:
22223 22240                  case DKIOCSEXTVTOC:
22224 22241                  case DKIOCSETEFI:
22225 22242                  case DKIOCGMBOOT:
22226 22243                  case DKIOCSMBOOT:
22227 22244                  case DKIOCG_PHYGEOM:
22228 22245                  case DKIOCG_VIRTGEOM:
22229      -#if defined(__i386) || defined(__amd64)
22230 22246                  case DKIOCSETEXTPART:
22231      -#endif
22232 22247                          /* let cmlb handle it */
22233 22248                          goto skip_ready_valid;
22234      -
22235 22249                  case CDROMPAUSE:
22236 22250                  case CDROMRESUME:
22237 22251                  case CDROMPLAYMSF:
22238 22252                  case CDROMPLAYTRKIND:
22239 22253                  case CDROMREADTOCHDR:
22240 22254                  case CDROMREADTOCENTRY:
22241 22255                  case CDROMSTOP:
22242 22256                  case CDROMSTART:
22243 22257                  case CDROMVOLCTRL:
22244 22258                  case CDROMSUBCHNL:
↓ open down ↓ 3 lines elided ↑ open up ↑
22248 22262                  case CDROMSBLKMODE:
22249 22263                  case CDROMGBLKMODE:
22250 22264                  case CDROMGDRVSPEED:
22251 22265                  case CDROMSDRVSPEED:
22252 22266                  case CDROMCDDA:
22253 22267                  case CDROMCDXA:
22254 22268                  case CDROMSUBCODE:
22255 22269                          if (!ISCD(un)) {
22256 22270                                  un->un_ncmds_in_driver--;
22257 22271                                  ASSERT(un->un_ncmds_in_driver >= 0);
     22272 +                                if (un->un_f_detach_waiting)
     22273 +                                        cv_signal(&un->un_detach_cv);
22258 22274                                  mutex_exit(SD_MUTEX(un));
22259 22275                                  err = ENOTTY;
22260 22276                                  goto done_without_assess;
22261 22277                          }
22262 22278                          break;
22263 22279                  case FDEJECT:
22264 22280                  case DKIOCEJECT:
22265 22281                  case CDROMEJECT:
22266 22282                          if (!un->un_f_eject_media_supported) {
22267 22283                                  un->un_ncmds_in_driver--;
22268 22284                                  ASSERT(un->un_ncmds_in_driver >= 0);
     22285 +                                if (un->un_f_detach_waiting)
     22286 +                                        cv_signal(&un->un_detach_cv);
22269 22287                                  mutex_exit(SD_MUTEX(un));
22270 22288                                  err = ENOTTY;
22271 22289                                  goto done_without_assess;
22272 22290                          }
22273 22291                          break;
22274 22292                  case DKIOCFLUSHWRITECACHE:
22275 22293                          mutex_exit(SD_MUTEX(un));
22276 22294                          err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
22277 22295                          if (err != 0) {
22278 22296                                  mutex_enter(SD_MUTEX(un));
22279 22297                                  un->un_ncmds_in_driver--;
22280 22298                                  ASSERT(un->un_ncmds_in_driver >= 0);
     22299 +                                if (un->un_f_detach_waiting)
     22300 +                                        cv_signal(&un->un_detach_cv);
22281 22301                                  mutex_exit(SD_MUTEX(un));
22282 22302                                  err = EIO;
22283 22303                                  goto done_quick_assess;
22284 22304                          }
22285 22305                          mutex_enter(SD_MUTEX(un));
22286 22306                          /* FALLTHROUGH */
22287 22307                  case DKIOCREMOVABLE:
22288 22308                  case DKIOCHOTPLUGGABLE:
22289 22309                  case DKIOCINFO:
22290 22310                  case DKIOCGMEDIAINFO:
↓ open down ↓ 38 lines elided ↑ open up ↑
22329 22349                                  } else {
22330 22350                                  /* Do not map SD_RESERVED_BY_OTHERS to EIO */
22331 22351                                          if (err == SD_RESERVED_BY_OTHERS) {
22332 22352                                                  err = EACCES;
22333 22353                                          } else {
22334 22354                                                  err = EIO;
22335 22355                                          }
22336 22356                                  }
22337 22357                                  un->un_ncmds_in_driver--;
22338 22358                                  ASSERT(un->un_ncmds_in_driver >= 0);
     22359 +                                if (un->un_f_detach_waiting)
     22360 +                                        cv_signal(&un->un_detach_cv);
22339 22361                                  mutex_exit(SD_MUTEX(un));
22340 22362  
22341 22363                                  goto done_without_assess;
22342 22364                          }
22343 22365                  }
22344 22366          }
22345 22367  
22346 22368  skip_ready_valid:
22347 22369          mutex_exit(SD_MUTEX(un));
22348 22370  
↓ open down ↓ 23 lines elided ↑ open up ↑
22372 22394          case DKIOCSAPART:
22373 22395          case DKIOCGETEFI:
22374 22396          case DKIOCPARTITION:
22375 22397          case DKIOCSVTOC:
22376 22398          case DKIOCSEXTVTOC:
22377 22399          case DKIOCSETEFI:
22378 22400          case DKIOCGMBOOT:
22379 22401          case DKIOCSMBOOT:
22380 22402          case DKIOCG_PHYGEOM:
22381 22403          case DKIOCG_VIRTGEOM:
22382      -#if defined(__i386) || defined(__amd64)
22383 22404          case DKIOCSETEXTPART:
22384      -#endif
22385 22405                  SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd);
22386 22406  
22387 22407                  /* TUR should spin up */
22388 22408  
22389 22409                  if (un->un_f_has_removable_media)
22390 22410                          err = sd_send_scsi_TEST_UNIT_READY(ssc,
22391 22411                              SD_CHECK_FOR_MEDIA);
22392 22412  
22393 22413                  else
22394 22414                          err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
↓ open down ↓ 358 lines elided ↑ open up ↑
22753 22773                  SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n");
22754 22774                  if (!ISCD(un)) {
22755 22775                          err = ENOTTY;
22756 22776                  } else {
22757 22777                          err = sr_play_msf(dev, (caddr_t)arg, flag);
22758 22778                  }
22759 22779                  break;
22760 22780  
22761 22781          case CDROMPLAYTRKIND:
22762 22782                  SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n");
22763      -#if defined(__i386) || defined(__amd64)
22764 22783                  /*
22765 22784                   * not supported on ATAPI CD drives, use CDROMPLAYMSF instead
22766 22785                   */
22767 22786                  if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
22768      -#else
22769      -                if (!ISCD(un)) {
22770      -#endif
22771 22787                          err = ENOTTY;
22772 22788                  } else {
22773 22789                          err = sr_play_trkind(dev, (caddr_t)arg, flag);
22774 22790                  }
22775 22791                  break;
22776 22792  
22777 22793          case CDROMREADTOCHDR:
22778 22794                  SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n");
22779 22795                  if (!ISCD(un)) {
22780 22796                          err = ENOTTY;
↓ open down ↓ 270 lines elided ↑ open up ↑
23051 23067  /* SDIOC FaultInjection testing ioctls */
23052 23068          case SDIOCSTART:
23053 23069          case SDIOCSTOP:
23054 23070          case SDIOCINSERTPKT:
23055 23071          case SDIOCINSERTXB:
23056 23072          case SDIOCINSERTUN:
23057 23073          case SDIOCINSERTARQ:
23058 23074          case SDIOCPUSH:
23059 23075          case SDIOCRETRIEVE:
23060 23076          case SDIOCRUN:
     23077 +        case SDIOCINSERTTRAN:
23061 23078                  SD_INFO(SD_LOG_SDTEST, un, "sdioctl:"
23062 23079                      "SDIOC detected cmd:0x%X:\n", cmd);
23063 23080                  /* call error generator */
23064      -                sd_faultinjection_ioctl(cmd, arg, un);
23065      -                err = 0;
     23081 +                err = sd_faultinjection_ioctl(cmd, arg, un);
23066 23082                  break;
23067 23083  
23068 23084  #endif /* SD_FAULT_INJECTION */
23069 23085  
23070 23086          case DKIOCFLUSHWRITECACHE:
23071 23087                  {
23072 23088                          struct dk_callback *dkc = (struct dk_callback *)arg;
23073 23089  
23074 23090                          mutex_enter(SD_MUTEX(un));
23075 23091                          if (!un->un_f_sync_cache_supported ||
↓ open down ↓ 20 lines elided ↑ open up ↑
23096 23112                              dkc->dkc_callback != NULL) {
23097 23113                                  /* async SYNC CACHE request */
23098 23114                                  err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
23099 23115                          } else {
23100 23116                                  /* synchronous SYNC CACHE request */
23101 23117                                  err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL);
23102 23118                          }
23103 23119                  }
23104 23120                  break;
23105 23121  
     23122 +        case DKIOCFREE:
     23123 +                {
     23124 +                        dkioc_free_list_t *dfl = (dkioc_free_list_t *)arg;
     23125 +
     23126 +                        /* bad userspace ioctls shouldn't panic */
     23127 +                        if (dfl == NULL && !(flag & FKIOCTL)) {
     23128 +                                err = SET_ERROR(EINVAL);
     23129 +                                break;
     23130 +                        }
     23131 +                        /* synchronous UNMAP request */
     23132 +                        err = sd_send_scsi_UNMAP(dev, ssc, dfl, flag);
     23133 +                }
     23134 +                break;
     23135 +
23106 23136          case DKIOCGETWCE: {
23107 23137  
23108 23138                  int wce;
23109 23139  
23110 23140                  if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) {
23111 23141                          break;
23112 23142                  }
23113 23143  
23114 23144                  if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) {
23115 23145                          err = EFAULT;
↓ open down ↓ 125 lines elided ↑ open up ↑
23241 23271                  break;
23242 23272          }
23243 23273  
23244 23274          default:
23245 23275                  err = ENOTTY;
23246 23276                  break;
23247 23277          }
23248 23278          mutex_enter(SD_MUTEX(un));
23249 23279          un->un_ncmds_in_driver--;
23250 23280          ASSERT(un->un_ncmds_in_driver >= 0);
     23281 +        if (un->un_f_detach_waiting)
     23282 +                cv_signal(&un->un_detach_cv);
23251 23283          mutex_exit(SD_MUTEX(un));
23252 23284  
23253 23285  
23254 23286  done_without_assess:
23255 23287          sd_ssc_fini(ssc);
23256 23288  
23257 23289          SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err);
23258 23290          return (err);
23259 23291  
23260 23292  done_with_assess:
23261 23293          mutex_enter(SD_MUTEX(un));
23262 23294          un->un_ncmds_in_driver--;
23263 23295          ASSERT(un->un_ncmds_in_driver >= 0);
     23296 +        if (un->un_f_detach_waiting)
     23297 +                cv_signal(&un->un_detach_cv);
23264 23298          mutex_exit(SD_MUTEX(un));
23265 23299  
23266 23300  done_quick_assess:
23267 23301          if (err != 0)
23268 23302                  sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23269 23303          /* Uninitialize sd_ssc_t pointer */
23270 23304          sd_ssc_fini(ssc);
23271 23305  
23272 23306          SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err);
23273 23307          return (err);
↓ open down ↓ 84 lines elided ↑ open up ↑
23358 23392   * Description: This routine returns the information required to populate
23359 23393   *              the fields for the dk_minfo/dk_minfo_ext structures.
23360 23394   *
23361 23395   *   Arguments: dev             - the device number
23362 23396   *              dki_media_type  - media_type
23363 23397   *              dki_lbsize      - logical block size
23364 23398   *              dki_capacity    - capacity in blocks
23365 23399   *              dki_pbsize      - physical block size (if requested)
23366 23400   *
23367 23401   * Return Code: 0
23368      - *              EACCESS
     23402 + *              EACCES
23369 23403   *              EFAULT
23370 23404   *              ENXIO
23371 23405   *              EIO
23372 23406   */
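
The dk_minfo/dk_minfo_ext fields listed above are what a userspace caller reads back with DKIOCGMEDIAINFO and DKIOCGMEDIAINFOEXT. A hedged sketch of that consumer side follows; header paths are the usual illumos ones and should be checked against sys/dkio.h.

/*
 * Hedged sketch: query the media info that sd_get_media_info_com()
 * populates, preferring the extended form (which adds dki_pbsize).
 */
#include <sys/types.h>
#include <sys/dkio.h>
#include <fcntl.h>
#include <stdio.h>
#include <stropts.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
        struct dk_minfo_ext     minfo_ext;
        struct dk_minfo         minfo;
        int                     fd;

        if (argc != 2) {
                (void) fprintf(stderr, "usage: %s /dev/rdsk/cXtYdZsN\n",
                    argv[0]);
                return (1);
        }
        if ((fd = open(argv[1], O_RDONLY)) < 0) {
                perror("open");
                return (1);
        }

        if (ioctl(fd, DKIOCGMEDIAINFOEXT, &minfo_ext) == 0) {
                (void) printf("media %u lbsize %u pbsize %u capacity %llu\n",
                    minfo_ext.dki_media_type, minfo_ext.dki_lbsize,
                    minfo_ext.dki_pbsize,
                    (unsigned long long)minfo_ext.dki_capacity);
        } else if (ioctl(fd, DKIOCGMEDIAINFO, &minfo) == 0) {
                (void) printf("media %u lbsize %u capacity %llu\n",
                    minfo.dki_media_type, minfo.dki_lbsize,
                    (unsigned long long)minfo.dki_capacity);
        } else {
                perror("DKIOCGMEDIAINFO");
        }
        (void) close(fd);
        return (0);
}
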
23373 23407  static int
23374 23408  sd_get_media_info_com(dev_t dev, uint_t *dki_media_type, uint_t *dki_lbsize,
23375 23409      diskaddr_t *dki_capacity, uint_t *dki_pbsize)
23376 23410  {
23377 23411          struct sd_lun           *un = NULL;
23378 23412          struct uscsi_cmd        com;
↓ open down ↓ 97 lines elided ↑ open up ↑
23476 23510          }
23477 23511  
23478 23512          /*
23479 23513           * Now read the capacity so we can provide the lbasize,
23480 23514           * pbsize and capacity.
23481 23515           */
23482 23516          if (dki_pbsize && un->un_f_descr_format_supported) {
23483 23517                  rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize,
23484 23518                      &pbsize, SD_PATH_DIRECT);
23485 23519  
23486      -                /*
23487      -                 * Override the physical blocksize if the instance already
23488      -                 * has a larger value.
23489      -                 */
23490      -                pbsize = MAX(pbsize, un->un_phy_blocksize);
     23520 +                if (un->un_f_sdconf_phy_blocksize) /* keep sd.conf's pbs */
     23521 +                        pbsize = un->un_phy_blocksize;
     23522 +                else /* override the pbs if the instance has a larger value */
     23523 +                        pbsize = MAX(pbsize, un->un_phy_blocksize);
23491 23524          }
23492 23525  
23493 23526          if (dki_pbsize == NULL || rval != 0 ||
23494 23527              !un->un_f_descr_format_supported) {
23495 23528                  rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize,
23496 23529                      SD_PATH_DIRECT);
23497 23530  
23498 23531                  switch (rval) {
23499 23532                  case 0:
23500 23533                          if (un->un_f_enable_rmw &&
↓ open down ↓ 239 lines elided ↑ open up ↑
23740 23773                   * This is a special case IOCTL that doesn't return
23741 23774                   * until the media state changes. Routine sdpower
23742 23775                   * knows about and handles this so don't count it
23743 23776                   * as an active cmd in the driver, which would
23744 23777                   * keep the device busy to the pm framework.
23745 23778                   * If the count isn't decremented the device can't
23746 23779                   * be powered down.
23747 23780                   */
23748 23781                  un->un_ncmds_in_driver--;
23749 23782                  ASSERT(un->un_ncmds_in_driver >= 0);
     23783 +                if (un->un_f_detach_waiting)
     23784 +                        cv_signal(&un->un_detach_cv);
23750 23785  
23751 23786                  /*
23752 23787                   * if a prior request had been made, this will be the same
23753 23788                   * token, as scsi_watch was designed that way.
23754 23789                   */
23755 23790                  un->un_swr_token = token;
23756 23791                  un->un_specified_mediastate = state;
23757 23792  
23758 23793                  /*
23759 23794                   * now wait for media change
↓ open down ↓ 473 lines elided ↑ open up ↑
24233 24268   *              supported log pages.
24234 24269   *
24235 24270   *   Arguments: ssc   - ssc contains pointer to driver soft state (unit)
24236 24271   *                      structure for this target.
24237 24272   *              log_page -
24238 24273   *
24239 24274   * Return Code: -1 - on error (log sense is optional and may not be supported).
24240 24275   *              0  - log page not found.
24241 24276   *              1  - log page found.
24242 24277   */
24243      -
     24278 +#ifdef notyet
24244 24279  static int
24245 24280  sd_log_page_supported(sd_ssc_t *ssc, int log_page)
24246 24281  {
24247 24282          uchar_t *log_page_data;
24248 24283          int     i;
24249 24284          int     match = 0;
24250 24285          int     log_size;
24251 24286          int     status = 0;
24252 24287          struct sd_lun   *un;
24253 24288  
↓ open down ↓ 44 lines elided ↑ open up ↑
24298 24333           * until we run out of log pages or a match is found.
24299 24334           */
24300 24335          for (i = 4; (i < (log_size + 4)) && !match; i++) {
24301 24336                  if (log_page_data[i] == log_page) {
24302 24337                          match++;
24303 24338                  }
24304 24339          }
24305 24340          kmem_free(log_page_data, 0xFF);
24306 24341          return (match);
24307 24342  }
     24343 +#endif
24308 24344  
24309      -
24310 24345  /*
24311 24346   *    Function: sd_mhdioc_failfast
24312 24347   *
24313 24348   * Description: This routine is the driver entry point for handling ioctl
24314 24349   *              requests to enable/disable the multihost failfast option.
24315 24350   *              (MHIOCENFAILFAST)
24316 24351   *
24317 24352   *   Arguments: dev     - the device number
24318 24353   *              arg     - user specified probing interval.
24319 24354   *              flag    - this argument is a pass through to ddi_copyxxx()
↓ open down ↓ 1415 lines elided ↑ open up ↑
25735 25770          size_t          io_start_offset;
25736 25771          int             doing_rmw = FALSE;
25737 25772          int             rval;
25738 25773          ssize_t         dma_resid;
25739 25774          daddr_t         oblkno;
25740 25775          diskaddr_t      nblks = 0;
25741 25776          diskaddr_t      start_block;
25742 25777  
25743 25778          instance = SDUNIT(dev);
25744 25779          if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
     25780 +            (un->un_state == SD_STATE_ATTACHING) ||
     25781 +            (un->un_state == SD_STATE_ATTACH_FAILED) ||
25745 25782              !SD_IS_VALID_LABEL(un) || ISCD(un)) {
25746 25783                  return (ENXIO);
25747 25784          }
25748 25785  
25749      -        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))
25750      -
25751 25786          SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n");
25752 25787  
25753 25788          partition = SDPART(dev);
25754 25789          SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);
25755 25790  
25756 25791          if (!(NOT_DEVBSIZE(un))) {
25757 25792                  int secmask = 0;
25758 25793                  int blknomask = 0;
25759 25794  
25760 25795                  blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1;
↓ open down ↓ 2223 lines elided ↑ open up ↑
27984 28019          /*
27985 28020           * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
27986 28021           * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
27987 28022           */
27988 28023          cdb[8] = 0x04;
27989 28024          com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27990 28025          com->uscsi_cdb     = cdb;
27991 28026          com->uscsi_cdblen  = CDB_GROUP1;
27992 28027          com->uscsi_bufaddr = buffer;
27993 28028          com->uscsi_buflen  = 0x04;
27994      -        com->uscsi_timeout = 300;
     28029 +        com->uscsi_timeout = 3 * un->un_cmd_timeout;
27995 28030          com->uscsi_flags   = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
27996 28031  
27997 28032          rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27998 28033              SD_PATH_STANDARD);
27999 28034          if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
28000 28035                  hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
28001 28036                  hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
28002 28037          } else {
28003 28038                  hdr->cdth_trk0 = buffer[2];
28004 28039                  hdr->cdth_trk1 = buffer[3];
↓ open down ↓ 1328 lines elided ↑ open up ↑
29333 29368                           * The range need to be locked, try to get a wmap.
29334 29369                           * First attempt it with NO_SLEEP, want to avoid a sleep
29335 29370                           * if possible as we will have to release the sd mutex
29336 29371                           * if we have to sleep.
29337 29372                           */
29338 29373                          if (wmp == NULL)
29339 29374                                  wmp = kmem_cache_alloc(un->un_wm_cache,
29340 29375                                      KM_NOSLEEP);
29341 29376                          if (wmp == NULL) {
29342 29377                                  mutex_exit(SD_MUTEX(un));
29343      -                                _NOTE(DATA_READABLE_WITHOUT_LOCK
29344      -                                    (sd_lun::un_wm_cache))
29345 29378                                  wmp = kmem_cache_alloc(un->un_wm_cache,
29346 29379                                      KM_SLEEP);
29347 29380                                  mutex_enter(SD_MUTEX(un));
29348 29381                                  /*
29349 29382                                   * we released the mutex so recheck and go to
29350 29383                                   * check list state.
29351 29384                                   */
29352 29385                                  state = SD_WM_CHK_LIST;
29353 29386                          } else {
29354 29387                                  /*
↓ open down ↓ 378 lines elided ↑ open up ↑
29733 29766   *              in b_flags and move them onto the failfast queue, then kick
29734 29767   *              off a thread to return all bp's on the failfast queue to
29735 29768   *              their owners with an error set.
29736 29769   *
29737 29770   *   Arguments: un - pointer to the soft state struct for the instance.
29738 29771   *
29739 29772   *     Context: may execute in interrupt context.
29740 29773   */
29741 29774  
29742 29775  static void
29743      -sd_failfast_flushq(struct sd_lun *un)
     29776 +sd_failfast_flushq(struct sd_lun *un, boolean_t flush_all)
29744 29777  {
29745 29778          struct buf *bp;
29746 29779          struct buf *next_waitq_bp;
29747 29780          struct buf *prev_waitq_bp = NULL;
29748 29781  
29749 29782          ASSERT(un != NULL);
29750 29783          ASSERT(mutex_owned(SD_MUTEX(un)));
29751 29784          ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE);
29752 29785          ASSERT(un->un_failfast_bp == NULL);
29753 29786  
29754 29787          SD_TRACE(SD_LOG_IO_FAILFAST, un,
29755 29788              "sd_failfast_flushq: entry: un:0x%p\n", un);
29756 29789  
29757 29790          /*
29758 29791           * Check if we should flush all bufs when entering failfast state, or
29759 29792           * just those with B_FAILFAST set.
29760 29793           */
29761      -        if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) {
     29794 +        if ((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
     29795 +            flush_all) {
29762 29796                  /*
29763 29797                   * Move *all* bp's on the wait queue to the failfast flush
29764 29798                   * queue, including those that do NOT have B_FAILFAST set.
29765 29799                   */
29766 29800                  if (un->un_failfast_headp == NULL) {
29767 29801                          ASSERT(un->un_failfast_tailp == NULL);
29768 29802                          un->un_failfast_headp = un->un_waitq_headp;
29769 29803                  } else {
29770 29804                          ASSERT(un->un_failfast_tailp != NULL);
29771 29805                          un->un_failfast_tailp->av_forw = un->un_waitq_headp;
↓ open down ↓ 239 lines elided ↑ open up ↑
30011 30045  }
30012 30046  
30013 30047  /*
30014 30048   * Note: The following sd_faultinjection_ioctl( ) routines implement
30015 30049   * driver support for handling fault injection for error analysis
30016 30050   * causing faults in multiple layers of the driver.
30017 30051   *
30018 30052   */
30019 30053  
30020 30054  #ifdef SD_FAULT_INJECTION
30021      -static uint_t   sd_fault_injection_on = 0;
30022 30055  
30023 30056  /*
30024 30057   *    Function: sd_faultinjection_ioctl()
30025 30058   *
30026 30059   * Description: This routine is the driver entry point for handling
30027 30060   *              faultinjection ioctls to inject errors into the
30028 30061   *              layer model
30029 30062   *
30030 30063   *   Arguments: cmd     - the ioctl cmd received
30031 30064   *              arg     - the arguments from user and returns
30032 30065   */
30033 30066  
30034      -static void
30035      -sd_faultinjection_ioctl(int cmd, intptr_t arg,  struct sd_lun *un)
     30067 +static int
     30068 +sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un)
30036 30069  {
30037 30070          uint_t i = 0;
30038 30071          uint_t rval;
     30072 +        int ret = 0;
30039 30073  
30040 30074          SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");
30041 30075  
30042 30076          mutex_enter(SD_MUTEX(un));
30043 30077  
30044 30078          switch (cmd) {
30045 30079          case SDIOCRUN:
30046 30080                  /* Allow pushed faults to be injected */
30047 30081                  SD_INFO(SD_LOG_SDTEST, un,
30048 30082                      "sd_faultinjection_ioctl: Injecting Fault Run\n");
↓ open down ↓ 9 lines elided ↑ open up ↑
30058 30092                  SD_INFO(SD_LOG_SDTEST, un,
30059 30093                      "sd_faultinjection_ioctl: Injecting Fault Start\n");
30060 30094  
30061 30095                  sd_fault_injection_on = 0;
30062 30096                  un->sd_injection_mask = 0xFFFFFFFF;
30063 30097                  for (i = 0; i < SD_FI_MAX_ERROR; i++) {
30064 30098                          un->sd_fi_fifo_pkt[i] = NULL;
30065 30099                          un->sd_fi_fifo_xb[i] = NULL;
30066 30100                          un->sd_fi_fifo_un[i] = NULL;
30067 30101                          un->sd_fi_fifo_arq[i] = NULL;
     30102 +                        un->sd_fi_fifo_tran[i] = NULL;
30068 30103                  }
30069 30104                  un->sd_fi_fifo_start = 0;
30070 30105                  un->sd_fi_fifo_end = 0;
30071 30106  
30072 30107                  mutex_enter(&(un->un_fi_mutex));
30073 30108                  un->sd_fi_log[0] = '\0';
30074 30109                  un->sd_fi_buf_len = 0;
30075 30110                  mutex_exit(&(un->un_fi_mutex));
30076 30111  
30077 30112                  SD_INFO(SD_LOG_IOERR, un,
↓ open down ↓ 18 lines elided ↑ open up ↑
30096 30131                                      sizeof (struct sd_fi_xb));
30097 30132                          }
30098 30133                          if (un->sd_fi_fifo_un[i] != NULL) {
30099 30134                                  kmem_free(un->sd_fi_fifo_un[i],
30100 30135                                      sizeof (struct sd_fi_un));
30101 30136                          }
30102 30137                          if (un->sd_fi_fifo_arq[i] != NULL) {
30103 30138                                  kmem_free(un->sd_fi_fifo_arq[i],
30104 30139                                      sizeof (struct sd_fi_arq));
30105 30140                          }
     30141 +                        if (un->sd_fi_fifo_tran[i] != NULL) {
     30142 +                                kmem_free(un->sd_fi_fifo_tran[i],
     30143 +                                    sizeof (struct sd_fi_tran));
     30144 +                        }
30106 30145                          un->sd_fi_fifo_pkt[i] = NULL;
30107 30146                          un->sd_fi_fifo_un[i] = NULL;
30108 30147                          un->sd_fi_fifo_xb[i] = NULL;
30109 30148                          un->sd_fi_fifo_arq[i] = NULL;
     30149 +                        un->sd_fi_fifo_tran[i] = NULL;
30110 30150                  }
30111 30151                  un->sd_fi_fifo_start = 0;
30112 30152                  un->sd_fi_fifo_end = 0;
30113 30153  
30114 30154                  SD_INFO(SD_LOG_IOERR, un,
30115 30155                      "sd_faultinjection_ioctl: stop finished\n");
30116 30156                  break;
30117 30157  
30118 30158          case SDIOCINSERTPKT:
30119 30159                  /* Store a packet struct to be pushed onto fifo */
30120 30160                  SD_INFO(SD_LOG_SDTEST, un,
30121 30161                      "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");
30122 30162  
30123 30163                  i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
30124 30164  
     30165 +                if (un->sd_fi_fifo_tran[i] != NULL) {
     30166 +                        ret = EBUSY;
     30167 +                        break;
     30168 +                }
     30169 +
30125 30170                  sd_fault_injection_on = 0;
30126 30171  
 30127 30172                  /* No more than SD_FI_MAX_ERROR allowed in Queue */
30128 30173                  if (un->sd_fi_fifo_pkt[i] != NULL) {
30129 30174                          kmem_free(un->sd_fi_fifo_pkt[i],
30130 30175                              sizeof (struct sd_fi_pkt));
30131 30176                  }
30132 30177                  if (arg != NULL) {
30133 30178                          un->sd_fi_fifo_pkt[i] =
30134 30179                              kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
30135 30180                          if (un->sd_fi_fifo_pkt[i] == NULL) {
30136 30181                                  /* Alloc failed don't store anything */
     30182 +                                ret = ENOMEM;
30137 30183                                  break;
30138 30184                          }
30139 30185                          rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
30140 30186                              sizeof (struct sd_fi_pkt), 0);
30141 30187                          if (rval == -1) {
30142 30188                                  kmem_free(un->sd_fi_fifo_pkt[i],
30143 30189                                      sizeof (struct sd_fi_pkt));
30144 30190                                  un->sd_fi_fifo_pkt[i] = NULL;
     30191 +                                ret = EFAULT;
     30192 +                                break;
30145 30193                          }
30146 30194                  } else {
30147 30195                          SD_INFO(SD_LOG_IOERR, un,
30148 30196                              "sd_faultinjection_ioctl: pkt null\n");
30149 30197                  }
30150 30198                  break;
30151 30199  
     30200 +        case SDIOCINSERTTRAN:
     30201 +                /* Store a tran packet struct to be pushed onto fifo. */
     30202 +                SD_INFO(SD_LOG_SDTEST, un,
     30203 +                    "sd_faultinjection_ioctl: Injecting Fault Insert TRAN\n");
     30204 +                i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
     30205 +
     30206 +                /*
     30207 +                 * HBA-related fault injections can't be mixed with target-level
     30208 +                 * fault injections.
     30209 +                 */
     30210 +                if (un->sd_fi_fifo_pkt[i] != NULL ||
     30211 +                    un->sd_fi_fifo_xb[i] != NULL ||
     30212 +                    un->sd_fi_fifo_un[i] != NULL ||
     30213 +                    un->sd_fi_fifo_arq[i] != NULL) {
     30214 +                        ret = EBUSY;
     30215 +                        break;
     30216 +                }
     30217 +
     30218 +                sd_fault_injection_on = 0;
     30219 +
     30220 +                if (un->sd_fi_fifo_tran[i] != NULL) {
     30221 +                        kmem_free(un->sd_fi_fifo_tran[i],
     30222 +                            sizeof (struct sd_fi_tran));
     30223 +                        un->sd_fi_fifo_tran[i] = NULL;
     30224 +                }
     30225 +                if (arg != NULL) {
     30226 +                        un->sd_fi_fifo_tran[i] =
     30227 +                            kmem_alloc(sizeof (struct sd_fi_tran), KM_NOSLEEP);
     30228 +                        if (un->sd_fi_fifo_tran[i] == NULL) {
     30229 +                                /* Alloc failed don't store anything */
     30230 +                                ret = ENOMEM;
     30231 +                                break;
     30232 +                        }
     30233 +                        rval = ddi_copyin((void *)arg, un->sd_fi_fifo_tran[i],
     30234 +                            sizeof (struct sd_fi_tran), 0);
     30235 +
     30236 +                        if (rval == 0) {
     30237 +                                switch (un->sd_fi_fifo_tran[i]->tran_cmd) {
      30238 +                                case SD_FLTINJ_CMD_BUSY:
      30239 +                                case SD_FLTINJ_CMD_TIMEOUT:
      30240 +                                        break;
      30241 +                                default:
      30242 +                                        ret = EINVAL;
      30243 +                                        break;
     30244 +                                }
     30245 +                        } else {
     30246 +                                ret = EFAULT;
     30247 +                        }
     30248 +
     30249 +                        if (ret != 0) {
     30250 +                                kmem_free(un->sd_fi_fifo_tran[i],
     30251 +                                    sizeof (struct sd_fi_tran));
     30252 +                                un->sd_fi_fifo_tran[i] = NULL;
     30253 +                                break;
     30254 +                        }
     30255 +                } else {
     30256 +                        SD_INFO(SD_LOG_IOERR, un,
     30257 +                            "sd_faultinjection_ioctl: tran null\n");
     30258 +                }
     30259 +                break;
     30260 +
30152 30261          case SDIOCINSERTXB:
30153 30262                  /* Store a xb struct to be pushed onto fifo */
30154 30263                  SD_INFO(SD_LOG_SDTEST, un,
30155 30264                      "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");
30156 30265  
30157 30266                  i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
30158 30267  
     30268 +                if (un->sd_fi_fifo_tran[i] != NULL) {
     30269 +                        ret = EBUSY;
     30270 +                        break;
     30271 +                }
     30272 +
30159 30273                  sd_fault_injection_on = 0;
30160 30274  
30161 30275                  if (un->sd_fi_fifo_xb[i] != NULL) {
30162 30276                          kmem_free(un->sd_fi_fifo_xb[i],
30163 30277                              sizeof (struct sd_fi_xb));
30164 30278                          un->sd_fi_fifo_xb[i] = NULL;
30165 30279                  }
30166 30280                  if (arg != NULL) {
30167 30281                          un->sd_fi_fifo_xb[i] =
30168 30282                              kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
30169 30283                          if (un->sd_fi_fifo_xb[i] == NULL) {
30170 30284                                  /* Alloc failed don't store anything */
     30285 +                                ret = ENOMEM;
30171 30286                                  break;
30172 30287                          }
30173 30288                          rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
30174 30289                              sizeof (struct sd_fi_xb), 0);
30175 30290  
30176 30291                          if (rval == -1) {
30177 30292                                  kmem_free(un->sd_fi_fifo_xb[i],
30178 30293                                      sizeof (struct sd_fi_xb));
30179 30294                                  un->sd_fi_fifo_xb[i] = NULL;
     30295 +                                ret = EFAULT;
     30296 +                                break;
30180 30297                          }
30181 30298                  } else {
30182 30299                          SD_INFO(SD_LOG_IOERR, un,
30183 30300                              "sd_faultinjection_ioctl: xb null\n");
30184 30301                  }
30185 30302                  break;
30186 30303  
30187 30304          case SDIOCINSERTUN:
30188 30305                  /* Store a un struct to be pushed onto fifo */
30189 30306                  SD_INFO(SD_LOG_SDTEST, un,
30190 30307                      "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");
30191 30308  
30192 30309                  i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
     30310 +                if (un->sd_fi_fifo_tran[i] != NULL) {
     30311 +                        ret = EBUSY;
     30312 +                        break;
     30313 +                }
30193 30314  
30194 30315                  sd_fault_injection_on = 0;
30195 30316  
30196 30317                  if (un->sd_fi_fifo_un[i] != NULL) {
30197 30318                          kmem_free(un->sd_fi_fifo_un[i],
30198 30319                              sizeof (struct sd_fi_un));
30199 30320                          un->sd_fi_fifo_un[i] = NULL;
30200 30321                  }
30201 30322                  if (arg != NULL) {
30202 30323                          un->sd_fi_fifo_un[i] =
30203 30324                              kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
30204 30325                          if (un->sd_fi_fifo_un[i] == NULL) {
30205 30326                                  /* Alloc failed don't store anything */
     30327 +                                ret = ENOMEM;
30206 30328                                  break;
30207 30329                          }
30208 30330                          rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
30209 30331                              sizeof (struct sd_fi_un), 0);
30210 30332                          if (rval == -1) {
30211 30333                                  kmem_free(un->sd_fi_fifo_un[i],
30212 30334                                      sizeof (struct sd_fi_un));
30213 30335                                  un->sd_fi_fifo_un[i] = NULL;
     30336 +                                ret = EFAULT;
     30337 +                                break;
30214 30338                          }
30215 30339  
30216 30340                  } else {
30217 30341                          SD_INFO(SD_LOG_IOERR, un,
30218 30342                              "sd_faultinjection_ioctl: un null\n");
30219 30343                  }
30220 30344  
30221 30345                  break;
30222 30346  
30223 30347          case SDIOCINSERTARQ:
30224 30348                  /* Store an arq struct to be pushed onto fifo */
30225 30349                  SD_INFO(SD_LOG_SDTEST, un,
30226 30350                      "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
30227 30351                  i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
     30352 +                if (un->sd_fi_fifo_tran[i] != NULL) {
     30353 +                        ret = EBUSY;
     30354 +                        break;
     30355 +                }
30228 30356  
30229 30357                  sd_fault_injection_on = 0;
30230 30358  
30231 30359                  if (un->sd_fi_fifo_arq[i] != NULL) {
30232 30360                          kmem_free(un->sd_fi_fifo_arq[i],
30233 30361                              sizeof (struct sd_fi_arq));
30234 30362                          un->sd_fi_fifo_arq[i] = NULL;
30235 30363                  }
30236 30364                  if (arg != NULL) {
30237 30365                          un->sd_fi_fifo_arq[i] =
30238 30366                              kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
30239 30367                          if (un->sd_fi_fifo_arq[i] == NULL) {
30240 30368                                  /* Alloc failed don't store anything */
     30369 +                                ret = ENOMEM;
30241 30370                                  break;
30242 30371                          }
30243 30372                          rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
30244 30373                              sizeof (struct sd_fi_arq), 0);
30245 30374                          if (rval == -1) {
30246 30375                                  kmem_free(un->sd_fi_fifo_arq[i],
30247 30376                                      sizeof (struct sd_fi_arq));
30248 30377                                  un->sd_fi_fifo_arq[i] = NULL;
     30378 +                                ret = EFAULT;
     30379 +                                break;
30249 30380                          }
30250 30381  
30251 30382                  } else {
30252 30383                          SD_INFO(SD_LOG_IOERR, un,
30253 30384                              "sd_faultinjection_ioctl: arq null\n");
30254 30385                  }
30255 30386  
30256 30387                  break;
30257 30388  
30258 30389          case SDIOCPUSH:
30259      -                /* Push stored xb, pkt, un, and arq onto fifo */
     30390 +                /* Push stored xb, pkt, un, arq and tran onto fifo */
30260 30391                  sd_fault_injection_on = 0;
30261 30392  
30262 30393                  if (arg != NULL) {
30263 30394                          rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
30264 30395                          if (rval != -1 &&
30265 30396                              un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
30266 30397                                  un->sd_fi_fifo_end += i;
30267 30398                          }
30268 30399                  } else {
30269 30400                          SD_INFO(SD_LOG_IOERR, un,
↓ open down ↓ 24 lines elided ↑ open up ↑
30294 30425                           * arg is possibly invalid; set
30295 30426                           * it to NULL for the return
30296 30427                           */
30297 30428                          arg = NULL;
30298 30429                  }
30299 30430                  break;
30300 30431          }
30301 30432  
30302 30433          mutex_exit(SD_MUTEX(un));
30303 30434          SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: exit\n");
     30435 +        return (ret);
30304 30436  }
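
For context on the new return codes above, a minimal userland sketch of driving this ioctl path follows. It is illustrative only: it assumes the kernel-private SDIOC* commands and struct sd_fi_un from <sys/scsi/targets/sddef.h> are visible to the caller (they are not a committed interface), and push_one_fault() and the device path are hypothetical.

#include <sys/types.h>
#include <sys/scsi/targets/sddef.h>	/* SDIOC*, struct sd_fi_un (kernel-private) */
#include <fcntl.h>
#include <string.h>
#include <stropts.h>
#include <unistd.h>

/* Stage one zeroed "un" fault record in the next slot and activate it. */
static int
push_one_fault(const char *rdsk_path)
{
	struct sd_fi_un fi_un;
	uint_t count = 1;	/* number of staged slots SDIOCPUSH activates */
	int rv = -1;
	int fd;

	if ((fd = open(rdsk_path, O_RDWR)) < 0)
		return (-1);

	(void) memset(&fi_un, 0, sizeof (fi_un));
	if (ioctl(fd, SDIOCINSERTUN, &fi_un) == 0 &&	/* stage in next slot */
	    ioctl(fd, SDIOCPUSH, &count) == 0)		/* advance fifo end */
		rv = 0;

	(void) close(fd);
	return (rv);
}
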
30305 30437  
30306 30438  
30307 30439  /*
30308 30440   *    Function: sd_injection_log()
30309 30441   *
30310 30442   * Description: This routine adds buff to the already existing injection log
30311 30443   *              for retrieval via faultinjection_ioctl for use in fault
30312 30444   *              detection and recovery
30313 30445   *
↓ open down ↓ 19 lines elided ↑ open up ↑
30333 30465                  for (i = 0; i < len; i++) {
30334 30466                          *destp++ = *buf++;
30335 30467                  }
30336 30468                  un->sd_fi_buf_len += len;
30337 30469                  un->sd_fi_log[un->sd_fi_buf_len] = '\0';
30338 30470          }
30339 30471  
30340 30472          mutex_exit(&(un->un_fi_mutex));
30341 30473  }
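
The tail of sd_injection_log() above copies the message byte by byte, advances sd_fi_buf_len, and keeps the log NUL-terminated; the length check sits in the elided part of the function. A standalone sketch of that append pattern, with the bound written out explicitly as an assumption:

#include <string.h>

#define	LOG_SIZE	1024	/* illustrative; stands in for the driver's log size */

static char	log_buf[LOG_SIZE];
static size_t	log_len;

static void
log_append(const char *msg)
{
	size_t len = strlen(msg);

	/* assumed bound check: drop the message rather than overflow */
	if (log_len + len >= sizeof (log_buf))
		return;
	(void) memcpy(&log_buf[log_len], msg, len);
	log_len += len;
	log_buf[log_len] = '\0';
}
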
30342 30474  
     30475 +/*
     30476 + * This function is called just before sending the packet to the HBA.
      30477 + * The caller must hold the per-LUN mutex; it remains held on return.
     30478 + */
     30479 +static void
     30480 +sd_prefaultinjection(struct scsi_pkt *pktp)
     30481 +{
     30482 +        uint_t i;
     30483 +        struct buf *bp;
     30484 +        struct sd_lun *un;
     30485 +        struct sd_fi_tran *fi_tran;
30343 30486  
     30487 +        ASSERT(pktp != NULL);
     30488 +
     30489 +        /* pull bp and un from pktp */
     30490 +        bp = (struct buf *)pktp->pkt_private;
     30491 +        un = SD_GET_UN(bp);
     30492 +
      30493 +        ASSERT(un != NULL);
      30494 +        ASSERT(mutex_owned(SD_MUTEX(un)));
      30495 +
      30496 +        /* if injection is off, return without touching the packet */
      30497 +        if (sd_fault_injection_on == 0 ||
      30498 +            un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
      30499 +                return;
      30500 +        }
      30501 +
     30502 +        /* take next set off fifo */
     30503 +        i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;
     30504 +
     30505 +        fi_tran = un->sd_fi_fifo_tran[i];
     30506 +        if (fi_tran != NULL) {
      30507 +                switch (fi_tran->tran_cmd) {
      30508 +                case SD_FLTINJ_CMD_BUSY:
      30509 +                        pktp->pkt_flags |= FLAG_PKT_BUSY;
      30510 +                        break;
      30511 +                case SD_FLTINJ_CMD_TIMEOUT:
      30512 +                        pktp->pkt_flags |= FLAG_PKT_TIMEOUT;
      30513 +                        break;
      30514 +                default:
      30515 +                        return;
      30516 +                }
     30517 +        }
     30518 +        /*
     30519 +         * We don't deallocate any data here - it will be deallocated after
     30520 +         * the packet has been processed by the HBA.
     30521 +         */
     30522 +}
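
sd_prefaultinjection() and the SDIOC handlers above index the per-LUN fault arrays with free-running start/end counters reduced modulo SD_FI_MAX_ERROR. A standalone sketch of that ring-index scheme, using illustrative values:

#include <stdio.h>

#define	FI_MAX	256	/* stands in for SD_FI_MAX_ERROR */

int
main(void)
{
	unsigned int start = 250, end = 260;	/* arbitrary counter values */

	/* Each counter value maps onto a slot of the fixed-size arrays. */
	while (start != end) {
		(void) printf("counter %u -> slot %u\n", start, start % FI_MAX);
		start++;
	}
	return (0);
}
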
     30523 +
     30524 +
30344 30525  /*
30345 30526   *    Function: sd_faultinjection()
30346 30527   *
30347 30528   * Description: This routine takes the pkt and changes its
30348 30529   *              content based on the error injection scenario.
30349 30530   *
30350 30531   *   Arguments: pktp    - packet to be changed
30351 30532   */
30352 30533  
30353 30534  static void
↓ open down ↓ 204 lines elided ↑ open up ↑
30558 30739   *         false          true            true         |   Yes
30559 30740   *         true            x                x          |   Yes
30560 30741   *     --------------------------------------------------------------
30561 30742   *
30562 30743   *
30563 30744   * 4. Building default VTOC label
30564 30745   *
30565 30746   *     As section 3 says, sd checks if some kinds of devices have VTOC label.
30566 30747   *     If those devices have no valid VTOC label, sd(7d) will attempt to
30567 30748   *     create default VTOC for them. Currently sd creates default VTOC label
30568      - *     for all devices on x86 platform (VTOC_16), but only for removable
30569      - *     media devices on SPARC (VTOC_8).
      30749 + *     for all devices on the x86 platform (VTOC_16).
30570 30750   *
30571 30751   *     -----------------------------------------------------------
30572 30752   *       removable media hotpluggable platform   |   Default Label
30573 30753   *     -----------------------------------------------------------
30574      - *             false          false    sparc     |     No
30575 30754   *             false          true      x86      |     Yes
30576      - *             false          true     sparc     |     Yes
30577 30755   *             true             x        x       |     Yes
30578 30756   *     ----------------------------------------------------------
30579 30757   *
30580 30758   *
30581 30759   * 5. Supported blocksizes of target devices
30582 30760   *
30583 30761   *     Sd supports non-512-byte blocksize for removable media devices only.
30584 30762   *     For other devices, only a 512-byte blocksize is supported. This may
30585 30763   *     change in the near future because some RAID devices require a
30586 30764   *     non-512-byte blocksize.
↓ open down ↓ 12 lines elided ↑ open up ↑
30599 30777   *     Sd(7d) driver provides DKIOCREMOVABLE ioctl. This ioctl is used to query
30600 30778   *     if a device is a removable media device. It returns 1 for removable media
30601 30779   *     devices, and 0 for others.
30602 30780   *
30603 30781   *     The automatic mounting subsystem should distinguish between the types
30604 30782   *     of devices and apply automounting policies to each.
30605 30783   *
30606 30784   *
30607 30785   * 7. fdisk partition management
30608 30786   *
30609      - *     Fdisk is traditional partition method on x86 platform. Sd(7d) driver
30610      - *     just supports fdisk partitions on x86 platform. On sparc platform, sd
30611      - *     doesn't support fdisk partitions at all. Note: pcfs(7fs) can recognize
30612      - *     fdisk partitions on both x86 and SPARC platform.
      30787 + *     Fdisk is the traditional partitioning method on the x86 platform. The
      30788 + *     sd(7D) driver supports fdisk partitions only on the x86 platform.
30613 30789   *
30614 30790   *     -----------------------------------------------------------
30615 30791   *       platform   removable media  USB/1394  |  fdisk supported
30616 30792   *     -----------------------------------------------------------
30617 30793   *        x86         X               X        |       true
30618      - *     ------------------------------------------------------------
30619      - *        sparc       X               X        |       false
30620      - *     ------------------------------------------------------------
     30794 + *     -----------------------------------------------------------
30621 30795   *
30622 30796   *
30623 30797   * 8. MBOOT/MBR
30624 30798   *
30625      - *     Although sd(7d) doesn't support fdisk on SPARC platform, it does support
30626      - *     read/write mboot for removable media devices on sparc platform.
30627      - *
30628 30799   *     -----------------------------------------------------------
30629 30800   *       platform   removable media  USB/1394  |  mboot supported
30630 30801   *     -----------------------------------------------------------
30631 30802   *        x86         X               X        |       true
30632      - *     ------------------------------------------------------------
30633      - *        sparc      false           false     |       false
30634      - *        sparc      false           true      |       true
30635      - *        sparc      true            false     |       true
30636      - *        sparc      true            true      |       true
30637      - *     ------------------------------------------------------------
     30803 + *     -----------------------------------------------------------
30638 30804   *
30639 30805   *
30640 30806   * 9.  error handling during opening device
30641 30807   *     If opening a disk device fails, an errno is returned. For some kinds
30642 30808   *     of errors, a different errno is returned depending on whether this
30643 30809   *     device is a removable media device. This brings USB/1394 hard disks
30644 30810   *     in line with expected hard disk behavior. It is not expected that
30645 30811   *     this breaks any application.
30646 30812   *     application.
30647 30813   *
↓ open down ↓ 517 lines elided ↑ open up ↑
31165 31331          }
31166 31332  }
31167 31333  
31168 31334  /*
31169 31335   *    Function: sd_ssc_ereport_post
31170 31336   *
31171 31337   * Description: Will be called when the SD driver needs to post an ereport.
31172 31338   *
31173 31339   *    Context: Kernel thread or interrupt context.
31174 31340   */
31175      -
31176      -#define DEVID_IF_KNOWN(d) "devid", DATA_TYPE_STRING, (d) ? (d) : "unknown"
31177      -
31178 31341  static void
31179 31342  sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess)
31180 31343  {
31181      -        int uscsi_path_instance = 0;
31182 31344          uchar_t uscsi_pkt_reason;
31183 31345          uint32_t uscsi_pkt_state;
31184 31346          uint32_t uscsi_pkt_statistics;
31185 31347          uint64_t uscsi_ena;
31186 31348          uchar_t op_code;
31187 31349          uint8_t *sensep;
31188 31350          union scsi_cdb *cdbp;
31189 31351          uint_t cdblen = 0;
31190 31352          uint_t senlen = 0;
31191 31353          struct sd_lun *un;
↓ open down ↓ 22 lines elided ↑ open up ↑
31214 31376  
31215 31377          /*
31216 31378           * If we are syncing or dumping, the command will not be executed
31217 31379           * so we bypass this situation.
31218 31380           */
31219 31381          if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
31220 31382              (un->un_state == SD_STATE_DUMPING))
31221 31383                  return;
31222 31384  
31223 31385          uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason;
31224      -        uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance;
31225 31386          uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state;
31226 31387          uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics;
31227 31388          uscsi_ena = ssc->ssc_uscsi_info->ui_ena;
31228 31389  
31229 31390          sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
31230 31391          cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb;
31231 31392  
31232 31393          /* In rare cases, EG:DOORLOCK, the cdb could be NULL */
31233 31394          if (cdbp == NULL) {
31234 31395                  scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
↓ open down ↓ 28 lines elided ↑ open up ↑
31263 31424                  case SD_FM_DRV_FATAL:
31264 31425                  default:
31265 31426                          (void) sprintf(assessment, "%s", "unknown");
31266 31427          }
31267 31428          /*
31268 31429           * If drv_assess == SD_FM_DRV_RECOVERY, this should be a recovered
31269 31430           * command, we will post ereport.io.scsi.cmd.disk.recovered.
31270 31431           * driver-assessment will always be "recovered" here.
31271 31432           */
31272 31433          if (drv_assess == SD_FM_DRV_RECOVERY) {
31273      -                scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL,
     31434 +                scsi_fm_ereport_post(un->un_sd, 0, NULL,
31274 31435                      "cmd.disk.recovered", uscsi_ena, devid, NULL,
31275 31436                      DDI_NOSLEEP, NULL,
31276 31437                      FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31277 31438                      DEVID_IF_KNOWN(devid),
31278 31439                      "driver-assessment", DATA_TYPE_STRING, assessment,
31279 31440                      "op-code", DATA_TYPE_UINT8, op_code,
31280 31441                      "cdb", DATA_TYPE_UINT8_ARRAY,
31281 31442                      cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31282 31443                      "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
31283 31444                      "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
↓ open down ↓ 6 lines elided ↑ open up ↑
31290 31451           * If there is un-expected/un-decodable data, we should post
31291 31452           * ereport.io.scsi.cmd.disk.dev.uderr.
31292 31453           * driver-assessment will be set based on parameter drv_assess.
31293 31454           * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back.
31294 31455           * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered.
31295 31456           * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered.
31296 31457           * SSC_FLAGS_INVALID_DATA - invalid data sent back.
31297 31458           */
31298 31459          if (ssc->ssc_flags & ssc_invalid_flags) {
31299 31460                  if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) {
31300      -                        scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
     31461 +                        scsi_fm_ereport_post(un->un_sd, 0,
31301 31462                              NULL, "cmd.disk.dev.uderr", uscsi_ena, devid,
31302 31463                              NULL, DDI_NOSLEEP, NULL,
31303 31464                              FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31304 31465                              DEVID_IF_KNOWN(devid),
31305 31466                              "driver-assessment", DATA_TYPE_STRING,
31306 31467                              drv_assess == SD_FM_DRV_FATAL ?
31307 31468                              "fail" : assessment,
31308 31469                              "op-code", DATA_TYPE_UINT8, op_code,
31309 31470                              "cdb", DATA_TYPE_UINT8_ARRAY,
31310 31471                              cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
↓ open down ↓ 8 lines elided ↑ open up ↑
31319 31480                              "un-decode-value", DATA_TYPE_UINT8_ARRAY,
31320 31481                              senlen, sensep,
31321 31482                              NULL);
31322 31483                  } else {
31323 31484                          /*
31324 31485                           * For other type of invalid data, the
31325 31486                           * un-decode-value field would be empty because the
31326 31487                           * un-decodable content could be seen from upper
31327 31488                           * level payload or inside un-decode-info.
31328 31489                           */
31329      -                        scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
     31490 +                        scsi_fm_ereport_post(un->un_sd, 0,
31330 31491                              NULL,
31331 31492                              "cmd.disk.dev.uderr", uscsi_ena, devid,
31332 31493                              NULL, DDI_NOSLEEP, NULL,
31333 31494                              FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31334 31495                              DEVID_IF_KNOWN(devid),
31335 31496                              "driver-assessment", DATA_TYPE_STRING,
31336 31497                              drv_assess == SD_FM_DRV_FATAL ?
31337 31498                              "fail" : assessment,
31338 31499                              "op-code", DATA_TYPE_UINT8, op_code,
31339 31500                              "cdb", DATA_TYPE_UINT8_ARRAY,
↓ open down ↓ 21 lines elided ↑ open up ↑
31361 31522                   * set inside sd_start_cmds due to errors(bad packet or
31362 31523                   * fatal transport error), we should take it as a
31363 31524                   * transport error, so we post ereport.io.scsi.cmd.disk.tran.
31364 31525                   * driver-assessment will be set based on drv_assess.
31365 31526                   * We will set devid to NULL because it is a transport
31366 31527                   * error.
31367 31528                   */
31368 31529                  if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)
31369 31530                          ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT;
31370 31531  
31371      -                scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL,
     31532 +                scsi_fm_ereport_post(un->un_sd, 0, NULL,
31372 31533                      "cmd.disk.tran", uscsi_ena, NULL, NULL, DDI_NOSLEEP, NULL,
31373 31534                      FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31374 31535                      DEVID_IF_KNOWN(devid),
31375 31536                      "driver-assessment", DATA_TYPE_STRING,
31376 31537                      drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
31377 31538                      "op-code", DATA_TYPE_UINT8, op_code,
31378 31539                      "cdb", DATA_TYPE_UINT8_ARRAY,
31379 31540                      cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31380 31541                      "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
31381 31542                      "pkt-state", DATA_TYPE_UINT8, uscsi_pkt_state,
↓ open down ↓ 23 lines elided ↑ open up ↑
31405 31566                          if (sense_key == KEY_RECOVERABLE_ERROR &&
31406 31567                              sense_asc == 0x00 && sense_ascq == 0x1d)
31407 31568                                  return;
31408 31569  
31409 31570                          if (sense_key == KEY_MEDIUM_ERROR) {
31410 31571                                  /*
31411 31572                                   * driver-assessment should be "fatal" if
31412 31573                                   * drv_assess is SD_FM_DRV_FATAL.
31413 31574                                   */
31414 31575                                  scsi_fm_ereport_post(un->un_sd,
31415      -                                    uscsi_path_instance, NULL,
     31576 +                                    0, NULL,
31416 31577                                      "cmd.disk.dev.rqs.merr",
31417 31578                                      uscsi_ena, devid, NULL, DDI_NOSLEEP, NULL,
31418 31579                                      FM_VERSION, DATA_TYPE_UINT8,
31419 31580                                      FM_EREPORT_VERS0,
31420 31581                                      DEVID_IF_KNOWN(devid),
31421 31582                                      "driver-assessment",
31422 31583                                      DATA_TYPE_STRING,
31423 31584                                      drv_assess == SD_FM_DRV_FATAL ?
31424 31585                                      "fatal" : assessment,
31425 31586                                      "op-code",
↓ open down ↓ 28 lines elided ↑ open up ↑
31454 31615                                      ssc->ssc_uscsi_info->ui_lba,
31455 31616                                      NULL);
31456 31617                          } else {
31457 31618                                  /*
31458 31619                                   * if sense-key == 0x4(hardware
31459 31620                                   * error), driver-assessment should
31460 31621                                   * be "fatal" if drv_assess is
31461 31622                                   * SD_FM_DRV_FATAL.
31462 31623                                   */
31463 31624                                  scsi_fm_ereport_post(un->un_sd,
31464      -                                    uscsi_path_instance, NULL,
     31625 +                                    0, NULL,
31465 31626                                      "cmd.disk.dev.rqs.derr",
31466 31627                                      uscsi_ena, devid,
31467 31628                                      NULL, DDI_NOSLEEP, NULL,
31468 31629                                      FM_VERSION,
31469 31630                                      DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31470 31631                                      DEVID_IF_KNOWN(devid),
31471 31632                                      "driver-assessment",
31472 31633                                      DATA_TYPE_STRING,
31473 31634                                      drv_assess == SD_FM_DRV_FATAL ?
31474 31635                                      (sense_key == 0x4 ?
↓ open down ↓ 34 lines elided ↑ open up ↑
31509 31670                           */
31510 31671                          if (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD)
31511 31672                                  return;
31512 31673  
31513 31674                          /*
31514 31675                           * Post ereport.io.scsi.cmd.disk.dev.serr if we got the
31515 31676                           * stat-code but with sense data unavailable.
31516 31677                           * driver-assessment will be set based on parameter
31517 31678                           * drv_assess.
31518 31679                           */
31519      -                        scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
     31680 +                        scsi_fm_ereport_post(un->un_sd, 0,
31520 31681                              NULL,
31521 31682                              "cmd.disk.dev.serr", uscsi_ena,
31522 31683                              devid, NULL, DDI_NOSLEEP, NULL,
31523 31684                              FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31524 31685                              DEVID_IF_KNOWN(devid),
31525 31686                              "driver-assessment", DATA_TYPE_STRING,
31526 31687                              drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
31527 31688                              "op-code", DATA_TYPE_UINT8, op_code,
31528 31689                              "cdb",
31529 31690                              DATA_TYPE_UINT8_ARRAY,
↓ open down ↓ 233 lines elided ↑ open up ↑
31763 31924                  mutex_exit(SD_MUTEX(un));
31764 31925                  rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize,
31765 31926                      &pbsize, SD_PATH_DIRECT);
31766 31927                  mutex_enter(SD_MUTEX(un));
31767 31928  
31768 31929                  if (rval != 0) {
31769 31930                          un->un_phy_blocksize = DEV_BSIZE;
31770 31931                  } else {
31771 31932                          if (!ISP2(pbsize % DEV_BSIZE) || pbsize == 0) {
31772 31933                                  un->un_phy_blocksize = DEV_BSIZE;
31773      -                        } else if (pbsize > un->un_phy_blocksize) {
     31934 +                        } else if (pbsize > un->un_phy_blocksize &&
     31935 +                            !un->un_f_sdconf_phy_blocksize) {
31774 31936                                  /*
31775      -                                 * Don't reset the physical blocksize
31776      -                                 * unless we've detected a larger value.
     31937 +                                 * Reset the physical block size
     31938 +                                 * if we've detected a larger value and
     31939 +                                 * we didn't already set the physical
      31940 +                                 * block size in sd.conf.
31777 31941                                   */
31778 31942                                  un->un_phy_blocksize = pbsize;
31779 31943                          }
31780 31944                  }
31781 31945          }
31782 31946  
31783 31947          for (i = 0; i < sd_flash_dev_table_size; i++) {
31784 31948                  devid_len = (int)strlen(sd_flash_dev_table[i]);
31785 31949                  if (sd_sdconf_id_match(un, sd_flash_dev_table[i], devid_len)
31786 31950                      == SD_SUCCESS) {
31787 31951                          un->un_phy_blocksize = SSD_SECSIZE;
31788 31952                          if (un->un_f_is_solid_state &&
31789 31953                              un->un_phy_blocksize != un->un_tgt_blocksize)
31790 31954                                  un->un_f_enable_rmw = TRUE;
31791 31955                  }
31792 31956          }
31793 31957  
31794 31958          mutex_exit(SD_MUTEX(un));
31795 31959  }
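
The modified hunk above adopts a device-reported physical block size only when it passes the existing sanity check, is larger than the current value, and was not already pinned through sd.conf (un_f_sdconf_phy_blocksize). A standalone sketch of that selection rule; the helper name and parameters are illustrative, not driver code:

#include <stdint.h>

#define	DEV_BSIZE	512	/* stands in for the system default block size */
#define	ISP2(x)		(((x) & ((x) - 1)) == 0)	/* same shape as the driver's macro */

static uint32_t
choose_phy_blocksize(uint32_t current, uint32_t reported, int sdconf_pinned)
{
	/* Reject a bogus report and fall back to the default. */
	if (!ISP2(reported % DEV_BSIZE) || reported == 0)
		return (DEV_BSIZE);
	/* Only grow, and only when sd.conf has not pinned the value. */
	if (reported > current && !sdconf_pinned)
		return (reported);
	return (current);
}
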
    